in_source_id
stringlengths
13
58
issue
stringlengths
3
241k
before_files
listlengths
0
3
after_files
listlengths
0
3
pr_diff
stringlengths
109
107M
streamlit__streamlit-6377
Streamlit logger working on root ### Summary Upon import, Streamlit adds a new **global** log handler that dumps logs in text format. Packages should not be doing that, because it might break the logging convention of the host systems. In our case for example, we dump logs in JSON format and push it all to our logging aggregation system. Streamlit's log message break the format and so it happens that the only service we can't debug properly is Streamlit. ### Steps to reproduce Nothing special, logging comes out of the box. **Expected behavior:** Streamlit should attach its handler to a specific logger namespace (e.g. `streamlit`) instead of attaching it to the root logger. **Actual behavior:** Streamlit attaches a stream handler to the root logger ### Is this a regression? That is, did this use to work the way you expected in the past? no ### Debug info - Streamlit version: 1.1.0 - Python version: 3.8 - Using Conda? PipEnv? PyEnv? Pex? - OS version: Any - Browser version: Irrelevant --- Community voting on feature requests enables the Streamlit team to understand which features are most important to our users. **If you'd like the Streamlit team to prioritize this feature request, please use the 👍 (thumbs up emoji) reaction in response to the initial post.**
[ { "content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2...
[ { "content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2...
diff --git a/lib/streamlit/logger.py b/lib/streamlit/logger.py index 6f91af7432e4..779195acc001 100644 --- a/lib/streamlit/logger.py +++ b/lib/streamlit/logger.py @@ -117,7 +117,7 @@ def get_logger(name: str) -> logging.Logger: return _loggers[name] if name == "root": - logger = logging.getLogger() + logger = logging.getLogger("streamlit") else: logger = logging.getLogger(name) diff --git a/lib/tests/streamlit/delta_generator_test.py b/lib/tests/streamlit/delta_generator_test.py index 65645ff0199c..05c57daacca3 100644 --- a/lib/tests/streamlit/delta_generator_test.py +++ b/lib/tests/streamlit/delta_generator_test.py @@ -55,7 +55,7 @@ class RunWarningTest(unittest.TestCase): @patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=False)) def test_run_warning_presence(self): """Using Streamlit without `streamlit run` produces a warning.""" - with self.assertLogs(level=logging.WARNING) as logs: + with self.assertLogs("streamlit", level=logging.WARNING) as logs: delta_generator._use_warning_has_been_displayed = False st.write("Using delta generator") output = "".join(logs.output) @@ -66,7 +66,7 @@ def test_run_warning_presence(self): def test_run_warning_absence(self): """Using Streamlit through the CLI results in a Runtime being instantiated, so it produces no usage warning.""" - with self.assertLogs(level=logging.WARNING) as logs: + with self.assertLogs("streamlit", level=logging.WARNING) as logs: delta_generator._use_warning_has_been_displayed = False st.write("Using delta generator") # assertLogs is being used as a context manager, but it also checks diff --git a/lib/tests/streamlit/logger_test.py b/lib/tests/streamlit/logger_test.py index 42f83f40c84f..4b5a859d45de 100644 --- a/lib/tests/streamlit/logger_test.py +++ b/lib/tests/streamlit/logger_test.py @@ -56,7 +56,7 @@ def test_set_log_level_by_constant(self): ] for k in data: logger.set_log_level(k) - self.assertEqual(k, logging.getLogger().getEffectiveLevel()) + self.assertEqual(k, 
logging.getLogger("streamlit").getEffectiveLevel()) def test_set_log_level_error(self): """Test streamlit.logger.set_log_level."""
obspy__obspy-2148
FDSN routing client has a locale dependency There's a dummy call to `time.strptime` in the module init that uses locale-specific formatting, which fails under locales that don't use the same names (ie. "Nov" for the 11th month of the year). ``` >>> import locale >>> locale.setlocale(locale.LC_TIME, ('zh_CN', 'UTF-8')) 'zh_CN.UTF-8' >>> from obspy.clients.fdsn.routing.routing_client import RoutingClient Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/workspace/anaconda/envs/django/lib/python2.7/site-packages/obspy/clients/fdsn/__init__.py", line 242, in <module> from .routing.routing_client import RoutingClient # NOQA File "/workspace/anaconda/envs/django/lib/python2.7/site-packages/obspy/clients/fdsn/routing/__init__.py", line 25, in <module> time.strptime("30 Nov 00", "%d %b %y") File "/workspace/anaconda/envs/django/lib/python2.7/_strptime.py", line 478, in _strptime_time return _strptime(data_string, format)[0] File "/workspace/anaconda/envs/django/lib/python2.7/_strptime.py", line 332, in _strptime (data_string, format)) ValueError: time data u'30 Nov 00' does not match format u'%d %b %y' ``` I believe switching this to an ISO8601-like string would be locale-agnostic: time.strptime("2000/11/30", "%Y/%m/%d")
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nobspy.clients.fdsn.routing - Routing services for FDSN web services\n===================================================================\n\n:copyright:\n The ObsPy Development Team (devs@obspy.org)\n Celso G Reyes, 2017\n IRIS-DMC\n:...
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nobspy.clients.fdsn.routing - Routing services for FDSN web services\n===================================================================\n\n:copyright:\n The ObsPy Development Team (devs@obspy.org)\n Celso G Reyes, 2017\n IRIS-DMC\n:...
diff --git a/CHANGELOG.txt b/CHANGELOG.txt index 879f573662a..d9f50e50979 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.txt @@ -22,6 +22,7 @@ and/or `location` are set (see #1810, #2031, #2047). * A few fixes and stability improvements for the mass downloader (see #2081). + * Fixed routing startup error when running under certain locales (see #2147) - obspy.imaging: * Normalize moment tensors prior to plotting in the mopad wrapper to stabilize the algorithm (see #2114, #2125). diff --git a/obspy/clients/fdsn/routing/__init__.py b/obspy/clients/fdsn/routing/__init__.py index 372357b6d5f..ba4f8a7c8d2 100644 --- a/obspy/clients/fdsn/routing/__init__.py +++ b/obspy/clients/fdsn/routing/__init__.py @@ -22,7 +22,7 @@ # # See https://bugs.python.org/issue7980 import time -time.strptime("30 Nov 00", "%d %b %y") +time.strptime("2000/11/30", "%Y/%m/%d") if __name__ == '__main__': # pragma: no cover
cupy__cupy-7448
[RFC] Renaming the development branch to `main` Now that many projects around the scientific Python community converged to use `main` as the default branch for their repositories, I think it could make sense to do that for CuPy too. According to https://github.com/github/renaming, side-effects of renaming a branch are very limited and I believe that it is less likely to cause confusion: - Re-target any open pull requests - Update any draft releases based on the branch - Move any branch protection rules that explicitly reference the old name - Update the branch used to build GitHub Pages, if applicable - Show a notice to repository contributors, maintainers, and admins on the repository homepage with instructions to update local copies of the repository - Show a notice to contributors who `git push` to the old branch - Redirect web requests for the old branch name to the new branch name - Return a "Moved Permanently" response in API requests for the old branch name
[ { "content": "# -*- coding: utf-8 -*-\n#\n# CuPy documentation build configuration file, created by\n# sphinx-quickstart on Sun May 10 12:22:10 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\...
[ { "content": "# -*- coding: utf-8 -*-\n#\n# CuPy documentation build configuration file, created by\n# sphinx-quickstart on Sun May 10 12:22:10 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\...
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 151b0d18c1f..179694d231e 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -4,7 +4,7 @@ on: pull_request_target: types: [closed, labeled] branches: - - master + - main jobs: backport: diff --git a/.github/workflows/flexci.yml b/.github/workflows/flexci.yml index 85b4b9bceb3..30466a47953 100644 --- a/.github/workflows/flexci.yml +++ b/.github/workflows/flexci.yml @@ -2,7 +2,7 @@ name: "FlexCI" on: push: - branches: ["master", "v[0-9]+", "hotfix-*"] + branches: ["main", "v[0-9]+", "hotfix-*"] issue_comment: types: [created] diff --git a/.pfnci/BRANCH b/.pfnci/BRANCH index 1f7391f92b6..ba2906d0666 100644 --- a/.pfnci/BRANCH +++ b/.pfnci/BRANCH @@ -1 +1 @@ -master +main diff --git a/.pfnci/coverage.rst b/.pfnci/coverage.rst index b371ea85285..537480ac972 100644 --- a/.pfnci/coverage.rst +++ b/.pfnci/coverage.rst @@ -3168,13 +3168,13 @@ CuPy CI Test Coverage .. _t21: https://ci.preferred.jp/cupy.linux.cuda120.multi/ .. _d21: linux/tests/cuda120.multi.Dockerfile .. _s21: linux/tests/cuda120.multi.sh -.. _t22: https://jenkins.preferred.jp/job/chainer/job/cupy_master/TEST=rocm-4-3,label=mnj-mi50/ +.. _t22: https://jenkins.preferred.jp/job/chainer/job/cupy_main/TEST=rocm-4-3,label=mnj-mi50/ .. _d22: linux/tests/rocm-4-3.Dockerfile .. _s22: linux/tests/rocm-4-3.sh -.. _t23: https://jenkins.preferred.jp/job/chainer/job/cupy_master/TEST=rocm-5-0,label=mnj-mi50/ +.. _t23: https://jenkins.preferred.jp/job/chainer/job/cupy_main/TEST=rocm-5-0,label=mnj-mi50/ .. _d23: linux/tests/rocm-5-0.Dockerfile .. _s23: linux/tests/rocm-5-0.sh -.. _t24: https://jenkins.preferred.jp/job/chainer/job/cupy_master/TEST=rocm-5-3,label=mnj-mi50/ +.. _t24: https://jenkins.preferred.jp/job/chainer/job/cupy_main/TEST=rocm-5-3,label=mnj-mi50/ .. _d24: linux/tests/rocm-5-3.Dockerfile .. _s24: linux/tests/rocm-5-3.sh .. 
_t25: https://ci.preferred.jp/cupy.linux.cuda-slow/ diff --git a/.pfnci/linux/tests/actions/benchmark.sh b/.pfnci/linux/tests/actions/benchmark.sh index 7e537015a0c..452ec99c58c 100755 --- a/.pfnci/linux/tests/actions/benchmark.sh +++ b/.pfnci/linux/tests/actions/benchmark.sh @@ -12,7 +12,7 @@ python3 prof.py benchmarks/bench_ufunc_cupy.py -c mkdir target mv *.csv target/ -# Run benchmarks for master branch +# Run benchmarks for main branch # Since GCP instance may change and use diff gen processsors/GPUs # we just recompile and run to avoid false errors python3 -m pip uninstall -y cupy @@ -23,10 +23,10 @@ if [[ "${PULL_REQUEST:-}" == "" ]]; then # For branches we compare against the latest release # TODO(ecastill) find a programatical way of doing this # sorting tags, or just checking the dates may mix the - # stable & master branches + # stable & main branches git checkout tags/v11.0.0a2 -b v11.0.0a2 else - git checkout master + git checkout main fi git submodule update --init python3 -m pip install --user -v . diff --git a/.pfnci/matrix.yaml b/.pfnci/matrix.yaml index b10453beb07..248e6a069d2 100644 --- a/.pfnci/matrix.yaml +++ b/.pfnci/matrix.yaml @@ -347,7 +347,7 @@ # ROCm 4.3 | Linux # The lowest ROCm version matrix is intended to cover the lowest supported combination. 
- project: "cupy.linux.rocm-4-3" - _url: "https://jenkins.preferred.jp/job/chainer/job/cupy_master/TEST=rocm-4-3,label=mnj-mi50/" + _url: "https://jenkins.preferred.jp/job/chainer/job/cupy_main/TEST=rocm-4-3,label=mnj-mi50/" tags: null # Jenkins target: "rocm-4-3" system: "linux" @@ -370,7 +370,7 @@ # ROCm 5.0 | Linux - project: "cupy.linux.rocm-5-0" - _url: "https://jenkins.preferred.jp/job/chainer/job/cupy_master/TEST=rocm-5-0,label=mnj-mi50/" + _url: "https://jenkins.preferred.jp/job/chainer/job/cupy_main/TEST=rocm-5-0,label=mnj-mi50/" tags: null # Jenkins target: "rocm-5-0" system: "linux" @@ -394,7 +394,7 @@ # ROCm 5.3 | Linux # The latest ROCm version matrix is intended to cover the highest supported combination. - project: "cupy.linux.rocm-5-3" - _url: "https://jenkins.preferred.jp/job/chainer/job/cupy_master/TEST=rocm-5-3,label=mnj-mi50/" + _url: "https://jenkins.preferred.jp/job/chainer/job/cupy_main/TEST=rocm-5-3,label=mnj-mi50/" tags: null # Jenkins target: "rocm-5-3" system: "linux" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 72f8c9706c8..14b2cd1710d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,8 +8,8 @@ repos: # Git - id: check-added-large-files - id: no-commit-to-branch - name: "ensure no direct commit to master/vXX branch" - args: [--branch, "master", --pattern, "v\\d+"] + name: "ensure no direct commit to main/vXX branch" + args: [--branch, "main", --pattern, "v\\d+"] - id: check-case-conflict # Contents - id: mixed-line-ending diff --git a/README.md b/README.md index a3441672298..d324f2ab898 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -<div align="center"><img src="https://raw.githubusercontent.com/cupy/cupy/master/docs/image/cupy_logo_1000px.png" width="400"/></div> +<div align="center"><img src="https://raw.githubusercontent.com/cupy/cupy/main/docs/image/cupy_logo_1000px.png" width="400"/></div> # CuPy : NumPy & SciPy for GPU @@ -12,7 +12,7 @@ [**Website**](https://cupy.dev/) | 
[**Install**](https://docs.cupy.dev/en/stable/install.html) | [**Tutorial**](https://docs.cupy.dev/en/stable/user_guide/basic.html) -| [**Examples**](https://github.com/cupy/cupy/tree/master/examples) +| [**Examples**](https://github.com/cupy/cupy/tree/main/examples) | [**Documentation**](https://docs.cupy.dev/en/stable/) | [**API Reference**](https://docs.cupy.dev/en/stable/reference/) | [**Forum**](https://groups.google.com/forum/#!forum/cupy) diff --git a/docs/source/conf.py b/docs/source/conf.py index be8f7a583c8..0ab3dd19045 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -28,7 +28,7 @@ rtd_version = os.environ.get('READTHEDOCS_VERSION') if rtd_version == 'latest': - tag = 'master' + tag = 'main' else: tag = 'v{}'.format(__version__) extlinks = { diff --git a/docs/source/contribution.rst b/docs/source/contribution.rst index 9018369b3bd..b11a8422ac8 100644 --- a/docs/source/contribution.rst +++ b/docs/source/contribution.rst @@ -81,24 +81,24 @@ The GitHub milestone is basically used for collecting the issues and PRs resolve Git Branches ~~~~~~~~~~~~ -The ``master`` branch is used to develop pre-release versions. -It means that **alpha, beta, and RC updates are developed at the** ``master`` **branch**. +The ``main`` branch is used to develop pre-release versions. +It means that **alpha, beta, and RC updates are developed at the** ``main`` **branch**. This branch contains the most up-to-date source tree that includes features newly added after the latest major version. The stable version is developed at the individual branch named as ``vN`` where "N" reflects the version number (we call it a *versioned branch*). For example, v1.0.0, v1.0.1, and v1.0.2 will be developed at the ``v1`` branch. **Notes for contributors:** -When you send a pull request, you basically have to send it to the ``master`` branch. +When you send a pull request, you basically have to send it to the ``main`` branch. 
If the change can also be applied to the stable version, a core team member will apply the same change to the stable version so that the change is also included in the next revision update. -If the change is only applicable to the stable version and not to the ``master`` branch, please send it to the versioned branch. +If the change is only applicable to the stable version and not to the ``main`` branch, please send it to the versioned branch. We basically only accept changes to the latest versioned branch (where the stable version is developed) unless the fix is critical. -If you want to make a new feature of the ``master`` branch available in the current stable version, please send a *backport PR* to the stable version (the latest ``vN`` branch). +If you want to make a new feature of the ``main`` branch available in the current stable version, please send a *backport PR* to the stable version (the latest ``vN`` branch). See the next section for details. -*Note: a change that can be applied to both branches should be sent to the* ``master`` *branch.* +*Note: a change that can be applied to both branches should be sent to the* ``main`` *branch.* *Each release of the stable version is also merged to the development version so that the change is also reflected to the next major version.* Feature Backport PRs @@ -134,7 +134,7 @@ First of all, before starting to write any code, do not forget to confirm the fo - Read through the :ref:`coding-guide` and :ref:`testing-guide`. - Check the appropriate branch that you should send the PR following :ref:`contrib-git-branches`. - If you do not have any idea about selecting a branch, please choose the ``master`` branch. + If you do not have any idea about selecting a branch, please choose the ``main`` branch. In particular, **check the branch before writing any code.** The current source tree of the chosen branch is the starting point of your change. @@ -149,7 +149,7 @@ Note that this automatic PR test only includes CPU tests. 
.. note:: - We are also running continuous integration with GPU tests for the ``master`` branch and the versioned branch of the latest major version. + We are also running continuous integration with GPU tests for the ``main`` branch and the versioned branch of the latest major version. Since this service is currently running on our internal server, we do not use it for automatic PR tests to keep the server secure. If you are planning to add a new feature or modify existing APIs, **it is recommended to open an issue and discuss the design first.** @@ -389,7 +389,7 @@ When adding a new feature to the framework, you also need to document it in the If you are unsure about how to fix the documentation, you can submit a pull request without doing so. Reviewers will help you fix the documentation appropriately. -The documentation source is stored under `docs directory <https://github.com/cupy/cupy/tree/master/docs>`_ and written in `reStructuredText <http://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html>`_ format. +The documentation source is stored under `docs directory <https://github.com/cupy/cupy/tree/main/docs>`_ and written in `reStructuredText <http://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html>`_ format. To build the documentation, you need to install `Sphinx <http://www.sphinx-doc.org/>`_:: diff --git a/docs/source/user_guide/kernel.rst b/docs/source/user_guide/kernel.rst index a6814152066..b077886b220 100644 --- a/docs/source/user_guide/kernel.rst +++ b/docs/source/user_guide/kernel.rst @@ -381,7 +381,7 @@ It may be important to note that this dedicated memory bank is not shared with t For now, CuPy offers no helper routines to create user defined composite types. Such composite types can however be built recursively using NumPy dtype `offsets` and `itemsize` capabilities, -see `cupy/examples/custum_struct <https://github.com/cupy/cupy/tree/master/examples/custom_struct>`_ for examples of advanced usage. 
+see `cupy/examples/custum_struct <https://github.com/cupy/cupy/tree/main/examples/custom_struct>`_ for examples of advanced usage. .. warning:: You cannot directly pass static arrays as kernel arguments with the ``type arg[N]`` syntax where N is a compile time constant. The signature of ``__global__ void kernel(float arg[5])`` is seen as ``__global__ void kernel(float* arg)`` by the compiler. If you want to pass five floats to the kernel by value you need to define a custom structure ``struct float5 { float val[5]; };`` and modify the kernel signature to ``__global__ void kernel(float5 arg)``.
PrefectHQ__prefect-2056
AuthorizationError when watching logs from CLI When running with `prefect run cloud --logs`, after a few minutes I see the following error: ``` prefect.utilities.exceptions.AuthorizationError: [{'message': 'AuthenticationError', 'locations': [], 'path': ['flow_run'], 'extensions': {'code': 'UNAUTHENTICATED'}}] ``` The run itself succeeds but the logs stop at that point, so I guess the token is initially valid but just expires...? cc @joshmeek @cicdw
[ { "content": "import json\nimport time\n\nimport click\nfrom tabulate import tabulate\n\nfrom prefect.client import Client\nfrom prefect.utilities.graphql import EnumValue, with_args\n\n\n@click.group(hidden=True)\ndef run():\n \"\"\"\n Run Prefect flows.\n\n \\b\n Usage:\n $ prefect run [STO...
[ { "content": "import json\nimport time\n\nimport click\nfrom tabulate import tabulate\n\nfrom prefect.client import Client\nfrom prefect.utilities.graphql import EnumValue, with_args\n\n\n@click.group(hidden=True)\ndef run():\n \"\"\"\n Run Prefect flows.\n\n \\b\n Usage:\n $ prefect run [STO...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 2669873011ce..72e62747e516 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ These changes are available in the [master branch](https://github.com/PrefectHQ/ ### Fixes - Ensure microseconds are respected on `start_date` provided to CronClock - [#2031](https://github.com/PrefectHQ/prefect/pull/2031) +- Fix duplicate Client connections when using `--logs` flag from `run cloud` CLI command - [#2056](https://github.com/PrefectHQ/prefect/pull/2056) ### Deprecations diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py index 359394a650c1..19c3f8d15e85 100644 --- a/src/prefect/cli/run.py +++ b/src/prefect/cli/run.py @@ -202,7 +202,7 @@ def cloud( } while True: - result = Client().graphql(query) + result = client.graphql(query) flow_run = result.data.flow_run if not flow_run:
django-cms__django-cms-3415
Groups could not be deleted if custom user model is used If a custom user model is used one can't delete groups because in the pre_delete signal the user permissions are cleared. The users are accessed by their reversed descriptor but in the case of a custom user model this is not always called user_set, so there is an attribute error raised. def pre_delete_group(instance, **kwargs): for user in instance.user_set.all(): clear_user_permission_cache(user)
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom cms.cache.permissions import clear_user_permission_cache\nfrom cms.models import PageUser, PageUserGroup\nfrom cms.utils.compat.dj import user_related_name\nfrom menus.menu_pool import menu_pool\n\n\ndef post_save_user(instance, raw, created, **kwargs):\n \"\"\"S...
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom cms.cache.permissions import clear_user_permission_cache\nfrom cms.models import PageUser, PageUserGroup\nfrom cms.utils.compat.dj import user_related_name\nfrom menus.menu_pool import menu_pool\n\n\ndef post_save_user(instance, raw, created, **kwargs):\n \"\"\"S...
diff --git a/cms/signals/permissions.py b/cms/signals/permissions.py index 9d306bbd1a0..2a8f7075612 100644 --- a/cms/signals/permissions.py +++ b/cms/signals/permissions.py @@ -58,7 +58,8 @@ def pre_save_group(instance, raw, **kwargs): def pre_delete_group(instance, **kwargs): - for user in instance.user_set.all(): + user_set = getattr(instance, user_related_name) + for user in user_set.all(): clear_user_permission_cache(user)
networkx__networkx-4431
Documentation: Make classes AtlasView et. al. from networkx/classes/coreviews.py accessible from documentation Lest I seem ungrateful, I like networkx a lot, and rely on it for two of my main personal projects [fake-data-for-learning](https://github.com/munichpavel/fake-data-for-learning) and the WIP [clovek-ne-jezi-se](https://github.com/munichpavel/clovek-ne-jezi-se). As I was trying to understand `AtlasView`s, I could find only examples in the documentation (see [this search](https://networkx.org/documentation/stable//search.html?q=AtlasView&check_keywords=yes&area=default#)), none of which pointed to the (well-documented) source code [networkx/classes/coreviews.py](https://github.com/networkx/networkx/blob/master/networkx/classes/coreviews.py). I think the fix should just be a matter of tweaking how you have configured Sphinx to run.
[ { "content": "\"\"\"\n\"\"\"\nimport warnings\nfrom collections.abc import Mapping\n\n__all__ = [\n \"AtlasView\",\n \"AdjacencyView\",\n \"MultiAdjacencyView\",\n \"UnionAtlas\",\n \"UnionAdjacency\",\n \"UnionMultiInner\",\n \"UnionMultiAdjacency\",\n \"FilterAtlas\",\n \"FilterAdja...
[ { "content": "\"\"\"Views of core data structures such as nested Mappings (e.g. dict-of-dicts).\nThese ``Views`` often restrict element access, with either the entire view or\nlayers of nested mappings being read-only.\n\"\"\"\nimport warnings\nfrom collections.abc import Mapping\n\n__all__ = [\n \"AtlasView...
diff --git a/doc/reference/classes/index.rst b/doc/reference/classes/index.rst index 0747795410c..acd9e259099 100644 --- a/doc/reference/classes/index.rst +++ b/doc/reference/classes/index.rst @@ -59,6 +59,25 @@ Graph Views subgraph_view reverse_view +Core Views +========== + +.. automodule:: networkx.classes.coreviews +.. autosummary:: + :toctree: generated/ + + AtlasView + AdjacencyView + MultiAdjacencyView + UnionAtlas + UnionAdjacency + UnionMultiInner + UnionMultiAdjacency + FilterAtlas + FilterAdjacency + FilterMultiInner + FilterMultiAdjacency + Filters ======= diff --git a/networkx/classes/coreviews.py b/networkx/classes/coreviews.py index f824e45391e..61a0a768d70 100644 --- a/networkx/classes/coreviews.py +++ b/networkx/classes/coreviews.py @@ -1,4 +1,6 @@ -""" +"""Views of core data structures such as nested Mappings (e.g. dict-of-dicts). +These ``Views`` often restrict element access, with either the entire view or +layers of nested mappings being read-only. """ import warnings from collections.abc import Mapping
vyperlang__vyper-3202
`pc_pos_map` for small methods is empty ### Version Information * vyper Version (output of `vyper --version`): 0.3.7 * OS: osx * Python Version (output of `python --version`): 3.10.4 ### Bug ``` (vyper) ~/vyper $ cat tmp/baz.vy @external def foo(): pass (vyper) ~/vyper $ vyc -f source_map tmp/baz.vy {"breakpoints": [], "error_map": {"51": "fallback function"}, "pc_breakpoints": [], "pc_jump_map": {"0": "-", "7": "-", "11": "-", "12": "-", "23": "-", "34": "-", "42": "-", "44": "-", "46": "-", "52": "-"}, "pc_pos_map": {}, "pc_pos_map_compressed": "-1:-1:0:-;;;;:::-;;:::-;:::-;;;;;;;:::-;;;;;:::-;;;;;:::-;;:::-;;:::-;;;;:::-;;;"} ``` pc_pos_map should not be empty.
[ { "content": "from typing import Any, List\n\nimport vyper.utils as util\nfrom vyper.address_space import CALLDATA, DATA, MEMORY\nfrom vyper.ast.signatures.function_signature import FunctionSignature, VariableRecord\nfrom vyper.codegen.abi_encoder import abi_encoding_matches_vyper\nfrom vyper.codegen.context im...
[ { "content": "from typing import Any, List\n\nimport vyper.utils as util\nfrom vyper.address_space import CALLDATA, DATA, MEMORY\nfrom vyper.ast.signatures.function_signature import FunctionSignature, VariableRecord\nfrom vyper.codegen.abi_encoder import abi_encoding_matches_vyper\nfrom vyper.codegen.context im...
diff --git a/vyper/codegen/function_definitions/external_function.py b/vyper/codegen/function_definitions/external_function.py index 3f0d89c4d6..06d2946558 100644 --- a/vyper/codegen/function_definitions/external_function.py +++ b/vyper/codegen/function_definitions/external_function.py @@ -214,4 +214,4 @@ def generate_ir_for_external_function(code, sig, context, skip_nonpayable_check) # TODO rethink this / make it clearer ret[-1][-1].append(func_common_ir) - return IRnode.from_list(ret) + return IRnode.from_list(ret, source_pos=getpos(sig.func_ast_code))
dbt-labs__dbt-core-2599
yaml quoting not working with NativeEnvironment jinja evaluator ### Describe the bug dbt's NativeEnvironment introduced a functional change to how Jinja strings are evaluated. In dbt v0.17.0, a schema test can no longer be configured with a quoted column name. ### Steps To Reproduce ``` # schema.yml version: 2 models: - name: debug columns: - name: MyId quote: true tests: - relationships: to: ref('debug') field: '"MyId"' ``` ``` -- models/debug.sql select 1 as "MyId" ``` **Results:** ``` Database Error in test relationships_debug__MyId____MyId___ref_debug_ (models/schema.yml) column "myid" does not exist LINE 12: select MyId as id from "analytics"."test_schema"."debug" ^ HINT: Perhaps you meant to reference the column "debug.MyId" or the column "child.id". compiled SQL at target/compiled/neondwh/models/schema.yml/schema_test/relationships_debug__MyId____MyId___ref_debug_.sql ``` ### Expected behavior I would expect the yaml/jinja string `'"MyId"'` to be resolved to the string `"MyId"`, not `MyId`. **The output of `dbt --version`:** ``` dbt v0.17.0 ``` **The operating system you're using:** macOS **The output of `python --version`:** 3.7.7 ### Additional context Using `Jinja2==2.11.2`
[ { "content": "import codecs\nimport linecache\nimport os\nimport re\nimport tempfile\nimport threading\nfrom ast import literal_eval\nfrom contextlib import contextmanager\nfrom itertools import chain, islice\nfrom typing import (\n List, Union, Set, Optional, Dict, Any, Iterator, Type, NoReturn, Tuple\n)\n\...
[ { "content": "import codecs\nimport linecache\nimport os\nimport re\nimport tempfile\nimport threading\nfrom ast import literal_eval\nfrom contextlib import contextmanager\nfrom itertools import chain, islice\nfrom typing import (\n List, Union, Set, Optional, Dict, Any, Iterator, Type, NoReturn, Tuple\n)\n\...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 91a5c7833b7..c7b4c5dbc9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,11 @@ ## dbt 0.17.1 (Release TBD) ### Fixes +- dbt native rendering now avoids turning quoted strings into unquoted strings ([#2597](https://github.com/fishtown-analytics/dbt/issues/2597), [#2599](https://github.com/fishtown-analytics/dbt/pull/2599)) - Hash name of local packages ([#2600](https://github.com/fishtown-analytics/dbt/pull/2600)) -## dbt 0.17.1rc2 (June 25, 2020) - +## dbt 0.17.1rc2 (June 25, 2020) ### Fixes - dbt config-version: 2 now properly defers rendering `+pre-hook` and `+post-hook` fields. ([#2583](https://github.com/fishtown-analytics/dbt/issues/2583), [#2854](https://github.com/fishtown-analytics/dbt/pull/2854)) diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py index 6138e8902c8..5ebb206042f 100644 --- a/core/dbt/clients/jinja.py +++ b/core/dbt/clients/jinja.py @@ -133,6 +133,10 @@ def quoted_native_concat(nodes): except (ValueError, SyntaxError, MemoryError): return raw + # if it was a str and it still is a str, return it as-is. 
+ if isinstance(result, str): + result = raw + return result diff --git a/test/unit/test_jinja.py b/test/unit/test_jinja.py index 491348de914..b3d273f4b65 100644 --- a/test/unit/test_jinja.py +++ b/test/unit/test_jinja.py @@ -1,4 +1,6 @@ +import pytest import unittest +import yaml from dbt.clients.jinja import get_rendered from dbt.clients.jinja import get_template @@ -413,3 +415,63 @@ def test_if_endfor_newlines(self): ''' +native_expected_behaviors = [ + # strings + ('''foo: bar''', 'bar'), + ('''foo: "bar"''', 'bar'), + ('''foo: "'bar'"''', "'bar'"), + ("""foo: '"bar"'""", '"bar"'), + # ints + ('''foo: 1''', 1), + ('''foo: "1"''', 1), + ('''foo: "'1'"''', "'1'"), + ('''foo: "{{ 1 }}"''', 1), + ('''foo: "{{ '1' }}"''', 1), + ('''foo: "'{{ 1 }}'"''', "'1'"), + ('''foo: "'{{ '1' }}'"''', "'1'"), + ('''foo: "{{ 1 | as_text }}"''', '1'), + ('''foo: "{{ '1' | as_text }}"''', '1'), + # booleans. + # Note the discrepancy with true vs True: `true` is recognized by jinja but + # not literal_eval, but `True` is recognized by ast.literal_eval. + # For extra fun, yaml recognizes both. 
+ ('''foo: "{{ true }}"''', True), + ('''foo: "{{ 'true' }}"''', 'true'), + ('''foo: "'{{ true }}'"''', "'True'"), + ('''foo: "{{ true | as_text }}"''', "True"), # true -> boolean True -> text -> str(True) -> 'True' + ('''foo: "{{ 'true' | as_text }}"''', "true"), # 'true' -> string 'true' -> text -> str('true') -> 'true' + ('''foo: "{{ True }}"''', True), + ('''foo: "{{ 'True' }}"''', True), + ('''foo: "'{{ True }}'"''', "'True'"), + ('''foo: "{{ True | as_text }}"''', "True"), # True -> string 'True' -> text -> str('True') -> 'True' + ('''foo: "{{ 'True' | as_text }}"''', "True"), # 'True' -> string 'True' -> text -> str('True') -> 'True' + ('''foo: yes''', True), # yaml turns 'yes' into a boolean true + ('''foo: "yes"''', "yes"), + # concatenation + ('''foo: "{{ a_int + 100 }}"''', 200), + ('''foo: "{{ a_str ~ 100 }}"''', 100100), + ('''foo: "{{ a_int ~ 100 }}"''', 100100), + ('''foo: "{{ a_str }}{{ a_str }}"''', 100100), + ('''foo: "{{ a_int }}{{ a_int }}"''', 100100), + ('''foo: "'{{ a_int }}{{ a_int }}'"''', "'100100'"), + +] + + +def expected_id(arg): + if isinstance(arg, list): + return '_'.join(arg) + + +@pytest.mark.parametrize( + 'inputvalue,expected', native_expected_behaviors, ids=expected_id +) +def test_native_rendering(inputvalue, expected): + # this test is pretty useless without preprocessing things in yaml. + value = yaml.safe_load(inputvalue)['foo'] + ctx = { + 'a_str': '100', + 'a_int': 100, + 'b_str': 'hello' + } + assert get_rendered(value, ctx, native=True) == expected
ansible__ansible-42557
The ios_linkagg search for interfaces may return wrongs interfaces name <!--- Verify first that your issue/request is not already reported on GitHub. THIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED. Also test if the latest release, and devel branch are affected too. ALWAYS add information AFTER (OUTSIDE) these html comments. Otherwise it may end up being automatically closed by our bot. --> ##### SUMMARY <!--- Explain the problem briefly --> We are trying to create a Port-channel with the ios_linkagg module, we found the way the interfaces are parsed with the regexp seems wrong, and it takes wrong interface name in other configuration section than the parent section. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Insert, BELOW THIS COMMENT, the name of the module, plugin, task or feature. Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path--> ios_linkagg ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` ansible 2.5.3 config file = /local/home/ta-admin-ng5898b/ansible.cfg configured module search path = [u'/local/home/ta-admin-ng5898b/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /dns/development/ctrebuchet_sandbox/zion_ansible/lib/python2.7/site-packages/ansible executable location = /dns/development/ctrebuchet_sandbox/zion_ansible/bin/ansible python version = 2.7.8 (default, Oct 9 2014, 10:48:46) [GCC 4.4.7 20120313 (Red Hat 4.4.7-4)] ``` ##### CONFIGURATION <!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of "ansible-config dump --only-changed" Otherwise, mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables).--> ##### OS / ENVIRONMENT <!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are managing, or say "N/A" for anything 
that is not platform-specific. Also mention the specific version of what you are trying to control, e.g. if this is a network bug the version of firmware on the network device.--> Red Hat Enterprise Linux Server release 6.9 (Santiago) ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> <!--- Paste example playbooks or commands between quotes below --> - Given a configuration like below: ```bash ! interface TenGigabitEthernet1/5/1 no switchport no ip address no cdp enable channel-group 1 mode on ! interface TenGigabitEthernet1/5/2 no switchport no ip address no cdp enable channel-group 1 mode on ! interface TenGigabitEthernet1/5/3 no switchport no ip address no cdp enable dual-active fast-hello ! interface TenGigabitEthernet1/5/4 description Link to m880gbca1 no switchport mtu 9216 no ip address logging event link-status logging event bundle-status channel-group 11 mode active ! interface TenGigabitEthernet1/5/5 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/6 no switchport no ip address logging event link-status logging event bundle-status shutdown ! interface TenGigabitEthernet1/5/7 no switchport no ip address logging event link-status logging event bundle-status shutdown ! interface TenGigabitEthernet1/5/8 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/9 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/10 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/11 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/12 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/13 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/14 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/15 no switchport no ip address shutdown ! interface TenGigabitEthernet1/5/16 no switchport no ip address shutdown ! 
interface mgmt0 ip address 10.126.127.51 255.255.255.0 ! interface TenGigabitEthernet2/5/1 no switchport no ip address no cdp enable channel-group 2 mode on ! interface TenGigabitEthernet2/5/2 no switchport no ip address no cdp enable channel-group 2 mode on ! interface TenGigabitEthernet2/5/3 no switchport no ip address no cdp enable dual-active fast-hello ! interface TenGigabitEthernet2/5/4 description Link to m880gbca1 no switchport mtu 9216 no ip address logging event link-status logging event bundle-status channel-group 11 mode active ! interface TenGigabitEthernet2/5/5 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/6 no switchport no ip address logging event link-status logging event bundle-status shutdown ! interface TenGigabitEthernet2/5/7 no switchport no ip address logging event link-status logging event bundle-status shutdown ! interface TenGigabitEthernet2/5/8 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/9 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/10 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/11 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/12 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/13 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/14 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/15 no switchport no ip address shutdown ! interface TenGigabitEthernet2/5/16 no switchport no ip address shutdown ! interface Vlan1 no ip address shutdown ! router ospf 1 router-id 10.126.16.4 passive-interface default no passive-interface Port-channel11 network 0.0.0.0 255.255.255.255 area 0 ! ``` - Given a task like below: ```yaml - name: 4/ create link aggregation group. 
ios_linkagg: group: "{{ item.group }}" state: present loop: "{{ network_interfaces }}" ``` - Given variables like below: ```yaml network_interfaces: - name: "Port-channel100" group: 100 interface_1: "TenGigabitEthernet1/5/6" interface_2: "TenGigabitEthernet2/5/6" - name: "Port-channel101" group: "101" interface_1: "TenGigabitEthernet1/5/7" interface_2: "TenGigabitEthernet2/5/7" ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> - The PO is created ##### ACTUAL RESULTS <!--- What actually happened? If possible run with extra verbosity (-vvvv) --> - The PO is not created <!--- Paste verbatim command output between quotes below --> - When the playbook run we get this error: ```bash An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: expected string or buffer failed: [smm88-mockup-cudi-1.mgt.airbus.corp] (item={'name': 'Port-channel100', 'group': 100, 'interface_1': 'TenGigabitEthernet1/5/6', 'interface_2': 'TenGigabitEthernet2/5/6'}) => changed=false item: group: 100 interface_1: TenGigabitEthernet1/5/6 interface_2: TenGigabitEthernet2/5/6 name: Port-channel100 module_stderr: |- Traceback (most recent call last): File "/tmp/ansible_vTMuhg/ansible_module_ios_linkagg.py", line 315, in <module> main() File "/tmp/ansible_vTMuhg/ansible_module_ios_linkagg.py", line 302, in main have = map_config_to_obj(module) File "/tmp/ansible_vTMuhg/ansible_module_ios_linkagg.py", line 254, in map_config_to_obj obj.update(get_channel(module, config, group)) File "/tmp/ansible_vTMuhg/ansible_module_ios_linkagg.py", line 237, in get_channel channel['mode'] = parse_mode(module, config, group, member) File "/tmp/ansible_vTMuhg/ansible_module_ios_linkagg.py", line 204, in parse_mode match_int = re.findall(r'interface {0}\n'.format(member), body, re.M) File "/usr/lib/python2.7/re.py", line 181, in findall return 
_compile(pattern, flags).findall(string) TypeError: expected string or buffer module_stdout: '' msg: MODULE FAILURE rc: 1 ``` - It seems that changing the regexp at the get_channel function did the tricks https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/ios/ios_linkagg.py#L230 ```python def get_channel(module, config, group): match = re.findall(r'interface (\S+)', config, re.M) def get_channel(module, config, group): match = re.findall(r'^interface (\S+)', config, re.M) ``` - With a "global" findall wrongs interfaces names (default and Port-channel11) are taken from the router ospf section. ```yaml ! router ospf 1 router-id 10.126.16.4 passive-interface default no passive-interface Port-channel11 network 0.0.0.0 255.255.255.255 area 0 ! ``` ```python >>> matches = re.findall(r'interface (\S+)', config, re.M) >>> matches ['Loopback0', 'Loopback1', 'Loopback12008', 'Port-channel1', 'Port-channel2', 'Port-channel11', 'Port-channel12', 'Tunnel1', 'TenGigabitEthernet1/5/1', 'TenGigabitEthernet1/5/2', 'TenGigabitEthernet1/5/3', 'TenGigabitEthernet1/5/4', 'TenGigabitEthernet1/5/5', 'TenGigabitEthernet1/5/6', 'TenGigabitEthernet1/5/7', 'TenGigabitEthernet1/5/8', 'TenGigabitEthernet1/5/9', 'TenGigabitEthernet1/5/10', 'TenGigabitEthernet1/5/11', 'TenGigabitEthernet1/5/12', 'TenGigabitEthernet1/5/13', 'TenGigabitEthernet1/5/14', 'TenGigabitEthernet1/5/15', 'TenGigabitEthernet1/5/16', 'mgmt0', 'TenGigabitEthernet2/5/1', 'TenGigabitEthernet2/5/2', 'TenGigabitEthernet2/5/3', 'TenGigabitEthernet2/5/4', 'TenGigabitEthernet2/5/5', 'TenGigabitEthernet2/5/6', 'TenGigabitEthernet2/5/7', 'TenGigabitEthernet2/5/8', 'TenGigabitEthernet2/5/9', 'TenGigabitEthernet2/5/10', 'TenGigabitEthernet2/5/11', 'TenGigabitEthernet2/5/12', 'TenGigabitEthernet2/5/13', 'TenGigabitEthernet2/5/14', 'TenGigabitEthernet2/5/15', 'TenGigabitEthernet2/5/16', 'Vlan1', 'default', 'Port-channel11', 'mgmt0'] ``` - Changing the regexp to take only line which begin with interface works. 
```python >>> matches = re.findall(r'^interface (\S+)', config, re.M) >>> matches ['Loopback0', 'Loopback1', 'Loopback12008', 'Port-channel1', 'Port-channel2', 'Port-channel11', 'Port-channel12', 'Tunnel1', 'TenGigabitEthernet1/5/1', 'TenGigabitEthernet1/5/2', 'TenGigabitEthernet1/5/3', 'TenGigabitEthernet1/5/4', 'TenGigabitEthernet1/5/5', 'TenGigabitEthernet1/5/6', 'TenGigabitEthernet1/5/7', 'TenGigabitEthernet1/5/8', 'TenGigabitEthernet1/5/9', 'TenGigabitEthernet1/5/10', 'TenGigabitEthernet1/5/11', 'TenGigabitEthernet1/5/12', 'TenGigabitEthernet1/5/13', 'TenGigabitEthernet1/5/14', 'TenGigabitEthernet1/5/15', 'TenGigabitEthernet1/5/16', 'mgmt0', 'TenGigabitEthernet2/5/1', 'TenGigabitEthernet2/5/2', 'TenGigabitEthernet2/5/3', 'TenGigabitEthernet2/5/4', 'TenGigabitEthernet2/5/5', 'TenGigabitEthernet2/5/6', 'TenGigabitEthernet2/5/7', 'TenGigabitEthernet2/5/8', 'TenGigabitEthernet2/5/9', 'TenGigabitEthernet2/5/10', 'TenGigabitEthernet2/5/11', 'TenGigabitEthernet2/5/12', 'TenGigabitEthernet2/5/13', 'TenGigabitEthernet2/5/14', 'TenGigabitEthernet2/5/15', 'TenGigabitEthernet2/5/16', 'Vlan1'] ```
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2017, Ansible by Red Hat, inc\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metada...
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2017, Ansible by Red Hat, inc\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metada...
diff --git a/lib/ansible/modules/network/ios/ios_linkagg.py b/lib/ansible/modules/network/ios/ios_linkagg.py index 840576eee7bd1a..e07c18b0e57dca 100644 --- a/lib/ansible/modules/network/ios/ios_linkagg.py +++ b/lib/ansible/modules/network/ios/ios_linkagg.py @@ -227,7 +227,7 @@ def parse_members(module, config, group): def get_channel(module, config, group): - match = re.findall(r'interface (\S+)', config, re.M) + match = re.findall(r'^interface (\S+)', config, re.M) if not match: return {}
dotkom__onlineweb4-1359
Option to post video in article Make it possible to post video in article from dashboard.
[ { "content": "# -*- encoding: utf-8 -*-\nfrom django import forms\n\nfrom apps.article.models import Article\nfrom apps.dashboard.widgets import DatetimePickerInput, multiple_widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\nfrom taggit.forms import TagWidget\n\n\nclass ArticleForm(forms.Mo...
[ { "content": "# -*- encoding: utf-8 -*-\nfrom django import forms\n\nfrom apps.article.models import Article\nfrom apps.dashboard.widgets import DatetimePickerInput, multiple_widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\nfrom taggit.forms import TagWidget\n\n\nclass ArticleForm(forms.Mo...
diff --git a/apps/article/dashboard/forms.py b/apps/article/dashboard/forms.py index 43ba4ef9a..fed85caa3 100644 --- a/apps/article/dashboard/forms.py +++ b/apps/article/dashboard/forms.py @@ -22,6 +22,7 @@ class Meta(object): 'ingress', 'content', 'image', + 'video', 'published_date', 'authors', 'tags',
Gallopsled__pwntools-1893
'pwn cyclic -o afca' throws a BytesWarning ``` $ pwn cyclic -o afca /Users/heapcrash/pwntools/pwnlib/commandline/cyclic.py:74: BytesWarning: Text is not bytes; assuming ASCII, no guarantees. See https://docs.pwntools.com/#bytes pat = flat(pat, bytes=args.length) 506 ```
[ { "content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport six\nimport string\nimport sys\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nparser = common.parser_comma...
[ { "content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport six\nimport string\nimport sys\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nparser = common.parser_comma...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 462646ddb..1d67a0b6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,12 +64,14 @@ The table below shows which release corresponds to each branch, and what date th - [#1733][1733] Update libc headers -> more syscalls available! - [#1876][1876] add `self.message` and change `sys.exc_type` to `sys.exec_info()` in PwnlibException - [#1877][1877] encoders error message handles when `avoid` is bytes in python3 -- [#1892](1892) Silence SIGPIPE error for "pwn phd" +- [#1892][1892] Silence SIGPIPE error for "pwn phd" +- [#1893][1893] Fix bytes warning in "pwn cyclic" [1733]: https://github.com/Gallopsled/pwntools/pull/1733 [1876]: https://github.com/Gallopsled/pwntools/pull/1876 [1877]: https://github.com/Gallopsled/pwntools/pull/1877 [1892]: https://github.com/Gallopsled/pwntools/pull/1892 +[1893]: https://github.com/Gallopsled/pwntools/pull/1893 ## 4.6.0 (`beta`) diff --git a/pwnlib/commandline/cyclic.py b/pwnlib/commandline/cyclic.py index c0cb19002..9adac3b6c 100644 --- a/pwnlib/commandline/cyclic.py +++ b/pwnlib/commandline/cyclic.py @@ -67,6 +67,9 @@ def main(args): if args.lookup: pat = args.lookup + if six.PY3: + pat = bytes(pat, encoding='utf-8') + try: pat = int(pat, 0) except ValueError:
dynaconf__dynaconf-672
[bug] UnicodeEncodeError upon dynaconf init **Describe the bug** `dynaconf init -f yaml` results in a `UnicodeEncodeError ` **To Reproduce** Steps to reproduce the behavior: 1. `git clone -b dynaconf https://github.com/ebenh/django-flex-user.git` 2. `py -m pipenv install --dev` 3. `py -m pipenv shell` 4. `export DJANGO_SETTINGS_MODULE=test_project.settings` 5. `dynaconf init -f yaml` **Error Message** ``` Traceback (most recent call last): File "C:\Users\eben\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "C:\Users\eben\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code exec(code, run_globals) File "C:\Users\eben\.virtualenvs\django-flex-user-ab_cVlY8\Scripts\dynaconf.exe\__main__.py", line 7, in <module> File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\vendor\click\core.py", line 221, in __call__ def __call__(A,*B,**C):return A.main(*B,**C) File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\vendor\click\core.py", line 205, in main H=E.invoke(F) File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\vendor\click\core.py", line 345, in invoke with C:return F(C.command.invoke(C)) File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\vendor\click\core.py", line 288, in invoke if A.callback is not _A:return ctx.invoke(A.callback,**ctx.params) File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\vendor\click\core.py", line 170, in invoke with G:return A(*B,**E) File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\vendor\click\decorators.py", line 21, in A def A(*A,**B):return f(get_current_context(),*A,**B) File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\cli.py", line 257, in init click.echo("\u2699\ufe0f Configuring your Dynaconf environment") 
File "c:\users\eben\.virtualenvs\django-flex-user-ab_cvly8\lib\site-packages\dynaconf\vendor\click\utils.py", line 82, in echo if A:B.write(A) File "C:\Users\eben\AppData\Local\Programs\Python\Python37\lib\encodings\cp1252.py", line 19, in encode return codecs.charmap_encode(input,self.errors,encoding_table)[0] UnicodeEncodeError: 'charmap' codec can't encode characters in position 0-1: character maps to <undefined> ```
[ { "content": "import importlib\nimport io\nimport os\nimport pprint\nimport sys\nimport warnings\nimport webbrowser\nfrom contextlib import suppress\nfrom pathlib import Path\n\nfrom dynaconf import constants\nfrom dynaconf import default_settings\nfrom dynaconf import LazySettings\nfrom dynaconf import loaders...
[ { "content": "import importlib\nimport io\nimport os\nimport pprint\nimport sys\nimport warnings\nimport webbrowser\nfrom contextlib import suppress\nfrom pathlib import Path\n\nfrom dynaconf import constants\nfrom dynaconf import default_settings\nfrom dynaconf import LazySettings\nfrom dynaconf import loaders...
diff --git a/dynaconf/cli.py b/dynaconf/cli.py index 5bb8316d3..5aae070cc 100644 --- a/dynaconf/cli.py +++ b/dynaconf/cli.py @@ -23,6 +23,7 @@ from dynaconf.vendor import click from dynaconf.vendor import toml +os.environ["PYTHONIOENCODING"] = "utf-8" CWD = Path.cwd() EXTS = ["ini", "toml", "yaml", "json", "py", "env"]
nyu-mll__jiant-615
${NFS_PROJECT_PREFIX} and ${JIANT_PROJECT_PREFIX} Do we need two separate set of environment variables? We also have ${NFS_DATA_DIR} and ${JIANT_DATA_DIR}. I don't know about potential users of jiant, at least for me, it's pretty confusing.
[ { "content": "\"\"\"Train a multi-task model using AllenNLP\n\nTo debug this, run with -m ipdb:\n\n python -m ipdb main.py --config_file ...\n\"\"\"\n# pylint: disable=no-member\nimport argparse\nimport glob\nimport io\nimport logging as log\nimport os\nimport random\nimport subprocess\nimport sys\nimport ti...
[ { "content": "\"\"\"Train a multi-task model using AllenNLP\n\nTo debug this, run with -m ipdb:\n\n python -m ipdb main.py --config_file ...\n\"\"\"\n# pylint: disable=no-member\nimport argparse\nimport glob\nimport io\nimport logging as log\nimport os\nimport random\nimport subprocess\nimport sys\nimport ti...
diff --git a/Dockerfile b/Dockerfile index 82304093d..ea07afb76 100644 --- a/Dockerfile +++ b/Dockerfile @@ -92,5 +92,4 @@ ENV PATH_TO_COVE "$JSALT_SHARE_DIR/cove" ENV ELMO_SRC_DIR "$JSALT_SHARE_DIR/elmo" # Set these manually with -e or via Kuberentes config YAML. -# ENV NFS_PROJECT_PREFIX "/nfs/jsalt/exp/docker" # ENV JIANT_PROJECT_PREFIX "$NFS_PROJECT_PREFIX" diff --git a/README.md b/README.md index a48ecc28c..51a1c038b 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,6 @@ [![CircleCI](https://circleci.com/gh/nyu-mll/jiant/tree/master.svg?style=svg)](https://circleci.com/gh/nyu-mll/jiant/tree/master) -This repo contains the `jiant` sentence representation learning toolkit created at the [2018 JSALT Workshop](https://www.clsp.jhu.edu/workshops/18-workshop/) by the [General-Purpose Sentence Representation Learning](https://jsalt18-sentence-repl.github.io/) team. It is an extensible platform meant to make it easy to run experiments that involve multitask and transfer learning across sentence-level NLP tasks. - `jiant` is a work-in-progress software toolkit for natural language processing research, designed to facilitate work on multitask learning and transfer learning for sentence understanding tasks. A few things you might want to know about `jiant`: diff --git a/config/edgeprobe_bare.conf b/config/edgeprobe_bare.conf index e2ba73ec0..8f467dfdf 100644 --- a/config/edgeprobe_bare.conf +++ b/config/edgeprobe_bare.conf @@ -6,7 +6,6 @@ // This imports the defaults, which can be overridden below. include "defaults.conf" // relative path to this file -project_dir = ${JIANT_PROJECT_PREFIX} exp_name = "" // configure this run_name = "run" // configure this diff --git a/config/edgeprobe_bert.conf b/config/edgeprobe_bert.conf index 44a51f3d5..67f499bd5 100644 --- a/config/edgeprobe_bert.conf +++ b/config/edgeprobe_bert.conf @@ -14,7 +14,6 @@ // This imports the defaults, which can be overridden below. 
include "defaults.conf" // relative path to this file -project_dir = ${JIANT_PROJECT_PREFIX} exp_name = "" // configure this run_name = "run" // default diff --git a/config/edgeprobe_cove.conf b/config/edgeprobe_cove.conf index e7fc520ea..4d245aeaa 100644 --- a/config/edgeprobe_cove.conf +++ b/config/edgeprobe_cove.conf @@ -6,7 +6,6 @@ // This imports the defaults, which can be overridden below. include "defaults.conf" // relative path to this file -project_dir = ${JIANT_PROJECT_PREFIX} exp_name = "" // configure this run_name = "run" // configure this @@ -27,7 +26,6 @@ patience = 20 // vals until early-stopping tokenizer = "MosesTokenizer" cove = 1 word_embs = "glove" -word_embs_file = ${GLOVE_EMBS_FILE} elmo = 0 elmo_chars_only = 1 diff --git a/config/edgeprobe_demo.conf b/config/edgeprobe_demo.conf index 212c454f7..8b8588a7c 100644 --- a/config/edgeprobe_demo.conf +++ b/config/edgeprobe_demo.conf @@ -2,10 +2,8 @@ include "defaults.conf" // relative path to this file // write to local storage by default for this demo -project_dir = ${JIANT_PROJECT_PREFIX} exp_name = "edgeprobe-demo" run_name = "run" -global_ro_exp_dir = "/nfs/jsalt/share/exp/demo" reload_tasks = 1 diff --git a/config/edgeprobe_existing.conf b/config/edgeprobe_existing.conf index df85b5672..b16dbfebd 100644 --- a/config/edgeprobe_existing.conf +++ b/config/edgeprobe_existing.conf @@ -9,7 +9,6 @@ // Override paths from params.conf, since these might point to paths on a // different system. -global_ro_exp_dir = "/nfs/jsalt/share/exp/default" project_dir = ${JIANT_PROJECT_PREFIX} data_dir = ${JIANT_DATA_DIR} // required - should point to data on NFS. diff --git a/config/edgeprobe_glove.conf b/config/edgeprobe_glove.conf index 5bd7287ee..baf1f0f27 100644 --- a/config/edgeprobe_glove.conf +++ b/config/edgeprobe_glove.conf @@ -7,7 +7,6 @@ // This imports the defaults, which can be overridden below. 
include "defaults.conf" // relative path to this file -project_dir = ${JIANT_PROJECT_PREFIX} exp_name = "" // configure this run_name = "run" // configure this @@ -28,7 +27,6 @@ patience = 20 // vals until early-stopping tokenizer = "MosesTokenizer" // for consistency with CoVe cove = 0 word_embs = "glove" -word_embs_file = ${GLOVE_EMBS_FILE} elmo = 0 elmo_chars_only = 1 diff --git a/config/edgeprobe_openai.conf b/config/edgeprobe_openai.conf index 8ba23d69b..4c6278f33 100644 --- a/config/edgeprobe_openai.conf +++ b/config/edgeprobe_openai.conf @@ -6,7 +6,6 @@ // This imports the defaults, which can be overridden below. include "defaults.conf" // relative path to this file -project_dir = ${JIANT_PROJECT_PREFIX} exp_name = "" // configure this run_name = "run" // default diff --git a/config/edgeprobe_train.conf b/config/edgeprobe_train.conf index 055b9a734..62a160321 100644 --- a/config/edgeprobe_train.conf +++ b/config/edgeprobe_train.conf @@ -6,7 +6,6 @@ // This imports the defaults, which can be overridden below. 
include "defaults.conf" // relative path to this file -project_dir = ${JIANT_PROJECT_PREFIX} exp_name = "" // configure this run_name = "run" // configure this diff --git a/config/spring19_seminar/bert.conf b/config/spring19_seminar/bert.conf index c1a51eb11..5958c96b0 100644 --- a/config/spring19_seminar/bert.conf +++ b/config/spring19_seminar/bert.conf @@ -2,10 +2,6 @@ include "../final.conf" -// Output path -project_dir = ${JIANT_PROJECT_PREFIX} - - // Optimization batch_size = 16 dropout = 0.1 // following BERT paper @@ -38,4 +34,4 @@ bert_embeddings_mode = "none" // How to handle the embedding layer of the BERT // "none" for only top-layer activation, sep_embs_for_skip = 1 // Skip embedding uses the same embedder object as the original embedding (before skip) elmo = 0 -elmo_chars_only = 0 \ No newline at end of file +elmo_chars_only = 0 diff --git a/gcp/config/jsalt_paths.1.2.sh b/gcp/config/jsalt_paths.1.2.sh index 5f71f2ccd..0cb94d992 100644 --- a/gcp/config/jsalt_paths.1.2.sh +++ b/gcp/config/jsalt_paths.1.2.sh @@ -11,12 +11,10 @@ export JIANT_DATA_DIR="$JSALT_SHARE_DIR/glue_data" # Default experiment directory export JIANT_PROJECT_PREFIX="$HOME/exp" -export NFS_PROJECT_PREFIX="/nfs/jsalt/exp/$HOSTNAME" export GLOVE_EMBS_FILE="$JSALT_SHARE_DIR/glove/glove.840B.300d.txt" export FASTTEXT_EMBS_FILE="$JSALT_SHARE_DIR/fasttext/crawl-300d-2M.vec" export WORD_EMBS_FILE="$FASTTEXT_EMBS_FILE" -export FASTTEXT_MODEL_FILE="." 
# not yet supported export PATH_TO_COVE="$JSALT_SHARE_DIR/cove" diff --git a/gcp/kubernetes/run_batch.sh b/gcp/kubernetes/run_batch.sh index bb3c030c7..73b6b7853 100755 --- a/gcp/kubernetes/run_batch.sh +++ b/gcp/kubernetes/run_batch.sh @@ -93,8 +93,6 @@ spec: - mountPath: /nfs/jsalt name: nfs-jsalt env: - - name: NFS_PROJECT_PREFIX - value: ${PROJECT_DIR} - name: JIANT_PROJECT_PREFIX value: ${PROJECT_DIR} - name: NOTIFY_EMAIL diff --git a/main.py b/main.py index 6a17d40f6..e1c7289cc 100644 --- a/main.py +++ b/main.py @@ -39,6 +39,7 @@ def handle_arguments(cl_arguments): "-c", type=str, nargs="+", + default="config/defaults.conf", help="Config file(s) (.conf) for model parameters.", ) parser.add_argument( diff --git a/path_config.sh b/path_config.sh index 9912db2b6..14ed47718 100644 --- a/path_config.sh +++ b/path_config.sh @@ -17,7 +17,6 @@ # Example of custom paths for a local installation: # export JIANT_PROJECT_PREFIX=/Users/Bowman/Drive/JSALT # export JIANT_DATA_DIR=/Users/Bowman/Drive/JSALT/jiant/glue_data -# export WORD_EMBS_FILE=~/glove.840B.300d.txt # The base directory for model output. export JIANT_PROJECT_PREFIX=~ @@ -29,7 +28,10 @@ export JIANT_DATA_DIR=~ # A word embeddings file in GloVe/fastText format. Not used when using # ELMo, GPT, or BERT. To use more than one different set of embeddings # in your environment, create an additional environment variable (like) -# FASTTEXT_WORD_EMBS_FILE, and reference it in each of your config files -# with a line like: +# FASTTEXT_WORD_EMBS_FILE, and reference it in each of your .conf config +# files with a line like: # word_embs_file = ${FASTTEXT_WORD_EMBS_FILE} export WORD_EMBS_FILE=None + +# Optional: +# echo "Loaded custom config." diff --git a/scripts/demo.with_docker.sh b/scripts/demo.with_docker.sh index 786e4aff3..9253d7df2 100755 --- a/scripts/demo.with_docker.sh +++ b/scripts/demo.with_docker.sh @@ -44,7 +44,6 @@ COMMAND+=( -o "exp_name=jiant-demo" ) # Run demo.conf in the docker container. 
sudo docker run --runtime=nvidia --rm -v "$TEMP_DIR:/nfs/jsalt" \ -v "$JIANT_PATH:/share/jiant" \ - -e "NFS_PROJECT_PREFIX=/nfs/jsalt/exp" \ -e "JIANT_PROJECT_PREFIX=/nfs/jsalt/exp" \ -e "PYTORCH_PRETRAINED_BERT_CACHE=/nfs/jsalt/share/bert_cache" \ -e "ELMO_SRC_DIR=" \
holoviz__holoviews-5436
Game of Life example needs update ### Package versions ``` panel = 0.13.1 holoviews = 1.15.0 bokeh = 2.4.3 ``` ### Bug description In the Game of Life example in the holoviews documentation (https://holoviews.org/gallery/apps/bokeh/game_of_life.html) I needed to update the second to last line ```python panel.add_periodic_callback(advance, 50) ``` to ```python pn.state.add_periodic_callback(advance, period=50) # 50 msec # note: the `period=` is not necessary, but I think it adds clarity ``` It seems this is due to a change in the `panel` interface.
[ { "content": "import numpy as np\nimport holoviews as hv\nimport panel as pn\n\nfrom holoviews import opts\nfrom holoviews.streams import Tap, Counter, DoubleTap\nfrom scipy.signal import convolve2d\n\nhv.extension('bokeh')\n\ndiehard = [[0, 0, 0, 0, 0, 0, 1, 0],\n [1, 1, 0, 0, 0, 0, 0, 0],\n ...
[ { "content": "import numpy as np\nimport holoviews as hv\nimport panel as pn\n\nfrom holoviews import opts\nfrom holoviews.streams import Tap, Counter, DoubleTap\nfrom scipy.signal import convolve2d\n\nhv.extension('bokeh')\n\ndiehard = [[0, 0, 0, 0, 0, 0, 1, 0],\n [1, 1, 0, 0, 0, 0, 0, 0],\n ...
diff --git a/examples/gallery/apps/bokeh/game_of_life.py b/examples/gallery/apps/bokeh/game_of_life.py index 62ddf783be..37d0088f1e 100644 --- a/examples/gallery/apps/bokeh/game_of_life.py +++ b/examples/gallery/apps/bokeh/game_of_life.py @@ -91,6 +91,6 @@ def reset_data(x, y): def advance(): counter.event(counter=counter.counter+1) -panel.add_periodic_callback(advance, 50) +pn.state.add_periodic_callback(advance, period=50, start=False) panel.servable('Game of Life')
buildbot__buildbot-986
Remove googlecode This fixes the following test on Python 3: ``` trial buildbot.test.unit.test_www_hooks_googlecode ```
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n...
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n...
diff --git a/master/buildbot/status/web/waterfall.py b/master/buildbot/status/web/waterfall.py index 698a2ebf5f2a..6ffd5fd4e1bf 100644 --- a/master/buildbot/status/web/waterfall.py +++ b/master/buildbot/status/web/waterfall.py @@ -854,4 +854,4 @@ def phase2(self, request, sourceNames, timestamps, eventGrid, if strip[i]: strip[i] = strip[i].td() - return dict(grid=grid, gridlen=gridlen, no_bubble=noBubble, time=lastDate) + return dict(grid=grid, gridlen=gridlen, no_bubble=noBubble)
microsoft__botbuilder-python-1451
dependecy conflict between botframework 4.11.0 and azure-identity 1.5.0 ## Version 4.11 (also happening with 4.10) ## Describe the bug `botframework-connector == 4.11.0` (current) requires `msal == 1.2.0` `azure-identity == 1.5.0` (current) requires `msal >=1.6.0,<2.0.0` This created a dependency conflict where bot libraries can't coexist in the same program. This used to work a couple of months ago (I bumped into this issue after revisiting some code I had worked on before). ## To Reproduce This is my `requirements.txt` file, just add it and run `pipenv install -r requirements.txt` (versions pinned to : ``` botbuilder-core == 4.11 azure-keyvault-secrets azure-identity == 1.5 botbuilder-ai == 4.11 ``` ## Expected behavior Packages should install without conflict ## Screenshots Extract from the error message `pipenv install` shows: ``` [pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies. First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again. Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation. Hint: try $ pipenv lock --pre if it is a pre-release dependency. ERROR: ERROR: Could not find a version that matches msal<2.0.0,==1.2.0,>=0.4.1,>=1.6.0 Tried: 0.1.0, 0.1.0, 0.2.0, 0.2.0, 0.3.0, 0.3.0, 0.3.1, 0.3.1, 0.4.0, 0.4.0, 0.4.1, 0.4.1, 0.5.0, 0.5.0, 0.5.1, 0.5.1, 0.6.0, 0.6.0, 0.6.1, 0.6.1, 0.7.0, 0.7.0, 0.8.0, 0.8.0, 0.8.0, 0.9.0, 0.9.0, 1.0.0, 1.0.0, 1.1.0, 1.1.0, 1.2.0, 1.2.0, 1.3.0, 1.3.0, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.4.2, 1.4.2, 1.4.3, 1.4.3, 1.5.0, 1.5.0, 1.5.1, 1.5.1, 1.6.0, 1.6.0, 1.7.0, 1.7.0, 1.8.0, 1.8.0 There are incompatible versions in the resolved dependencies. 
``` Relevant extract from the output of `pipenv graph` as per the suggestion above: ``` azure-identity==1.5.0 - msal [required: >=1.6.0,<2.0.0, installed: 1.2.0] - msal-extensions [required: ~=0.3.0, installed: 0.3.0] - msal [required: >=0.4.1,<2.0.0, installed: 1.2.0] azure-keyvault-secrets==4.2.0 botbuilder-ai==4.11.0 - botbuilder-core [required: ==4.11.0, installed: 4.11.0] - botframework-connector [required: ==4.11.0, installed: 4.11.0] - msal [required: ==1.2.0, installed: 1.2.0] ``` ## Additional context This issue was also reported in [botbuilder-samples repo's issue 2978](https://github.com/microsoft/BotBuilder-Samples/issues/2978)
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"msrest==0....
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"msrest==0....
diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py index 04bf09257..09a82d646 100644 --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -12,7 +12,7 @@ "PyJWT==1.5.3", "botbuilder-schema==4.12.0", "adal==1.2.1", - "msal==1.2.0", + "msal==1.6.0", ] root = os.path.abspath(os.path.dirname(__file__))
pypi__warehouse-3598
Set samesite=lax on session cookies This is a strong defense-in-depth mechanism for protecting against CSRF. It's currently only respected by Chrome, but Firefox will add it as well.
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softw...
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softw...
diff --git a/requirements/main.txt b/requirements/main.txt index 9e51b303408e..defd6b964f59 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -464,9 +464,9 @@ vine==1.1.4 \ webencodings==0.5.1 \ --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 -WebOb==1.8.0 \ - --hash=sha256:ae809c05b667c3457a2937cdb4a7c7f07e90f26c651a340d37fdd1d5cf1fed27 \ - --hash=sha256:6fca7aa39bd2f6d2ff71f15a22223ff256c91f60b1ab52dac0ab38dc6ea9142f +WebOb==1.7.4 \ + --hash=sha256:63f4220492476c5c716b615baed7bf3d27040b3105014375787160dee0943115 \ + --hash=sha256:8d10af182fda4b92193113ee1edeb687ab9dc44336b37d6804e413f0240d40d9 whitenoise==3.3.1 \ --hash=sha256:15f43b2e701821b95c9016cf469d29e2a546cb1c7dead584ba82c36f843995cf \ --hash=sha256:9d81515f2b5b27051910996e1e860b1332e354d9e7bcf30c98f21dcb6713e0dd diff --git a/requirements/tests.txt b/requirements/tests.txt index f8094f4a357c..e701d7ddbb66 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -224,9 +224,9 @@ urllib3==1.22 \ waitress==1.1.0 \ --hash=sha256:40b0f297a7f3af61fbfbdc67e59090c70dc150a1601c39ecc9f5f1d283fb931b \ --hash=sha256:d33cd3d62426c0f1b3cd84ee3d65779c7003aae3fc060dee60524d10a57f05a9 -WebOb==1.8.0 \ - --hash=sha256:ae809c05b667c3457a2937cdb4a7c7f07e90f26c651a340d37fdd1d5cf1fed27 \ - --hash=sha256:6fca7aa39bd2f6d2ff71f15a22223ff256c91f60b1ab52dac0ab38dc6ea9142f +WebOb==1.7.4 \ + --hash=sha256:63f4220492476c5c716b615baed7bf3d27040b3105014375787160dee0943115 \ + --hash=sha256:8d10af182fda4b92193113ee1edeb687ab9dc44336b37d6804e413f0240d40d9 WebTest==2.0.29 \ --hash=sha256:9136514159a2e76a21751bf4ab5d3371e539c8ada8b950fcf68e307d9e584a07 \ --hash=sha256:dbbccc15ac2465066c95dc3a7de0d30cde3791e886ccbd7e91d5d2a2580c922d diff --git a/tests/unit/test_sessions.py b/tests/unit/test_sessions.py index 8bc57b3c27b0..0baee1c117b5 100644 --- a/tests/unit/test_sessions.py +++ 
b/tests/unit/test_sessions.py @@ -497,7 +497,7 @@ def test_invalidated_deletes_save_non_secure(self, monkeypatch, ) response = pretend.stub( set_cookie=pretend.call_recorder( - lambda cookie, data, max_age, httponly, secure, samesite: None + lambda cookie, data, max_age, httponly, secure: None ) ) session_factory._process_response(pyramid_request, response) @@ -532,7 +532,6 @@ def test_invalidated_deletes_save_non_secure(self, monkeypatch, max_age=12 * 60 * 60, httponly=True, secure=False, - samesite=b"lax", ), ] diff --git a/tests/unit/utils/test_compression.py b/tests/unit/utils/test_compression.py index ca30c68f575f..8fba42cc342e 100644 --- a/tests/unit/utils/test_compression.py +++ b/tests/unit/utils/test_compression.py @@ -14,7 +14,7 @@ import pytest from pyramid.response import Response -from webob.acceptparse import AcceptEncodingValidHeader, AcceptEncodingNoHeader +from webob.acceptparse import Accept, NoAccept from webob.response import gzip_app_iter from warehouse.utils.compression import _compressor as compressor @@ -54,7 +54,7 @@ def test_bails_if_content_encoding(self): ], ) def test_sets_vary(self, vary, expected): - request = pretend.stub(accept_encoding=AcceptEncodingNoHeader()) + request = pretend.stub(accept_encoding=NoAccept()) response = Response(body=b"foo") response.vary = vary @@ -66,9 +66,7 @@ def test_compresses_non_streaming(self): decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo" compressed_body = b"".join(list(gzip_app_iter([decompressed_body]))) - request = pretend.stub( - accept_encoding=AcceptEncodingValidHeader("gzip") - ) + request = pretend.stub(accept_encoding=Accept("gzip")) response = Response(body=decompressed_body) response.md5_etag() @@ -85,9 +83,7 @@ def test_compresses_streaming(self): decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo" compressed_body = b"".join(list(gzip_app_iter([decompressed_body]))) - request = pretend.stub( - accept_encoding=AcceptEncodingValidHeader("gzip") - ) + request 
= pretend.stub(accept_encoding=Accept("gzip")) response = Response(app_iter=iter([decompressed_body])) compressor(request, response) @@ -100,9 +96,7 @@ def test_compresses_streaming_with_etag(self): decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo" compressed_body = b"".join(list(gzip_app_iter([decompressed_body]))) - request = pretend.stub( - accept_encoding=AcceptEncodingValidHeader("gzip") - ) + request = pretend.stub(accept_encoding=Accept("gzip")) response = Response(app_iter=iter([decompressed_body])) response.etag = "foo" @@ -117,9 +111,7 @@ def test_buffers_small_streaming(self): decompressed_body = b"foofoofoofoofoofoofoofoofoofoofoofoofoofoo" compressed_body = b"".join(list(gzip_app_iter([decompressed_body]))) - request = pretend.stub( - accept_encoding=AcceptEncodingValidHeader("gzip") - ) + request = pretend.stub(accept_encoding=Accept("gzip")) response = Response( app_iter=iter([decompressed_body]), content_length=len(decompressed_body), @@ -132,9 +124,7 @@ def test_buffers_small_streaming(self): assert response.body == compressed_body def test_doesnt_compress_too_small(self): - request = pretend.stub( - accept_encoding=AcceptEncodingValidHeader("gzip") - ) + request = pretend.stub(accept_encoding=Accept("gzip")) response = Response(body=b"foo") compressor(request, response) diff --git a/warehouse/sessions.py b/warehouse/sessions.py index 548f760c757a..a52318f0eb7c 100644 --- a/warehouse/sessions.py +++ b/warehouse/sessions.py @@ -263,7 +263,6 @@ def _process_response(self, request, response): max_age=self.max_age, httponly=True, secure=request.scheme == "https", - samesite=b"lax" )
pypi__warehouse-3292
Warehouse file order differs from legacy PyPI file list Tonight, while load testing of pypi.org was ongoing, we saw some failures in automated systems that use `--require-hashes` with `pip install`, as ordering on the package file list page changed. The specific package we saw break was `pandas` at version `0.12.0`. We had a single hash for `pandas-0.12.0.tar.gz`. A few of our hosts were served from the legacy PyPI service, which succeeded as normal. The Warehouse endpoint, however, failed, since `pandas-0.12.0.zip` now preceded `pandas-0.12.0.tar.gz` in the file list. At the moment, you can see that https://pypi.org/simple/pandas/ and https://pypi.python.org/simple/pandas/ differ by searching for `pandas-0.12.0.tar.gz` and `pandas-0.12.0.zip` and comparing the position.
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softw...
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softw...
diff --git a/tests/unit/legacy/api/test_simple.py b/tests/unit/legacy/api/test_simple.py index 4a23369eeadb..004b99caa628 100644 --- a/tests/unit/legacy/api/test_simple.py +++ b/tests/unit/legacy/api/test_simple.py @@ -202,7 +202,7 @@ def test_with_files_with_version_multi_digit(self, db_request): files = [] for files_release in \ - zip(egg_files, wheel_files, tar_files): + zip(egg_files, tar_files, wheel_files): files += files_release db_request.matchdict["name"] = project.normalized_name @@ -212,9 +212,6 @@ def test_with_files_with_version_multi_digit(self, db_request): # Make sure that we get any changes made since the JournalEntry was # saved. db_request.db.refresh(project) - import pprint - pprint.pprint(simple.simple_detail(project, db_request)['files']) - pprint.pprint(files) assert simple.simple_detail(project, db_request) == { "project": project, diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py index d26e2c2fe335..cea32ee4d6b2 100644 --- a/warehouse/legacy/api/simple.py +++ b/warehouse/legacy/api/simple.py @@ -87,7 +87,7 @@ def simple_detail(project, request): ) ) .all(), - key=lambda f: (parse(f.version), f.packagetype) + key=lambda f: (parse(f.version), f.filename) ) return {"project": project, "files": files}
digitalfabrik__integreat-cms-169
Change development environment from docker-compose to venv - [ ] Remove the django docker container - [ ] Install package and requirements in venv - [ ] Keep database docker container and manage connection to django
[ { "content": "\"\"\"\nDjango settings for backend project.\n\nGenerated by 'django-admin startproject' using Django 1.11.11.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/e...
[ { "content": "\"\"\"\nDjango settings for backend project.\n\nGenerated by 'django-admin startproject' using Django 1.11.11.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/e...
diff --git a/.gitignore b/.gitignore index 014fe4432f..44f1a9168c 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,6 @@ backend/media/* # XLIFF files folder **/xliffs/ + +# Postgres folder +.postgres \ No newline at end of file diff --git a/README.md b/README.md index 097c6ea507..35a8db1883 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,53 @@ # Integreat Django CMS This project aims to develop a content management system tailored to the needs of municipalities to provide multilingual local information. It aims at being easy to use and easy to maintain over a long time. This project uses Python3 and Django 1.11 and aims at being run on a Ubuntu 18.04. -## Development -There are several ways to run this project locally: install as a package (Ubuntu, openSUSE), run in local Python3 venv, and also in a Docker container. Each method is detailed below. +## Setup a local development environment +To run the project locally you can either install as a package (Ubuntu, openSUSE) or you can run in local Python3 **virtualenv**, and also in a Docker container. Using **virtualenv** is the recommended way for setting up a local development environment. -To get started, run +First of all, clone the project: ```` git clone git@github.com:Integreat/cms-django.git cd cms-django ```` -### Development Tools +### Setup the database +You can run Postgres either on your local machine or in a Docker container. + +* Install Postgres on your machine ([Tutorial for Ubuntu 18.04](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-postgresql-on-ubuntu-18-04)) +* Run Postgres in a Docker container: `./dev-tools/start_db_docker.sh` + +### virtualenv +1. Run `./install-venv.sh` +2. If you have installed Postgres on your machine, you may have to adjust database credentials in `backend/backend/settings.py` +3. Do the database migrations: `integreat-cms migrate` +4. Create the initial superuser: `integreat-cms createsuperuser` +5. 
Fire up the CMS: `integreat-cms runserver localhost:8000` +6. Go to your browser and open the URL `http://localhost:8000` + +You may need to activate the `virtualenv` explicitly via `source .venv/bin/activate`. + +## Development +### Migrations +After changing a models you have to migrate via `./dev-tools/migrate.sh` + +### i18n +To make use of the translated backend, compile the django.po file as follows: + +`django-admin compilemessages` -- Delete docker environment to start over again: `dev-tools/prune_docker.sh` - (be careful: This will delete all your other docker images as well) -- Delete database to start over again: `dev-tools/prune_database.sh` -- Migrate database: `dev-tools/migrate.sh` -- Create superuser: `dev-tools/create_superuser.sh` +If you are using a virtual python environment, be sure to use the ´--exclude´ parameter or execute this command in the backend or cms directory, otherwise all the translation files in your venv will be compiled, too. -### Run CMS in Python3 venv -1. Install a local PostgreSQL server, for example with `apt install postgresql` and create a database and database user with the name `integreat`. -2. Run `./install-venv.sh` -3. Open the `backend/backend/settings.py` and adjust the database credentials. Also change the hostname to `localhost`. -4. Do the database migrations: `integreat-cms migrate` -5. Create the initial superuser: `integreat-cms createsuperuser` -6. Fire up the CMS: `integreat-cms runserver localhost:8000` -7. Go to your browser and open the URL `http://localhost:8000` -8. Run Django unittest: `integreat-cms test cms/` +### Testing +Run Django unittest: `integreat-cms test cms/` -### Run CMS in Docker container -A docker-compose file is provided in the the repository. It will start one container with a PostgreSQL database and another one with the CMS. 
-* `docker-compose up` -* enter [http://localhost:8000](http://localhost:8000) -* as long as there is no standard SQL dump, you have to create your own user: `docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms createsuperuser"` +### Miscellaneous +* Keep in mind that we are using Python 3.x, so use `python3` and `pip3` with any command +* Access the Postgres database running in Docker container: `docker exec -it integreat_django_postgres psql -U integreat` +* Too ensure that you do not accidentally push your changes in `settings.py`, you can ignore the file locally via `git update-index --assume-unchanged ./backend/backend/settings.py` +* Delete the database to start over again: `dev-tools/prune_database.sh` +* Create superuser: `dev-tools/create_superuser.sh` -### Packaging and installing on Ubuntu 18.04 +## Packaging and installing on Ubuntu 18.04 Packaging for Debian can be done with setuptools. ``` $ python3 -m venv .venv @@ -56,35 +69,4 @@ Then install both packages with gdebi: # gdebi django-widget-tweaks/deb_dist/python3-django-widget-tweaks_1.4.3-1_all.deb # gebi cms-django/deb_dist/python3-integreat-cms_0.0.13-1_all.deb ```` -In the end, create a PostgreSQL user and database and adjust the `/usr/lib/python3/dist-packages/backend/settings.py`. 
- - -### Troubleshooting -#### Cleaning up Docker environment -* stop all conntainers: `docker stop $(docker ps -a -q)` -* remove all images: `docker rmi $(docker images -a -q)` -* remove all volumes: `docker system prune` -#### Misc -* keep in mind that we are using Python 3.x, so use `python3` and `pip3` on your bash commands -* get a bash shell in the django container: `docker exec -it $(docker-compose ps -q django) bash` -* enter postgres container: `docker exec -it $(docker-compose ps -q postgres) psql -U"integreat" -d "integreat"` - -### Migrations -* change models -* `docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms makemigrations [app]"` -* optional, if you want to inspect the corresponding SQL syntax: `docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms sqlmigrate [app] [number]"` -* `docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms migrate"` - -### Docker clean up -* `docker stop $(docker ps -a -q)` -* `docker rm $(docker ps -a -q)` -* remove all images: `docker rmi $(docker images -a -q)` -* remove all volumes: `docker volume prune` - -### i18n -To make use of the translated backend, compile the django.po file as follows: - -`django-admin compilemessages` - -If you use a virtual python environment, be sure to use the ´--exclude´ parameter or execute this command in the backend or cms directory, otherwise all the translation files in your venv will be compiled, too. - +In the end, create a PostgreSQL user and database and adjust the `/usr/lib/python3/dist-packages/backend/settings.py`. 
\ No newline at end of file diff --git a/_docker/django/development/Dockerfile b/_docker/django/development/Dockerfile deleted file mode 100644 index 5cd8e41355..0000000000 --- a/_docker/django/development/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM ubuntu - -COPY ./ /opt/integreat-cms -RUN echo $PWD -WORKDIR /opt/integreat-cms -RUN who - -RUN apt update -RUN DEBIAN_FRONTEND=noninteractive apt install --yes --force-yes python3 python3-pip python3-setuptools libpq-dev python3-venv - -# remove deprecated pycrypto package -RUN DEBIAN_FRONTEND=noninteractive apt remove --yes --force-yes python3-crypto - -RUN python3 -m venv .venv - -RUN echo "source /opt/integreat-cms/.venv/bin/activate" >> /root/.bashrc - -EXPOSE 8000 \ No newline at end of file diff --git a/_docker/django/production/Dockerfile b/_docker/django/production/Dockerfile deleted file mode 100644 index 5d95d702f4..0000000000 --- a/_docker/django/production/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM ubuntu - -RUN echo $PWD -RUN who - -RUN apt-add-repository ... 
-RUN apt update -RUN DEBIAN_FRONTEND=noninteractive apt install --yes --force-yes integreat-cms - -# remove deprecated pycrypto package -RUN DEBIAN_FRONTEND=noninteractive apt remove --yes --force-yes python3-crypto - -EXPOSE 8000 diff --git a/backend/backend/settings.py b/backend/backend/settings.py index 03db85be54..3389027fa1 100644 --- a/backend/backend/settings.py +++ b/backend/backend/settings.py @@ -94,7 +94,7 @@ 'NAME': 'integreat', 'USER': 'integreat', 'PASSWORD': 'password', - 'HOST': 'postgres', + 'HOST': 'localhost', 'PORT': '5432', } } diff --git a/dev-tools/create_superuser.sh b/dev-tools/create_superuser.sh index f32495baca..8fe53e16f2 100755 --- a/dev-tools/create_superuser.sh +++ b/dev-tools/create_superuser.sh @@ -1,3 +1,4 @@ #!/bin/sh -docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms createsuperuser --username root --email ''" +source .venv/bin/activate +integreat-cms createsuperuser --username root --email '' \ No newline at end of file diff --git a/dev-tools/migrate.sh b/dev-tools/migrate.sh index 7bed4f66cf..02d1fa165e 100755 --- a/dev-tools/migrate.sh +++ b/dev-tools/migrate.sh @@ -1,5 +1,6 @@ #!/bin/sh -docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms makemigrations cms" -docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms migrate" -docker exec -it $(docker-compose ps -q django) bash -ic "integreat-cms loaddata backend/cms/fixtures/roles.json" +source .venv/bin/activate +integreat-cms makemigrations cms +integreat-cms migrate +integreat-cms loaddata backend/cms/fixtures/roles.json \ No newline at end of file diff --git a/dev-tools/prune_database.sh b/dev-tools/prune_database.sh index 0c5415b5f1..5a09d05a55 100755 --- a/dev-tools/prune_database.sh +++ b/dev-tools/prune_database.sh @@ -2,5 +2,5 @@ script_dir=$(dirname "$BASH_SOURCE") -rm -rfv $script_dir/../_postgres +rm -rfv $script_dir/../.postgres rm -rfv $script_dir/../backend/cms/migrations diff --git 
a/dev-tools/prune_docker.sh b/dev-tools/prune_docker.sh deleted file mode 100755 index b358204895..0000000000 --- a/dev-tools/prune_docker.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -docker-compose down --rmi local diff --git a/dev-tools/start_db_docker.sh b/dev-tools/start_db_docker.sh new file mode 100755 index 0000000000..02da169668 --- /dev/null +++ b/dev-tools/start_db_docker.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +# Change connection string +sed -i 's/5432/5433/g' ./backend/backend/settings.py + +# Start Postgres Docker container +if [ ! "$(docker ps -q -f name='integreat_django_postgres')" ]; then + if [ "$(docker ps -aq -f status=exited -f name='integreat_django_postgres')" ]; then + # Start the existing container + docker start integreat_django_postgres + else + # Run new container + docker run --name "integreat_django_postgres" -e "POSTGRES_USER=integreat" -e "POSTGRES_PASSWORD=password" -e "POSTGRES_DB=integreat" -v "$(pwd)/.postgres:/var/lib/postgresql" -p 5433:5432 postgres + fi +fi \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index abccfb632a..0000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,26 +0,0 @@ -version: '2' - -services: - django: - build: - context: . 
- dockerfile: ./_docker/django/development/Dockerfile - command: bash -c "source .venv/bin/activate && python3 setup.py develop && integreat-cms runserver 0.0.0.0:8000" - depends_on: - - postgres - ports: - - 8000:8000 - restart: always - #tty: true - volumes: - - "./backend:/opt/integreat-cms/backend" - - postgres: - environment: - - POSTGRES_USER=integreat - - POSTGRES_PASSWORD=password - - POSTGRES_DB=integreat - image: postgres - restart: always - volumes: - - "./_postgres:/var/lib/postgresql" diff --git a/install-venv.sh b/install-venv.sh index 4d551e4375..52ce5df5a6 100755 --- a/install-venv.sh +++ b/install-venv.sh @@ -1,7 +1,9 @@ #!/bin/bash + # This script installs the CMS in a local virtual environment without # the need for docker or any other virtualization technology. A Postgres # SQL server is needed to run the CMS. python3 -m venv .venv source .venv/bin/activate python3 setup.py develop +source .venv/bin/activate \ No newline at end of file
modin-project__modin-2173
[OmniSci] Add float32 dtype support Looks like our calcite serializer doesn't support float32 type.
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the ...
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the ...
diff --git a/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py b/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py index 0156cfbc3d9..f460868cd5d 100644 --- a/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py +++ b/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py @@ -41,6 +41,7 @@ class CalciteSerializer: "int32": "INTEGER", "int64": "BIGINT", "bool": "BOOLEAN", + "float32": "FLOAT", "float64": "DOUBLE", } diff --git a/modin/experimental/engines/omnisci_on_ray/test/test_dataframe.py b/modin/experimental/engines/omnisci_on_ray/test/test_dataframe.py index 86632635e1b..3fca2092b7f 100644 --- a/modin/experimental/engines/omnisci_on_ray/test/test_dataframe.py +++ b/modin/experimental/engines/omnisci_on_ray/test/test_dataframe.py @@ -275,6 +275,20 @@ def test_sep_delimiter(self, kwargs): df_equals(modin_df, pandas_df) + @pytest.mark.skip(reason="https://github.com/modin-project/modin/issues/2174") + def test_float32(self): + csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv") + kwargs = { + "dtype": {"a": "float32", "b": "float32"}, + } + + pandas_df = pandas.read_csv(csv_file, **kwargs) + pandas_df["a"] = pandas_df["a"] + pandas_df["b"] + modin_df = pd.read_csv(csv_file, **kwargs, engine="arrow") + modin_df["a"] = modin_df["a"] + modin_df["b"] + + df_equals(modin_df, pandas_df) + class TestMasks: data = {
huggingface__diffusers-680
LDM Bert `config.json` path ### Describe the bug ### Problem There is a reference to an LDM Bert that 404's ```bash src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py: "ldm-bert": "https://huggingface.co/ldm-bert/resolve/main/config.json", ``` I was able to locate a `config.json` at `https://huggingface.co/valhalla/ldm-bert/blob/main/config.json` Is this the correct `config.json`? #### Notes for reviewer Happy to send a PR if needed to update, feel free to do on your own if it's faster/easier :) ### Reproduction na ### Logs ```shell na ``` ### System Info na
[ { "content": "import inspect\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint\n\nfrom transformers.activations import ACT2FN\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.modeling_outputs import BaseModelOu...
[ { "content": "import inspect\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint\n\nfrom transformers.activations import ACT2FN\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.modeling_outputs i...
diff --git a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py index 4a4f29be7f75..2efde98f772e 100644 --- a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +++ b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py @@ -192,7 +192,7 @@ def __call__( LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "ldm-bert": "https://huggingface.co/ldm-bert/resolve/main/config.json", + "ldm-bert": "https://huggingface.co/valhalla/ldm-bert/blob/main/config.json", }
hedyorg__hedy-654
Turtle should not be shown in level 6 programs with numbers Turtle is now shown in some cases: ![image](https://user-images.githubusercontent.com/1003685/130324043-1beec018-2dcb-497e-be75-035c5e6dc671.png) Violating code: ``` nummer is 5 nummertwee is 6 getal is nummer * nummertwee print getal ``` Turtle should not be shown in level 6 programs with numbers Turtle is now shown in some cases: ![image](https://user-images.githubusercontent.com/1003685/130324043-1beec018-2dcb-497e-be75-035c5e6dc671.png) Violating code: ``` nummer is 5 nummertwee is 6 getal is nummer * nummertwee print getal ```
[ { "content": "from lark import Lark\nfrom lark.exceptions import LarkError, UnexpectedEOF, UnexpectedCharacters\nfrom lark import Tree, Transformer, visitors\nfrom os import path\nimport sys\nimport utils\nfrom collections import namedtuple\n\n\n# Some useful constants\nHEDY_MAX_LEVEL = 22\n\nreserved_words = [...
[ { "content": "from lark import Lark\nfrom lark.exceptions import LarkError, UnexpectedEOF, UnexpectedCharacters\nfrom lark import Tree, Transformer, visitors\nfrom os import path\nimport sys\nimport utils\nfrom collections import namedtuple\n\n\n# Some useful constants\nHEDY_MAX_LEVEL = 22\n\nreserved_words = [...
diff --git a/coursedata/adventures/nl.yaml b/coursedata/adventures/nl.yaml index 1be31c858af..538e04c366a 100644 --- a/coursedata/adventures/nl.yaml +++ b/coursedata/adventures/nl.yaml @@ -484,7 +484,8 @@ adventures: keuzes is 1, 2, 3, 4, 5, regenworm worp is ... print 'je hebt ' ... ' gegooid' - if ... is regenworm print 'Je mag stoppen met gooien.' ... print 'Je moet nog een keer hoor!' + if ... is regenworm print 'Je mag stoppen met gooien.' + ... print 'Je moet nog een keer hoor!' ``` start_code: "print Wat zal de dobbelsteen deze keer aangeven?" 5: diff --git a/hedy.py b/hedy.py index d0d213ece0c..407371dd5c5 100644 --- a/hedy.py +++ b/hedy.py @@ -258,7 +258,13 @@ def forward(self, args): def turn(self, args): return True + # somehow a token (or only this token?) is not picked up by the default rule so it needs + # its own rule + def NUMBER(self, args): + return False + def NAME(self, args): + return False diff --git a/tests/tests_level_05.py b/tests/tests_level_05.py index 48971bccdb0..9f0a0d139cc 100644 --- a/tests/tests_level_05.py +++ b/tests/tests_level_05.py @@ -40,6 +40,7 @@ def test_print_with_var(self): print('ik heet'+naam)""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_comma(self): @@ -54,6 +55,7 @@ def test_print_with_comma(self): print('ik heet,'+naam)""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_turtle_basic(self): result = hedy.transpile("forward 50\nturn\nforward 100", self.level) @@ -62,6 +64,7 @@ def test_transpile_turtle_basic(self): t.right(90) t.forward(100)""") self.assertEqual(expected, result.code) + self.assertEqual(True, result.has_turtle) def test_transpile_turtle_with_ask(self): code = textwrap.dedent("""\ @@ -72,6 +75,7 @@ def test_transpile_turtle_with_ask(self): afstand = input('hoe ver dan?') t.forward(afstand)""") self.assertEqual(expected, result.code) + self.assertEqual(True, result.has_turtle) def 
test_print_Spanish(self): code = textwrap.dedent("""\ @@ -83,6 +87,7 @@ def test_print_Spanish(self): print('Cuál es tu color favorito?')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask_Spanish(self): code = textwrap.dedent("""\ @@ -94,6 +99,7 @@ def test_transpile_ask_Spanish(self): color = input('Cuál es tu color favorito?')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_other(self): with self.assertRaises(Exception) as context: @@ -115,6 +121,7 @@ def test_repeat_basic_print(self): print('me wants a cookie!')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) expected_output = textwrap.dedent("""\ me wants a cookie! @@ -140,6 +147,7 @@ def test_repeat_with_variable_print(self): print('me wants a cookie!')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) expected_output = textwrap.dedent("""\ me wants a cookie! @@ -165,6 +173,7 @@ def test_repeat_nested_in_if(self): print('mooi!')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_repeat_over_9_times(self): @@ -178,6 +187,7 @@ def test_repeat_over_9_times(self): print('me wants a cookie!')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) expected_output = textwrap.dedent("""\ me wants a cookie! 
diff --git a/tests/tests_level_06.py b/tests/tests_level_06.py index 7fd031d2d53..f42b2fb58a4 100644 --- a/tests/tests_level_06.py +++ b/tests/tests_level_06.py @@ -38,6 +38,7 @@ def test_print_with_var(self): print('ik heet'+str(naam))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): @@ -50,6 +51,7 @@ def test_transpile_ask(self): antwoord = input('wat is je lievelingskleur?')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_repeat_nested_in_if(self): @@ -66,6 +68,7 @@ def test_repeat_nested_in_if(self): print('mooi!')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_repeat_with_variable_print(self): code = textwrap.dedent("""\ @@ -80,6 +83,7 @@ def test_repeat_with_variable_print(self): print('me wants a cookie!')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) expected_output = textwrap.dedent("""\ me wants a cookie! 
@@ -97,6 +101,7 @@ def test_simple_calculation(self): expected = "nummer = int(4) + int(5)" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_simple_calculation_without_space(self): code = "nummer is 4+5" @@ -104,6 +109,7 @@ def test_simple_calculation_without_space(self): expected = "nummer = int(4) + int(5)" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_turtle_basic(self): @@ -113,6 +119,7 @@ def test_transpile_turtle_basic(self): t.right(90) t.forward(100)""") self.assertEqual(expected, result.code) + self.assertEqual(True, result.has_turtle) def test_transpile_turtle_with_ask(self): code = textwrap.dedent("""\ @@ -123,6 +130,7 @@ def test_transpile_turtle_with_ask(self): afstand = input('hoe ver dan?') t.forward(afstand)""") self.assertEqual(expected, result.code) + self.assertEqual(True, result.has_turtle) def test_calculation_and_printing(self): @@ -137,6 +145,7 @@ def test_calculation_and_printing(self): print(str(nummer))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("9", run_code(result)) def test_calculation_with_vars(self): @@ -155,6 +164,7 @@ def test_calculation_with_vars(self): print(str(getal))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) def test_print_calculation_times_directly(self): @@ -171,6 +181,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) def test_print_calculation_divide_directly(self): @@ -187,6 +198,7 @@ def test_print_calculation_divide_directly(self): print(str(int(nummer) // int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("0", 
run_code(result)) def test_issue_andras(self): @@ -213,5 +225,6 @@ def test_issue_andras(self): print('ok bedankt dan wordt het '+str(prijs)+' euro')""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) diff --git a/tests/tests_level_08.py b/tests/tests_level_08.py index 71218bb88a2..e1e1ed2e318 100644 --- a/tests/tests_level_08.py +++ b/tests/tests_level_08.py @@ -29,16 +29,19 @@ def test_print(self): result = hedy.transpile("print 'ik heet'", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint 'ik heet' naam", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print '5 keer 5 is ' 5*5", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -54,6 +57,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -61,6 +65,7 @@ def test_transpile_ask(self): result = hedy.transpile("antwoord is ask 'wat is je lievelingskleur?'", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_with_indent(self): code = textwrap.dedent("""\ @@ -74,6 +79,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -97,6 +103,7 
@@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -110,6 +117,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -128,6 +136,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -146,6 +155,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -160,6 +170,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): @@ -186,6 +197,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) #fails, issue 363 @@ -207,6 +219,7 @@ def test_for_ifbug(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loopbug599(self): code = textwrap.dedent("""\ @@ -222,6 +235,7 @@ def test_for_loopbug599(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) #programs with issues to see if we catch them properly diff --git a/tests/tests_level_09.py b/tests/tests_level_09.py index 8f261d0380b..94fafba2005 100644 --- a/tests/tests_level_09.py +++ b/tests/tests_level_09.py @@ -29,16 +29,19 @@ def test_print(self): result = hedy.transpile("print 
'ik heet'", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint 'ik heet' naam", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print '5 keer 5 is ' 5*5", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -54,6 +57,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -61,6 +65,7 @@ def test_transpile_ask(self): result = hedy.transpile("antwoord is ask 'wat is je lievelingskleur?'", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_with_indent(self): code = textwrap.dedent("""\ @@ -74,6 +79,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -97,6 +103,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -110,6 +117,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -128,6 +136,7 @@ def 
test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -146,6 +155,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -160,6 +170,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): @@ -186,6 +197,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_elif(self): code = textwrap.dedent("""\ @@ -204,6 +216,7 @@ def test_if_elif(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_with_multiple_elifs(self): code = textwrap.dedent("""\ @@ -226,6 +239,7 @@ def test_if_with_multiple_elifs(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) #programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_10.py b/tests/tests_level_10.py index c2d3939bc7a..111eeb56f26 100644 --- a/tests/tests_level_10.py +++ b/tests/tests_level_10.py @@ -29,16 +29,19 @@ def test_print(self): result = hedy.transpile("print 'ik heet'", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint 'ik heet' naam", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) 
def test_print_with_calc_no_spaces(self): result = hedy.transpile("print '5 keer 5 is ' 5*5", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -54,6 +57,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -61,6 +65,7 @@ def test_transpile_ask(self): result = hedy.transpile("antwoord is ask 'wat is je lievelingskleur?'", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_with_indent(self): code = textwrap.dedent("""\ @@ -75,6 +80,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -98,6 +104,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -112,6 +119,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -130,6 +138,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -148,6 +157,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def 
test_forloop(self): code = textwrap.dedent("""\ @@ -161,6 +171,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -175,6 +186,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -193,6 +205,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -218,6 +231,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) #programs with issues to see if we catch them properly # (so this should fail, for now) # at one point we want a real "Indent" error and a better error message diff --git a/tests/tests_level_11.py b/tests/tests_level_11.py index 2e5734f561b..1db8fdfa234 100644 --- a/tests/tests_level_11.py +++ b/tests/tests_level_11.py @@ -32,16 +32,19 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def 
test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -57,6 +60,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -64,6 +68,7 @@ def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_with_indent(self): code = textwrap.dedent("""\ @@ -78,6 +83,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -101,6 +107,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -115,6 +122,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -133,6 +141,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -150,6 +159,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -164,6 +174,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = 
textwrap.dedent("""\ @@ -178,6 +189,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -196,6 +208,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -212,6 +225,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -237,6 +251,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) #programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_12.py b/tests/tests_level_12.py index ec79d68d36e..2ec9b5a0167 100644 --- a/tests/tests_level_12.py +++ b/tests/tests_level_12.py @@ -30,16 +30,19 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -55,6 +58,7 @@ def test_print_calculation_times_directly(self): 
print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -62,6 +66,7 @@ def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_with_indent(self): code = textwrap.dedent("""\ @@ -76,6 +81,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -99,6 +105,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -113,6 +120,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -131,6 +139,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -149,6 +158,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -163,6 +173,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -177,6 +188,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, 
result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -195,6 +207,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -211,6 +224,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -223,6 +237,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -237,6 +252,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -251,6 +267,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) #note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -276,6 +293,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -301,6 +319,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) #programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_13.py b/tests/tests_level_13.py index 53bdb830e44..699b3d2c5d7 100644 --- a/tests/tests_level_13.py +++ 
b/tests/tests_level_13.py @@ -32,16 +32,19 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): @@ -58,13 +61,14 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) - + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_with_indent(self): code = textwrap.dedent("""\ @@ -78,6 +82,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -100,6 +105,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -113,6 +119,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def 
test_for_loop(self): code = textwrap.dedent("""\ @@ -130,6 +137,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -147,6 +155,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -160,6 +169,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -173,6 +183,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -190,6 +201,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -205,6 +217,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -216,6 +229,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -229,6 +243,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -242,6 +257,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + 
self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -266,6 +282,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -290,6 +307,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -303,6 +321,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -315,6 +334,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -328,6 +348,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -341,6 +362,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -364,6 +386,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_14.py b/tests/tests_level_14.py index 9e41b33913d..5b661e512a8 100644 --- a/tests/tests_level_14.py +++ 
b/tests/tests_level_14.py @@ -33,21 +33,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -63,6 +67,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -78,6 +83,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -100,6 +106,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -113,6 +120,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ 
-130,6 +138,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -147,6 +156,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -160,6 +170,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -173,6 +184,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -190,6 +202,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -205,6 +218,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -216,6 +230,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -229,6 +244,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -242,6 +258,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that 
print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -266,6 +283,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -290,6 +308,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -303,6 +322,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -315,6 +335,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -328,6 +349,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -341,6 +363,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -364,6 +387,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -375,6 +399,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -386,6 +411,7 @@ 
def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_15.py b/tests/tests_level_15.py index 2fd9a0298db..9787f1048c0 100644 --- a/tests/tests_level_15.py +++ b/tests/tests_level_15.py @@ -32,21 +32,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -62,6 +66,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -78,6 +83,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -100,6 +106,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) 
self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -113,6 +120,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -130,6 +138,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -147,6 +156,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -160,6 +170,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -173,6 +184,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -190,6 +202,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -205,6 +218,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -216,6 +230,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -229,6 +244,7 @@ def test_random(self): result = 
hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -242,6 +258,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -266,6 +283,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -290,6 +308,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -303,6 +322,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -315,6 +335,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -328,6 +349,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -341,6 +363,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -364,6 +387,7 @@ def test_bool_total(self): result = hedy.transpile(code, 
self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -375,6 +399,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -386,6 +411,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = textwrap.dedent("""\ @@ -398,6 +424,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -411,6 +438,7 @@ def test_commentbegin(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ @@ -424,6 +452,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_16.py b/tests/tests_level_16.py index 850d3908040..31ac3393875 100644 --- a/tests/tests_level_16.py +++ b/tests/tests_level_16.py @@ -32,21 +32,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def 
test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -62,6 +66,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -78,6 +83,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -100,6 +106,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -113,6 +120,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -130,6 +138,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -147,6 +156,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -160,6 +170,7 @@ def test_forloop(self): 
result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -173,6 +184,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -190,6 +202,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -205,6 +218,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -216,6 +230,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -229,6 +244,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -242,6 +258,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -266,6 +283,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -290,6 +308,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, 
result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -303,6 +322,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -315,6 +335,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -328,6 +349,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -341,6 +363,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -364,6 +387,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -375,6 +399,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -386,6 +411,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = textwrap.dedent("""\ @@ -398,6 +424,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -411,6 +438,7 @@ def test_commentbegin(self): 
print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ @@ -424,6 +452,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller(self): code = textwrap.dedent("""\ @@ -437,6 +466,7 @@ def test_smaller(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger(self): code = textwrap.dedent("""\ @@ -450,6 +480,7 @@ def test_bigger(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_big_and_small(self): code = textwrap.dedent("""\ @@ -467,6 +498,7 @@ def test_big_and_small(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_17.py b/tests/tests_level_17.py index 8e11d910883..4f90748b95f 100644 --- a/tests/tests_level_17.py +++ b/tests/tests_level_17.py @@ -32,21 +32,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + 
self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -62,6 +66,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -77,6 +82,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -99,6 +105,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -112,6 +119,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -129,6 +137,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -146,6 +155,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -159,6 +169,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -172,6 +183,7 
@@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -189,6 +201,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -204,6 +217,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -215,6 +229,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -228,6 +243,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -241,6 +257,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -265,6 +282,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -289,6 +307,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -302,6 +321,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) 
self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -314,6 +334,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -327,6 +348,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -340,6 +362,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -363,6 +386,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -374,6 +398,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -385,6 +410,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = textwrap.dedent("""\ @@ -397,6 +423,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -410,6 +437,7 @@ def test_commentbegin(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ 
@@ -423,6 +451,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller(self): code = textwrap.dedent("""\ @@ -436,6 +465,7 @@ def test_smaller(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger(self): code = textwrap.dedent("""\ @@ -449,6 +479,7 @@ def test_bigger(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_big_and_small(self): code = textwrap.dedent("""\ @@ -466,6 +497,7 @@ def test_big_and_small(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop(self): code = textwrap.dedent("""\ @@ -483,6 +515,7 @@ def test_whileloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop2(self): code = textwrap.dedent("""\ @@ -502,6 +535,7 @@ def test_whileloop2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop3(self): code = textwrap.dedent("""\ @@ -523,6 +557,7 @@ def test_whileloop3(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_18.py b/tests/tests_level_18.py index 4422ed0e455..06d7b6be1a2 100644 --- a/tests/tests_level_18.py +++ b/tests/tests_level_18.py @@ -32,21 +32,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + 
self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -62,6 +66,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -78,6 +83,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -100,6 +106,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -113,6 +120,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -130,6 +138,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): 
code = textwrap.dedent("""\ @@ -147,6 +156,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -160,6 +170,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -173,6 +184,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -190,6 +202,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -205,6 +218,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -216,6 +230,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -229,6 +244,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -242,6 +258,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -266,6 +283,7 @@ def test_everything_combined(self): result = 
hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -290,6 +308,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -303,6 +322,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -315,6 +335,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -328,6 +349,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -341,6 +363,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -364,6 +387,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -375,6 +399,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -386,6 +411,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = 
textwrap.dedent("""\ @@ -398,6 +424,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -411,6 +438,7 @@ def test_commentbegin(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ @@ -424,6 +452,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller(self): code = textwrap.dedent("""\ @@ -437,6 +466,7 @@ def test_smaller(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger(self): code = textwrap.dedent("""\ @@ -450,6 +480,7 @@ def test_bigger(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_big_and_small(self): code = textwrap.dedent("""\ @@ -467,6 +498,7 @@ def test_big_and_small(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop(self): code = textwrap.dedent("""\ @@ -484,6 +516,7 @@ def test_whileloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop2(self): code = textwrap.dedent("""\ @@ -503,6 +536,7 @@ def test_whileloop2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop3(self): code = textwrap.dedent("""\ @@ -524,6 +558,7 @@ def test_whileloop3(self): result = hedy.transpile(code, self.level) 
self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_access_plus(self): code = textwrap.dedent("""\ @@ -541,6 +576,7 @@ def test_access_plus(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_19.py b/tests/tests_level_19.py index 232824a2473..4e2b69cd77b 100644 --- a/tests/tests_level_19.py +++ b/tests/tests_level_19.py @@ -30,21 +30,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam is Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord is input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -60,6 +64,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -75,6 +80,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + 
self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -97,6 +103,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -110,6 +117,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -127,6 +135,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -144,6 +153,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -157,6 +167,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -170,6 +181,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -187,6 +199,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -202,6 +215,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -213,6 +227,7 @@ def test_list(self): result = hedy.transpile(code, self.level) 
self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -226,6 +241,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -239,6 +255,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -263,6 +280,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -287,6 +305,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -300,6 +319,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -312,6 +332,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -325,6 +346,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -338,6 +360,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) 
+ self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -361,6 +384,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -372,6 +396,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -383,6 +408,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = textwrap.dedent("""\ @@ -395,6 +421,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -408,6 +435,7 @@ def test_commentbegin(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ @@ -421,6 +449,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller(self): code = textwrap.dedent("""\ @@ -434,6 +463,7 @@ def test_smaller(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger(self): code = textwrap.dedent("""\ @@ -447,6 +477,7 @@ def test_bigger(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_big_and_small(self): code = textwrap.dedent("""\ @@ -464,6 +495,7 @@ def 
test_big_and_small(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop(self): code = textwrap.dedent("""\ @@ -481,6 +513,7 @@ def test_whileloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop2(self): code = textwrap.dedent("""\ @@ -500,6 +533,7 @@ def test_whileloop2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop3(self): code = textwrap.dedent("""\ @@ -521,6 +555,7 @@ def test_whileloop3(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_access_plus(self): code = textwrap.dedent("""\ @@ -538,6 +573,7 @@ def test_access_plus(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length(self): code = textwrap.dedent("""\ fruit is ['appel', 'banaan', 'kers'] @@ -550,6 +586,7 @@ def test_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length2(self): code = textwrap.dedent("""\ @@ -563,6 +600,7 @@ def test_length2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_length(self): code = textwrap.dedent("""\ @@ -578,6 +616,7 @@ def test_print_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_20.py b/tests/tests_level_20.py index 4c87e96b8bb..eca801011b9 100644 --- 
a/tests/tests_level_20.py +++ b/tests/tests_level_20.py @@ -31,21 +31,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam = Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord = input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -61,6 +65,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -76,6 +81,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -98,6 +104,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -111,6 +118,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code 
= textwrap.dedent("""\ @@ -128,6 +136,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -145,6 +154,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -158,6 +168,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -171,6 +182,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -188,6 +200,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -203,6 +216,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -214,6 +228,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -227,6 +242,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -240,6 +256,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, 
result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -264,6 +281,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -288,6 +306,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -301,6 +320,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -313,6 +333,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -326,6 +347,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -339,6 +361,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -362,6 +385,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -373,6 +397,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = 
textwrap.dedent("""\ @@ -384,6 +409,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = textwrap.dedent("""\ @@ -396,6 +422,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -409,6 +436,7 @@ def test_commentbegin(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ @@ -422,6 +450,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller(self): code = textwrap.dedent("""\ @@ -435,6 +464,7 @@ def test_smaller(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger(self): code = textwrap.dedent("""\ @@ -448,6 +478,7 @@ def test_bigger(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_big_and_small(self): code = textwrap.dedent("""\ @@ -465,6 +496,7 @@ def test_big_and_small(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop(self): code = textwrap.dedent("""\ @@ -482,6 +514,7 @@ def test_whileloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop2(self): code = textwrap.dedent("""\ @@ -501,6 +534,7 @@ def test_whileloop2(self): result = hedy.transpile(code, self.level) 
self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop3(self): code = textwrap.dedent("""\ @@ -522,6 +556,7 @@ def test_whileloop3(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_access_plus(self): code = textwrap.dedent("""\ @@ -539,6 +574,7 @@ def test_access_plus(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length(self): code = textwrap.dedent("""\ fruit = ['appel', 'banaan', 'kers'] @@ -551,6 +587,7 @@ def test_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length2(self): code = textwrap.dedent("""\ @@ -564,6 +601,7 @@ def test_length2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_length(self): code = textwrap.dedent("""\ @@ -579,6 +617,7 @@ def test_print_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_sum_in_if(self): code = textwrap.dedent("""\ @@ -594,6 +633,7 @@ def test_sum_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_sum_in_right_side_if(self): code = textwrap.dedent("""\ @@ -609,6 +649,7 @@ def test_sum_in_right_side_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_min_in_if(self): code = textwrap.dedent("""\ @@ -624,6 +665,7 @@ def test_min_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_multiply_in_if(self): code = 
textwrap.dedent("""\ @@ -639,6 +681,7 @@ def test_multiply_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) diff --git a/tests/tests_level_21.py b/tests/tests_level_21.py index 9b8a073f0d6..4cdaa3e47f5 100644 --- a/tests/tests_level_21.py +++ b/tests/tests_level_21.py @@ -31,21 +31,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam = Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord = input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -61,6 +65,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -76,6 +81,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -98,6 +104,7 @@ def 
test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -111,6 +118,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -128,6 +136,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -145,6 +154,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -158,6 +168,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -171,6 +182,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -188,6 +200,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -203,6 +216,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -214,6 +228,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = 
textwrap.dedent("""\ @@ -227,6 +242,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -240,6 +256,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -264,6 +281,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -288,6 +306,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true(self): code = textwrap.dedent("""\ @@ -301,6 +320,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -313,6 +333,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -326,6 +347,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -339,6 +361,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -362,6 
+385,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -373,6 +397,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -384,6 +409,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = textwrap.dedent("""\ @@ -396,6 +422,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -409,6 +436,7 @@ def test_commentbegin(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ @@ -422,6 +450,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller(self): code = textwrap.dedent("""\ @@ -435,6 +464,7 @@ def test_smaller(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger(self): code = textwrap.dedent("""\ @@ -448,6 +478,7 @@ def test_bigger(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_big_and_small(self): code = textwrap.dedent("""\ @@ -465,6 +496,7 @@ def test_big_and_small(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, 
result.has_turtle) def test_whileloop(self): code = textwrap.dedent("""\ @@ -482,6 +514,7 @@ def test_whileloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop2(self): code = textwrap.dedent("""\ @@ -501,6 +534,7 @@ def test_whileloop2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop3(self): code = textwrap.dedent("""\ @@ -522,6 +556,7 @@ def test_whileloop3(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_access_plus(self): code = textwrap.dedent("""\ @@ -539,6 +574,7 @@ def test_access_plus(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length(self): code = textwrap.dedent("""\ fruit = ['appel', 'banaan', 'kers'] @@ -551,6 +587,7 @@ def test_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length2(self): code = textwrap.dedent("""\ @@ -564,6 +601,7 @@ def test_length2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_length(self): code = textwrap.dedent("""\ @@ -579,6 +617,7 @@ def test_print_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_not_equal_one(self): code = textwrap.dedent("""\ @@ -595,6 +634,7 @@ def test_not_equal_one(self): print('Ik kom ook uit Nederland!')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_not_equal_two(self): code = textwrap.dedent("""\ @@ -611,6 +651,7 @@ 
def test_not_equal_two(self): print('Fout! Je mocht geen 5 zeggen')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_sum_in_if(self): code = textwrap.dedent("""\ @@ -626,6 +667,7 @@ def test_sum_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_sum_in_right_side_if(self): code = textwrap.dedent("""\ @@ -641,6 +683,7 @@ def test_sum_in_right_side_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_min_in_if(self): code = textwrap.dedent("""\ @@ -656,6 +699,7 @@ def test_min_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_multiply_in_if(self): code = textwrap.dedent("""\ @@ -671,6 +715,7 @@ def test_multiply_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this should fail, for now) # at one point we want a real "Indent" error and a better error message diff --git a/tests/tests_level_22.py b/tests/tests_level_22.py index 179e1cce48e..f181e0c1499 100644 --- a/tests/tests_level_22.py +++ b/tests/tests_level_22.py @@ -31,21 +31,25 @@ def test_print(self): result = hedy.transpile("print('ik heet')", self.level) expected = "print('ik heet')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_var(self): result = hedy.transpile("naam = Hedy\nprint('ik heet' naam)", self.level) expected = "naam = 'Hedy'\nprint('ik heet'+str(naam))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_with_calc_no_spaces(self): result = hedy.transpile("print('5 keer 
5 is ' 5*5)", self.level) expected = "print('5 keer 5 is '+str(int(5) * int(5)))" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_transpile_ask(self): result = hedy.transpile("antwoord = input('wat is je lievelingskleur?')", self.level) expected = "antwoord = input('wat is je lievelingskleur?')" self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_calculation_times_directly(self): code = textwrap.dedent("""\ @@ -61,6 +65,7 @@ def test_print_calculation_times_directly(self): print(str(int(nummer) * int(nummertwee)))""") self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) self.assertEqual("30", run_code(result)) @@ -76,6 +81,7 @@ def test_if_with_indent(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_else(self): code = textwrap.dedent("""\ @@ -98,6 +104,7 @@ def test_if_else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_random(self): code = textwrap.dedent("""\ @@ -111,6 +118,7 @@ def test_print_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_for_loop(self): code = textwrap.dedent("""\ @@ -128,6 +136,7 @@ def test_for_loop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if__else(self): code = textwrap.dedent("""\ @@ -145,6 +154,7 @@ def test_if__else(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_forloop(self): code = textwrap.dedent("""\ @@ -158,6 +168,7 @@ def test_forloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, 
result.code) + self.assertEqual(False, result.has_turtle) def test_for_nesting(self): code = textwrap.dedent("""\ @@ -171,6 +182,7 @@ def test_for_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_nesting(self): code = textwrap.dedent("""\ @@ -188,6 +200,7 @@ def test_if_nesting(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_newprint(self): code = textwrap.dedent("""\ @@ -203,6 +216,7 @@ def test_newprint(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_list(self): code = textwrap.dedent("""\ @@ -214,6 +228,7 @@ def test_list(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_random(self): code = textwrap.dedent("""\ @@ -227,6 +242,7 @@ def test_random(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_specific_access(self): code = textwrap.dedent("""\ @@ -240,6 +256,7 @@ def test_specific_access(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # note that print(str(highscore)) will not print as it will compare 'score[i]' as str to a variable def test_everything_combined(self): @@ -264,6 +281,7 @@ def test_everything_combined(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_if_under_else_in_for(self): code = textwrap.dedent("""\ @@ -288,6 +306,7 @@ def test_if_under_else_in_for(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def 
test_bool_true(self): code = textwrap.dedent("""\ @@ -301,6 +320,7 @@ def test_bool_true(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false(self): code = textwrap.dedent("""\ @@ -313,6 +333,7 @@ def test_bool_false(self): print('ja')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_true2(self): code = textwrap.dedent("""\ @@ -326,6 +347,7 @@ def test_bool_true2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_false2(self): code = textwrap.dedent("""\ @@ -339,6 +361,7 @@ def test_bool_false2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bool_total(self): code = textwrap.dedent("""\ @@ -362,6 +385,7 @@ def test_bool_total(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_and(self): code = textwrap.dedent("""\ @@ -373,6 +397,7 @@ def test_and(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_or(self): code = textwrap.dedent("""\ @@ -384,6 +409,7 @@ def test_or(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_comment(self): code = textwrap.dedent("""\ @@ -396,6 +422,7 @@ def test_comment(self): # ['comment']""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentbegin(self): code = textwrap.dedent("""\ @@ -409,6 +436,7 @@ def test_commentbegin(self): print('hallo')""") result = hedy.transpile(code, self.level) 
self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_commentresult(self): code = textwrap.dedent("""\ @@ -422,6 +450,7 @@ def test_commentresult(self): print('hallo')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller(self): code = textwrap.dedent("""\ @@ -435,6 +464,7 @@ def test_smaller(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger(self): code = textwrap.dedent("""\ @@ -448,6 +478,7 @@ def test_bigger(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_big_and_small(self): code = textwrap.dedent("""\ @@ -465,6 +496,7 @@ def test_big_and_small(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop(self): code = textwrap.dedent("""\ @@ -482,6 +514,7 @@ def test_whileloop(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop2(self): code = textwrap.dedent("""\ @@ -501,6 +534,7 @@ def test_whileloop2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_whileloop3(self): code = textwrap.dedent("""\ @@ -522,6 +556,7 @@ def test_whileloop3(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_access_plus(self): code = textwrap.dedent("""\ @@ -539,6 +574,7 @@ def test_access_plus(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length(self): code = textwrap.dedent("""\ fruit = 
['appel', 'banaan', 'kers'] @@ -551,6 +587,7 @@ def test_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_length2(self): code = textwrap.dedent("""\ @@ -564,6 +601,7 @@ def test_length2(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_print_length(self): code = textwrap.dedent("""\ @@ -579,6 +617,7 @@ def test_print_length(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_not_equal_one(self): code = textwrap.dedent("""\ @@ -595,6 +634,7 @@ def test_not_equal_one(self): print('Ik kom ook uit Nederland!')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_not_equal_two(self): code = textwrap.dedent("""\ @@ -611,6 +651,7 @@ def test_not_equal_two(self): print('Fout! 
Je mocht geen 5 zeggen')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller_equal(self): code = textwrap.dedent("""\ @@ -623,6 +664,7 @@ def test_smaller_equal(self): print('Dan ben je jonger dan ik!')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_bigger_equal(self): code = textwrap.dedent("""\ @@ -635,6 +677,7 @@ def test_bigger_equal(self): print('Dan ben je jonger dan ik!')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_smaller_bigger_equal(self): code = textwrap.dedent("""\ @@ -651,6 +694,7 @@ def test_smaller_bigger_equal(self): print('Dan ben je ouder dan ik!')""") result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_sum_in_if(self): code = textwrap.dedent("""\ @@ -666,6 +710,7 @@ def test_sum_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_sum_in_right_side_if(self): code = textwrap.dedent("""\ @@ -681,6 +726,7 @@ def test_sum_in_right_side_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_min_in_if(self): code = textwrap.dedent("""\ @@ -696,6 +742,7 @@ def test_min_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) def test_multiply_in_if(self): code = textwrap.dedent("""\ @@ -711,6 +758,7 @@ def test_multiply_in_if(self): result = hedy.transpile(code, self.level) self.assertEqual(expected, result.code) + self.assertEqual(False, result.has_turtle) # programs with issues to see if we catch them properly # (so this 
should fail, for now) # at one point we want a real "Indent" error and a better error message
vacanza__python-holidays-451
Can't un-pickle a `HolidayBase` Seems that after a holidays class, e.g. `holidays.UnitedStates()` is used once, it can't be un-pickled. For example, this snippet: ```python import holidays import pickle from datetime import datetime # Works: us_holidays = holidays.UnitedStates() us_holidays_ = pickle.loads(pickle.dumps(us_holidays)) b = datetime.fromisoformat("2020-01-01") in us_holidays_ # Fails: us_holidays = holidays.UnitedStates() b = datetime.fromisoformat("2020-01-01") in us_holidays dump = pickle.dumps(us_holidays) pickle.loads(dump) # <- exception ``` Raises the following exception from the last line: ``` ~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __setitem__(self, key, value) 116 117 def __setitem__(self, key, value): --> 118 if key in self: 119 if self.get(key).find(value) < 0 \ 120 and value.find(self.get(key)) < 0: ~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __contains__(self, key) 73 74 def __contains__(self, key): ---> 75 return dict.__contains__(self, self.__keytransform__(key)) 76 77 def __getitem__(self, key): ~/.local/share/virtualenvs/sibylla-v2-LxBhzJgn/lib/python3.8/site-packages/holidays/holiday_base.py in __keytransform__(self, key) 67 raise TypeError("Cannot convert type '%s' to date." % type(key)) 68 ---> 69 if self.expand and key.year not in self.years: 70 self.years.add(key.year) 71 self._populate(key.year) ``` The `expand` attribute is set by `__init__`, but it's not there during deserialization via unpickling. I think it's because the `HolidayBase` inherits from dict and there's some weirdness there - it seems to first populate the dict in the deserialized object and only then sets the attributes from the state. But since `HolidayBase` overrides `__setitem__` and in this override it's using state attributes that weren't yet set on the object, the `expand` attribute is missing. Tested with `holidays=='0.10.4'`.
[ { "content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# ...
[ { "content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# ...
diff --git a/holidays/holiday_base.py b/holidays/holiday_base.py index 1ca61fccb..a24410150 100644 --- a/holidays/holiday_base.py +++ b/holidays/holiday_base.py @@ -209,6 +209,9 @@ def __radd__(self, other): def _populate(self, year): pass + def __reduce__(self): + return super(HolidayBase, self).__reduce__() + def createHolidaySum(h1, h2): class HolidaySum(HolidayBase): diff --git a/test/test_holiday_base.py b/test/test_holiday_base.py index 9211bf3c4..962616751 100644 --- a/test/test_holiday_base.py +++ b/test/test_holiday_base.py @@ -11,6 +11,7 @@ # Website: https://github.com/dr-prodigy/python-holidays # License: MIT (see LICENSE file) +import pickle import unittest from datetime import date, datetime, timedelta @@ -447,6 +448,16 @@ def test_observed(self): self.holidays.observed = True self.assertIn(date(2018, 7, 2), self.holidays) + def test_serialization(self): + loaded_holidays = pickle.loads(pickle.dumps(self.holidays)) + assert loaded_holidays == self.holidays + + dt = datetime(2020, 1, 1) + res = dt in self.holidays + loaded_holidays = pickle.loads(pickle.dumps(self.holidays)) + assert loaded_holidays == self.holidays + assert (dt in loaded_holidays) == res + class TestKeyTransforms(unittest.TestCase): def setUp(self):
ivy-llc__ivy-28478
Fix Frontend Failing Test: jax - manipulation.paddle.tile
[ { "content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (...
[ { "content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (...
diff --git a/ivy/functional/frontends/paddle/manipulation.py b/ivy/functional/frontends/paddle/manipulation.py index dd7c7e79a28f9..6c2c8d6a90adc 100644 --- a/ivy/functional/frontends/paddle/manipulation.py +++ b/ivy/functional/frontends/paddle/manipulation.py @@ -208,7 +208,7 @@ def take_along_axis(arr, indices, axis): @with_unsupported_dtypes( - {"2.6.0 and below": ("int8", "uint8", "int16", "float16")}, + {"2.6.0 and below": ("int8", "uint8", "int16", "float16", "bfloat16")}, "paddle", ) @to_ivy_arrays_and_back
kubeflow__pipelines-1666
`pip install kfp` does not install CLI **What happened:** ``` $ virtualenv .venv ... $ pip install kfp==0.1.23 ... $ kfp Traceback (most recent call last): File "/private/tmp/.venv/bin/kfp", line 6, in <module> from kfp.__main__ import main File "/private/tmp/.venv/lib/python3.7/site-packages/kfp/__main__.py", line 15, in <module> from .cli.cli import main ModuleNotFoundError: No module named 'kfp.cli' ``` **What did you expect to happen:** To run the CLI. **Anything else you would like to add:** I could be confused about what is expected to be available after installing the kfp package from pip - setup.py mentions an entrypoint named kfp in https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L74 but main.py imports a `kfp.cli` package https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/kfp/__main__.py#L15 which is not included in the distribution https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L46-L54
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab...
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab...
diff --git a/sdk/python/setup.py b/sdk/python/setup.py index ee581414370..dac70636a4e 100644 --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -45,6 +45,7 @@ install_requires=REQUIRES, packages=[ 'kfp', + 'kfp.cli', 'kfp.compiler', 'kfp.components', 'kfp.components.structures',
streamlit__streamlit-5184
It should be : https://github.com/streamlit/streamlit/blob/535f11765817657892506d6904bbbe04908dbdf3/lib/streamlit/elements/alert.py#L145
[ { "content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
[ { "content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by a...
diff --git a/lib/streamlit/elements/alert.py b/lib/streamlit/elements/alert.py index d9d5f2fe5f82..65458e9162b3 100644 --- a/lib/streamlit/elements/alert.py +++ b/lib/streamlit/elements/alert.py @@ -142,7 +142,7 @@ def success( Example ------- - >>> st.success('This is a success message!', icon:"✅") + >>> st.success('This is a success message!', icon="✅") """ alert_proto = AlertProto()
django-cms__django-cms-1994
make django-admin-style a fixed dependency and add it to the tutorial
[ { "content": "from setuptools import setup, find_packages\nimport os\nimport cms\n\n\nCLASSIFIERS = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n ...
[ { "content": "from setuptools import setup, find_packages\nimport os\nimport cms\n\n\nCLASSIFIERS = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n ...
diff --git a/docs/getting_started/installation.rst b/docs/getting_started/installation.rst index fa131805ebc..6f425a8e2e2 100644 --- a/docs/getting_started/installation.rst +++ b/docs/getting_started/installation.rst @@ -16,6 +16,7 @@ Requirements * `django-mptt`_ 0.5.2 (strict due to API compatibility issues) * `django-sekizai`_ 0.7 or higher * `html5lib`_ 0.90 or higher +* `djangocms-admin-style`_ * `django-i18nurls`_ (if using django 1.3.X) * An installed and working instance of one of the databases listed in the `Databases`_ section. @@ -32,6 +33,7 @@ Requirements .. _django-sekizai: https://github.com/ojii/django-sekizai .. _html5lib: http://code.google.com/p/html5lib/ .. _django-i18nurls: https://github.com/brocaar/django-i18nurls +.. _djangocms-admin-style: https://github.com/divio/djangocms-admin-style Recommended =========== @@ -94,7 +96,7 @@ following is an example requirements.txt file that can be used with pip to insta :: # Bare minimum - django-cms==2.4.1 + django-cms==3.0 #These dependencies are brought in by django-cms, but if you want to lock-in their version, specify them Django==1.5.1 @@ -104,6 +106,7 @@ following is an example requirements.txt file that can be used with pip to insta django-mptt==0.5.2 django-sekizai==0.7 six==1.3.0 + djangocms-admin-style==0.1.2 #Optional, recommended packages Pillow==2.0.0 diff --git a/docs/getting_started/tutorial.rst b/docs/getting_started/tutorial.rst index 59b0d249264..635e4b2d207 100644 --- a/docs/getting_started/tutorial.rst +++ b/docs/getting_started/tutorial.rst @@ -72,6 +72,8 @@ other highly recommended applications/libraries: * ``'menus'``, helper for model independent hierarchical website navigation * ``'south'``, intelligent schema and data migrations * ``'sekizai'``, for javascript and css management +* ``'django_admin_style'``, for the admin skin. You **must** add + ``'django_admin_style'`` in the list before ``'django.contrib.admin'``. 
Also add any (or all) of the following plugins, depending on your needs: diff --git a/docs/upgrade/3.0.rst b/docs/upgrade/3.0.rst index 66686e7b242..9fbbd71a6a1 100644 --- a/docs/upgrade/3.0.rst +++ b/docs/upgrade/3.0.rst @@ -18,7 +18,8 @@ What's new in 3.0 New Frontend Editing ==================== -django CMS 3.0 introduces a new Frontend Editing system. +django CMS 3.0 introduces a new Frontend Editing system as well as a customizable +django admin skin. In the new system, Placeholders and their Plugins are no longer managed in the Admin site, but only from the Frontend. diff --git a/setup.py b/setup.py index 6ef04cabeb4..caaa0a16a63 100644 --- a/setup.py +++ b/setup.py @@ -37,6 +37,7 @@ 'html5lib', 'django-mptt>=0.5.1,<0.5.3', 'django-sekizai>=0.7', + 'djangocms-admin-style' ], tests_require=[ 'django-reversion>=1.6.6',
ansible__ansible-modules-extras-387
Freshly installed bower raises json error I ran into an issue where the ansible bower module when attempting to run bower install can't parse the json from `bower list --json` Here is the stacktrace ``` failed: [default] => {"failed": true, "parsed": false} BECOME-SUCCESS-bcokpjdhrlrcdlrfpmvdgmahrbmtzoqk Traceback (most recent call last): File "/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower", line 1781, in <module> main() File "/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower", line 168, in main installed, missing, outdated = bower.list() File "/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower", line 116, in list data = json.loads(self._exec(cmd, True, False)) File "/usr/lib/python2.7/json/__init__.py", line 338, in loads return _default_decoder.decode(s) File "/usr/lib/python2.7/json/decoder.py", line 366, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/usr/lib/python2.7/json/decoder.py", line 384, in raw_decode raise ValueError("No JSON object could be decoded") ``` So, when I logged into run the bower list --json command manually I saw this ``` vagrant@vagrant-ubuntu-trusty-64:~/catdoor/opus$ bower list --json [?] May bower anonymously report usage statistics to improve the tool over time? Yes ``` Which makes me wonder if a freshly installed bower will always ask that question, thus not producing json output. When i subsquently run the provision it fails the same way.
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software F...
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software F...
diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 3fccf51056b..085f454e639 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -108,7 +108,7 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True): return '' def list(self): - cmd = ['list', '--json'] + cmd = ['list', '--json', '--config.interactive=false', '--allow-root'] installed = list() missing = list()
cupy__cupy-764
cupy.array(cupy_array, oder=None) raises error When I do this: ```python >>> x = cupy.ones(3) >>> xx = cupy.array(x, order=None) ``` I get this traceback: ``` File "[...]/cupy/cupy/creation/from_data.py", line 41, in array return core.array(obj, dtype, copy, order, subok, ndmin) File "cupy/core/core.pyx", line 2026, in cupy.core.core.array cpdef ndarray array(obj, dtype=None, bint copy=True, str order='K', File "cupy/core/core.pyx", line 2039, in cupy.core.core.array a = src.astype(dtype, order=order, copy=copy) File "cupy/core/core.pyx", line 276, in cupy.core.core.ndarray.astype cpdef ndarray astype( File "cupy/core/core.pyx", line 313, in cupy.core.core.ndarray.astype raise TypeError('order not understood') TypeError: order not understood ```
[ { "content": "# flake8: NOQA\n# \"flake8: NOQA\" to suppress warning \"H104 File contains nothing but comments\"\n\n# class s_(object):\n\nimport numpy\nimport six\n\nimport cupy\nfrom cupy import core\nfrom cupy.creation import from_data\nfrom cupy.manipulation import join\n\n\nclass AxisConcatenator(object):...
[ { "content": "# flake8: NOQA\n# \"flake8: NOQA\" to suppress warning \"H104 File contains nothing but comments\"\n\n# class s_(object):\n\nimport numpy\nimport six\n\nimport cupy\nfrom cupy import core\nfrom cupy.creation import from_data\nfrom cupy.manipulation import join\n\n\nclass AxisConcatenator(object):...
diff --git a/cupy/core/core.pyx b/cupy/core/core.pyx index 940f2ce7d32..a27510307ca 100644 --- a/cupy/core/core.pyx +++ b/cupy/core/core.pyx @@ -97,7 +97,7 @@ cdef class ndarray: self.data = memptr self.base = None - if order == 'C': + if order in ('C', None): self._strides = internal.get_contiguous_strides( self._shape, self.itemsize, is_c_contiguous=True) self._c_contiguous = True @@ -309,6 +309,8 @@ cdef class ndarray: if subok is not None: raise TypeError('subok is not supported yet') + if order is None: + order = 'K' if order not in ['C', 'F', 'A', 'K']: raise TypeError('order not understood') diff --git a/cupy/indexing/generate.py b/cupy/indexing/generate.py index d04f53edbd8..bd4afe02696 100644 --- a/cupy/indexing/generate.py +++ b/cupy/indexing/generate.py @@ -275,7 +275,7 @@ def unravel_index(indices, dims, order='C'): .. seealso:: :func:`numpy.unravel_index` """ - if order == 'C': + if order in ('C', None): dims = reversed(dims) elif order == 'F': pass diff --git a/tests/cupy_tests/core_tests/test_ndarray.py b/tests/cupy_tests/core_tests/test_ndarray.py index d1fc1564e83..b0675ff83c1 100644 --- a/tests/cupy_tests/core_tests/test_ndarray.py +++ b/tests/cupy_tests/core_tests/test_ndarray.py @@ -63,6 +63,13 @@ def test_order(self): self.assertTrue(a.flags.f_contiguous) self.assertTrue(not a.flags.c_contiguous) + def test_order_none(self): + a = core.ndarray(self.shape, order=None) + a_cpu = numpy.ndarray(self.shape, order=None) + self.assertEqual(a.flags.c_contiguous, a_cpu.flags.c_contiguous) + self.assertEqual(a.flags.f_contiguous, a_cpu.flags.f_contiguous) + self.assertTupleEqual(a.strides, a_cpu.strides) + @testing.gpu class TestNdarrayInitRaise(unittest.TestCase): diff --git a/tests/cupy_tests/core_tests/test_ndarray_copy_and_view.py b/tests/cupy_tests/core_tests/test_ndarray_copy_and_view.py index b128b4fb712..ab9a85eb906 100644 --- a/tests/cupy_tests/core_tests/test_ndarray_copy_and_view.py +++ 
b/tests/cupy_tests/core_tests/test_ndarray_copy_and_view.py @@ -70,7 +70,7 @@ def test_transposed_fill(self, xp, dtype): b.fill(1) return b - @testing.for_orders('CFAK') + @testing.for_orders(['C', 'F', 'A', 'K', None]) @testing.for_all_dtypes(name='src_dtype', no_complex=True) @testing.for_all_dtypes(name='dst_dtype') @testing.numpy_cupy_array_equal()
databricks__koalas-747
[DO NOT MERGE] Test
[ { "content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-...
[ { "content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-...
diff --git a/requirements-dev.txt b/requirements-dev.txt index 4be44500ee..5e0c474b0f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,5 @@ # Dependencies in Koalas -pandas>=0.23 +pandas>=0.23.2 pyarrow>=0.10 matplotlib>=3.0.0 numpy>=1.14 diff --git a/setup.py b/setup.py index 6b84679c9e..9c7a5547cd 100644 --- a/setup.py +++ b/setup.py @@ -45,7 +45,7 @@ }, python_requires='>=3.5', install_requires=[ - 'pandas>=0.23', + 'pandas>=0.23.2', 'pyarrow>=0.10', 'numpy>=1.14', 'matplotlib>=3.0.0',
ansible-collections__community.aws-1712
Broken example in iam_access_key ### Summary The "Delete the access key" example in the `iam_access_key` module is broken. It's currently: ```yaml - name: Delete the access_key community.aws.iam_access_key: name: example_user access_key_id: AKIA1EXAMPLE1EXAMPLE state: absent ``` There are two issues: - the `name` attribute doesn't exist - it should be `user_name` (or the `username` alias). - the `access_key_id` attribute should just be `id`. The `access_key_id` attribute specifies credentials for the module to use to access the API, not the ID of the access key we're trying to delete (which is specified by `id`). Corrected example: ```yaml - name: Delete the access_key community.aws.iam_access_key: user_name: example_user id: AKIA1EXAMPLE1EXAMPLE state: absent ``` ### Issue Type Documentation Report ### Component Name iam_access_key ### Ansible Version ```console (paste below) ansible [core 2.14.2] config file = None configured module search path = ['/Users/grt006/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/lib/python3.10/site-packages/ansible ansible collection location = /Users/grt006/.ansible/collections:/usr/share/ansible/collections executable location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/ansible python version = 3.10.9 (main, Dec 15 2022, 17:11:09) [Clang 14.0.0 (clang-1400.0.29.202)] (/Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/python) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 
4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 
t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) CONFIG_FILE() = None ``` ### OS / Environment Linux ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
[ { "content": "#!/usr/bin/python\n# Copyright (c) 2021 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: iam_access_key\nve...
[ { "content": "#!/usr/bin/python\n# Copyright (c) 2021 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: iam_access_key\nve...
diff --git a/changelogs/fragments/iam_access_key_docs_fix.yml b/changelogs/fragments/iam_access_key_docs_fix.yml new file mode 100644 index 00000000000..f47a15eb91f --- /dev/null +++ b/changelogs/fragments/iam_access_key_docs_fix.yml @@ -0,0 +1,2 @@ +trivial: + - iam_access_key - Use correct parameter names in the docs example section (https://github.com/ansible-collections/community.aws/pull/1711). \ No newline at end of file diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py index 1d5701e9d74..6e3f47bfd4b 100644 --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -68,8 +68,8 @@ - name: Delete the access_key community.aws.iam_access_key: - name: example_user - access_key_id: AKIA1EXAMPLE1EXAMPLE + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE state: absent '''
ansible-collections__community.aws-1713
Broken example in iam_access_key ### Summary The "Delete the access key" example in the `iam_access_key` module is broken. It's currently: ```yaml - name: Delete the access_key community.aws.iam_access_key: name: example_user access_key_id: AKIA1EXAMPLE1EXAMPLE state: absent ``` There are two issues: - the `name` attribute doesn't exist - it should be `user_name` (or the `username` alias). - the `access_key_id` attribute should just be `id`. The `access_key_id` attribute specifies credentials for the module to use to access the API, not the ID of the access key we're trying to delete (which is specified by `id`). Corrected example: ```yaml - name: Delete the access_key community.aws.iam_access_key: user_name: example_user id: AKIA1EXAMPLE1EXAMPLE state: absent ``` ### Issue Type Documentation Report ### Component Name iam_access_key ### Ansible Version ```console (paste below) ansible [core 2.14.2] config file = None configured module search path = ['/Users/grt006/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/lib/python3.10/site-packages/ansible ansible collection location = /Users/grt006/.ansible/collections:/usr/share/ansible/collections executable location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/ansible python version = 3.10.9 (main, Dec 15 2022, 17:11:09) [Clang 14.0.0 (clang-1400.0.29.202)] (/Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/python) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 
4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 
t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) CONFIG_FILE() = None ``` ### OS / Environment Linux ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
[ { "content": "#!/usr/bin/python\n# Copyright (c) 2021 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: iam_access_key\nve...
[ { "content": "#!/usr/bin/python\n# Copyright (c) 2021 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: iam_access_key\nve...
diff --git a/changelogs/fragments/iam_access_key_docs_fix.yml b/changelogs/fragments/iam_access_key_docs_fix.yml new file mode 100644 index 00000000000..f47a15eb91f --- /dev/null +++ b/changelogs/fragments/iam_access_key_docs_fix.yml @@ -0,0 +1,2 @@ +trivial: + - iam_access_key - Use correct parameter names in the docs example section (https://github.com/ansible-collections/community.aws/pull/1711). \ No newline at end of file diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py index 3207741ab94..ad61b5b2ad3 100644 --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -69,8 +69,8 @@ - name: Delete the access_key community.aws.iam_access_key: - name: example_user - access_key_id: AKIA1EXAMPLE1EXAMPLE + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE state: absent '''
ansible-collections__community.aws-1711
Broken example in iam_access_key ### Summary The "Delete the access key" example in the `iam_access_key` module is broken. It's currently: ```yaml - name: Delete the access_key community.aws.iam_access_key: name: example_user access_key_id: AKIA1EXAMPLE1EXAMPLE state: absent ``` There are two issues: - the `name` attribute doesn't exist - it should be `user_name` (or the `username` alias). - the `access_key_id` attribute should just be `id`. The `access_key_id` attribute specifies credentials for the module to use to access the API, not the ID of the access key we're trying to delete (which is specified by `id`). Corrected example: ```yaml - name: Delete the access_key community.aws.iam_access_key: user_name: example_user id: AKIA1EXAMPLE1EXAMPLE state: absent ``` ### Issue Type Documentation Report ### Component Name iam_access_key ### Ansible Version ```console (paste below) ansible [core 2.14.2] config file = None configured module search path = ['/Users/grt006/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/lib/python3.10/site-packages/ansible ansible collection location = /Users/grt006/.ansible/collections:/usr/share/ansible/collections executable location = /Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/ansible python version = 3.10.9 (main, Dec 15 2022, 17:11:09) [Clang 14.0.0 (clang-1400.0.29.202)] (/Users/grt006/ws/argocd/.scratch/external_secrets/iam/ansible/.venv/bin/python) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version ----------------------------- ------- amazon.aws 5.2.0 ansible.netcommon 4.1.0 ansible.posix 1.5.1 ansible.utils 2.9.0 ansible.windows 1.13.0 arista.eos 6.0.0 awx.awx 21.11.0 azure.azcollection 1.14.0 check_point.mgmt 4.0.0 chocolatey.chocolatey 1.4.0 cisco.aci 2.3.0 cisco.asa 4.0.0 cisco.dnac 6.6.3 cisco.intersight 1.0.23 cisco.ios 
4.3.1 cisco.iosxr 4.1.0 cisco.ise 2.5.12 cisco.meraki 2.15.0 cisco.mso 2.2.1 cisco.nso 1.0.3 cisco.nxos 4.0.1 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.4 community.aws 5.2.0 community.azure 2.0.0 community.ciscosmb 1.0.5 community.crypto 2.10.0 community.digitalocean 1.23.0 community.dns 2.5.0 community.docker 3.4.0 community.fortios 1.0.0 community.general 6.3.0 community.google 1.0.0 community.grafana 1.5.3 community.hashi_vault 4.1.0 community.hrobot 1.7.0 community.libvirt 1.2.0 community.mongodb 1.4.2 community.mysql 3.5.1 community.network 5.0.0 community.okd 2.2.0 community.postgresql 2.3.2 community.proxysql 1.5.1 community.rabbitmq 1.2.3 community.routeros 2.7.0 community.sap 1.0.0 community.sap_libs 1.4.0 community.skydive 1.0.0 community.sops 1.6.0 community.vmware 3.3.0 community.windows 1.12.0 community.zabbix 1.9.1 containers.podman 1.10.1 cyberark.conjur 1.2.0 cyberark.pas 1.0.17 dellemc.enterprise_sonic 2.0.0 dellemc.openmanage 6.3.0 dellemc.os10 1.1.1 dellemc.os6 1.0.7 dellemc.os9 1.0.4 dellemc.powerflex 1.5.0 dellemc.unity 1.5.0 f5networks.f5_modules 1.22.0 fortinet.fortimanager 2.1.7 fortinet.fortios 2.2.2 frr.frr 2.0.0 gluster.gluster 1.0.2 google.cloud 1.1.2 grafana.grafana 1.1.0 hetzner.hcloud 1.9.1 hpe.nimble 1.1.4 ibm.qradar 2.1.0 ibm.spectrum_virtualize 1.11.0 infinidat.infinibox 1.3.12 infoblox.nios_modules 1.4.1 inspur.ispim 1.2.0 inspur.sm 2.3.0 junipernetworks.junos 4.1.0 kubernetes.core 2.3.2 lowlydba.sqlserver 1.3.1 mellanox.onyx 1.0.0 netapp.aws 21.7.0 netapp.azure 21.10.0 netapp.cloudmanager 21.22.0 netapp.elementsw 21.7.0 netapp.ontap 22.2.0 netapp.storagegrid 21.11.1 netapp.um_info 21.8.0 netapp_eseries.santricity 1.4.0 netbox.netbox 3.10.0 ngine_io.cloudstack 2.3.0 ngine_io.exoscale 1.0.0 ngine_io.vultr 1.1.3 openstack.cloud 1.10.0 openvswitch.openvswitch 2.1.0 ovirt.ovirt 2.4.1 purestorage.flasharray 1.16.2 purestorage.flashblade 1.10.0 purestorage.fusion 1.3.0 sensu.sensu_go 1.13.2 splunk.es 2.1.0 
t_systems_mms.icinga_director 1.32.0 theforeman.foreman 3.8.0 vmware.vmware_rest 2.2.0 vultr.cloud 1.7.0 vyos.vyos 4.0.0 wti.remote 1.0.4 ``` ### Configuration ```console (paste below) CONFIG_FILE() = None ``` ### OS / Environment Linux ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
[ { "content": "#!/usr/bin/python\n# Copyright (c) 2021 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: iam_access_key\nve...
[ { "content": "#!/usr/bin/python\n# Copyright (c) 2021 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: iam_access_key\nve...
diff --git a/changelogs/fragments/iam_access_key_docs_fix.yml b/changelogs/fragments/iam_access_key_docs_fix.yml new file mode 100644 index 00000000000..f47a15eb91f --- /dev/null +++ b/changelogs/fragments/iam_access_key_docs_fix.yml @@ -0,0 +1,2 @@ +trivial: + - iam_access_key - Use correct parameter names in the docs example section (https://github.com/ansible-collections/community.aws/pull/1711). \ No newline at end of file diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py index ab3e9110604..32220a216e3 100644 --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -69,8 +69,8 @@ - name: Delete the access_key community.aws.iam_access_key: - name: example_user - access_key_id: AKIA1EXAMPLE1EXAMPLE + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE state: absent '''
pytorch__vision-1501
Deprecate PILLOW_VERSION torchvision now uses PILLOW_VERSION https://github.com/pytorch/vision/blob/1e857d93c8de081e61695dd43e6f06e3e7c2b0a2/torchvision/transforms/functional.py#L5 However, this constant is deprecated in Pillow 5.2, and soon to be removed completely: https://github.com/python-pillow/Pillow/blob/master/CHANGES.rst#700-unreleased
[ { "content": "from __future__ import division\nimport torch\nimport sys\nimport math\nfrom PIL import Image, ImageOps, ImageEnhance, PILLOW_VERSION\ntry:\n import accimage\nexcept ImportError:\n accimage = None\nimport numpy as np\nimport numbers\nimport collections\nimport warnings\n\nif sys.version_info...
[ { "content": "from __future__ import division\nimport torch\nimport sys\nimport math\nfrom PIL import Image, ImageOps, ImageEnhance, __version__ as PILLOW_VERSION\ntry:\n import accimage\nexcept ImportError:\n accimage = None\nimport numpy as np\nimport numbers\nimport collections\nimport warnings\n\nif s...
diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 6f43d5d263f..a8fdbef86bf 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -2,7 +2,7 @@ import torch import sys import math -from PIL import Image, ImageOps, ImageEnhance, PILLOW_VERSION +from PIL import Image, ImageOps, ImageEnhance, __version__ as PILLOW_VERSION try: import accimage except ImportError:
bridgecrewio__checkov-1497
checkov fails with junit-xml==1.8 **Describe the bug** checkov fails with junit-xml==1.8 **To Reproduce** Steps to reproduce the behavior: 1. pip3 install junit-xml==1.8 2. checkov -d . 3. See error: ``` Traceback (most recent call last): File "/usr/local/bin/checkov", line 2, in <module> from checkov.main import run File "/opt/rh/rh-python38/root/usr/local/lib/python3.8/site-packages/checkov/main.py", line 12, in <module> from checkov.arm.runner import Runner as arm_runner File "/opt/rh/rh-python38/root/usr/local/lib/python3.8/site-packages/checkov/arm/runner.py", line 7, in <module> from checkov.common.output.report import Report File "/opt/rh/rh-python38/root/usr/local/lib/python3.8/site-packages/checkov/common/output/report.py", line 5, in <module> from junit_xml import TestCase, TestSuite, to_xml_report_string ImportError: cannot import name 'to_xml_report_string' from 'junit_xml' (/opt/rh/rh-python38/root/usr/local/lib/python3.8/site-packages/junit_xml/__init__.py) ``` **Expected behavior** checkov runs fine with junit-xml==1.9 so a reasonable fix would be to pin that version (or greater) in setup.py install_requires. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: RHEL 7 - Checkov Version [e.g. 22]: 2.0.350 **Additional context** Add any other context about the problem here (e.g. code snippets).
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\")...
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\")...
diff --git a/Pipfile b/Pipfile index ef3df7dbb1..c2f19fac07 100644 --- a/Pipfile +++ b/Pipfile @@ -23,7 +23,7 @@ deep_merge = "*" tabulate = "*" colorama="*" termcolor="*" -junit-xml ="*" +junit-xml = ">=1.9" dpath = ">=1.5.0,<2" pyyaml = ">=5.4.1" boto3 = "==1.17.*" diff --git a/Pipfile.lock b/Pipfile.lock index 0ba6bf46d4..4e5d5b33a4 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "8dded0accadc2382e9bf421a3643aa1a4eb0a7ced54bffdbcb0a8e0e5502f2ac" + "sha256": "59ae28dfc33196758545ef134178198dde9a1bbf23289701f45c74a5aac9efe4" }, "pipfile-spec": 6, "requires": { @@ -183,11 +183,11 @@ }, "importlib-metadata": { "hashes": [ - "sha256:0645585859e9a6689c523927a5032f2ba5919f1f7d0e84bd4533312320de1ff9", - "sha256:51c6635429c77cf1ae634c997ff9e53ca3438b495f10a55ba28594dd69764a8b" + "sha256:7b30a78db2922d78a6f47fb30683156a14f3c6aa5cc23f77cc8967e9ab2d002f", + "sha256:ed5157fef23a4bc4594615a0dd8eba94b2bb36bf2a343fa3d8bb2fa0a62a99d5" ], "index": "pypi", - "version": "==4.6.3" + "version": "==4.6.4" }, "jinja2": { "hashes": [ @@ -233,30 +233,50 @@ "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b", "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567", "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff", + "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724", "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74", + "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646", "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35", + "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6", + "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6", + "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad", "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26", + 
"sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38", + "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac", "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7", + "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6", "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75", "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f", "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135", "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8", + "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a", "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a", + "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9", + "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864", "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914", "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18", "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8", "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2", "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d", + "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b", "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b", "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f", "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb", "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833", + "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28", "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415", "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902", + "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d", 
"sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9", "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d", + "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145", "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066", + "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c", + "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1", "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f", + "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53", + "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134", + "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85", "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5", "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94", "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509", @@ -420,11 +440,11 @@ }, "tqdm": { "hashes": [ - "sha256:3642d483b558eec80d3c831e23953582c34d7e4540db86d9e5ed9dad238dabc6", - "sha256:706dea48ee05ba16e936ee91cb3791cd2ea6da348a0e50b46863ff4363ff4340" + "sha256:07856e19a1fe4d2d9621b539d3f072fa88c9c1ef1f3b7dd4d4953383134c3164", + "sha256:35540feeaca9ac40c304e916729e6b78045cbbeccd3e941b2868f09306798ac9" ], "index": "pypi", - "version": "==4.62.0" + "version": "==4.62.1" }, "typing-extensions": { "hashes": [ @@ -453,11 +473,11 @@ }, "websocket-client": { "hashes": [ - "sha256:4cf754af7e3b3ba76589d49f9e09fd9a6c0aae9b799a89124d656009c01a261d", - "sha256:8d07f155f8ed14ae3ced97bd7582b08f280bb1bfd27945f023ba2aceff05ab52" + "sha256:0133d2f784858e59959ce82ddac316634229da55b498aac311f1620567a710ec", + "sha256:8dfb715d8a992f5712fff8c843adae94e22b22a99b2c5e6b0ec4a1a981cc4e0d" ], "markers": "python_version >= '3.6'", - "version": "==1.1.1" + "version": "==1.2.1" }, "zipp": { "hashes": [ diff --git a/setup.py b/setup.py index d9f0cd62a1..991fef5940 100644 --- 
a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ "tabulate", "colorama", "termcolor", - "junit-xml", + "junit-xml>=1.9", "dpath>=1.5.0,<2", "pyyaml>=5.4.1", "boto3==1.17.*",
dbt-labs__dbt-core-5507
[CT-876] Could we also now remove our upper bound on `MarkupSafe`, which we put in place earlier this year due to incompatibility with Jinja2? Remove our upper bound on `MarkupSafe`, which we put in place earlier this year due to incompatibility with Jinja2(#4745). Also bump minimum requirement to match [Jinja2's requirements](https://github.com/pallets/jinja/blob/1c4066a4fad5aaeb2ac55809d1d38477cd23a0f6/setup.py#L6).
[ { "content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find...
[ { "content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find...
diff --git a/.changes/unreleased/Dependencies-20220721-093233.yaml b/.changes/unreleased/Dependencies-20220721-093233.yaml new file mode 100644 index 00000000000..f5c623e9581 --- /dev/null +++ b/.changes/unreleased/Dependencies-20220721-093233.yaml @@ -0,0 +1,7 @@ +kind: Dependencies +body: Remove pin for MarkUpSafe from >=0.23,<2.1 +time: 2022-07-21T09:32:33.494002-05:00 +custom: + Author: emmyoop + Issue: "5506" + PR: "5507" diff --git a/core/setup.py b/core/setup.py index 2aa2340f10c..d8b415e1b0a 100644 --- a/core/setup.py +++ b/core/setup.py @@ -49,7 +49,6 @@ }, install_requires=[ "Jinja2==3.1.2", - "MarkupSafe>=0.23,<2.1", "agate>=1.6,<1.6.4", "click>=7.0,<9", "colorama>=0.3.9,<0.4.6",
microsoft__Qcodes-867
missing dependency`jsonschema` in requirements.txt The latest pip installable version of QCoDeS does not list jsonschema as a dependency but requires it. This problem came to light when running tests on a project that depeneds on QCoDeS. Part of my build script installs qcodes (pip install qcodes). Importing qcodes then raises an exception because jsonschema is missing.
[ { "content": "from setuptools import setup, find_packages\nfrom distutils.version import StrictVersion\nfrom importlib import import_module\nimport re\n\ndef get_version(verbose=1):\n \"\"\" Extract version information from source code \"\"\"\n\n try:\n with open('qcodes/version.py', 'r') as f:\n ...
[ { "content": "from setuptools import setup, find_packages\nfrom distutils.version import StrictVersion\nfrom importlib import import_module\nimport re\n\ndef get_version(verbose=1):\n \"\"\" Extract version information from source code \"\"\"\n\n try:\n with open('qcodes/version.py', 'r') as f:\n ...
diff --git a/docs_requirements.txt b/docs_requirements.txt index a8abb189efe..e9b853631b8 100644 --- a/docs_requirements.txt +++ b/docs_requirements.txt @@ -1,6 +1,5 @@ sphinx sphinx_rtd_theme -jsonschema sphinxcontrib-jsonschema nbconvert ipython diff --git a/requirements.txt b/requirements.txt index 32d5add6fb3..77f0dd3edbc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -numpy==1.13.1 +numpy==1.13.3 matplotlib==2.0.2 pyqtgraph==0.10.0 PyVISA==1.8 @@ -6,3 +6,4 @@ PyQt5==5.9 sip==4.19.3 QtPy==1.3.1 h5py==2.7.1 +jsonschema diff --git a/setup.py b/setup.py index d8ca9bf8632..2dcd4cef701 100644 --- a/setup.py +++ b/setup.py @@ -61,7 +61,8 @@ def readme(): 'numpy>=1.10', 'pyvisa>=1.8', 'h5py>=2.6', - 'websockets>=3.2,<3.4' + 'websockets>=3.2,<3.4', + 'jsonschema' ], test_suite='qcodes.tests',
cupy__cupy-1944
incorrect FFT results for Fortran-order arrays? * Conditions (from `python -c 'import cupy; cupy.show_config()'`) Tested in two environments with different CuPy versions: ```bash CuPy Version : 4.4.1 CUDA Root : /usr/local/cuda CUDA Build Version : 9010 CUDA Driver Version : 9010 CUDA Runtime Version : 9010 cuDNN Build Version : 7102 cuDNN Version : 7102 NCCL Build Version : 2115 ``` and (this CuPy is built from the latest master branch) ``` CuPy Version : 6.0.0b1 CUDA Root : /usr/local/cuda CUDA Build Version : 9010 CUDA Driver Version : 9010 CUDA Runtime Version : 9010 cuDNN Build Version : None cuDNN Version : None NCCL Build Version : None ``` * Code to reproduce ```python import numpy as np import cupy as cp AXES=[(0,), (1,), (2,), (0,1), (1,2), (0,2), (0,1,2)] a_np = np.random.random((3,4,5))+1j*np.random.random((3,4,5)) print("In C order:") a_np = np.ascontiguousarray(a_np) a_cp = cp.asarray(a_np) a_cp = cp.ascontiguousarray(a_cp) assert np.allclose(cp.asnumpy(a_cp), a_np) for axes in AXES: result_np = np.fft.fftn(a_np, axes=axes) result_cp = cp.fft.fftn(a_cp, axes=axes) print(axes, ":", np.allclose(cp.asnumpy(result_cp), result_np)) print("\nIn F order:") a_np = np.asfortranarray(a_np) a_cp = cp.asarray(a_np) a_cp = cp.asfortranarray(a_cp) assert np.allclose(cp.asnumpy(a_cp), a_np) for axes in AXES: result_np = np.fft.fftn(a_np, axes=axes) result_cp = cp.fft.fftn(a_cp, axes=axes) print(axes, ":", np.allclose(cp.asnumpy(result_cp), result_np)) ``` * Error messages, stack traces, or logs The outputs from both environments are identical: ```bash In C order: (0,) : True (1,) : True (2,) : True (0, 1) : True (1, 2) : True (0, 2) : True (0, 1, 2) : True In F order: (0,) : False (1,) : True (2,) : False (0, 1) : True (1, 2) : False (0, 2) : False (0, 1, 2) : True ``` But it's expected to be `True` for all of the axes choices. 
It seems to me the bug is not introduced by the recent changes in adding support for cuFFT plans (#1669, #1745, #1746) but by something much older. For now I have not yet tracked down the problem, will update here if I find it. I hope I didn't do something stupid in the test... Thanks.
[ { "content": "from copy import copy\n\nimport six\n\nimport numpy as np\n\nimport cupy\nfrom cupy.cuda import cufft\nfrom math import sqrt\nfrom cupy.fft import config\n\n\ndef _output_dtype(a, value_type):\n if value_type != 'R2C':\n if a.dtype in [np.float16, np.float32]:\n return np.comp...
[ { "content": "from copy import copy\n\nimport six\n\nimport numpy as np\n\nimport cupy\nfrom cupy.cuda import cufft\nfrom math import sqrt\nfrom cupy.fft import config\n\n\ndef _output_dtype(a, value_type):\n if value_type != 'R2C':\n if a.dtype in [np.float16, np.float32]:\n return np.comp...
diff --git a/cupy/fft/fft.py b/cupy/fft/fft.py index e72a4906f5c..cee323e6c2f 100644 --- a/cupy/fft/fft.py +++ b/cupy/fft/fft.py @@ -76,7 +76,7 @@ def _exec_fft(a, direction, value_type, norm, axis, overwrite_x, if axis % a.ndim != a.ndim - 1: a = a.swapaxes(axis, -1) - if a.base is not None: + if a.base is not None or not a.flags.c_contiguous: a = a.copy() if out_size is None: diff --git a/tests/cupy_tests/fft_tests/test_fft.py b/tests/cupy_tests/fft_tests/test_fft.py index a93de2d2679..1ad15a6514d 100644 --- a/tests/cupy_tests/fft_tests/test_fft.py +++ b/tests/cupy_tests/fft_tests/test_fft.py @@ -84,6 +84,45 @@ def test_ifft(self, xp, dtype): return out +@testing.parameterize(*testing.product({ + 'shape': [(10, 10), (10, 5, 10)], + 'data_order': ['F', 'C'], + 'axis': [0, 1, -1], +})) +@testing.gpu +@testing.with_requires('numpy>=1.10.0') +class TestFftOrder(unittest.TestCase): + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError, + contiguous_check=False) + def test_fft(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + if self.data_order == 'F': + a = xp.asfortranarray(a) + out = xp.fft.fft(a, axis=self.axis) + + # np.fft.fft alway returns np.complex128 + if xp == np and dtype in [np.float16, np.float32, np.complex64]: + out = out.astype(np.complex64) + + return out + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError, + contiguous_check=False) + def test_ifft(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + if self.data_order == 'F': + a = xp.asfortranarray(a) + out = xp.fft.ifft(a, axis=self.axis) + + if xp == np and dtype in [np.float16, np.float32, np.complex64]: + out = out.astype(np.complex64) + + return out + + @testing.gpu class TestDefaultPlanType(unittest.TestCase):
privacyidea__privacyidea-1746
Fix typo in registration token The example of the registration token contains a typo. The toketype of course is a "registration" token, not a "register".
[ { "content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Aug 12, 2014 Cornelius Kölbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2015-01-29 Adapt during migration to flask\n# Cornelius Kölbel <cornelius@privacyidea.org>\n#\n# This code is free software; you can redistr...
[ { "content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Aug 12, 2014 Cornelius Kölbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2015-01-29 Adapt during migration to flask\n# Cornelius Kölbel <cornelius@privacyidea.org>\n#\n# This code is free software; you can redistr...
diff --git a/privacyidea/lib/tokens/registrationtoken.py b/privacyidea/lib/tokens/registrationtoken.py index 54beeb5ed4..8aa4df8c8f 100644 --- a/privacyidea/lib/tokens/registrationtoken.py +++ b/privacyidea/lib/tokens/registrationtoken.py @@ -64,7 +64,7 @@ class RegistrationTokenClass(PasswordTokenClass): Host: example.com Accept: application/json - type=register + type=registration user=cornelius realm=realm1
getpelican__pelican-1507
abbr support doesn't work for multiline Eg: ``` rst this is an :abbr:`TLA (Three Letter Abbreviation)` ``` will output `<abbr>TLA (Three Letter Abbreviation)</abbr>` instead of `<abbr title="Three Letter Abbreviation">TLA</abbr>` I believe this could be fixed by adding the `re.M` flag to the `re.compile` call on this line: https://github.com/getpelican/pelican/blob/636fd6cc380f2537924532a587c70e96a386e25c/pelican/rstdirectives.py#L101 This refs ticket #395
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nfrom docutils import nodes, utils\nfrom docutils.parsers.rst import directives, roles, Directive\nfrom pygments.formatters import HtmlFormatter\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer...
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nfrom docutils import nodes, utils\nfrom docutils.parsers.rst import directives, roles, Directive\nfrom pygments.formatters import HtmlFormatter\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer...
diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py index 1bf6971ca..1c25cc42a 100644 --- a/pelican/rstdirectives.py +++ b/pelican/rstdirectives.py @@ -70,7 +70,7 @@ def run(self): directives.register_directive('sourcecode', Pygments) -_abbr_re = re.compile('\((.*)\)$') +_abbr_re = re.compile('\((.*)\)$', re.DOTALL) class abbreviation(nodes.Inline, nodes.TextElement): diff --git a/pelican/tests/test_rstdirectives.py b/pelican/tests/test_rstdirectives.py new file mode 100644 index 000000000..ae863b309 --- /dev/null +++ b/pelican/tests/test_rstdirectives.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function + +from mock import Mock +from pelican.tests.support import unittest + +class Test_abbr_role(unittest.TestCase): + def call_it(self, text): + from pelican.rstdirectives import abbr_role + rawtext = text + lineno = 42 + inliner = Mock(name='inliner') + nodes, system_messages = abbr_role( + 'abbr', rawtext, text, lineno, inliner) + self.assertEqual(system_messages, []) + self.assertEqual(len(nodes), 1) + return nodes[0] + + def test(self): + node = self.call_it("Abbr (Abbreviation)") + self.assertEqual(node.astext(), "Abbr") + self.assertEqual(node['explanation'], "Abbreviation") + + def test_newlines_in_explanation(self): + node = self.call_it("CUL (See you\nlater)") + self.assertEqual(node.astext(), "CUL") + self.assertEqual(node['explanation'], "See you\nlater") + + def test_newlines_in_abbr(self): + node = self.call_it("US of\nA \n (USA)") + self.assertEqual(node.astext(), "US of\nA") + self.assertEqual(node['explanation'], "USA")
awslabs__gluonts-644
Index of forecast is wrong in multivariate Time Series ## Description When forecasting multivariate Time Series the index has the length of the target dimension instead of the prediction length ## To Reproduce ```python import numpy as np from gluonts.dataset.common import ListDataset from gluonts.distribution import MultivariateGaussianOutput from gluonts.model.deepar import DeepAREstimator from gluonts.trainer import Trainer from gluonts.evaluation.backtest import make_evaluation_predictions train_dataset = ListDataset( data_iter=[ { "start": "2019-01-01 00:00:00", "target": np.ones(shape=(4, 4)), }, ], freq="W", one_dim_target=False, ) test_dataset = ListDataset( data_iter=[ { "start": "2019-01-01 00:00:00", "target": np.ones(shape=(4, 5)), }, ], freq="W", one_dim_target=False, ) estimator = DeepAREstimator( 'W', prediction_length=1, trainer=Trainer(epochs=3, hybridize=False), distr_output=MultivariateGaussianOutput(dim=4), ) predictor = estimator.train(train_dataset) forecast_it, ts_it = make_evaluation_predictions(dataset=test_dataset, predictor=predictor, num_samples=10) forecast_list = list(forecast_it) ts_list = list(ts_it) ``` ## Error Message DatetimeIndex(['2019-02-03', '2019-02-10', '2019-02-17', '2019-02-24'], dtype='datetime64[ns]', freq='W-SUN') While it should only be ['2019-02-03'] ## Environment - Operating system: Amazon Linux - Python version: 3.6 - GluonTS version: a96a0cc4 internal
[ { "content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICE...
[ { "content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICE...
diff --git a/src/gluonts/model/forecast.py b/src/gluonts/model/forecast.py index 19ffc7abc1..6ad91fe9da 100644 --- a/src/gluonts/model/forecast.py +++ b/src/gluonts/model/forecast.py @@ -373,7 +373,7 @@ def prediction_length(self): """ Time length of the forecast. """ - return self.samples.shape[-1] + return self.samples.shape[1] @property def mean(self) -> np.ndarray: diff --git a/test/model/test_forecast.py b/test/model/test_forecast.py index d05c8f231b..252f550e3b 100644 --- a/test/model/test_forecast.py +++ b/test/model/test_forecast.py @@ -94,3 +94,38 @@ def percentile(value): assert forecast.prediction_length == pred_length assert len(forecast.index) == pred_length assert forecast.index[0] == pd.Timestamp(START_DATE) + + +@pytest.mark.parametrize( + "forecast, exp_index", + [ + ( + SampleForecast( + samples=np.random.normal(size=(100, 7, 3)), + start_date=pd.Timestamp("2020-01-01 00:00:00"), + freq="1D", + ), + pd.date_range( + start=pd.Timestamp("2020-01-01 00:00:00"), + freq="1D", + periods=7, + ), + ), + ( + DistributionForecast( + Uniform( + low=mx.nd.zeros(shape=(5, 2)), + high=mx.nd.ones(shape=(5, 2)), + ), + start_date=pd.Timestamp("2020-01-01 00:00:00"), + freq="W", + ), + pd.date_range( + start=pd.Timestamp("2020-01-01 00:00:00"), freq="W", periods=5, + ), + ), + ], +) +def test_forecast_multivariate(forecast, exp_index): + assert forecast.prediction_length == len(exp_index) + assert np.all(forecast.index == exp_index)
python__peps-3263
Infra: Check Sphinx warnings on CI This is similar to what we have in the CPython repo, most recently: https://github.com/python/cpython/pull/106460, and will help us gradually remove Sphinx warnings, and avoid new ones being introduces. It checks three things: 1. If a file previously had no warnings (not listed in `.nitignore`), and new ones are introduced, it fails * -> To prevent regressions 2. If a file previously had warnings (it's lsited in `.nitignore`), but now has none, it fails and tells us to remove it from `.nitignore` * To help us incrementally improve over time 3. If a file previously had warnings (it's listed in `.nitignore`), and still has warnings, it doesn't fail, but it will annotate the PR to show the warning * To make them more visible, and give us the opportunity to fix them I've intentionally kept the code and layout as close as possible to the CPython version (see https://github.com/python/cpython/tree/main/Doc/tools) for easier future maintenance. <!-- readthedocs-preview pep-previews start --> ---- :books: Documentation preview :books:: https://pep-previews--3213.org.readthedocs.build/ <!-- readthedocs-preview pep-previews end -->
[ { "content": "# This file is placed in the public domain or under the\n# CC0-1.0-Universal license, whichever is more permissive.\n\n\"\"\"Configuration for building PEPs using Sphinx.\"\"\"\n\nfrom pathlib import Path\nimport sys\n\nsys.path.append(str(Path(\".\").absolute()))\n\n# -- Project information -----...
[ { "content": "# This file is placed in the public domain or under the\n# CC0-1.0-Universal license, whichever is more permissive.\n\n\"\"\"Configuration for building PEPs using Sphinx.\"\"\"\n\nfrom pathlib import Path\nimport sys\n\nsys.path.append(str(Path(\".\").absolute()))\n\n# -- Project information -----...
diff --git a/conf.py b/conf.py index 8e2ae485f06..95a1debd451 100644 --- a/conf.py +++ b/conf.py @@ -45,6 +45,9 @@ "pep-0012/pep-NNNN.rst", ] +# Warn on missing references +nitpicky = True + # Intersphinx configuration intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None),
azavea__raster-vision-1557
Query is invisible in interactive docs search ## 🐛 Bug When I search for something in the docs using the new interactive search bar it seems to work except the query is not visible in the search box. Instead a bunch of dots appear. This was in Chrome Version 107.0.5304.110 (Official Build) (arm64) with the extension turned off. ![Screen Shot 2022-11-14 at 12 40 05 PM (2)](https://user-images.githubusercontent.com/1896461/201729357-53cc9725-0327-413c-91da-979dac0ea7e4.png) Query is invisible in interactive docs search ## 🐛 Bug When I search for something in the docs using the new interactive search bar it seems to work except the query is not visible in the search box. Instead a bunch of dots appear. This was in Chrome Version 107.0.5304.110 (Official Build) (arm64) with the extension turned off. ![Screen Shot 2022-11-14 at 12 40 05 PM (2)](https://user-images.githubusercontent.com/1896461/201729357-53cc9725-0327-413c-91da-979dac0ea7e4.png) Query is invisible in interactive docs search ## 🐛 Bug When I search for something in the docs using the new interactive search bar it seems to work except the query is not visible in the search box. Instead a bunch of dots appear. This was in Chrome Version 107.0.5304.110 (Official Build) (arm64) with the extension turned off. ![Screen Shot 2022-11-14 at 12 40 05 PM (2)](https://user-images.githubusercontent.com/1896461/201729357-53cc9725-0327-413c-91da-979dac0ea7e4.png)
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\nfrom typing import TYPE_CHECKING, List\nimport...
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\nfrom typing import TYPE_CHECKING, List\nimport...
diff --git a/.readthedocs.yml b/.readthedocs.yml index a0bdb3900..e99691356 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -45,4 +45,4 @@ python: search: ranking: # down-rank source code pages - '*/_modules/*': -10 + _modules/*: -10 diff --git a/docs/conf.py b/docs/conf.py index 28bf2a200..5fe593053 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -107,8 +107,6 @@ def setup(app: 'Sphinx') -> None: 'sphinx_gallery.load_style', # add a copy button to code blocks 'sphinx_copybutton', - # search-as-you-type - 'sphinx_search.extension', ] ######################### diff --git a/docs/requirements.txt b/docs/requirements.txt index 3508e6c1d..983790dac 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,7 +4,6 @@ myst-parser==0.18.1 autodoc-pydantic==1.8.0 nbsphinx==0.8.9 sphinx-copybutton==0.5.* -readthedocs-sphinx-search==0.1.* # update when this is resolved: https://github.com/spatialaudio/nbsphinx/issues/655 sphinx-gallery>=0.10,<0.11
scrapy__scrapy-5880
_sent_failed cut the errback chain in MailSender `MailSender._sent_failed` return `None`, instead of `failure`. This cut the errback call chain, making impossible to detect in the code fail in the mails in client code.
[ { "content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nimport logging\nfrom email import encoders as Encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nfrom email.mime.te...
[ { "content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nimport logging\nfrom email import encoders as Encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nfrom email.mime.te...
diff --git a/scrapy/mail.py b/scrapy/mail.py index 43115c53ea9..c11f3898d0d 100644 --- a/scrapy/mail.py +++ b/scrapy/mail.py @@ -164,6 +164,7 @@ def _sent_failed(self, failure, to, cc, subject, nattachs): "mailerr": errstr, }, ) + return failure def _sendmail(self, to_addrs, msg): from twisted.internet import reactor
OpenEnergyPlatform__oeplatform-1338
Django compressor seems to produce unexpected cache behavior. ## Description of the issue @Darynarli and myself experienced unexpected behavior that is triggered by the new package `django-compression`. This behavior prevents updating the compressed sources like js or css files entirely. This also happens somewhat silent, because the compressed files (e.g. `static/CACHE/js/....`) are created as expected (Using the management command `manage.py compress`). The first error can be found if the template that imports the source script (html-template) code is inspected using the dev-tool (e.g. chrome). It was observed in the local dev-environments, production might also be affected by the next release. If you want to know what is part of the compression search in templates for this templatetag: ``` jinja2 {% compress js %} <script src="update source name here "></script> {% endcompress %} ``` To avoid this behavior in development, you can deactivate the compressor in the settings.py COMPRESS_ENABLED = False ## Steps to Reproduce We noticed this during the development of the open peer review. We have updated the source code of a JS source that is part of the compression. I noticed that the script import of the compressed file (name like output18471749.js) is not updated in the html-template after a new compressed file was created. ## Ideas of solution - Fix django-compressor settings - or Report bug - or Fix template inheritance - other??? ## Context and Environment * Version used: * Operating system: * Environment setup and (python) version: ## Workflow checklist - [ ] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)
[ { "content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/e...
[ { "content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/e...
diff --git a/oeplatform/settings.py b/oeplatform/settings.py index 6f0696bdf..bc7e34b87 100644 --- a/oeplatform/settings.py +++ b/oeplatform/settings.py @@ -166,5 +166,9 @@ def external_urls_context_processor(request): "compressor.finders.CompressorFinder", } + +# https://django-compressor.readthedocs.io/en/stable/settings.html COMPRESS_ENABLED = True COMPRESS_OFFLINE = True +COMPRESS_REBUILD_TIMEOUT = 0 +COMPRESS_MTIME_DELAY = 0 diff --git a/versions/changelogs/current.md b/versions/changelogs/current.md index 0598b515b..db641d3f1 100644 --- a/versions/changelogs/current.md +++ b/versions/changelogs/current.md @@ -7,5 +7,6 @@ ### Bugs - Open Peer Review: Fix a bug in the review backend to handle reviews that are finished in one go (without any feedback). [(#1333)](https://github.com/OpenEnergyPlatform/oeplatform/pull/1333) +- The django-compressor integration now updates the compressed sources and cache as expected [(#1338)](https://github.com/OpenEnergyPlatform/oeplatform/pull/1338) ### Removed
Azure__azure-cli-extensions-3046
vmware 2.0.0 does not work in azure-cli:2.7.0 - If the issue is to do with Azure CLI 2.0 in-particular, create an issue here at [Azure/azure-cli](https://github.com/Azure/azure-cli/issues) ### Extension name (the extension in question) vmware ### Description of issue (in as much detail as possible) The vmware 2.0.0 extension released yesterday does not work with the az cli 2.7.0 released on 2020-06-01, about 9 months ago. I'm not sure exactly what the minimum version should be set to. I believe this needs to be updated, but I'm not sure what it should be or what the best process is for updating it. https://github.com/Azure/azure-cli-extensions/blob/master/src/vmware/azext_vmware/azext_metadata.json ``` "azext.minCliCoreVersion": "2.0.66" ``` ![image](https://user-images.githubusercontent.com/80104/109070352-3f62fd00-76a7-11eb-8151-e474f1edba71.png) steps to reproduce: ``` docker run --rm -it mcr.microsoft.com/azure-cli:2.7.0 az extension add -n vmware az vmware private-cloud show -g taggac-2020-12 -n taggac-20210219 ``` Here are the ``` PS C:\Users\cataggar\io\fct> docker run --rm -it mcr.microsoft.com/azure-cli:2.7.0 bash-5.0# az extension add -n vmware bash-5.0# az vmware private-cloud show -g taggac-2020-12 -n taggac-20210219 The command failed with an unexpected error. 
Here is the traceback: cannot import name 'ARMHttpLoggingPolicy' Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/knack/cli.py", line 215, in invoke cmd_result = self.invocation.execute(args) File "/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py", line 553, in execute self.commands_loader.load_arguments(command) File "/usr/local/lib/python3.6/site-packages/azure/cli/core/__init__.py", line 344, in load_arguments self.command_table[command].load_arguments() # this loads the arguments via reflection File "/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py", line 310, in load_arguments super(AzCliCommand, self).load_arguments() File "/usr/local/lib/python3.6/site-packages/knack/commands.py", line 106, in load_arguments cmd_args = self.arguments_loader() File "/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/arm.py", line 723, in generic_show_arguments_loader cmd_args = get_arguments_loader(context, getter_op, operation_group=kwargs.get('operation_group')) File "/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/arm.py", line 402, in get_arguments_loader getter_args = dict(extract_args_from_signature(context.get_op_handler(getter_op, operation_group=operation_group), File "/usr/local/lib/python3.6/site-packages/azure/cli/core/__init__.py", line 588, in get_op_handler op = import_module(mod_to_import) File "/usr/local/lib/python3.6/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 665, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File 
"/root/.azure/cliextensions/vmware/azext_vmware/custom.py", line 7, in <module> from azext_vmware.vendored_sdks.avs_client import AVSClient File "/root/.azure/cliextensions/vmware/azext_vmware/vendored_sdks/avs_client/__init__.py", line 7, in <module> from ._avs_client import AVSClient File "/root/.azure/cliextensions/vmware/azext_vmware/vendored_sdks/avs_client/_avs_client.py", line 18, in <module> from ._configuration import AVSClientConfiguration File "/root/.azure/cliextensions/vmware/azext_vmware/vendored_sdks/avs_client/_configuration.py", line 11, in <module> from azure.mgmt.core.policies import ARMHttpLoggingPolicy ImportError: cannot import name 'ARMHttpLoggingPolicy' To open an issue, please run: 'az feedback' bash-5.0# ``` -----
[ { "content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# ------------------...
[ { "content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# ------------------...
diff --git a/src/vmware/CHANGELOG.md b/src/vmware/CHANGELOG.md index 32cde2511f8..c0910f47bdb 100644 --- a/src/vmware/CHANGELOG.md +++ b/src/vmware/CHANGELOG.md @@ -1,5 +1,8 @@ # Release History +## 2.0.1 (2021-02) +- Update the minimum az cli version to 2.11.0 [#3045](https://github.com/Azure/azure-cli-extensions/issues/3045) + ## 2.0.0 (2021-02) This version has **breaking changes** for scripts. diff --git a/src/vmware/azext_vmware/azext_metadata.json b/src/vmware/azext_vmware/azext_metadata.json index 341daf2272c..4e44ef19715 100644 --- a/src/vmware/azext_vmware/azext_metadata.json +++ b/src/vmware/azext_vmware/azext_metadata.json @@ -1,4 +1,4 @@ { "azext.isPreview": false, - "azext.minCliCoreVersion": "2.0.66" + "azext.minCliCoreVersion": "2.11.0" } \ No newline at end of file diff --git a/src/vmware/setup.py b/src/vmware/setup.py index 195ba52bb15..9649cf71529 100644 --- a/src/vmware/setup.py +++ b/src/vmware/setup.py @@ -8,7 +8,7 @@ from io import open from setuptools import setup, find_packages -VERSION = "2.0.0" +VERSION = "2.0.1" with open('README.md', encoding='utf-8') as f: readme = f.read()
aio-libs__aiohttp-493
[bug] URL parssing error in the web server If you run this simple server example : ``` python import asyncio from aiohttp import web @asyncio.coroutine def handle(request): return webResponse(body=request.path.encode('utf8')) @asyncio.coroutine def init(loop): app = web.Application(loop=loop) app.router.add_route('GET', '/', handle) srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 5555) return srv loop = asyncio.get_event_loop() loop.run_until_complete(init(loop)) try: loop.run_forever() except KeyboardInterrupt: pass ``` The following requests will get a `200 OK` with the considered path as content : ``` $ curl -X GET http://127.0.0.1:5555/// / $ curl -X GET http://127.0.0.1:555//foo/ / ``` As you can see, the path of the URL is not well parsed. This bug resides in the `_splitted_path` non-data descriptor which uses `urlsplit` on a path rather than a full URL. The consequence is a second segment interpreted as a network location if the first one is empty. I've not quite investigated a fix at the moment, but `_splitted_path` only being used by `raw_path` and `query_string` seems to be a plea to use a fake scheme and netloc for `urlsplit` to behave the expected way.
[ { "content": "__all__ = ('ContentCoding', 'Request', 'StreamResponse', 'Response')\n\nimport asyncio\nimport binascii\nimport cgi\nimport collections\nimport datetime\nimport http.cookies\nimport io\nimport json\nimport math\nimport time\nimport warnings\n\nimport enum\n\nfrom email.utils import parsedate\nfrom...
[ { "content": "__all__ = ('ContentCoding', 'Request', 'StreamResponse', 'Response')\n\nimport asyncio\nimport binascii\nimport cgi\nimport collections\nimport datetime\nimport http.cookies\nimport io\nimport json\nimport math\nimport time\nimport warnings\n\nimport enum\n\nfrom email.utils import parsedate\nfrom...
diff --git a/aiohttp/web_reqrep.py b/aiohttp/web_reqrep.py index 6f3e566c8c6..c3e8abc4067 100644 --- a/aiohttp/web_reqrep.py +++ b/aiohttp/web_reqrep.py @@ -173,7 +173,8 @@ def path_qs(self): @reify def _splitted_path(self): - return urlsplit(self._path_qs) + url = '{}://{}{}'.format(self.scheme, self.host, self._path_qs) + return urlsplit(url) @property def raw_path(self): diff --git a/tests/test_web.py b/tests/test_web.py index b7626d80e2c..3968eab1843 100644 --- a/tests/test_web.py +++ b/tests/test_web.py @@ -16,6 +16,7 @@ def setUp(self): def tearDown(self): self.loop.close() + @unittest.skip('moved to test_web_functional') def test_handler_returns_not_response(self): app = web.Application(loop=self.loop) diff --git a/tests/test_web_functional.py b/tests/test_web_functional.py index c1ec94cb4ae..8493bdeec2c 100644 --- a/tests/test_web_functional.py +++ b/tests/test_web_functional.py @@ -66,6 +66,20 @@ def go(): self.loop.run_until_complete(go()) + def test_handler_returns_not_response(self): + + @asyncio.coroutine + def handler(request): + return 'abc' + + @asyncio.coroutine + def go(): + _, _, url = yield from self.create_server('GET', '/', handler) + resp = yield from request('GET', url, loop=self.loop) + self.assertEqual(500, resp.status) + + self.loop.run_until_complete(go()) + def test_post_form(self): @asyncio.coroutine diff --git a/tests/test_web_request.py b/tests/test_web_request.py index 2dc3bf9b1b3..60c77bf3af5 100644 --- a/tests/test_web_request.py +++ b/tests/test_web_request.py @@ -64,6 +64,10 @@ def test_ctor(self): self.assertIs(self.transport, req.transport) self.assertTrue(req.keep_alive) + def test_doubleslashes(self): + req = self.make_request('GET', '//foo/') + self.assertEqual('//foo/', req.path) + def test_POST(self): req = self.make_request('POST', '/') with self.assertRaises(RuntimeError):
psf__black-4019
Internal error on a specific file <!-- Please make sure that the bug is not already fixed either in newer versions or the current development version. To confirm this, you have three options: 1. Update Black's version if a newer release exists: `pip install -U black` 2. Use the online formatter at <https://black.vercel.app/?version=main>, which will use the latest main branch. 3. Or run _Black_ on your machine: - create a new virtualenv (make sure it's the same Python version); - clone this repository; - run `pip install -e .[d]`; - run `pip install -r test_requirements.txt` - make sure it's sane by running `python -m pytest`; and - run `black` like you did last time. --> **Describe the bug** `black` reports an internal error when formatting a specific file. ``` error: cannot format /home/nicpa/codes/sisl/src/sisl_toolbox/siesta/minimizer/_metric_siesta.py: INTERNAL ERROR: Black produced code that is not equivalent to the source. Please report a bug on https://github.com/psf/black/issues. This diff might be helpful: /tmp/blk__3mh1ucd.log ``` <!-- A clear and concise description of what the bug is. --> **To Reproduce** 1. Download [this file](https://github.com/zerothi/sisl/blob/5a63302b57fcb38d7460507bf000f077655ac664/src/sisl_toolbox/siesta/minimizer/_metric_siesta.py) 2. Run `black` on the file. (I have done `pip install -U black` as of today!) My pyproject.toml configuration file has this: ```toml [tool.black] line-length = 88 target-version = ["py38", "py39", "py310", "py311", "py312"] ``` The resulting error is: ``` error: cannot format /home/nicpa/codes/sisl/src/sisl_toolbox/siesta/minimizer/_metric_siesta.py: INTERNAL ERROR: Black produced code that is not equivalent to the source. Please report a bug on https://github.com/psf/black/issues. 
This diff might be helpful: /tmp/blk__3mh1ucd.log ``` Here is the attached diff-log file: ```diff --- src +++ dst @@ -2307,16 +2307,10 @@ body= Expr( value= JoinedStr( values= - Constant( - kind= - None, # NoneType - value= - '', # str - ) # /Constant FormattedValue( conversion= -1, # int format_spec= None, # NoneType @@ -3263,16 +3257,10 @@ body= Expr( value= JoinedStr( values= - Constant( - kind= - None, # NoneType - value= - '', # str - ) # /Constant FormattedValue( conversion= -1, # int format_spec= None, # NoneType @@ -4273,16 +4261,10 @@ body= Expr( value= JoinedStr( values= - Constant( - kind= - None, # NoneType - value= - '', # str - ) # /Constant FormattedValue( conversion= -1, # int format_spec= None, # NoneType ``` **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> **Environment** <!-- Please complete the following information: --> - Black's version: <!-- e.g. [main] --> > black --version > black, 23.10.1 (compiled: yes) > Python (CPython) 3.11.6 - OS and Python version: Linux/debian, Python 3.11.6
[ { "content": "\"\"\"\nblib2to3 Node/Leaf transformation-related utility functions.\n\"\"\"\n\nimport sys\nfrom typing import Final, Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union\n\nif sys.version_info >= (3, 10):\n from typing import TypeGuard\nelse:\n from typing_extensions import TypeGua...
[ { "content": "\"\"\"\nblib2to3 Node/Leaf transformation-related utility functions.\n\"\"\"\n\nimport sys\nfrom typing import Final, Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union\n\nif sys.version_info >= (3, 10):\n from typing import TypeGuard\nelse:\n from typing_extensions import TypeGua...
diff --git a/CHANGES.md b/CHANGES.md index 5ce37943693..4f90f493ad8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -13,6 +13,9 @@ - Fix crash on formatting code like `await (a ** b)` (#3994) +- No longer treat leading f-strings as docstrings. This matches Python's behaviour and + fixes a crash (#4019) + ### Preview style - Multiline dictionaries and lists that are the sole argument to a function are now diff --git a/src/black/nodes.py b/src/black/nodes.py index 5f6b280c035..fff8e05a118 100644 --- a/src/black/nodes.py +++ b/src/black/nodes.py @@ -529,7 +529,7 @@ def is_docstring(leaf: Leaf) -> bool: return False prefix = get_string_prefix(leaf.value) - if "b" in prefix or "B" in prefix: + if set(prefix).intersection("bBfF"): return False if prev_siblings_are( diff --git a/tests/data/cases/docstring_preview.py b/tests/data/cases/docstring_preview.py index ff4819acb67..a3c656be2f8 100644 --- a/tests/data/cases/docstring_preview.py +++ b/tests/data/cases/docstring_preview.py @@ -58,7 +58,8 @@ def docstring_almost_at_line_limit(): def docstring_almost_at_line_limit_with_prefix(): - f"""long docstring................................................................""" + f"""long docstring................................................................ 
+ """ def mulitline_docstring_almost_at_line_limit(): diff --git a/tests/data/cases/f_docstring.py b/tests/data/cases/f_docstring.py new file mode 100644 index 00000000000..667f550b353 --- /dev/null +++ b/tests/data/cases/f_docstring.py @@ -0,0 +1,20 @@ +def foo(e): + f""" {'.'.join(e)}""" + +def bar(e): + f"{'.'.join(e)}" + +def baz(e): + F""" {'.'.join(e)}""" + +# output +def foo(e): + f""" {'.'.join(e)}""" + + +def bar(e): + f"{'.'.join(e)}" + + +def baz(e): + f""" {'.'.join(e)}""" diff --git a/tests/data/cases/docstring_preview_no_string_normalization.py b/tests/data/cases/preview_docstring_no_string_normalization.py similarity index 100% rename from tests/data/cases/docstring_preview_no_string_normalization.py rename to tests/data/cases/preview_docstring_no_string_normalization.py
helmholtz-analytics__heat-1268
Fix Pytorch release tracking workflows ## Due Diligence <!--- Please address the following points before setting your PR "ready for review". ---> - General: - [x] **base branch** must be `main` for new features, latest release branch (e.g. `release/1.3.x`) for bug fixes - [x] **title** of the PR is suitable to appear in the [Release Notes](https://github.com/helmholtz-analytics/heat/releases/latest) - Implementation: - [x] unit tests: all split configurations tested - [x] unit tests: multiple dtypes tested - [x] documentation updated where needed ## Description <!--- Include a summary of the change/s. Please also include relevant motivation and context. List any dependencies that are required for this change. ---> Issue/s resolved: #1241 ## Changes proposed: - upgrade to the latest version of checkout action - delete the token parameter such that the default action token is used ## Type of change <!-- i.e. - Bug fix (non-breaking change which fixes an issue) - New feature (non-breaking change which adds functionality) - Breaking change (fix or feature that would cause existing functionality to not work as expected) - Documentation update ---> ## Memory requirements <!--- Compare memory requirements to previous implementation / relevant torch operations if applicable: - in distributed and non-distributed mode - with `split=None` and `split not None` This can be done using https://github.com/pythonprofilers/memory_profiler for CPU memory measurements, GPU measurements can be done with https://pytorch.org/docs/master/generated/torch.cuda.max_memory_allocated.html. These tools only profile the memory used by each process, not the entire function. 
---> ## Performance <!--- Compare performance to previous implementation / relevant torch operations if applicable: - in distributed and non-distributed mode - with `split=None` and `split not None` Python has an embedded profiler: https://docs.python.org/3.9/library/profile.html Again, this will only profile the performance on each process. Printing the results with many processes may be illegible. It may be easiest to save the output of each to a file. ---> #### Does this change modify the behaviour of other functions? If so, which? no
[ { "content": "\"\"\"This module contains Heat's version information.\"\"\"\n\n\nmajor: int = 1\n\"\"\"Indicates Heat's main version.\"\"\"\nminor: int = 3\n\"\"\"Indicates feature extension.\"\"\"\nmicro: int = 0\n\"\"\"Indicates revisions for bugfixes.\"\"\"\nextension: str = \"dev\"\n\"\"\"Indicates special b...
[ { "content": "\"\"\"This module contains Heat's version information.\"\"\"\n\n\nmajor: int = 1\n\"\"\"Indicates Heat's main version.\"\"\"\nminor: int = 4\n\"\"\"Indicates feature extension.\"\"\"\nmicro: int = 0\n\"\"\"Indicates revisions for bugfixes.\"\"\"\nextension: str = \"dev\"\n\"\"\"Indicates special b...
diff --git a/.github/workflows/pytorch-latest-main.yml b/.github/workflows/pytorch-latest-main.yml index 139c66d82c..c59736c82f 100644 --- a/.github/workflows/pytorch-latest-main.yml +++ b/.github/workflows/pytorch-latest-main.yml @@ -11,9 +11,8 @@ jobs: runs-on: ubuntu-latest if: ${{ github.repository }} == 'hemlholtz-analytics/heat' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: - token: ${{ secrets.GHACTIONS }} ref: '${{ env.base_branch }}' - name: Fetch PyTorch release version run: | diff --git a/.github/workflows/pytorch-latest-release.yml b/.github/workflows/pytorch-latest-release.yml index 0daa39fc2c..6126fa610a 100644 --- a/.github/workflows/pytorch-latest-release.yml +++ b/.github/workflows/pytorch-latest-release.yml @@ -11,9 +11,8 @@ jobs: runs-on: ubuntu-latest if: ${{ github.repository }} == 'hemlholtz-analytics/heat' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: - token: ${{ secrets.GHACTIONS }} ref: '${{ env.base_branch }}' - name: Fetch PyTorch release version run: | diff --git a/heat/core/linalg/tests/test_solver.py b/heat/core/linalg/tests/test_solver.py index 5bf2cbff08..f8f9889a9d 100644 --- a/heat/core/linalg/tests/test_solver.py +++ b/heat/core/linalg/tests/test_solver.py @@ -65,15 +65,8 @@ def test_lanczos(self): lanczos_B = V_out @ T_out @ V_inv self.assertTrue(ht.allclose(lanczos_B, B)) - # single precision tolerance - if ( - int(torch.__version__.split(".")[0]) == 1 - and int(torch.__version__.split(".")[1]) >= 13 - or int(torch.__version__.split(".")[0]) > 1 - ): - tolerance = 1e-3 - else: - tolerance = 1e-4 + # single precision tolerance for torch.inv() is pretty bad + tolerance = 1e-3 # float32, pre_defined v0, split mismatch A = ht.random.randn(n, n, dtype=ht.float32, split=0) diff --git a/heat/core/version.py b/heat/core/version.py index 4b3e384aea..d680344436 100644 --- a/heat/core/version.py +++ b/heat/core/version.py @@ -3,7 +3,7 @@ major: int = 1 """Indicates Heat's main 
version.""" -minor: int = 3 +minor: int = 4 """Indicates feature extension.""" micro: int = 0 """Indicates revisions for bugfixes."""
biolab__orange3-3530
Report window and clipboard Can't copy from Reports ##### Orange version <!-- From menu _Help→About→Version_ or code `Orange.version.full_version` --> 3.17.0.dev0+8f507ed ##### Expected behavior If items are selected in the Report window it should be possible to copy to the clipboard for using it in a presentation or a document. ##### Actual behavior Can't copy anything.
[ { "content": "import os\nimport logging\nimport warnings\nimport pickle\nfrom collections import OrderedDict\nfrom enum import IntEnum\n\nfrom typing import Optional\n\nimport pkg_resources\n\nfrom AnyQt.QtCore import Qt, QObject, pyqtSlot\nfrom AnyQt.QtGui import QIcon, QCursor, QStandardItemModel, QStandardIt...
[ { "content": "import os\nimport logging\nimport warnings\nimport pickle\nfrom collections import OrderedDict\nfrom enum import IntEnum\n\nfrom typing import Optional\n\nimport pkg_resources\n\nfrom AnyQt.QtCore import Qt, QObject, pyqtSlot\nfrom AnyQt.QtGui import QIcon, QCursor, QStandardItemModel, QStandardIt...
diff --git a/Orange/widgets/report/owreport.py b/Orange/widgets/report/owreport.py index e2a70d8aa71..47a99ab4766 100644 --- a/Orange/widgets/report/owreport.py +++ b/Orange/widgets/report/owreport.py @@ -477,6 +477,9 @@ def get_canvas_instance(self): return window return None + def copy_to_clipboard(self): + self.report_view.triggerPageAction(self.report_view.page().Copy) + if __name__ == "__main__": import sys
falconry__falcon-801
Default OPTIONS responder does not set Content-Length to "0" Per RFC 7231: > A server MUST generate a Content-Length field with a value of "0" if no payload body is to be sent in the response.
[ { "content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless requir...
[ { "content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless requir...
diff --git a/falcon/responders.py b/falcon/responders.py index b5f61866d..34da8075b 100644 --- a/falcon/responders.py +++ b/falcon/responders.py @@ -58,5 +58,6 @@ def create_default_options(allowed_methods): def on_options(req, resp, **kwargs): resp.status = HTTP_204 resp.set_header('Allow', allowed) + resp.set_header('Content-Length', '0') return on_options diff --git a/tests/test_headers.py b/tests/test_headers.py index 88809923f..838755d8f 100644 --- a/tests/test_headers.py +++ b/tests/test_headers.py @@ -534,6 +534,12 @@ def test_add_link_complex(self): self._check_link_header(resource, expected_value) + def test_content_length_options(self): + result = self.simulate_options() + + content_length = '0' + self.assertEqual(result.headers['Content-Length'], content_length) + # ---------------------------------------------------------------------- # Helpers # ----------------------------------------------------------------------
openvinotoolkit__datumaro-743
Wrong annotated return type in Registry class https://github.com/openvinotoolkit/datumaro/blob/0d4a73d3bbe3a93585af7a0148a0e344fd1106b3/datumaro/components/environment.py#L41-L42 In the referenced code the return type of the method appears to be wrong. Either it should be `Iterator[str]` since iteration over a dict returns its keys which are of type `str` or the return statement should be `return iter(self.items.values())`. When using the library with static type checkers this annotation causes type check errors. When removing the annotation, type checkers correctly infer the type `Iterator[str]`. Wrong annotated return type in Registry class https://github.com/openvinotoolkit/datumaro/blob/0d4a73d3bbe3a93585af7a0148a0e344fd1106b3/datumaro/components/environment.py#L41-L42 In the referenced code the return type of the method appears to be wrong. Either it should be `Iterator[str]` since iteration over a dict returns its keys which are of type `str` or the return statement should be `return iter(self.items.values())`. When using the library with static type checkers this annotation causes type check errors. When removing the annotation, type checkers correctly infer the type `Iterator[str]`.
[ { "content": "# Copyright (C) 2020-2022 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport glob\nimport importlib\nimport logging as log\nimport os.path as osp\nfrom functools import partial\nfrom inspect import isclass\nfrom typing import Callable, Dict, Generic, Iterable, Iterator, List, Optional,...
[ { "content": "# Copyright (C) 2020-2022 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport glob\nimport importlib\nimport logging as log\nimport os.path as osp\nfrom functools import partial\nfrom inspect import isclass\nfrom typing import Callable, Dict, Generic, Iterable, Iterator, List, Optional,...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 84c35ac044..e9706cb94b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add jupyter sample introducing how to merge datasets (<https://github.com/openvinotoolkit/datumaro/pull/738>) +### Changed +- N/A + +### Deprecated +- N/A + +### Removed +- N/A + +### Fixed +- Fix static type checking + (<https://github.com/openvinotoolkit/datumaro/pull/743>) + +### Security +- N/A + ## 06/09/2022 - Release v0.3.1 ### Added - Support for custom media types, new `PointCloud` media type, diff --git a/datumaro/components/environment.py b/datumaro/components/environment.py index 4e2b9e72c3..9490e9239e 100644 --- a/datumaro/components/environment.py +++ b/datumaro/components/environment.py @@ -38,7 +38,7 @@ def __getitem__(self, key: str) -> T: def __contains__(self, key) -> bool: return key in self.items - def __iter__(self) -> Iterator[T]: + def __iter__(self) -> Iterator[str]: return iter(self.items)
rasterio__rasterio-437
Check for "ndarray-like" instead of ndarray in _warp; other places I want to use `rasterio.warp.reproject` on an `xray.Dataset` with `xray.Dataset.apply` (http://xray.readthedocs.org/en/stable/). xray has a feature to turn the dataset into a `np.ndarray`, but that means losing all my metadata. At https://github.com/mapbox/rasterio/blob/master/rasterio/_warp.pyx#L249, _warp checks that the source is an `np.ndarray` (whereas the source in my case is an `xray.DataArray` - satisfying the same interfaces as `np.ndarray`), so I get an invalid source error. It could be a good idea to check for something like ``` def is_ndarray_like(source): return hasattr(source, '__array__') ``` instead of ``` isinstance(source, np.ndarray) ``` so other numpy-like arrays can be used.
[ { "content": "# Mapping of GDAL to Numpy data types.\n#\n# Since 0.13 we are not importing numpy here and data types are strings.\n# Happily strings can be used throughout Numpy and so existing code will\n# break.\n#\n# Within Rasterio, to test data types, we use Numpy's dtype() factory to \n# do something like...
[ { "content": "# Mapping of GDAL to Numpy data types.\n#\n# Since 0.13 we are not importing numpy here and data types are strings.\n# Happily strings can be used throughout Numpy and so existing code will\n# break.\n#\n# Within Rasterio, to test data types, we use Numpy's dtype() factory to \n# do something like...
diff --git a/rasterio/_features.pyx b/rasterio/_features.pyx index bc83a65fd..028930303 100644 --- a/rasterio/_features.pyx +++ b/rasterio/_features.pyx @@ -67,7 +67,7 @@ def _shapes(image, mask, connectivity, transform): if is_float: fieldtp = 2 - if isinstance(image, np.ndarray): + if dtypes.is_ndarray(image): mem_ds = InMemoryRaster(image, transform) hband = mem_ds.band elif isinstance(image, tuple): @@ -76,7 +76,7 @@ def _shapes(image, mask, connectivity, transform): else: raise ValueError("Invalid source image") - if isinstance(mask, np.ndarray): + if dtypes.is_ndarray(mask): # A boolean mask must be converted to uint8 for GDAL mask_ds = InMemoryRaster(mask.astype('uint8'), transform) hmaskband = mask_ds.band @@ -168,7 +168,7 @@ def _sieve(image, size, output, mask, connectivity): cdef _io.RasterUpdater udr cdef _io.RasterReader mask_reader - if isinstance(image, np.ndarray): + if dtypes.is_ndarray(image): in_mem_ds = InMemoryRaster(image) in_band = in_mem_ds.band elif isinstance(image, tuple): @@ -177,7 +177,7 @@ def _sieve(image, size, output, mask, connectivity): else: raise ValueError("Invalid source image") - if isinstance(output, np.ndarray): + if dtypes.is_ndarray(output): log.debug("Output array: %r", output) out_mem_ds = InMemoryRaster(output) out_band = out_mem_ds.band @@ -187,7 +187,7 @@ def _sieve(image, size, output, mask, connectivity): else: raise ValueError("Invalid output image") - if isinstance(mask, np.ndarray): + if dtypes.is_ndarray(mask): # A boolean mask must be converted to uint8 for GDAL mask_mem_ds = InMemoryRaster(mask.astype('uint8')) mask_band = mask_mem_ds.band diff --git a/rasterio/_fill.pyx b/rasterio/_fill.pyx index 2515564f4..723af0a30 100644 --- a/rasterio/_fill.pyx +++ b/rasterio/_fill.pyx @@ -23,7 +23,7 @@ def _fillnodata(image, mask, double max_search_distance=100.0, cdef _io.RasterReader mrdr cdef char **alg_options = NULL - if isinstance(image, np.ndarray): + if dtypes.is_ndarray(image): # copy numpy ndarray into an 
in-memory dataset. image_dataset = _gdal.GDALCreate( memdriver, @@ -38,7 +38,7 @@ def _fillnodata(image, mask, double max_search_distance=100.0, else: raise ValueError("Invalid source image") - if isinstance(mask, np.ndarray): + if dtypes.is_ndarray(mask): mask_cast = mask.astype('uint8') mask_dataset = _gdal.GDALCreate( memdriver, diff --git a/rasterio/_warp.pyx b/rasterio/_warp.pyx index 1e267afb9..7d4e4f471 100644 --- a/rasterio/_warp.pyx +++ b/rasterio/_warp.pyx @@ -246,7 +246,7 @@ def _reproject( # If the source is an ndarray, we copy to a MEM dataset. # We need a src_transform and src_dst in this case. These will # be copied to the MEM dataset. - if isinstance(source, np.ndarray): + if dtypes.is_ndarray(source): # Convert 2D single-band arrays to 3D multi-band. if len(source.shape) == 2: source = source.reshape(1, *source.shape) @@ -300,7 +300,7 @@ def _reproject( raise ValueError("Invalid source") # Next, do the same for the destination raster. - if isinstance(destination, np.ndarray): + if dtypes.is_ndarray(destination): if len(destination.shape) == 2: destination = destination.reshape(1, *destination.shape) if destination.shape[0] != src_count: @@ -489,11 +489,11 @@ def _reproject( # _gdal.GDALDestroyApproxTransformer(psWOptions.pTransformerArg) if psWOptions != NULL: _gdal.GDALDestroyWarpOptions(psWOptions) - if isinstance(source, np.ndarray): + if dtypes.is_ndarray(source): if hdsin != NULL: _gdal.GDALClose(hdsin) - if reprojected and isinstance(destination, np.ndarray): + if reprojected and dtypes.is_ndarray(destination): retval = _io.io_auto(destination, hdsout, 0) # TODO: handle errors (by retval). 
diff --git a/rasterio/dtypes.py b/rasterio/dtypes.py index e08f14e67..449ec5f0f 100644 --- a/rasterio/dtypes.py +++ b/rasterio/dtypes.py @@ -96,3 +96,9 @@ def get_minimum_int_dtype(values): return int16 elif min_value >= -2147483648 and max_value <= 2147483647: return int32 + + +def is_ndarray(array): + import numpy + + return isinstance(array, numpy.ndarray) or hasattr(array, '__array__') diff --git a/tests/test_dtypes.py b/tests/test_dtypes.py index 7a41fc778..1826ec52c 100644 --- a/tests/test_dtypes.py +++ b/tests/test_dtypes.py @@ -1,14 +1,23 @@ import numpy as np -import rasterio.dtypes +from rasterio import dtypes, ubyte + + +def test_is_ndarray(): + assert dtypes.is_ndarray(np.zeros((1,))) + assert dtypes.is_ndarray([0]) == False + assert dtypes.is_ndarray((0,)) == False + def test_np_dt_uint8(): - assert rasterio.dtypes.check_dtype(np.uint8) + assert dtypes.check_dtype(np.uint8) + def test_dt_ubyte(): - assert rasterio.dtypes.check_dtype(rasterio.ubyte) + assert dtypes.check_dtype(ubyte) + def test_gdal_name(): - assert rasterio.dtypes._gdal_typename(rasterio.ubyte) == 'Byte' - assert rasterio.dtypes._gdal_typename(np.uint8) == 'Byte' - assert rasterio.dtypes._gdal_typename(np.uint16) == 'UInt16' + assert dtypes._gdal_typename(ubyte) == 'Byte' + assert dtypes._gdal_typename(np.uint8) == 'Byte' + assert dtypes._gdal_typename(np.uint16) == 'UInt16'
Lightning-Universe__lightning-flash-665
ImageEmbedder default behavior is not a flattened output ## 🐛 Bug I discovered this issue while testing PR #655. If you run the [Image Embedding README example code](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), it returns a 3D tensor. My understanding from the use of embeddings in general, and how they are used in [Fifty One](https://voxel51.com/docs/fiftyone/tutorials/image_embeddings.html) is they expect the embeddings to be 1D (for each embedding). The reason it returns a 3D tensor is because it depends on the backbone used. The default there is `resnet101`, which returns a `2048x7x7` shape tensor. Others like inception return a flat 1D tensor, i.e. length-X. ### To Reproduce Steps to reproduce the behavior: Run the [README example](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), but remove the `embedding_dim` parameter. See below for example. Note: as-is, this will error on `print(embeddings.shape)`, regardless of configuration, since that is a list. But the question here is around the logic for the ImageEmbedder. #### Code sample ```python from flash.core.data.utils import download_data from flash.image import ImageEmbedder # 1. Download the data download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/") # 2. Create an ImageEmbedder with resnet50 trained on imagenet. embedder = ImageEmbedder(backbone="resnet50") # 3. Generate an embedding from an image path. embeddings = embedder.predict("data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg") # 4. Print embeddings shape print(embeddings.shape) ``` ### Expected behavior Expect to see a 100352x1 shape tensor as the output, instead of 2048x7x7. 
### Environment - PyTorch Version (e.g., 1.0): 1.9 - OS (e.g., Linux): Linux - How you installed PyTorch (`conda`, `pip`, source): pip - Build command you used (if compiling from source): N/A - Python version: 3.8.6 - CUDA/cuDNN version: N/A - GPU models and configuration: N/A - Any other relevant information: N/A ### Additional context I believe the question is around what the logic should be here: https://github.com/PyTorchLightning/lightning-flash/blob/075de3a46d74d9fc0e769401063fede1f12d0518/flash/image/embedding/model.py#L85-L92 If `embedding_dim` is None, then the head is `nn.Identity()`. **If we desire a flat 1D embedding, then the question is: should `nn.Identity()` change to `nn.Flatten()`?** It could be argued that the user should be left to flatten after on their own, but per the contributing guidelines, I thought this would align with "[Force User Decisions To Best Practices](https://github.com/PyTorchLightning/lightning-flash/blob/ddd942d3dfe3884a97a855446410166c3c9f16d9/.github/CONTRIBUTING.md#force-user-decisions-to-best-practices)" Let me know your thoughts. If that makes sense, then I can update the code, run some tests, and update docs in a PR.
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required ...
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required ...
diff --git a/README.md b/README.md index be19cb06f9..9b840d3476 100644 --- a/README.md +++ b/README.md @@ -206,13 +206,13 @@ from flash.image import ImageEmbedder download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/") # 2. Create an ImageEmbedder with resnet50 trained on imagenet. -embedder = ImageEmbedder(backbone="resnet50", embedding_dim=128) +embedder = ImageEmbedder(backbone="resnet50") # 3. Generate an embedding from an image path. embeddings = embedder.predict("data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg") # 4. Print embeddings shape -print(embeddings.shape) +print(embeddings[0].shape) ``` </details> diff --git a/flash_examples/integrations/fiftyone/image_embedding.py b/flash_examples/integrations/fiftyone/image_embedding.py index b9d1651ceb..019bd9cffe 100644 --- a/flash_examples/integrations/fiftyone/image_embedding.py +++ b/flash_examples/integrations/fiftyone/image_embedding.py @@ -28,7 +28,7 @@ ) # 3 Load model -embedder = ImageEmbedder(backbone="resnet101", embedding_dim=128) +embedder = ImageEmbedder(backbone="resnet101") # 4 Generate embeddings filepaths = dataset.values("filepath")
getmoto__moto-1613
Running lambda invoke with govcloud results in a KeyError moto version: 1.3.3 botocore version: 1.10.4 When using moto to invoke a lambda function on a govcloud region, you run into a key error with the lambda_backends. This is because boto.awslambda.regions() does not include the govcloud region, despite it being available for use. I've made a pull request that fixes the issue: #1613 Trace of the error: ``` Traceback (most recent call last): File "/Users/eric/nimbis/sites/tss/apps/session_aws/tasks/dns.py", line 84, in run Payload=lambda_payload) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/client.py", line 314, in _api_call return self._make_api_call(operation_name, kwargs) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/client.py", line 599, in _make_api_call operation_model, request_dict) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/endpoint.py", line 148, in make_request return self._send_request(request_dict, operation_model) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/endpoint.py", line 177, in _send_request success_response, exception): File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/endpoint.py", line 273, in _needs_retry caught_exception=caught_exception, request_dict=request_dict) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/hooks.py", line 227, in emit return self._emit(event_name, kwargs) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/hooks.py", line 210, in _emit response = handler(**kwargs) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/retryhandler.py", line 183, in __call__ if self._checker(attempts, response, caught_exception): File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/retryhandler.py", line 251, in __call__ caught_exception) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/retryhandler.py", 
line 269, in _should_retry return self._checker(attempt_number, response, caught_exception) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/retryhandler.py", line 317, in __call__ caught_exception) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/retryhandler.py", line 223, in __call__ attempt_number, caught_exception) File "/Users/eric/.virtualenvs/tss/lib/python2.7/site-packages/botocore/retryhandler.py", line 359, in _check_caught_exception raise caught_exception KeyError: u'us-gov-west-1' ```
[ { "content": "from __future__ import unicode_literals\n\nimport base64\nfrom collections import defaultdict\nimport copy\nimport datetime\nimport docker.errors\nimport hashlib\nimport io\nimport logging\nimport os\nimport json\nimport re\nimport zipfile\nimport uuid\nimport functools\nimport tarfile\nimport cal...
[ { "content": "from __future__ import unicode_literals\n\nimport base64\nfrom collections import defaultdict\nimport copy\nimport datetime\nimport docker.errors\nimport hashlib\nimport io\nimport logging\nimport os\nimport json\nimport re\nimport zipfile\nimport uuid\nimport functools\nimport tarfile\nimport cal...
diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 80b4ffba3e71..d49df81c753a 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -675,3 +675,4 @@ def do_validate_s3(): for _region in boto.awslambda.regions()} lambda_backends['ap-southeast-2'] = LambdaBackend('ap-southeast-2') +lambda_backends['us-gov-west-1'] = LambdaBackend('us-gov-west-1')
ESMCI__cime-993
scripts_regression_tests.py O_TestTestScheduler This test fails with error SystemExit: ERROR: Leftover threads? when run as part of the full scripts_regression_tests.py but passes when run using ctest or when run as an individual test.
[ { "content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom distutils.spawn import find_e...
[ { "content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom distutils.spawn import find_e...
diff --git a/utils/python/CIME/code_checker.py b/utils/python/CIME/code_checker.py index e98e3b21315..e1df4262e98 100644 --- a/utils/python/CIME/code_checker.py +++ b/utils/python/CIME/code_checker.py @@ -106,4 +106,6 @@ def check_code(files, num_procs=10, interactive=False): pool = ThreadPool(num_procs) results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check) + pool.close() + pool.join() return dict(results)
jschneier__django-storages-589
Is it correct in the `get_available_overwrite_name` function? Hi, Please tell me what the following code. When `name`'s length equals `max_length` in the `get_available_overwrite_name`, `get_available_overwrite_name` returns overwritten `name`. The `name` must be less than or equal to `max_length` isn't it? https://github.com/jschneier/django-storages/blob/master/storages/utils.py#L105 Regards, Chihiro
[ { "content": "import os\nimport posixpath\n\nfrom django.conf import settings\nfrom django.core.exceptions import (\n ImproperlyConfigured, SuspiciousFileOperation,\n)\nfrom django.utils.encoding import force_text\n\n\ndef setting(name, default=None):\n \"\"\"\n Helper function to get a Django setting ...
[ { "content": "import os\nimport posixpath\n\nfrom django.conf import settings\nfrom django.core.exceptions import (\n ImproperlyConfigured, SuspiciousFileOperation,\n)\nfrom django.utils.encoding import force_text\n\n\ndef setting(name, default=None):\n \"\"\"\n Helper function to get a Django setting ...
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7d4b23bbb..a99adc176 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,14 @@ django-storages CHANGELOG ========================= +1.7.1 (2018-09-XX) +****************** + +- Fix off-by-1 error in ``get_available_name`` whenever ``file_overwrite`` or ``overwrite_files`` is ``True`` (`#588`_, `#589`_) + +.. _#588: https://github.com/jschneier/django-storages/issues/588 +.. _#589: https://github.com/jschneier/django-storages/pull/589 + 1.7 (2018-09-03) **************** diff --git a/storages/utils.py b/storages/utils.py index 5e3997352..14d47b067 100644 --- a/storages/utils.py +++ b/storages/utils.py @@ -102,7 +102,7 @@ def lookup_env(names): def get_available_overwrite_name(name, max_length): - if max_length is None or len(name) < max_length: + if max_length is None or len(name) <= max_length: return name # Adapted from Django diff --git a/tests/test_gcloud.py b/tests/test_gcloud.py index 06f0dd4ec..1d3a8f723 100644 --- a/tests/test_gcloud.py +++ b/tests/test_gcloud.py @@ -8,9 +8,7 @@ import mimetypes from datetime import datetime, timedelta -from django.core.exceptions import ( - ImproperlyConfigured, SuspiciousFileOperation, -) +from django.core.exceptions import ImproperlyConfigured from django.core.files.base import ContentFile from django.test import TestCase from django.utils import timezone @@ -376,14 +374,6 @@ def test_get_available_name_unicode(self): filename = 'ủⓝï℅ⅆℇ.txt' self.assertEqual(self.storage.get_available_name(filename), filename) - def test_get_available_name_overwrite_maxlength(self): - self.storage.file_overwrite = True - - self.assertEqual(self.storage.get_available_name('test/foo.txt', 11), 'test/fo.txt') - self.assertEqual(self.storage.get_available_name('test_a/foobar.txt', None), 'test_a/foobar.txt') - with self.assertRaises(SuspiciousFileOperation): - self.storage.get_available_name('test_a/foobar.txt', 10) - def test_cache_control(self): data = 'This is some test content.' 
filename = 'cache_control_file.txt' diff --git a/tests/test_utils.py b/tests/test_utils.py index 2fcee3ac3..e411d232c 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,9 +1,11 @@ import datetime from django.conf import settings +from django.core.exceptions import SuspiciousFileOperation from django.test import TestCase from storages import utils +from storages.utils import get_available_overwrite_name as gaon class SettingTest(TestCase): @@ -108,3 +110,26 @@ def test_join_nothing(self): def test_with_base_url_join_nothing(self): path = utils.safe_join('base_url') self.assertEqual(path, 'base_url/') + + +class TestGetAvailableOverwriteName(TestCase): + def test_maxlength_is_none(self): + name = 'superlong/file/with/path.txt' + self.assertEqual(gaon(name, None), name) + + def test_maxlength_equals_name(self): + name = 'parent/child.txt' + self.assertEqual(gaon(name, len(name)), name) + + def test_maxlength_is_greater_than_name(self): + name = 'parent/child.txt' + self.assertEqual(gaon(name, len(name) + 1), name) + + def test_maxlength_less_than_name(self): + name = 'parent/child.txt' + self.assertEqual(gaon(name, len(name) - 1), 'parent/chil.txt') + + def test_truncates_away_filename_raises(self): + name = 'parent/child.txt' + with self.assertRaises(SuspiciousFileOperation): + gaon(name, len(name) - 5)
bridgecrewio__checkov-1228
boto3 is fixed at the patch level version **Is your feature request related to a problem? Please describe.** free boto3 dependency patch version. **Describe the solution you'd like** replace the line here: https://github.com/bridgecrewio/checkov/blob/master/Pipfile#L29 with ``` boto3 = "==1.17.*" ``` **Describe alternatives you've considered** there are no alternatives as the patch version i don't see why is locked. it can cause conflicts with already installed boto3 library **Additional context** boto3 dependency install latest patch version By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. Fixes #1211
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\")...
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\")...
diff --git a/Pipfile b/Pipfile index 679055be77..6a18ef5e20 100644 --- a/Pipfile +++ b/Pipfile @@ -26,7 +26,7 @@ termcolor="*" junit-xml ="*" dpath = ">=1.5.0,<2" pyyaml = ">=5.4.1" -boto3 = "==1.17.27" +boto3 = "==1.17.*" GitPython = "*" six = "==1.15.0" jmespath = "*" diff --git a/Pipfile.lock b/Pipfile.lock index b9dcab95b2..1a449cab1a 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "9e2b5b0b254c8c74d8f2e0268c436ccfe24f00078f540c6f4e38c0851734bc77" + "sha256": "a0c8170968925bd035d8f5aa085dadda7500510070e78b9466b80c5ab68e866e" }, "pipfile-spec": 6, "requires": { @@ -34,19 +34,19 @@ }, "boto3": { "hashes": [ - "sha256:6758751f1181b9363e4e7559dcbd5ac0fc7147b73f429c976ec5ecd1688c9ec7", - "sha256:fa41987f9f71368013767306d9522b627946a01b4843938a26fb19cc8adb06c0" + "sha256:0a21893db156c0938d0a06b622c3dd3d2da2dcd9d06d343c8f9536ac9de4ec7f", + "sha256:c83a33fff7d20027386552967355508ce71fb7406ab0cc8e627e257c94754d43" ], "index": "pypi", - "version": "==1.17.27" + "version": "==1.17.74" }, "botocore": { "hashes": [ - "sha256:e4f8cb923edf035c2ae5f6169c70e77e31df70b88919b92b826a6b9bd14511b1", - "sha256:f7c2c5c5ed5212b2628d8fb1c587b31c6e8d413ecbbd1a1cdf6f96ed6f5c8d5e" + "sha256:2061cf3d17615aa4114c91dbed8917adc5287a88354a7693c96aa8e9f9dedd6e", + "sha256:6937954ce6dabc00eb157e9fbd21edd45b4dfe3de738e68dbca4c042bfda0954" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==1.20.62" + "version": "==1.20.74" }, "cached-property": { "hashes": [ @@ -72,19 +72,19 @@ }, "click": { "hashes": [ - "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", - "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" + "sha256:7d8c289ee437bcb0316820ccee14aefcb056e58d31830ecab8e47eda6540e136", + "sha256:e90e62ced43dc8105fb9a26d62f0d9340b5c8db053a814e25d95c19873ae87db" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 
3.4'", - "version": "==7.1.2" + "markers": "python_full_version >= '3.6.0'", + "version": "==8.0.0" }, "click-option-group": { "hashes": [ - "sha256:1b4b2ecf87ba8dea78060cffd294b38eea5af81f28a5f9be223c01b8c5ea9ab0", - "sha256:743733a0f564438b6b761f49ddf37d845f9a662294ecabe0e832e597208bcf31" + "sha256:9653a2297357335d7325a1827e71ac1245d91c97d959346a7decabd4a52d5354", + "sha256:a6e924f3c46b657feb5b72679f7e930f8e5b224b766ab35c91ae4019b4e0615e" ], - "markers": "python_version >= '3.6' and python_version < '4'", - "version": "==0.5.2" + "markers": "python_version < '4' and python_full_version >= '3.6.0'", + "version": "==0.5.3" }, "cloudsplaining": { "hashes": [ @@ -158,11 +158,11 @@ }, "gitpython": { "hashes": [ - "sha256:3283ae2fba31c913d857e12e5ba5f9a7772bbc064ae2bb09efafa71b0dd4939b", - "sha256:be27633e7509e58391f10207cd32b2a6cf5b908f92d9cd30da2e514e1137af61" + "sha256:29fe82050709760081f588dd50ce83504feddbebdc4da6956d02351552b1c135", + "sha256:ee24bdc93dce357630764db659edaf6b8d664d4ff5447ccfeedd2dc5c253f41e" ], "index": "pypi", - "version": "==3.1.14" + "version": "==3.1.17" }, "idna": { "hashes": [ @@ -182,11 +182,11 @@ }, "jinja2": { "hashes": [ - "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419", - "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6" + "sha256:2f2de5285cf37f33d33ecd4a9080b75c87cd0c1994d5a9c6df17131ea1f049c6", + "sha256:ea8d7dd814ce9df6de6a761ec7f1cac98afe305b8cdc4aaae4e114b8d8ce24c5" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==2.11.3" + "markers": "python_full_version >= '3.6.0'", + "version": "==3.0.0" }, "jmespath": { "hashes": [ @@ -214,66 +214,48 @@ "sha256:31b5b491868dcc87d6c24b7e3d19a0d730d59d3e46f4eea6430a321bed387a49", "sha256:96c3ba1261de2f7547b46a00ea8463832c921d3f9d6aba3f255a6f71386db20c" ], - "markers": "python_version >= '3.6'", + "markers": "python_full_version >= '3.6.0'", "version": "==3.3.4" }, "markupsafe": { 
"hashes": [ - "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", - "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", - "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", - "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", - "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42", - "sha256:195d7d2c4fbb0ee8139a6cf67194f3973a6b3042d742ebe0a9ed36d8b6f0c07f", - "sha256:22c178a091fc6630d0d045bdb5992d2dfe14e3259760e713c490da5323866c39", - "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", - "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", - "sha256:2beec1e0de6924ea551859edb9e7679da6e4870d32cb766240ce17e0a0ba2014", - "sha256:3b8a6499709d29c2e2399569d96719a1b21dcd94410a586a18526b143ec8470f", - "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", - "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", - "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", - "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", - "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b", - "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", - "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15", - "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", - "sha256:6f1e273a344928347c1290119b493a1f0303c52f5a5eae5f16d74f48c15d4a85", - "sha256:6fffc775d90dcc9aed1b89219549b329a9250d918fd0b8fa8d93d154918422e1", - "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", - "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", - "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905", - "sha256:7fed13866cf14bba33e7176717346713881f56d9d2bcebab207f7a036f41b850", - 
"sha256:84dee80c15f1b560d55bcfe6d47b27d070b4681c699c572af2e3c7cc90a3b8e0", - "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735", - "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d", - "sha256:98bae9582248d6cf62321dcb52aaf5d9adf0bad3b40582925ef7c7f0ed85fceb", - "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e", - "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d", - "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c", - "sha256:a6a744282b7718a2a62d2ed9d993cad6f5f585605ad352c11de459f4108df0a1", - "sha256:acf08ac40292838b3cbbb06cfe9b2cb9ec78fce8baca31ddb87aaac2e2dc3bc2", - "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21", - "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2", - "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5", - "sha256:b1dba4527182c95a0db8b6060cc98ac49b9e2f5e64320e2b56e47cb2831978c7", - "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b", - "sha256:b7d644ddb4dbd407d31ffb699f1d140bc35478da613b441c582aeb7c43838dd8", - "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", - "sha256:bf5aa3cbcfdf57fa2ee9cd1822c862ef23037f5c832ad09cfea57fa846dec193", - "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", - "sha256:caabedc8323f1e93231b52fc32bdcde6db817623d33e100708d9a68e1f53b26b", - "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", - "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2", - "sha256:d53bc011414228441014aa71dbec320c66468c1030aae3a6e29778a3382d96e5", - "sha256:d73a845f227b0bfe8a7455ee623525ee656a9e2e749e4742706d80a6065d5e2c", - "sha256:d9be0ba6c527163cbed5e0857c451fcd092ce83947944d6c14bc95441203f032", - "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", - "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be", 
- "sha256:feb7b34d6325451ef96bc0e36e1a6c0c1c64bc1fbec4b854f4529e51887b1621" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.1.1" + "sha256:007dc055dbce5b1104876acee177dbfd18757e19d562cd440182e1f492e96b95", + "sha256:031bf79a27d1c42f69c276d6221172417b47cb4b31cdc73d362a9bf5a1889b9f", + "sha256:161d575fa49395860b75da5135162481768b11208490d5a2143ae6785123e77d", + "sha256:24bbc3507fb6dfff663af7900a631f2aca90d5a445f272db5fc84999fa5718bc", + "sha256:2efaeb1baff547063bad2b2893a8f5e9c459c4624e1a96644bbba08910ae34e0", + "sha256:32200f562daaab472921a11cbb63780f1654552ae49518196fc361ed8e12e901", + "sha256:3261fae28155e5c8634dd7710635fe540a05b58f160cef7713c7700cb9980e66", + "sha256:3b54a9c68995ef4164567e2cd1a5e16db5dac30b2a50c39c82db8d4afaf14f63", + "sha256:3c352ff634e289061711608f5e474ec38dbaa21e3e168820d53d5f4015e5b91b", + "sha256:3fb47f97f1d338b943126e90b79cad50d4fcfa0b80637b5a9f468941dbbd9ce5", + "sha256:441ce2a8c17683d97e06447fcbccbdb057cbf587c78eb75ae43ea7858042fe2c", + "sha256:45535241baa0fc0ba2a43961a1ac7562ca3257f46c4c3e9c0de38b722be41bd1", + "sha256:4aca81a687975b35e3e80bcf9aa93fe10cd57fac37bf18b2314c186095f57e05", + "sha256:4cc563836f13c57f1473bc02d1e01fc37bab70ad4ee6be297d58c1d66bc819bf", + "sha256:4fae0677f712ee090721d8b17f412f1cbceefbf0dc180fe91bab3232f38b4527", + "sha256:58bc9fce3e1557d463ef5cee05391a05745fd95ed660f23c1742c711712c0abb", + "sha256:664832fb88b8162268928df233f4b12a144a0c78b01d38b81bdcf0fc96668ecb", + "sha256:70820a1c96311e02449591cbdf5cd1c6a34d5194d5b55094ab725364375c9eb2", + "sha256:79b2ae94fa991be023832e6bcc00f41dbc8e5fe9d997a02db965831402551730", + "sha256:83cf0228b2f694dcdba1374d5312f2277269d798e65f40344964f642935feac1", + "sha256:87de598edfa2230ff274c4de7fcf24c73ffd96208c8e1912d5d0fee459767d75", + "sha256:8f806bfd0f218477d7c46a11d3e52dc7f5fdfaa981b18202b7dc84bbc287463b", + "sha256:90053234a6479738fd40d155268af631c7fca33365f964f2208867da1349294b", + 
"sha256:a00dce2d96587651ef4fa192c17e039e8cfab63087c67e7d263a5533c7dad715", + "sha256:a08cd07d3c3c17cd33d9e66ea9dee8f8fc1c48e2d11bd88fd2dc515a602c709b", + "sha256:a19d39b02a24d3082856a5b06490b714a9d4179321225bbf22809ff1e1887cc8", + "sha256:d00a669e4a5bec3ee6dbeeeedd82a405ced19f8aeefb109a012ea88a45afff96", + "sha256:dab0c685f21f4a6c95bfc2afd1e7eae0033b403dd3d8c1b6d13a652ada75b348", + "sha256:df561f65049ed3556e5b52541669310e88713fdae2934845ec3606f283337958", + "sha256:e4570d16f88c7f3032ed909dc9e905a17da14a1c4cfd92608e3fda4cb1208bbd", + "sha256:e77e4b983e2441aff0c0d07ee711110c106b625f440292dfe02a2f60c8218bd6", + "sha256:e79212d09fc0e224d20b43ad44bb0a0a3416d1e04cf6b45fed265114a5d43d20", + "sha256:f58b5ba13a5689ca8317b98439fccfbcc673acaaf8241c1869ceea40f5d585bf", + "sha256:fef86115fdad7ae774720d7103aa776144cf9b66673b4afa9bcaa7af990ed07b" + ], + "markers": "python_full_version >= '3.6.0'", + "version": "==2.0.0" }, "networkx": { "hashes": [ @@ -296,7 +278,7 @@ "sha256:15c1aa5e4d887d07df495518445126182d4a551e177c192a46169593ce971fbc", "sha256:2c3e4405a72f8284f7a3c987fbd666b3ae63fd095101e004e9ee6a1fb1ab76ff" ], - "markers": "python_version >= '3.6'", + "markers": "python_full_version >= '3.6.0'", "version": "==0.11.10" }, "pyparsing": { @@ -360,10 +342,10 @@ }, "s3transfer": { "hashes": [ - "sha256:35627b86af8ff97e7ac27975fe0a98a312814b46c6333d8a6b889627bcd80994", - "sha256:efa5bd92a897b6a8d5c1383828dca3d52d0790e0756d49740563a3fb6ed03246" + "sha256:9b3752887a2880690ce628bc263d6d13a3864083aeacff4890c1c9839a5eb0bc", + "sha256:cb022f4b16551edebbb31a377d3f09600dbada7363d8c5db7976e7f47732e1b2" ], - "version": "==0.3.7" + "version": "==0.4.2" }, "schema": { "hashes": [ @@ -454,29 +436,29 @@ }, "websocket-client": { "hashes": [ - "sha256:44b5df8f08c74c3d82d28100fdc81f4536809ce98a17f0757557813275fbb663", - "sha256:63509b41d158ae5b7f67eb4ad20fecbb4eee99434e73e140354dc3ff8e09716f" + "sha256:5051b38a2f4c27fbd7ca077ebb23ec6965a626ded5a95637f36be1b35b6c4f81", + 
"sha256:57f876f1af4731cacb806cf54d02f5fbf75dee796053b9a5b94fd7c1d9621db9" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.58.0" + "markers": "python_full_version >= '3.6.0'", + "version": "==1.0.0" }, "zipp": { "hashes": [ "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76", "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098" ], - "markers": "python_version >= '3.6'", + "markers": "python_full_version >= '3.6.0'", "version": "==3.4.1" } }, "develop": { "attrs": { "hashes": [ - "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6", - "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700" + "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1", + "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==20.3.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==21.2.0" }, "bandit": { "hashes": [ @@ -562,11 +544,11 @@ }, "gitpython": { "hashes": [ - "sha256:3283ae2fba31c913d857e12e5ba5f9a7772bbc064ae2bb09efafa71b0dd4939b", - "sha256:be27633e7509e58391f10207cd32b2a6cf5b908f92d9cd30da2e514e1137af61" + "sha256:29fe82050709760081f588dd50ce83504feddbebdc4da6956d02351552b1c135", + "sha256:ee24bdc93dce357630764db659edaf6b8d664d4ff5447ccfeedd2dc5c253f41e" ], "index": "pypi", - "version": "==3.1.14" + "version": "==3.1.17" }, "importlib-metadata": { "hashes": [ @@ -625,11 +607,11 @@ }, "pytest": { "hashes": [ - "sha256:671238a46e4df0f3498d1c3270e5deb9b32d25134c99b7d75370a68cfbe9b634", - "sha256:6ad9c7bdf517a808242b998ac20063c41532a570d088d77eec1ee12b0b5574bc" + "sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b", + "sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890" ], "index": "pypi", - "version": 
"==6.2.3" + "version": "==6.2.4" }, "pyyaml": { "hashes": [ @@ -720,7 +702,7 @@ "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76", "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098" ], - "markers": "python_version >= '3.6'", + "markers": "python_full_version >= '3.6.0'", "version": "==3.4.1" } } diff --git a/setup.py b/setup.py index 2c264adb76..0b629f92e5 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ "junit-xml", "dpath>=1.5.0,<2", "pyyaml>=5.4.1", - "boto3==1.17.27", + "boto3==1.17.*", "GitPython", "six==1.15.0", "jmespath",
mitmproxy__mitmproxy-2072
[web] Failed to dump flows into json when visiting https website. ##### Steps to reproduce the problem: 1. start mitmweb and set the correct proxy configuration in the browser. 2. visit [github](https://github.com), or any other website with https 3. mitmweb stuck and throw an exception: ```python ERROR:tornado.application:Exception in callback <function WebMaster.run.<locals>.<lambda> at 0x7f8871ebb378> Traceback (most recent call last): File "/home/matthew/Hack/mitmproxy/venv3.5/lib/python3.5/site-packages/tornado/ioloop.py", line 1041, in _run return self.callback() File "/home/matthew/Hack/mitmproxy/mitmproxy/tools/web/master.py", line 109, in <lambda> tornado.ioloop.PeriodicCallback(lambda: self.tick(timeout=0), 5).start() File "/home/matthew/Hack/mitmproxy/mitmproxy/master.py", line 109, in tick handle_func(obj) File "/home/matthew/Hack/mitmproxy/mitmproxy/controller.py", line 70, in wrapper master.addons(f.__name__, message) File "/home/matthew/Hack/mitmproxy/mitmproxy/addonmanager.py", line 90, in __call__ self.invoke(i, name, *args, **kwargs) File "/home/matthew/Hack/mitmproxy/mitmproxy/addonmanager.py", line 85, in invoke func(*args, **kwargs) File "/home/matthew/Hack/mitmproxy/mitmproxy/addons/view.py", line 327, in request self.add(f) File "/home/matthew/Hack/mitmproxy/mitmproxy/addons/view.py", line 255, in add self.sig_view_add.send(self, flow=f) File "/home/matthew/Hack/mitmproxy/venv3.5/lib/python3.5/site-packages/blinker/base.py", line 267, in send for receiver in self.receivers_for(sender)] File "/home/matthew/Hack/mitmproxy/venv3.5/lib/python3.5/site-packages/blinker/base.py", line 267, in <listcomp> for receiver in self.receivers_for(sender)] File "/home/matthew/Hack/mitmproxy/mitmproxy/tools/web/master.py", line 58, in _sig_view_add data=app.flow_to_json(flow) File "/home/matthew/Hack/mitmproxy/mitmproxy/tools/web/app.py", line 197, in broadcast message = json.dumps(kwargs, ensure_ascii=False).encode("utf8", "surrogateescape") File 
"/usr/lib/python3.5/json/__init__.py", line 237, in dumps **kw).encode(obj) File "/usr/lib/python3.5/json/encoder.py", line 198, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/lib/python3.5/json/encoder.py", line 256, in iterencode return _iterencode(o, 0) File "/usr/lib/python3.5/json/encoder.py", line 179, in default raise TypeError(repr(o) + " is not JSON serializable") TypeError: b'-----BEGIN CERTIFICATE-----\nMIIC5jCCAc6gAwIBAgIGDYj0HL5MMA0GCSqGSIb3DQEBCwUAMCgxEjAQBgNVBAMM\nCW1pdG1wcm94eTESMBAGA1UECgwJbWl0bXByb3h5MB4XDTE3MDIyNTA5MDM0M1oX\nDTIwMDIyNzA5MDM0M1owFTETMBEGA1UEAwwKZ2l0aHViLmNvbTCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAMTLqdlVNA4h2xzkX5XhLO1wtqZX0X0JpsXC\nHUO+KE3Pf2IBHWFAzeB3SVuaTSIa55UvRUDgZm+gYpl/qswf3MpPB8rkosLtwSJt\ns7ziAYF0JlrwYW+ZBaH/baQZ4JmgpY3qFzrkNhXwrVW+Wg3uO47w/9GaIuUNVv5t\nElfbCDBO0wvWt9tgEuaFNLVwOnibN4LEioQw/xnnUZu4JU6u+16rWasARxU7vlGs\no+CB6wgoK62W4VnSK7aQv6PMAOR49tyzhLXO6LKHQtZA4DG34zXWTYfXhuTC7rnA\nQ6haZ9qyVyeYclIXpJkmf10q2eJTjQbj8ff4Cj3LYlVmBtC2qbsCAwEAAaMpMCcw\nJQYDVR0RBB4wHIIKZ2l0aHViLmNvbYIOd3d3LmdpdGh1Yi5jb20wDQYJKoZIhvcN\nAQELBQADggEBABRJcH+lDB6ec343S+tNYDtr+wWgSiGw7WggKcUpMawBuqY61K4L\nLoxous98ie5XFfLbZI2rW/sIbMEuhjjamEMNmt83ZmZxo/YzMTXO/HlmHZYm+Vjw\nTdhGxe5cGTxjCwXhygRHX+IupDjanniwmh2jfg/0SlW7S4YE/MQJ1mcbGyzppwkg\n4hZ6sEcGe+RC7Sn1tJWlVpA3V8a6udZE8ejlaZV0/PYbJUWyRxAl00PlvRG2sPu5\nEJM7Xbd0TxtqVX7oagImBhqlhf0CyJfRMq0DU34j0oeUqtV/0FaapMumOODcnloI\nJeldz1QeX2hHksE1hYeVjZNFNKQLtzvEpgg=\n-----END CERTIFICATE-----\n' is not JSON serializable ``` ##### Any other comments? What have you tried so far? `Flow.client_conn.mitmcert`is in the type of `bytes`, so `json.dumps()` could not handle it and throw an exception saying that it is not JSON serializable. I noticed that in `flow_to_json()` function, there is a comment say: > Remove flow message content and cert to save transmission space. And we indeed remove the `server_conn.cert` from the returning dict, but left `client_conn.mitmcert`. 
I have tried to add one more line of code to remove `client_conn.mitmcert` from the returning dict and it solve this exception. However, I am not sure whether it is appropriate to remove this item. Or should we convert it into a string and keep it in the returning dict? ##### System information Mitmproxy version: 3.0.0 (2.0.0dev0028-0x0fdf2c0) Python version: 3.5.2 Platform: Linux-4.4.0-63-generic-x86_64-with-Ubuntu-16.04-xenial SSL version: OpenSSL 1.0.2g 1 Mar 2016 Linux distro: Ubuntu 16.04 xenial
[ { "content": "import hashlib\nimport json\nimport logging\nimport os.path\nimport re\nfrom io import BytesIO\n\nimport mitmproxy.addons.view\nimport mitmproxy.flow\nimport tornado.escape\nimport tornado.web\nimport tornado.websocket\nfrom mitmproxy import contentviews\nfrom mitmproxy import exceptions\nfrom mit...
[ { "content": "import hashlib\nimport json\nimport logging\nimport os.path\nimport re\nfrom io import BytesIO\n\nimport mitmproxy.addons.view\nimport mitmproxy.flow\nimport tornado.escape\nimport tornado.web\nimport tornado.websocket\nfrom mitmproxy import contentviews\nfrom mitmproxy import exceptions\nfrom mit...
diff --git a/mitmproxy/tools/web/app.py b/mitmproxy/tools/web/app.py index 1f3467cce5..893c3dde0a 100644 --- a/mitmproxy/tools/web/app.py +++ b/mitmproxy/tools/web/app.py @@ -85,6 +85,7 @@ def flow_to_json(flow: mitmproxy.flow.Flow) -> dict: "is_replay": flow.response.is_replay, } f.get("server_conn", {}).pop("cert", None) + f.get("client_conn", {}).pop("mitmcert", None) return f
praw-dev__praw-982
mark_visited function appears to be broken or using the wrong endpoint ## Issue Description I was tooling around in an interactive session so I don't have a super clean source code snippet, but I tried to mark a submission as visited and got this error instead. ``` In [44]: submi = reddit.submission(s.id) In [45]: submi.mark_visited() --------------------------------------------------------------------------- Forbidden Traceback (most recent call last) ~/reddit_opioid_mining/scraper.py in <module>() ----> 1 submi.mark_visited() /usr/local/lib/python3.5/dist-packages/praw/models/reddit/submission.py in mark_visited(self) 181 """ 182 data = {'links': self.fullname} --> 183 self._reddit.post(API_PATH['store_visits'], data=data) 184 185 def hide(self, other_submissions=None): /usr/local/lib/python3.5/dist-packages/praw/reddit.py in post(self, path, data, files, params) 463 """ 464 data = self.request('POST', path, data=data or {}, files=files, --> 465 params=params) 466 return self._objector.objectify(data) 467 /usr/local/lib/python3.5/dist-packages/praw/reddit.py in request(self, method, path, params, data, files) 504 """ 505 return self._core.request(method, path, data=data, files=files, --> 506 params=params) 507 508 def submission( # pylint: disable=invalid-name,redefined-builtin /usr/local/lib/python3.5/dist-packages/prawcore/sessions.py in request(self, method, path, data, files, json, params) 183 return self._request_with_retries( 184 data=data, files=files, json=json, method=method, --> 185 params=params, url=url) 186 187 /usr/local/lib/python3.5/dist-packages/prawcore/sessions.py in _request_with_retries(self, data, files, json, method, params, url, retries) 128 retries, saved_exception, url) 129 elif response.status_code in self.STATUS_EXCEPTIONS: --> 130 raise self.STATUS_EXCEPTIONS[response.status_code](response) 131 elif response.status_code == codes['no_content']: 132 return Forbidden: received 403 HTTP response ``` ## System Information - PRAW Version: 
6.0.0 - Python Version: 3.5.2 - Operating System: Ubuntu 16.04
[ { "content": "\"\"\"Provide the Submission class.\"\"\"\nfrom ...const import API_PATH, urljoin\nfrom ...exceptions import ClientException\nfrom ..comment_forest import CommentForest\nfrom ..listing.mixins import SubmissionListingMixin\nfrom .base import RedditBase\nfrom .mixins import ThingModerationMixin, Use...
[ { "content": "\"\"\"Provide the Submission class.\"\"\"\nfrom ...const import API_PATH, urljoin\nfrom ...exceptions import ClientException\nfrom ..comment_forest import CommentForest\nfrom ..listing.mixins import SubmissionListingMixin\nfrom .base import RedditBase\nfrom .mixins import ThingModerationMixin, Use...
diff --git a/praw/models/reddit/submission.py b/praw/models/reddit/submission.py index f987186a8..fe75d0a4e 100644 --- a/praw/models/reddit/submission.py +++ b/praw/models/reddit/submission.py @@ -216,6 +216,8 @@ def _info_path(self): def mark_visited(self): """Mark submission as visited. + This method requires a subscription to reddit premium. + Example usage: .. code:: python
python-telegram-bot__python-telegram-bot-1063
User.full_name doesn't handle non-ASCII (in Python 2?) ### Steps to reproduce ```python updater = ext.Updater(token=settings.telegram_token()) def F(bot, update): user = update.effective_user print repr(user.first_name), repr(user.last_name) print '%s %s' % (user.first_name, user.last_name) print user.full_name updater.dispatcher.add_handler(ext.MessageHandler(0, F)) updater.start_polling() updater.idle() ``` ### Expected behaviour ``` u'Dan\u2022iel' u'Reed' Dan•iel Reed Dan•iel Reed ``` ### Actual behaviour ``` u'Dan\u2022iel' u'Reed' Dan•iel Reed ERROR dispatcher.py:301] An uncaught error was raised while processing the update Traceback (most recent call last): File "local/lib/python2.7/site-packages/telegram/ext/dispatcher.py", line 279, in process_update handler.handle_update(update, self) File "local/lib/python2.7/site-packages/telegram/ext/messagehandler.py", line 169, in handle_update return self.callback(dispatcher.bot, update, **optional_args) File "<stdin>", line 5, in F File "local/lib/python2.7/site-packages/telegram/user.py", line 91, in full_name return '{} {}'.format(self.first_name, self.last_name) UnicodeEncodeError: 'ascii' codec can't encode character u'\u2022' in position 3: ordinal not in range(128) ``` ### Configuration **Operating System:** **Version of Python, python-telegram-bot & dependencies:** ``` python-telegram-bot 10.0.1 certifi 2018.01.18 future 0.16.0 Python 2.7.14 (default, Sep 23 2017, 22:06:14) [GCC 7.2.0] ``` I'm a little rushed, but this is works for me: ```python @property def full_name(self): """ :obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if available) :attr:`last_name`. """ if self.last_name: ! return u'{} {}'.format(self.first_name, self.last_name) return self.first_name ```
[ { "content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under ...
[ { "content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under ...
diff --git a/telegram/user.py b/telegram/user.py index 018dde25ee6..a407888bde9 100644 --- a/telegram/user.py +++ b/telegram/user.py @@ -88,7 +88,7 @@ def full_name(self): """ if self.last_name: - return '{} {}'.format(self.first_name, self.last_name) + return u'{} {}'.format(self.first_name, self.last_name) return self.first_name @classmethod diff --git a/tests/test_user.py b/tests/test_user.py index 67975ca0bc4..216ed33d88d 100644 --- a/tests/test_user.py +++ b/tests/test_user.py @@ -42,8 +42,8 @@ def user(bot): class TestUser(object): id = 1 is_bot = True - first_name = 'first_name' - last_name = 'last_name' + first_name = u'first\u2022name' + last_name = u'last\u2022name' username = 'username' language_code = 'en_us' @@ -85,16 +85,16 @@ def test_de_json_without_username_and_last_name(self, json_dict, bot): def test_name(self, user): assert user.name == '@username' user.username = None - assert user.name == 'first_name last_name' + assert user.name == u'first\u2022name last\u2022name' user.last_name = None - assert user.name == 'first_name' + assert user.name == u'first\u2022name' user.username = self.username assert user.name == '@username' def test_full_name(self, user): - assert user.full_name == 'first_name last_name' + assert user.full_name == u'first\u2022name last\u2022name' user.last_name = None - assert user.full_name == 'first_name' + assert user.full_name == u'first\u2022name' def test_get_profile_photos(self, monkeypatch, user): def test(_, *args, **kwargs):
ansible-collections__community.vmware-1030
Documentation fix needed in community.vmware.vsphere_file module ##### SUMMARY There is module called **community.vmware.vsphere_file** . There is one task _Query a file on a datastore_ to get information of already existing file on vsphere. But In Documentation there mentioned **state : touch** . But state:touch is used to create new blank file on vsphere,not to get existing file information. In order to Query a file the state attribute value should `file` not touch. **state : file** Correct code : - name: Query a file on a datastore community.vmware.vsphere_file: host: '{{ vhost }}' username: '{{ vuser }}' password: '{{ vpass }}' datacenter: DC1 Someplace datastore: datastore1 path: some/remote/file **state: file** delegate_to: localhost ignore_errors: true ##### ISSUE TYPE - Documentation Report ##### COMPONENT NAME community.vmware.vsphere_file ##### ANSIBLE VERSION ``` ansible 2.10.9 config file = /etc/ansible/ansible.cfg configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.6/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.6.8 (default, Aug 13 2020, 07:46:32) [GCC 4.8.5 20150623 (Red Hat 4.8.5-39)] ```
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\...
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\...
diff --git a/plugins/modules/vsphere_file.py b/plugins/modules/vsphere_file.py index 28fc54649b..17415699e2 100644 --- a/plugins/modules/vsphere_file.py +++ b/plugins/modules/vsphere_file.py @@ -105,7 +105,7 @@ datacenter: DC1 Someplace datastore: datastore1 path: some/remote/file - state: touch + state: file delegate_to: localhost ignore_errors: true
docker__docker-py-1473
DaemonApiMixin.events does not propagate HttpHeaders from config.json The docker.api.daemon.DaemonApiMixin.events does not make use of the config.json, which could have custom HTTP headers to pass to the server.
[ { "content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom .. import auth, utils\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n \"\"\"\n Get real-time...
[ { "content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom .. import auth, utils\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n \"\"\"\n Get real-time...
diff --git a/docker/api/daemon.py b/docker/api/daemon.py index d40631f59..033458491 100644 --- a/docker/api/daemon.py +++ b/docker/api/daemon.py @@ -54,7 +54,7 @@ def events(self, since=None, until=None, filters=None, decode=None): } return self._stream_helper( - self.get(self._url('/events'), params=params, stream=True), + self._get(self._url('/events'), params=params, stream=True), decode=decode ) diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 15e4d7cc6..b632d209b 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -228,7 +228,8 @@ def test_events(self): 'GET', url_prefix + 'events', params={'since': None, 'until': None, 'filters': None}, - stream=True + stream=True, + timeout=DEFAULT_TIMEOUT_SECONDS ) def test_events_with_since_until(self): @@ -247,7 +248,8 @@ def test_events_with_since_until(self): 'until': ts + 10, 'filters': None }, - stream=True + stream=True, + timeout=DEFAULT_TIMEOUT_SECONDS ) def test_events_with_filters(self): @@ -265,7 +267,8 @@ def test_events_with_filters(self): 'until': None, 'filters': expected_filters }, - stream=True + stream=True, + timeout=DEFAULT_TIMEOUT_SECONDS ) def _socket_path_for_client_session(self, client):
flairNLP__flair-1375
print function for Dictionary class Currently, the dictionary class only prints an object pointer: ```python corpus = flair.datasets.UD_ENGLISH(in_memory=False) tag_dictionary = corpus.make_tag_dictionary(tag_type='upos') print(tag_dictionary) ``` This prints: ```console <flair.data.Dictionary object at 0x7f83187fcb50> ``` Better would be a printout that shows the number of items in dictionary and lists them.
[ { "content": "from abc import abstractmethod\nfrom operator import itemgetter\nfrom typing import List, Dict, Union, Callable\nimport re\n\nimport torch, flair\nimport logging\n\nfrom collections import Counter\nfrom collections import defaultdict\n\nfrom segtok.segmenter import split_single\nfrom segtok.tokeni...
[ { "content": "from abc import abstractmethod\nfrom operator import itemgetter\nfrom typing import List, Dict, Union, Callable\nimport re\n\nimport torch, flair\nimport logging\n\nfrom collections import Counter\nfrom collections import defaultdict\n\nfrom segtok.segmenter import split_single\nfrom segtok.tokeni...
diff --git a/flair/data.py b/flair/data.py index 0e35dcaa8b..8f6bafed92 100644 --- a/flair/data.py +++ b/flair/data.py @@ -131,6 +131,10 @@ def load(cls, name: str): return Dictionary.load_from_file(name) + def __str__(self): + tags = ', '.join(self.get_item_for_index(i) for i in range(min(len(self), 30))) + return f"Dictionary with {len(self)} tags: {tags}" + class Label: """
lra__mackup-1412
AssertionError on Ubuntu 18.04.2 LTS, Mackup 0.8.25, Python 3.6.7 I'm trying to `mackup restore` on a machine running - Ubuntu 18.04.2 LTS - Mackup 0.8.25 - Python 3.6.7 It fails immediately with the following: ``` Traceback (most recent call last): File "/home/REDACTED/.pyenv/versions/3.6.7/bin/mackup", line 10, in <module> sys.exit(main()) File "/home/REDACTED/.pyenv/versions/3.6.7/lib/python3.6/site-packages/mackup/main.py", line 102, in main verbose) File "/home/REDACTED/.pyenv/versions/3.6.7/lib/python3.6/site-packages/mackup/application.py", line 26, in __init__ assert isinstance(files, set) AssertionError ``` I sync via dropbox, and to debug I made a tar.gz of the original mackup folder and copied/extracted it directly with no luck :( Not sure how to proceed to debug further, I've also tried `mackup restore -v` with no luck.
[ { "content": "\"\"\"\nThe applications database.\n\nThe Applications Database provides an easy to use interface to load application\ndata from the Mackup Database (files).\n\"\"\"\nimport os\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\n\nfrom .constants impor...
[ { "content": "\"\"\"\nThe applications database.\n\nThe Applications Database provides an easy to use interface to load application\ndata from the Mackup Database (files).\n\"\"\"\nimport os\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\n\nfrom .constants impor...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 04c4026cb..485ce9631 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## WIP +- Hotfix, Mackup could not run in most scenarios + ## Mackup 0.8.25 - Add support for yabai (via @mbdmbd) diff --git a/mackup/appsdb.py b/mackup/appsdb.py index 88df5fb07..01e881a20 100644 --- a/mackup/appsdb.py +++ b/mackup/appsdb.py @@ -139,7 +139,7 @@ def get_files(self, name): Returns: set of str. """ - return sorted(self.apps[name]['configuration_files']) + return self.apps[name]['configuration_files'] def get_app_names(self): """
aws-cloudformation__cfn-lint-1081
Error running cfn-lint with pipe (|) cfn-lint version: *v0.23.0* Hello we have a problem running cfn-lint with find command. Only this version is affected as far as we know. We are keeping couple of template is a folder and linting them like that: ``` find ./templates -type f | xargs cfn-lint -f parseable -c I -t ``` It worked flawlessly before but with the new update we are getting this error: > 2019-08-02 15:37:01,818 - cfnlint.decode - ERROR - Template file not found: None None:1:1:1:2:E0000:Template file not found: None Splitting the files in separated lines with `xargs -L 1` doesn't help. If you run the cfn-lint command on it's own it works as expected. This example **doesn't** work: ``` find ./templates -type f | xargs -t cfn-lint -f parseable -c I -t cfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml 2019-08-02 15:50:20,891 - cfnlint.decode - ERROR - Template file not found: None None:1:1:1:2:E0000:Template file not found: None ``` This example works: ``` cfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml ``` Regards TT
[ { "content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including w...
[ { "content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including w...
diff --git a/src/cfnlint/core.py b/src/cfnlint/core.py index 1e81225e53..bd129e2ec5 100644 --- a/src/cfnlint/core.py +++ b/src/cfnlint/core.py @@ -140,7 +140,7 @@ def get_args_filenames(cli_args): print(rules) exit(0) - if not sys.stdin.isatty(): + if not sys.stdin.isatty() and not config.templates: return(config, [None], formatter) if not config.templates: diff --git a/test/module/core/test_run_cli.py b/test/module/core/test_run_cli.py index 32fea1073b..8dfdbf91d0 100644 --- a/test/module/core/test_run_cli.py +++ b/test/module/core/test_run_cli.py @@ -92,6 +92,10 @@ def test_template_via_stdin(self): (_, filenames, _) = cfnlint.core.get_args_filenames([]) assert filenames == [None] + with patch('sys.stdin', StringIO(file_content)): + (_, filenames, _) = cfnlint.core.get_args_filenames(['--template', filename]) + assert filenames == [filename] + @patch('cfnlint.config.ConfigFileArgs._read_config', create=True) def test_template_config(self, yaml_mock): """Test template config"""
googleapis__google-cloud-python-6262
Redis: regen README.rst (DO NOT MERGE) This PR was generated using Autosynth. :rainbow: Here's the log from Synthtool: ``` synthtool > Cloning googleapis. synthtool > Running generator for google/cloud/redis/artman_redis_v1beta1.yaml. synthtool > Ensuring dependencies. synthtool > Pulling artman image. synthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/python/redis-v1beta1. synthtool > Running generator for google/cloud/redis/artman_redis_v1.yaml. synthtool > Ensuring dependencies. synthtool > Pulling artman image. synthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/python/redis-v1. synthtool > Replaced 'resources of the form:\\n ``' in google/cloud/redis_v1/gapic/cloud_redis_client.py. synthtool > Replaced 'resources of the form:\\n ``' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py. synthtool > Replaced '\n parent \\(str\\): Required. The resource name of the instance location using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1/gapic/cloud_redis_client.py. synthtool > Replaced '\n parent \\(str\\): Required. The resource name of the instance location using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py. synthtool > Replaced '\n with the following restrictions:\n\n \\* Must contain only lowercase letters, numbers, and hyphens\\.' in google/cloud/redis_v1/gapic/cloud_redis_client.py. synthtool > Replaced '\n with the following restrictions:\n\n \\* Must contain only lowercase letters, numbers, and hyphens\\.' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py. synthtool > Replaced '\n name \\(str\\): Required. 
Redis instance resource name using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1/gapic/cloud_redis_client.py. synthtool > Replaced '\n name \\(str\\): Required. Redis instance resource name using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py. synthtool > Replaced '\n fields from ``Instance``:\n\n \\* ``displayName``\n \\* ``labels``\n \\* ``memorySizeGb``\n \\* ``redisConfig``' in google/cloud/redis_v1/gapic/cloud_redis_client.py. synthtool > Replaced '(release_status = )(.*)$' in setup.py. synthtool > Replaced '.. _Enable the Google Cloud Memorystore for Redis API.: https://cloud.google.com/redis' in README.rst. synthtool > Replaced 'https://cloud.google.com/redis' in README.rst. synthtool > Replaced 'https://googlecloudplatform.github.io/google-cloud-python/stable/redis/usage.html' in README.rst. synthtool > Replaced 'https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html' in README.rst. synthtool > Cleaned up 1 temporary directories. ```
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicabl...
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicabl...
diff --git a/redis/synth.py b/redis/synth.py index 21c92a28c279..b0942bd4500e 100644 --- a/redis/synth.py +++ b/redis/synth.py @@ -23,8 +23,9 @@ gapic = gcp.GAPICGenerator() common = gcp.CommonTemplates() excludes = [ + 'README.rst', 'setup.py', - 'nox.py', + 'nox*.py', 'docs/conf.py', 'docs/index.rst', ]
zostera__django-bootstrap3-473
Fix simple typo: attrivute -> attribute There is a small typo in src/bootstrap3/templatetags/bootstrap3.py. Should read `attribute` rather than `attrivute`.
[ { "content": "import re\nfrom math import floor\n\nfrom django import template\nfrom django.contrib.messages import constants as message_constants\nfrom django.template import Context\nfrom django.utils.safestring import mark_safe\n\nfrom ..bootstrap import css_url, get_bootstrap_setting, javascript_url, jquery...
[ { "content": "import re\nfrom math import floor\n\nfrom django import template\nfrom django.contrib.messages import constants as message_constants\nfrom django.template import Context\nfrom django.utils.safestring import mark_safe\n\nfrom ..bootstrap import css_url, get_bootstrap_setting, javascript_url, jquery...
diff --git a/src/bootstrap3/templatetags/bootstrap3.py b/src/bootstrap3/templatetags/bootstrap3.py index 23da590a..f163e711 100644 --- a/src/bootstrap3/templatetags/bootstrap3.py +++ b/src/bootstrap3/templatetags/bootstrap3.py @@ -623,7 +623,7 @@ def bootstrap_icon(icon, **kwargs): Extra CSS classes to add to the icon HTML title - A title for the icon (HTML title attrivute) + A title for the icon (HTML title attribute) **Usage**::
dotkom__onlineweb4-501
UserResource in API should not display last login date publicly Somewhat sensitive information...
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\nfrom tastypie.authorization import Authorization\n\nfrom apps.authentication.models import OnlineUser as User\n\nclass UserResource(ModelResource):\n\n class Meta:\n queryset = User.obje...
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\nfrom tastypie.authorization import Authorization\n\nfrom apps.authentication.models import OnlineUser as User\n\nclass UserResource(ModelResource):\n\n class Meta:\n queryset = User.obje...
diff --git a/apps/api/v0/authentication.py b/apps/api/v0/authentication.py index c299928ea..c79905101 100644 --- a/apps/api/v0/authentication.py +++ b/apps/api/v0/authentication.py @@ -11,4 +11,4 @@ class UserResource(ModelResource): class Meta: queryset = User.objects.all() resource_name = 'user' - fields = ['username', 'first_name', 'last_name', 'last_login', 'email', ] + fields = ['username', 'first_name', 'last_name', 'email', ]
open-telemetry__opentelemetry-python-contrib-566
AWS X-Ray propagator should be registered with xray environment variable In the spec, we have a definition for the environment variable as `xray` https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration Currently python uses `aws_xray`
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by...
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 5661998af0..2d2a12f78f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased](https://github.com/open-telemetry/opentelemetry-python/compare/v1.3.0-0.22b0...HEAD) +- `opentelemetry-sdk-extension-aws` Update AWS entry points to match spec + ([#566](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/566)) - Include Flask 2.0 as compatible with existing flask instrumentation ([#545](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/545)) diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/README.rst b/sdk-extension/opentelemetry-sdk-extension-aws/README.rst index 790f6cb1dc..e95b44411e 100644 --- a/sdk-extension/opentelemetry-sdk-extension-aws/README.rst +++ b/sdk-extension/opentelemetry-sdk-extension-aws/README.rst @@ -53,7 +53,7 @@ This can be done by either setting this environment variable: :: - export OTEL_PROPAGATORS = aws_xray + export OTEL_PROPAGATORS = xray Or by setting this propagator in your instrumented application: diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/setup.cfg b/sdk-extension/opentelemetry-sdk-extension-aws/setup.cfg index bec34df03b..6f312fa51c 100644 --- a/sdk-extension/opentelemetry-sdk-extension-aws/setup.cfg +++ b/sdk-extension/opentelemetry-sdk-extension-aws/setup.cfg @@ -42,9 +42,9 @@ install_requires = [options.entry_points] opentelemetry_propagator = - aws_xray = opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format:AwsXRayFormat + xray = opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format:AwsXRayFormat opentelemetry_id_generator = - aws_xray = opentelemetry.sdk.extension.aws.trace.aws_xray_id_generator:AwsXRayIdGenerator + xray = opentelemetry.sdk.extension.aws.trace.aws_xray_id_generator:AwsXRayIdGenerator 
[options.extras_require] test = diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/trace/propagation/aws_xray_format.py b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/trace/propagation/aws_xray_format.py index 2e5e913252..2bc145b791 100644 --- a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/trace/propagation/aws_xray_format.py +++ b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/trace/propagation/aws_xray_format.py @@ -31,7 +31,7 @@ :: - export OTEL_PROPAGATORS = aws_xray + export OTEL_PROPAGATORS = xray Or by setting this propagator in your instrumented application:
mlflow__mlflow-9827
[DOC-FIX] Doc for Run.inputs erroneously refers to Run.data ### Willingness to contribute No. I cannot contribute a documentation fix at this time. ### URL(s) with the issue https://www.mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.Run ### Description of proposal (what needs changing) In the Run doc page, the doc for Run.inputs refers to Run.data instead of Run.input. property inputs The run inputs, including dataset inputs Return type mlflow.entities.RunData
[ { "content": "from typing import Any, Dict, Optional\n\nfrom mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.run_data import RunData\nfrom mlflow.entities.run_info import RunInfo\nfrom mlflow.entities.run_inputs import RunInputs\nfrom mlflow.exceptions import MlflowException\nfrom mlfl...
[ { "content": "from typing import Any, Dict, Optional\n\nfrom mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.run_data import RunData\nfrom mlflow.entities.run_info import RunInfo\nfrom mlflow.entities.run_inputs import RunInputs\nfrom mlflow.exceptions import MlflowException\nfrom mlfl...
diff --git a/mlflow/entities/run.py b/mlflow/entities/run.py index 84718209c1954..731ffd900c0b0 100644 --- a/mlflow/entities/run.py +++ b/mlflow/entities/run.py @@ -45,7 +45,7 @@ def inputs(self) -> RunInputs: """ The run inputs, including dataset inputs - :rtype: :py:class:`mlflow.entities.RunData` + :rtype: :py:class:`mlflow.entities.RunInputs` """ return self._inputs
awslabs__gluonts-2148
`PandasDataset` slow at creating when many large `DataFrame`s are given ## Description The `PandasDataset` class is slow at constructing when several large DataFrames are given. It appears like [this check](https://github.com/awslabs/gluon-ts/blob/94247a9c0d4768aeb4a17a8bb44252706c519a6a/src/gluonts/dataset/pandas.py#L296-L308) is to be blamed. ## To Reproduce The following snippet takes something like 14 seconds to run on my machine: ```python import pandas as pd from gluonts.dataset.pandas import PandasDataset df = pd.DataFrame( { k: [1.0] * 5000 for k in range(200) }, index=pd.period_range("2005-01-01", periods=5000, freq="2H") ) dataset = PandasDataset(dict(df)) ``` ## What I tried Changing the definition of [`is_uniform`](https://github.com/awslabs/gluon-ts/blob/94247a9c0d4768aeb4a17a8bb44252706c519a6a/src/gluonts/dataset/pandas.py#L296-L308) to ```python def is_uniform(index: pd.PeriodIndex) -> bool: ts_index = index.to_timestamp() return (ts_index[1:] - ts_index[:-1] == index.freq).all() ``` drastically reduces the runtime. However, this doesn't work with irregular offsets like `MonthEnd` (in fact, a test using `3M` frequency fails): turning `MonthEnd` periods to timestamp makes their difference become irregular in terms of days: ```python import pandas as pd pi = pd.period_range("2012-01", periods=3, freq="M") print(pi[1:] - pi[:-1]) # Index([<MonthEnd>, <MonthEnd>], dtype='object') dti = pi.to_timestamp() print(dti[1:] - dti[:-1]) # TimedeltaIndex(['31 days', '29 days'], dtype='timedelta64[ns]', freq=None) ```
[ { "content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICE...
[ { "content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICE...
diff --git a/src/gluonts/dataset/pandas.py b/src/gluonts/dataset/pandas.py index 72ecbefc65..4c21c12a34 100644 --- a/src/gluonts/dataset/pandas.py +++ b/src/gluonts/dataset/pandas.py @@ -305,4 +305,5 @@ def is_uniform(index: pd.PeriodIndex) -> bool: >>> is_uniform(pd.DatetimeIndex(ts).to_period("2H")) False """ - return (index[1:] - index[:-1] == index.freq).all() + other = pd.period_range(index[0], periods=len(index), freq=index.freq) + return (other == index).all()
d2l-ai__d2l-vi-115
test
[ { "content": "# encoding=utf8\nimport codecs\nimport filecmp\nimport re\nimport sys\nimport argparse\n\n# reload(sys)\n# sys.setdefaultencoding('utf8')\n\nBEGIN_BLOCK_COMMENT = '<!--\\n'\nEND_BLOCK_COMMENT = '-->\\n\\n'\nTRANSLATE_INDICATOR = '*dịch đoạn phía trên*'\nHEADER_INDICATOR = ' *dịch tiêu đề phía trên...
[ { "content": "# encoding=utf8\nimport codecs\nimport filecmp\nimport re\nimport sys\nimport argparse\n\nBEGIN_BLOCK_COMMENT = '<!--\\n'\nEND_BLOCK_COMMENT = '-->\\n\\n'\nTRANSLATE_INDICATOR = '*dịch đoạn phía trên*'\nHEADER_INDICATOR = ' *dịch tiêu đề phía trên*\\n'\nIMAGE_CAPTION_INDICATOR = '*dịch chú thích ả...
diff --git a/utils.py b/utils.py index 242fea1d92..89e1f0c35a 100644 --- a/utils.py +++ b/utils.py @@ -5,9 +5,6 @@ import sys import argparse -# reload(sys) -# sys.setdefaultencoding('utf8') - BEGIN_BLOCK_COMMENT = '<!--\n' END_BLOCK_COMMENT = '-->\n\n' TRANSLATE_INDICATOR = '*dịch đoạn phía trên*'
PaddlePaddle__PaddleSpeech-2364
Added pre-install doc for G2P and TN modules and updated the dependency version of pypinyin Added pre-install doc for G2P and TN modules and updated the dependency version of pypinyin
[ { "content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2....
[ { "content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2....
diff --git a/docs/requirements.txt b/docs/requirements.txt index bd071e7e20c..3fb82367f64 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -27,7 +27,7 @@ pattern_singleton Pillow>=9.0.0 praatio==5.0.0 prettytable -pypinyin +pypinyin<=0.44.0 pypinyin-dict python-dateutil pyworld==0.2.12 diff --git a/examples/other/g2p/README.md b/examples/other/g2p/README.md index 85c9535d1f3..a1911b2f6a4 100644 --- a/examples/other/g2p/README.md +++ b/examples/other/g2p/README.md @@ -9,6 +9,9 @@ We use `WER` as an evaluation criterion. Run the command below to get the results of the test. ```bash +cd ../../../tools +bash extras/install_sclite.sh +cd - ./run.sh ``` diff --git a/examples/other/tn/README.md b/examples/other/tn/README.md index 3b80de661e0..cae89a36a39 100644 --- a/examples/other/tn/README.md +++ b/examples/other/tn/README.md @@ -5,6 +5,9 @@ We use `CER` as an evaluation criterion. ## Start Run the command below to get the results of the test. ```bash +cd ../../../tools +bash extras/install_sclite.sh +cd - ./run.sh ``` The `avg CER` of text normalization is: 0.00730093543235227 diff --git a/setup.py b/setup.py index fac9e1207d8..e551d9fa6f7 100644 --- a/setup.py +++ b/setup.py @@ -52,7 +52,7 @@ "Pillow>=9.0.0", "praatio==5.0.0", "protobuf>=3.1.0, <=3.20.0", - "pypinyin", + "pypinyin<=0.44.0", "pypinyin-dict", "python-dateutil", "pyworld==0.2.12",
AUTOMATIC1111__stable-diffusion-webui-60
FileNotFoundError after new update Getting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts' after the new update. Not exactly good at all the coding stuff, using it just fine yesterday but I downloaded the repo instead of git clone, for the sake of easier update I started a new installation by git cloning into user folder and the installation went well but ran into this while launching through webui.py. Python 3.10.6 venv C:\Users\admin\stable-diffusion-webui\venv\Scripts\Python.exe Launching webui.py... Loading model from C:\Users\admin\stable-diffusion-webui\model.ckpt Global Step: 470000 LatentDiffusion: Running in eps-prediction mode DiffusionWrapper has 859.52 M params. making attention of type 'vanilla' with 512 in_channels Working with z of shape (1, 4, 32, 32) = 4096 dimensions. making attention of type 'vanilla' with 512 in_channels Traceback (most recent call last): File "C:\Users\admin\stable-diffusion-webui\webui.py", line 135, in <module> modules.scripts.load_scripts(os.path.join(script_path, "scripts")) File "C:\Users\admin\stable-diffusion-webui\modules\scripts.py", line 32, in load_scripts for filename in os.listdir(basedir): FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts'
[ { "content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport modules.ui as ui\r\nimport gradio as gr\r\n\r\nfrom modules.processing import StableDiffusionProcessing\r\n\r\nclass Script:\r\n filename = None\r\n args_from = None\r\n args_to = None\r\n\r\n def title(self):\r\n raise N...
[ { "content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport modules.ui as ui\r\nimport gradio as gr\r\n\r\nfrom modules.processing import StableDiffusionProcessing\r\n\r\nclass Script:\r\n filename = None\r\n args_from = None\r\n args_to = None\r\n\r\n def title(self):\r\n raise N...
diff --git a/modules/scripts.py b/modules/scripts.py index be348a70481..37a236827c4 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -29,6 +29,9 @@ def describe(self): def load_scripts(basedir): + if not os.path.exists(basedir): + return + for filename in os.listdir(basedir): path = os.path.join(basedir, filename)
mitmproxy__mitmproxy-5603
libGL error when starting latest version of mitmweb 8.1.1 on Debian #### Problem Description I was using old version of mitmproxy 6.0.2 that I got installed from the debian unstable repository and it works just fine. then today I decided to download the latest version of mitmproxy 8.1.1 and I got the below errors immediately after I type in `./mitmweb` ``` Web server listening at http://127.0.0.1:8081/ Opening in existing browser session. Proxy server listening at *:8080 libGL error: MESA-LOADER: failed to open crocus: /usr/lib/dri/crocus_dri.so: cannot open shared object file: No such file or directory (search paths /usr/lib/x86_64-linux-gnu/dri:\$${ORIGIN}/dri:/usr/lib/dri, suffix _dri) libGL error: failed to load driver: crocus libGL error: MESA-LOADER: failed to open crocus: /usr/lib/dri/crocus_dri.so: cannot open shared object file: No such file or directory (search paths /usr/lib/x86_64-linux-gnu/dri:\$${ORIGIN}/dri:/usr/lib/dri, suffix _dri) libGL error: failed to load driver: crocus libGL error: MESA-LOADER: failed to open swrast: /usr/lib/dri/swrast_dri.so: cannot open shared object file: No such file or directory (search paths /usr/lib/x86_64-linux-gnu/dri:\$${ORIGIN}/dri:/usr/lib/dri, suffix _dri) libGL error: failed to load driver: swrast [5508:5508:0100/000000.622195:ERROR:angle_platform_impl.cc(43)] Display.cpp:992 (initialize): ANGLE Display::initialize error 12289: Could not create a backing OpenGL context. [5508:5508:0100/000000.622454:ERROR:gl_surface_egl.cc(831)] EGL Driver message (Critical) eglInitialize: Could not create a backing OpenGL context. [5508:5508:0100/000000.622599:ERROR:gl_surface_egl.cc(1353)] eglInitialize OpenGL failed with error EGL_NOT_INITIALIZED, trying next display type [5508:5508:0100/000000.625277:ERROR:angle_platform_impl.cc(43)] Display.cpp:992 (initialize): ANGLE Display::initialize error 12289: Could not create a backing OpenGL context. 
[5508:5508:0100/000000.625508:ERROR:gl_surface_egl.cc(831)] EGL Driver message (Critical) eglInitialize: Could not create a backing OpenGL context. [5508:5508:0100/000000.625555:ERROR:gl_surface_egl.cc(1353)] eglInitialize OpenGLES failed with error EGL_NOT_INITIALIZED [5508:5508:0100/000000.625654:ERROR:gl_ozone_egl.cc(23)] GLSurfaceEGL::InitializeOneOff failed. ``` And the URL at http://127.0.0.1:8081 loads just a blank page. Note that I checked, and I have `libgl1-mesa-dri` package already installed. #### Steps to reproduce the behavior: 1. download latest version of mitmproxy 8.1.1 2. open the terminal and type in `./mitmweb` #### System Information Paste the output of "./mitmproxy --version" ``` Mitmproxy: 8.1.1 binary Python: 3.10.5 OpenSSL: OpenSSL 3.0.3 3 May 2022 Platform: Linux-5.18.0-3-amd64-x86_64-with-glibc2.34 ``` I will include the output of mitmproxy of version 6.0.2 that I have installed on the same system as I noticed that Python and OpenSSL versions are different: ``` Mitmproxy: 6.0.2 Python: 3.10.6 OpenSSL: OpenSSL 3.0.5 5 Jul 2022 Platform: Linux-5.18.0-3-amd64-x86_64-with-glibc2.34 ```
[ { "content": "import logging\nimport webbrowser\nfrom collections.abc import Sequence\n\nfrom mitmproxy import ctx\n\n\nclass WebAddon:\n def load(self, loader):\n loader.add_option(\"web_open_browser\", bool, True, \"Start a browser.\")\n loader.add_option(\"web_debug\", bool, False, \"Enable ...
[ { "content": "import logging\nimport webbrowser\nfrom collections.abc import Sequence\n\nfrom mitmproxy import ctx\n\n\nclass WebAddon:\n def load(self, loader):\n loader.add_option(\"web_open_browser\", bool, True, \"Start a browser.\")\n loader.add_option(\"web_debug\", bool, False, \"Enable ...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 8476be5201..6b3a73a5a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,9 @@ ([#5588](https://github.com/mitmproxy/mitmproxy/pull/5588), @nikitastupin, @abbbe) * Add WireGuard mode to enable userspace transparent proxying via WireGuard. ([#5562](https://github.com/mitmproxy/mitmproxy/pull/5562), @decathorpe, @mhils) +* Fix mitmweb not properly opening a browser and being stuck on some Linux. + ([#5522](https://github.com/mitmproxy/mitmproxy/issues/5522), @Prinzhorn) + ## 28 June 2022: mitmproxy 8.1.1 diff --git a/mitmproxy/tools/web/webaddons.py b/mitmproxy/tools/web/webaddons.py index 3ba42f12a2..6d5970988a 100644 --- a/mitmproxy/tools/web/webaddons.py +++ b/mitmproxy/tools/web/webaddons.py @@ -44,7 +44,7 @@ def open_browser(url: str) -> bool: "macosx", "wslview %s", "gio", - "x-www-browser %s", + "x-www-browser", "gnome-open %s", "xdg-open", "google-chrome",
wemake-services__wemake-python-styleguide-204
Feature: ignore async function definitions from jones complexity check Currently we only ignore `ClassDef` and `FunctionDef`: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/complexity/jones.py#L38-L41 What needs to be done: 1. ignore `AsyncFunctionDef` from the check 2. we do not have a special test case for ignoring nodes for now. It should be added. We can call it `test_that_some_nodes_are_ignored`. It should test all three ignored nodes: with the lowest complexity threshold there should be no errors: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_complexity/test_jones/test_line_complexity.py
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nJones Complexity to count inline complexity.\n\nBased on the original `jones-complexity` project:\nhttps://github.com/Miserlou/JonesComplexity\n\nOriginal project is licensed under MIT.\n\"\"\"\n\nimport ast\nfrom collections import defaultdict\nfrom statistics i...
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nJones Complexity to count inline complexity.\n\nBased on the original `jones-complexity` project:\nhttps://github.com/Miserlou/JonesComplexity\n\nOriginal project is licensed under MIT.\n\"\"\"\n\nimport ast\nfrom collections import defaultdict\nfrom statistics i...
diff --git a/CHANGELOG.md b/CHANGELOG.md index 358a18761..7011c6fdf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ We used to have incremental versioning before `0.1.0`. - We now count `async` methods as method for classes complexity check - We now count `async` functions as functions for module complexity check - We now count `async` functions complexity +- We now ignore `async` functions in jones complexity check ### Misc diff --git a/tests/test_visitors/test_ast/test_complexity/test_jones/test_line_complexity.py b/tests/test_visitors/test_ast/test_complexity/test_jones/test_line_complexity.py index 5aea60576..20e807e9c 100644 --- a/tests/test_visitors/test_ast/test_complexity/test_jones/test_line_complexity.py +++ b/tests/test_visitors/test_ast/test_complexity/test_jones/test_line_complexity.py @@ -16,12 +16,39 @@ def some_function(): return 2 + 1 """ +line_inside_async_function = """ +async def some_function(): + return 2 + 1 +""" + line_inside_class = """ class SomeClass(): field = 13 / 2 """ +class_with_function = """ +class First: + def second(): + return 2 + 1 +""" + +class_with_async_function = """ +class First: + async def second(): + return 2 + 1 +""" + +class_with_usual_and_async_function = """ +class First: + async def second(): + return 2 + 1 + + def third(): + return 2 + 2 +""" + function_declaration = 'def some_function(): ...' +async_function_declaration = 'async def some_function(): ...' class_declaration = 'class SomeClass(object): ...' 
empty_module = '' @@ -32,10 +59,15 @@ class SomeClass(): line_with_comprehension, line_with_math, line_inside_function, + line_inside_async_function, line_inside_class, function_declaration, + async_function_declaration, class_declaration, empty_module, + class_with_function, + class_with_async_function, + class_with_usual_and_async_function, ]) def test_regular_nodes(assert_errors, parse_ast_tree, code, default_options): """Testing that regular nodes do not raise violations.""" @@ -50,6 +82,7 @@ def test_regular_nodes(assert_errors, parse_ast_tree, code, default_options): @pytest.mark.parametrize('code', [ line_simple, line_inside_function, + line_inside_async_function, line_inside_class, ]) def test_complex_lines(assert_errors, parse_ast_tree, code, options): @@ -96,3 +129,22 @@ def test_exact_complexity(parse_ast_tree, default_options, code, complexity): assert len(visitor._lines) == 1 assert len(visitor._lines[1]) == complexity + + +@pytest.mark.parametrize('code, number_of_lines', [ + (line_inside_function, 1), + (line_inside_async_function, 1), + (class_with_async_function, 1), + (class_with_function, 1), + (class_with_usual_and_async_function, 2), +]) +def test_that_some_nodes_are_ignored( + parse_ast_tree, default_options, code, assert_errors, number_of_lines, +): + """Ensures that complexity is counted correctly.""" + tree = parse_ast_tree(code) + + visitor = JonesComplexityVisitor(default_options, tree=tree) + visitor.run() + + assert len(visitor._lines) == number_of_lines diff --git a/wemake_python_styleguide/visitors/ast/complexity/jones.py b/wemake_python_styleguide/visitors/ast/complexity/jones.py index 59dab73cc..1d9e40e1d 100644 --- a/wemake_python_styleguide/visitors/ast/complexity/jones.py +++ b/wemake_python_styleguide/visitors/ast/complexity/jones.py @@ -38,6 +38,7 @@ class JonesComplexityVisitor(BaseNodeVisitor): # TODO: consider `logical_line` _ignored_nodes = ( ast.FunctionDef, ast.ClassDef, + ast.AsyncFunctionDef, ) def __init__(self, *args, 
**kwargs) -> None:
dmlc__dgl-2897
Moving a graph to GPU will change the default CUDA device ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ## To Reproduce ``` import torch import dgl torch.cuda.set_device(1) print(torch.cuda.current_device()) # print 1 device = 'cuda' # 'cuda:1' g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 3]))).to(device) print(torch.cuda.current_device()) # print 0 ``` <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> ## Expected behavior The index of the current device should not be changed. <!-- A clear and concise description of what you expected to happen. --> ## Environment - DGL Version (e.g., 1.0): 0.6 - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): PyTorch 1.9.0a0+gitaeaa91b - OS (e.g., Linux): RHEL - How you installed DGL (`conda`, `pip`, source): source - Build command you used (if compiling from source): - Python version: 3.8 - CUDA/cuDNN version (if applicable): 11.0 - GPU models and configuration (e.g. V100): NVIDIA GeForce RTX 2080 Ti - Any other relevant information: ## Additional context <!-- Add any other context about the problem here. -->
[ { "content": "from __future__ import absolute_import\n\nfrom distutils.version import LooseVersion\n\nimport scipy # Weird bug in new pytorch when import scipy after import torch\nimport torch as th\nimport builtins\nimport numbers\nfrom torch.utils import dlpack\n\nfrom ... import ndarray as nd\nfrom ..._depre...
[ { "content": "from __future__ import absolute_import\n\nfrom distutils.version import LooseVersion\n\nimport scipy # Weird bug in new pytorch when import scipy after import torch\nimport torch as th\nimport builtins\nimport numbers\nfrom torch.utils import dlpack\n\nfrom ... import ndarray as nd\nfrom ..._depre...
diff --git a/python/dgl/backend/pytorch/tensor.py b/python/dgl/backend/pytorch/tensor.py index 7c99a31847ea..34284cd9ec7a 100644 --- a/python/dgl/backend/pytorch/tensor.py +++ b/python/dgl/backend/pytorch/tensor.py @@ -86,7 +86,7 @@ def device_type(ctx): def device_id(ctx): ctx = th.device(ctx) if ctx.index is None: - return 0 + return 0 if ctx.type == 'cpu' else th.cuda.current_device() else: return ctx.index
facebookresearch__ParlAI-581
Can we keep a mturk task from outside parlai/mturk/tasks? Hi @JackUrb, I have a few questions regarding the mturk evaluation: 1. This link (http://parl.ai/static/docs/mturk.html#running-a-task) says that > to run an MTurk task, first ensure that the task directory is in `parlai/mturk/tasks/`. Is it by design? I tried to keep my task in some other directory (outside root parlai directory) and tried to import parlai as a package but that doesn't seem to work. Basically I am trying to use ParlAI as one of the dependency for one of my project for Human In the loop evaluation. 2. How easy/hard it is to provide support for keeping the mturk task anywhere?
[ { "content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n...
[ { "content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n...
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000000..fa9ea114284 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +include parlai/mturk/core/server/html/* +include parlai/mturk/core/server/server.js +include parlai/mturk/core/server/package.json diff --git a/setup.py b/setup.py index 4e895050e87..936b86aae45 100644 --- a/setup.py +++ b/setup.py @@ -30,4 +30,5 @@ packages=find_packages(exclude=( 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')), install_requires=reqs.strip().split('\n'), + include_package_data=True, )
netbox-community__netbox-2144
PUTs to Site Endpoint Requires Value for time_zone <!-- Before opening a new issue, please search through the existing issues to see if your topic has already been addressed. Note that you may need to remove the "is:open" filter from the search bar to include closed issues. Check the appropriate type for your issue below by placing an x between the brackets. For assistance with installation issues, or for any other issues other than those listed below, please raise your topic for discussion on our mailing list: https://groups.google.com/forum/#!forum/netbox-discuss Please note that issues which do not fall under any of the below categories will be closed. Due to an excessive backlog of feature requests, we are not currently accepting any proposals which extend NetBox's feature scope. Do not prepend any sort of tag to your issue's title. An administrator will review your issue and assign labels as appropriate. ---> ### Issue type [ ] Feature request <!-- An enhancement of existing functionality --> [X] Bug report <!-- Unexpected or erroneous behavior --> [ ] Documentation <!-- A modification to the documentation --> <!-- Please describe the environment in which you are running NetBox. (Be sure to verify that you are running the latest stable release of NetBox before submitting a bug report.) If you are submitting a bug report and have made any changes to the code base, please first validate that your bug can be recreated while running an official release. --> ### Environment * Python version: 2.6.7 * NetBox version: 2.4-dev, but includes previous versions as well. 
<!-- BUG REPORTS must include: * A list of the steps needed for someone else to reproduce the bug * A description of the expected and observed behavior * Any relevant error messages (screenshots may also help) FEATURE REQUESTS must include: * A detailed description of the proposed functionality * A use case for the new feature * A rough description of any necessary changes to the database schema * Any relevant third-party libraries which would be needed --> ### Description More details over at digitalocean/pynetbox#59, but when the `time_zone` field is present and null we get an error saying it can't be null. Omitting the field doesn't return an error.
[ { "content": "from __future__ import unicode_literals\n\nfrom collections import OrderedDict\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom circuits.models import Circuit, CircuitTermination\nfrom dcim.constants import (\n CONNECTION_STATUS_CH...
[ { "content": "from __future__ import unicode_literals\n\nfrom collections import OrderedDict\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom circuits.models import Circuit, CircuitTermination\nfrom dcim.constants import (\n CONNECTION_STATUS_CH...
diff --git a/netbox/dcim/api/serializers.py b/netbox/dcim/api/serializers.py index e37354d47f6..988a2d59f69 100644 --- a/netbox/dcim/api/serializers.py +++ b/netbox/dcim/api/serializers.py @@ -80,7 +80,7 @@ class Meta: class WritableSiteSerializer(CustomFieldModelSerializer): - time_zone = TimeZoneField(required=False) + time_zone = TimeZoneField(required=False, allow_null=True) class Meta: model = Site
vas3k__vas3k.club-260
Сломался check_PR экшн на новые пуллреквесты Вот здесь все пошло не так после пары изменений в requirements и докерфайлах: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml Из-за этого все новые пуллреквесты красненькие и мержить их приходится только суровой админской рукой. Надо бы переосмыслить этот CI как-нибудь. У кого есть идеи? По сути мне важны линтеры и чтобы докер с новым кодом успешно поднимался. Остального пока нет.
[ { "content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if no...
[ { "content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if no...
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 65f731e55..0d6194288 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -3,29 +3,28 @@ name: check_pr on: [pull_request] jobs: + lint: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@master + - uses: actions/setup-python@v2 + with: + python-version: '3.8' + architecture: 'x64' + - name: Install requirements + run: | + pip install --no-cache-dir flake8 + - name: run flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. + flake8 . --count --exit-zero --statistics -# Disabled due errors with gdal installation -# lint: -# runs-on: ubuntu-latest -# -# steps: -# - uses: actions/checkout@master -# - uses: actions/setup-python@v2 -# with: -# python-version: '3.8' -# architecture: 'x64' -# - name: Install requirements -# run: | -# pip install --no-cache-dir pipenv -# pipenv install --dev -# - name: run lint -# run: make test-ci -# # continue-on-error: true dockerize: runs-on: ubuntu-latest -# needs: lint - steps: - uses: actions/checkout@master - name: Build the docker-compose stack @@ -36,7 +35,7 @@ jobs: time: '20s' - name: Check db migrate on container run: | - docker-compose exec -T club_app make migrate + docker-compose exec -T club_app make docker-migrate - name: Check build frontend on container run: | docker-compose exec -T webpack npm run build diff --git a/Makefile b/Makefile index fd55484ad..1763cfffe 100644 --- a/Makefile +++ b/Makefile @@ -11,16 +11,16 @@ run-dev: ## Runs dev server run-queue: ## Runs task broker pipenv run python manage.py qcluster -run-queue-production: +docker-run-queue: python manage.py qcluster run-uvicorn: ## Runs uvicorn (ASGI) server in managed mode pipenv run uvicorn --fd 0 --lifespan off club.asgi:application docker-run-dev: ## Runs dev server in docker - pipenv run python 
./utils/wait_for_postgres.py - pipenv run python manage.py migrate - pipenv run python manage.py runserver 0.0.0.0:8000 + python ./utils/wait_for_postgres.py + python manage.py migrate + python manage.py runserver 0.0.0.0:8000 docker-run-production: ## Runs production server in docker python3 manage.py migrate @@ -40,6 +40,9 @@ requirements: ## Generate requirements.txt for production migrate: ## Migrate database to the latest version pipenv run python3 manage.py migrate +docker-migrate: + python3 manage.py migrate + build-frontend: ## Runs webpack npm run --prefix frontend build diff --git a/docker-compose.production.yml b/docker-compose.production.yml index fc4c2c5ac..6255725b2 100644 --- a/docker-compose.production.yml +++ b/docker-compose.production.yml @@ -30,7 +30,7 @@ services: queue: <<: *app - command: make run-queue-production + command: make docker-run-queue container_name: club_queue depends_on: - postgres diff --git a/docker-compose.yml b/docker-compose.yml index 9288da34f..5a1d42140 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -31,7 +31,7 @@ services: build: dockerfile: dev.dockerfile context: . 
- command: make run-queue + command: make docker-run-queue environment: - DEBUG=true - PYTHONUNBUFFERED=1 @@ -70,10 +70,3 @@ services: volumes: - .:/app:delegated working_dir: /app/frontend - - migrate_and_init: - <<: *app - container_name: club_migrate_and_init - restart: "no" - ports: [] - command: make migrate diff --git a/requirements.txt b/requirements.txt index e54cc9b28..ff4b88c03 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,6 +13,7 @@ cryptography==2.8 cssselect==1.1.0 cssutils==1.0.2 decorator==4.4.2 +django-debug-toolbar==2.2 django-picklefield==2.1.1 django-q-sentry==0.1.1 django-q[sentry]==1.2.1 diff --git a/utils/images.py b/utils/images.py index cf0fd5646..73b166b2f 100644 --- a/utils/images.py +++ b/utils/images.py @@ -29,7 +29,7 @@ def upload_image_bytes( try: image.save(saved_image) - except OSError: + except OSError as ex: log.warning(f"Error saving image data: {ex}") return None
akvo__akvo-rsr-1603
Transaction admin creates internal server error
[ { "content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom dja...
[ { "content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom dja...
diff --git a/akvo/rsr/models/transaction.py b/akvo/rsr/models/transaction.py index 65687712e4..30bf931422 100644 --- a/akvo/rsr/models/transaction.py +++ b/akvo/rsr/models/transaction.py @@ -89,7 +89,7 @@ class Transaction(models.Model): ) def __unicode__(self): - return self.value + return unicode(self.value) def iati_currency(self): return codelist_value(Currency, self, 'currency')
speechbrain__speechbrain-1127
Broken docs for `speechbrain.alignment.ctc_segmentation` Hi, thanks for maintaining such a wonderful library. Looks like the documentation for `speechbrain.alignment.ctc_segmentation` is broken: https://speechbrain.readthedocs.io/en/latest/API/speechbrain.alignment.ctc_segmentation.html I guess this is caused by unneeded shebang, as shown in the following: https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/alignment/ctc_segmentation.py#L1-L2 Perhaps this could be related to #819 ?
[ { "content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup -------------------------------------------...
[ { "content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup -------------------------------------------...
diff --git a/docs/conf.py b/docs/conf.py index 435bf01b80..a774420cbf 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -69,7 +69,7 @@ autodoc_default_options = {} # Autodoc mock extra dependencies: -autodoc_mock_imports = ["numba", "sklearn"] +autodoc_mock_imports = ["sklearn"] # Order of API items: autodoc_member_order = "bysource" diff --git a/docs/docs-requirements.txt b/docs/docs-requirements.txt index 9a506e0f15..b029209d95 100644 --- a/docs/docs-requirements.txt +++ b/docs/docs-requirements.txt @@ -1,6 +1,7 @@ better-apidoc>=0.3.1 -numba +numba>=0.54.1 recommonmark>=0.7.1 six sphinx-rtd-theme>=0.4.3 Sphinx>=3.4.3 +ctc-segmentation>=1.7.0
tensorflow__addons-1941
Usage with tf.keras API https://github.com/tensorflow/addons/blob/5f618fdb92d9737da059de2a33fa606e97505398/tensorflow_addons/losses/focal_loss.py#L52-L53 The usage in `tf.keras` API example is incorrect. It should be replaced with: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy()) ```
[ { "content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\...
[ { "content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\...
diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py index 973bfccb1f..550a82a614 100644 --- a/tensorflow_addons/losses/focal_loss.py +++ b/tensorflow_addons/losses/focal_loss.py @@ -50,7 +50,7 @@ class SigmoidFocalCrossEntropy(LossFunctionWrapper): ```python model = tf.keras.Model(inputs, outputs) - model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy()) + model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy()) ``` Args
Gallopsled__pwntools-669
Need import util/iters.py have not import time and context,It cause a problem when use mbruteforce Need import util/iters.py have not import time and context,It cause a problem when use mbruteforce Need import util/iters.py have not import time and context,It cause a problem when use mbruteforce
[ { "content": "\"\"\"\nThis module includes and extends the standard module :mod:`itertools`.\n\"\"\"\n\n__all__ = [\n 'bruteforce' ,\n 'mbruteforce' ,\n 'chained' ,\n 'consume' ,\n ...
[ { "content": "\"\"\"\nThis module includes and extends the standard module :mod:`itertools`.\n\"\"\"\n\n__all__ = [\n 'bruteforce' ,\n 'mbruteforce' ,\n 'chained' ,\n 'consume' ,\n ...
diff --git a/pwnlib/util/iters.py b/pwnlib/util/iters.py index 835ef78ce..b5a68bf49 100644 --- a/pwnlib/util/iters.py +++ b/pwnlib/util/iters.py @@ -58,8 +58,10 @@ import multiprocessing import operator import random +import time from itertools import * +from ..context import context from ..log import getLogger log = getLogger(__name__)
robocorp__rpaframework-550
`RPA.JSON` RecursionError: maximum recursion depth exceeded This error is at the moment breaking our [Certificate level 3](https://robocorp.com/docs/courses/work-data-management/validate-business-data) course with `rpaframework==15.0.0`. Works correctly with `rpaframework==14.0.0` ``` *** Keywords *** Validate traffic data [Arguments] ${traffic_data} ${country}= Get Value From Json ${traffic_data} $.country ${valid}= Evaluate len("${country}") == 3 RETURN ${valid} ``` example content of `${traffic_data}` ``` { "country": "ISR", "year": 2019, "rate": 3.90874 } ```
[ { "content": "import json\nimport logging\nfrom typing import Any, Callable, Dict, Hashable, List, Optional, Union\n\nfrom jsonpath_ng import Index, Fields\nfrom jsonpath_ng.ext.filter import Filter\nfrom jsonpath_ng.ext.parser import ExtentedJsonPathParser\n\nfrom robot.api.deco import keyword\n\n\nJSONValue =...
[ { "content": "import json\nimport logging\nfrom typing import Any, Callable, Dict, Hashable, List, Optional, Union\n\nfrom jsonpath_ng import Index, Fields\nfrom jsonpath_ng.ext.filter import Filter\nfrom jsonpath_ng.ext.parser import ExtentedJsonPathParser\n\nfrom robot.api.deco import keyword\n\n\nJSONValue =...
diff --git a/docs/source/releasenotes.rst b/docs/source/releasenotes.rst index 3399a9b7ff..2d4efdd348 100644 --- a/docs/source/releasenotes.rst +++ b/docs/source/releasenotes.rst @@ -5,12 +5,17 @@ Release notes `Upcoming release <https://github.com/robocorp/rpaframework/projects/3#column-16713994>`_ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -- Deprecate *Lab* references under documentation. - `Released <https://pypi.org/project/rpaframework/#history>`_ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +15.1.1 - 17 June 2022 +--------------------- + +- Library **RPA.JSON** (:issue:`548`): Fix *libspec* infinite recursion on ``JSONType`` + type. +- Deprecate *Lab* references under documentation. + 15.1.0 - 15 June 2022 --------------------- diff --git a/packages/main/poetry.lock b/packages/main/poetry.lock index 25fc7bfdda..1dc5caa88b 100644 --- a/packages/main/poetry.lock +++ b/packages/main/poetry.lock @@ -143,14 +143,14 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.24.10" +version = "1.24.11" description = "The AWS SDK for Python" category = "main" optional = true python-versions = ">= 3.7" [package.dependencies] -botocore = ">=1.27.10,<1.28.0" +botocore = ">=1.27.11,<1.28.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -159,7 +159,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.27.10" +version = "1.27.11" description = "Low-level, data-driven core of boto 3." 
category = "main" optional = true @@ -381,7 +381,7 @@ pyflakes = ">=2.3.0,<2.4.0" [[package]] name = "fpdf2" -version = "2.5.4" +version = "2.5.5" description = "Simple & fast PDF generation for Python" category = "main" optional = false @@ -389,7 +389,7 @@ python-versions = "*" [package.dependencies] defusedxml = "*" -Pillow = ">=9.1.0" +Pillow = ">=6.2.2" [[package]] name = "furl" @@ -2187,12 +2187,12 @@ black = [ {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"}, ] boto3 = [ - {file = "boto3-1.24.10-py3-none-any.whl", hash = "sha256:32ffc0fd50408acc710cf5ce40037aa3c14926d6e3f6fbf61ed5990fb63cd881"}, - {file = "boto3-1.24.10.tar.gz", hash = "sha256:88fd816274d4b64bcf90889441d4efa5f16a0048ed670bc33cbd0f5a678313a6"}, + {file = "boto3-1.24.11-py3-none-any.whl", hash = "sha256:19d6fb2b5e51f10e7b5d551a111cf9c64b9a5144b2838493ac41be0706e590cf"}, + {file = "boto3-1.24.11.tar.gz", hash = "sha256:79fc9699006af26de4413105e458af5f1626ba32d1f00fa0b3e8b94c2b16e2dc"}, ] botocore = [ - {file = "botocore-1.27.10-py3-none-any.whl", hash = "sha256:24ec42b4f29a50f7ef78f9f863c3c25e00f65b5a48db669c8068457789a90803"}, - {file = "botocore-1.27.10.tar.gz", hash = "sha256:b39da97452c9e2c856e7778d8c908252394da81e2e5792f1d4cb0ece4ce1043a"}, + {file = "botocore-1.27.11-py3-none-any.whl", hash = "sha256:8efab7f85156705cbe532aeb17b065b67ba32addc3270d9000964b98c07bb20a"}, + {file = "botocore-1.27.11.tar.gz", hash = "sha256:92f099a36df832d7f151682e1efa8e1d47d23a5cedde8692adcaa6420bcb18aa"}, ] cached-property = [ {file = "cached-property-1.5.2.tar.gz", hash = "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130"}, @@ -2370,8 +2370,8 @@ flake8 = [ {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, ] fpdf2 = [ - {file = "fpdf2-2.5.4-py2.py3-none-any.whl", hash = "sha256:0f5bb5059d6049ad6b6fa985120bd81ca2beecff60ec48735272e2ab4f1b39d7"}, - {file = 
"fpdf2-2.5.4.tar.gz", hash = "sha256:24b045c8bab16ce0b52769f4066385b5255dc6a01c474b0e41cb6d8bbfebe3ff"}, + {file = "fpdf2-2.5.5-py2.py3-none-any.whl", hash = "sha256:72deaec4d0172e10025f4febddaa306edc5cfad28a3fa0069a368d9d896caa46"}, + {file = "fpdf2-2.5.5.tar.gz", hash = "sha256:2dace3a7cfa9ebfbfa08a4d40d97d8944838370b3cee739e4b1549c48afc4811"}, ] furl = [ {file = "furl-2.1.3-py2.py3-none-any.whl", hash = "sha256:9ab425062c4217f9802508e45feb4a83e54324273ac4b202f1850363309666c0"}, diff --git a/packages/main/pyproject.toml b/packages/main/pyproject.toml index 078cbecb03..62b1e7051f 100644 --- a/packages/main/pyproject.toml +++ b/packages/main/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "rpaframework" -version = "15.1.0" +version = "15.1.1" description = "A collection of tools and libraries for RPA" authors = ["RPA Framework <rpafw@robocorp.com>"] license = "Apache-2.0" diff --git a/packages/main/src/RPA/JSON.py b/packages/main/src/RPA/JSON.py index 92c73bec70..a73bb77c44 100644 --- a/packages/main/src/RPA/JSON.py +++ b/packages/main/src/RPA/JSON.py @@ -10,7 +10,7 @@ JSONValue = Optional[Union[str, int, float, bool]] -JSONType = Union[Dict[Hashable, "JSONType"], List["JSONType"], JSONValue] +JSONType = Union[Dict[Hashable, JSONValue], List[JSONValue], JSONValue] class RPAFilter(Filter): diff --git a/poetry.lock b/poetry.lock index 9a0346e503..a4dd8d1d8e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -135,14 +135,14 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.24.10" +version = "1.24.11" description = "The AWS SDK for Python" category = "main" optional = false python-versions = ">= 3.7" [package.dependencies] -botocore = ">=1.27.10,<1.28.0" +botocore = ">=1.27.11,<1.28.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -151,7 +151,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.27.10" +version = "1.27.11" description = "Low-level, data-driven core of boto 3." 
category = "main" optional = false @@ -356,7 +356,7 @@ termcolor = "*" [[package]] name = "fpdf2" -version = "2.5.4" +version = "2.5.5" description = "Simple & fast PDF generation for Python" category = "main" optional = false @@ -364,7 +364,7 @@ python-versions = "*" [package.dependencies] defusedxml = "*" -Pillow = ">=9.1.0" +Pillow = ">=6.2.2" [[package]] name = "furl" @@ -1682,7 +1682,7 @@ python-versions = "*" [[package]] name = "rpaframework" -version = "15.1.0" +version = "15.1.1" description = "A collection of tools and libraries for RPA" category = "main" optional = false @@ -2444,12 +2444,12 @@ black = [ {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"}, ] boto3 = [ - {file = "boto3-1.24.10-py3-none-any.whl", hash = "sha256:32ffc0fd50408acc710cf5ce40037aa3c14926d6e3f6fbf61ed5990fb63cd881"}, - {file = "boto3-1.24.10.tar.gz", hash = "sha256:88fd816274d4b64bcf90889441d4efa5f16a0048ed670bc33cbd0f5a678313a6"}, + {file = "boto3-1.24.11-py3-none-any.whl", hash = "sha256:19d6fb2b5e51f10e7b5d551a111cf9c64b9a5144b2838493ac41be0706e590cf"}, + {file = "boto3-1.24.11.tar.gz", hash = "sha256:79fc9699006af26de4413105e458af5f1626ba32d1f00fa0b3e8b94c2b16e2dc"}, ] botocore = [ - {file = "botocore-1.27.10-py3-none-any.whl", hash = "sha256:24ec42b4f29a50f7ef78f9f863c3c25e00f65b5a48db669c8068457789a90803"}, - {file = "botocore-1.27.10.tar.gz", hash = "sha256:b39da97452c9e2c856e7778d8c908252394da81e2e5792f1d4cb0ece4ce1043a"}, + {file = "botocore-1.27.11-py3-none-any.whl", hash = "sha256:8efab7f85156705cbe532aeb17b065b67ba32addc3270d9000964b98c07bb20a"}, + {file = "botocore-1.27.11.tar.gz", hash = "sha256:92f099a36df832d7f151682e1efa8e1d47d23a5cedde8692adcaa6420bcb18aa"}, ] cached-property = [ {file = "cached-property-1.5.2.tar.gz", hash = "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130"}, @@ -2584,8 +2584,8 @@ fire = [ {file = "fire-0.4.0.tar.gz", hash = 
"sha256:c5e2b8763699d1142393a46d0e3e790c5eb2f0706082df8f647878842c216a62"}, ] fpdf2 = [ - {file = "fpdf2-2.5.4-py2.py3-none-any.whl", hash = "sha256:0f5bb5059d6049ad6b6fa985120bd81ca2beecff60ec48735272e2ab4f1b39d7"}, - {file = "fpdf2-2.5.4.tar.gz", hash = "sha256:24b045c8bab16ce0b52769f4066385b5255dc6a01c474b0e41cb6d8bbfebe3ff"}, + {file = "fpdf2-2.5.5-py2.py3-none-any.whl", hash = "sha256:72deaec4d0172e10025f4febddaa306edc5cfad28a3fa0069a368d9d896caa46"}, + {file = "fpdf2-2.5.5.tar.gz", hash = "sha256:2dace3a7cfa9ebfbfa08a4d40d97d8944838370b3cee739e4b1549c48afc4811"}, ] furl = [ {file = "furl-2.1.3-py2.py3-none-any.whl", hash = "sha256:9ab425062c4217f9802508e45feb4a83e54324273ac4b202f1850363309666c0"},
joke2k__faker-993
text-unidecode is released under the Artistic license `text-unidecode` is released under the Artistic license v1.0, which is considered non-free by the FSF (and therefore not compatible with the GPL). I believe this clause is also of concern to commercial users of faker too: > 5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own. Not being able to charge a fee for the software is problematic for those of us who are contractors, for example. I realise there aren't really any good alternatives (`unidecode` is GPL licensed as pointed out in #628 , `isounidecode` doesn't support Python 3), so would a patch making `text-unidecode` an optional dependency be acceptable?
[ { "content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\n\nwith io.open(os.path.join(here, 'VER...
[ { "content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\n\nwith io.open(os.path.join(here, 'VER...
diff --git a/setup.py b/setup.py index e0ef8ee22c..67d70d304d 100644 --- a/setup.py +++ b/setup.py @@ -66,7 +66,7 @@ install_requires=[ "python-dateutil>=2.4", "six>=1.10", - "text-unidecode==1.2", + "text-unidecode==1.3", ], tests_require=[ "validators>=0.13.0",
dynamiqs__dynamiqs-196
implement a ver() method As a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running
[ { "content": "from .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n", "path": "dynamiqs/__init__.py" } ]
[ { "content": "from importlib.metadata import version\n\nfrom .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n\n# get version from pyproject.toml\n__version__ = version(__package__)\n", "path": "dynamiqs/__init__.py" } ]
diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py index 3167dd305..e89e489f3 100644 --- a/dynamiqs/__init__.py +++ b/dynamiqs/__init__.py @@ -1,4 +1,9 @@ +from importlib.metadata import version + from .mesolve import mesolve from .sesolve import sesolve from .smesolve import smesolve from .utils import * + +# get version from pyproject.toml +__version__ = version(__package__)
Pyomo__pyomo-429
Review objects exposed by environ At the request of @jsiirola after I brought this to his attention, some Pyomo objects are not exposed by environ that would otherwise be expected. One that I have encountered is `TerminationCondition`, which needs to be imported from `pyomo.opt`.
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineerin...
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineerin...
diff --git a/pyomo/environ/__init__.py b/pyomo/environ/__init__.py index e8d3de7d3b3..01d842e0b29 100644 --- a/pyomo/environ/__init__.py +++ b/pyomo/environ/__init__.py @@ -93,4 +93,7 @@ def _import_packages(): # Expose the symbols from pyomo.core # from pyomo.core import * -from pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver +from pyomo.opt import ( + SolverFactory, SolverManagerFactory, UnknownSolver, + TerminationCondition, SolverStatus, +)
nilearn__nilearn-1936
_threshold_maps_ratio changes the input map My maps images keep changing when I use RegionExtractor. I think we need to make a copy [here](https://github.com/nilearn/nilearn/blob/master/nilearn/regions/region_extractor.py#L58) For instance the following code throws an `AssertionError: Arrays are not equal` ```Python from nilearn._utils.data_gen import generate_maps import numpy as np from nilearn.regions.region_extractor import _threshold_maps_ratio maps, mask_img = generate_maps((10, 10, 10), 30) maps.get_data()[:5] = 100 maps_data = maps.get_data().copy() thresholded_maps = _threshold_maps_ratio(maps, threshold=1) np.testing.assert_array_equal(maps.get_data(), maps_data) ```
[ { "content": "\"\"\"\nBetter brain parcellations for Region of Interest analysis\n\"\"\"\n\nimport numbers\nimport collections\nimport numpy as np\n\nfrom scipy import ndimage\nfrom scipy.stats import scoreatpercentile\n\nfrom sklearn.externals.joblib import Memory\n\nfrom .. import masking\nfrom ..input_data i...
[ { "content": "\"\"\"\nBetter brain parcellations for Region of Interest analysis\n\"\"\"\n\nimport numbers\nimport collections\nimport numpy as np\n\nfrom scipy import ndimage\nfrom scipy.stats import scoreatpercentile\n\nfrom sklearn.externals.joblib import Memory\n\nfrom .. import masking\nfrom ..input_data i...
diff --git a/nilearn/regions/region_extractor.py b/nilearn/regions/region_extractor.py index c84bfc9c22..5701c5afa8 100644 --- a/nilearn/regions/region_extractor.py +++ b/nilearn/regions/region_extractor.py @@ -55,7 +55,7 @@ def _threshold_maps_ratio(maps_img, threshold): else: ratio = threshold - maps_data = _safe_get_data(maps, ensure_finite=True) + maps_data = _safe_get_data(maps, ensure_finite=True).copy() abs_maps = np.abs(maps_data) # thresholding diff --git a/nilearn/regions/tests/test_region_extractor.py b/nilearn/regions/tests/test_region_extractor.py index 4687b9a67b..dff8c9c5ea 100644 --- a/nilearn/regions/tests/test_region_extractor.py +++ b/nilearn/regions/tests/test_region_extractor.py @@ -51,10 +51,16 @@ def test_threshold_maps_ratio(): # smoke test for function _threshold_maps_ratio with randomly # generated maps - # make sure that n_regions (4th dimension) are kept same even - # in thresholded image maps, _ = generate_maps((6, 8, 10), n_regions=3) + + # test that there is no side effect + maps.get_data()[:3] = 100 + maps_data = maps.get_data().copy() thr_maps = _threshold_maps_ratio(maps, threshold=1.0) + np.testing.assert_array_equal(maps.get_data(), maps_data) + + # make sure that n_regions (4th dimension) are kept same even + # in thresholded image assert_true(thr_maps.shape[-1] == maps.shape[-1]) # check that the size should be same for 3D image
pulp__pulpcore-2498
As a developer, I can have pytest run the unit tests Author: @bmbouter (bmbouter) Redmine Issue: 9643, https://pulp.plan.io/issues/9643 --- As part of the testing effort, it would be nice to have pytest run the unittests in addition to our functional tests.
[ { "content": "\"\"\"\nDjango settings for the Pulp Platform application\n\nNever import this module directly, instead `from django.conf import settings`, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/#using-settings-in-python-code\n\nFor the full list of settings and their values, see\nhttps://doc...
[ { "content": "\"\"\"\nDjango settings for the Pulp Platform application\n\nNever import this module directly, instead `from django.conf import settings`, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/#using-settings-in-python-code\n\nFor the full list of settings and their values, see\nhttps://doc...
diff --git a/.github/workflows/scripts/script.sh b/.github/workflows/scripts/script.sh index 44f447e338..1b81d816e2 100755 --- a/.github/workflows/scripts/script.sh +++ b/.github/workflows/scripts/script.sh @@ -111,7 +111,7 @@ cmd_prefix bash -c "django-admin makemigrations --check --dry-run" if [[ "$TEST" != "upgrade" ]]; then # Run unit tests. - cmd_prefix bash -c "PULP_DATABASES__default__USER=postgres django-admin test --noinput /usr/local/lib/python3.8/site-packages/pulpcore/tests/unit/" + cmd_prefix bash -c "PULP_DATABASES__default__USER=postgres pytest -v -r sx --color=yes --pyargs pulpcore.tests.unit" fi # Run functional tests diff --git a/CHANGES/2070.misc b/CHANGES/2070.misc new file mode 100644 index 0000000000..39a0115674 --- /dev/null +++ b/CHANGES/2070.misc @@ -0,0 +1 @@ +Switches the unit test runner to use pytest, and port unit tests accordingly. diff --git a/MANIFEST.in b/MANIFEST.in index 247bac5420..5dafae0ada 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -7,7 +7,7 @@ include COMMITMENT include functest_requirements.txt include unittest_requirements.txt recursive-include pulpcore/tests/functional/api/using_plugin/artifacts * -recursive-exclude pulpcore/tests/fixtures/ * +recursive-exclude pulpcore/tests/functional/fixtures/ * include CODE_OF_CONDUCT.md include CONTRIBUTING.md include COPYRIGHT diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py index 288e41b3b5..96d93155f7 100644 --- a/pulpcore/app/settings.py +++ b/pulpcore/app/settings.py @@ -375,7 +375,8 @@ if not ( - Path(sys.argv[0]).name == "sphinx-build" + Path(sys.argv[0]).name == "pytest" + or Path(sys.argv[0]).name == "sphinx-build" or (len(sys.argv) >= 2 and sys.argv[1] == "collectstatic") ): try: diff --git a/pulpcore/tests/conftest.py b/pulpcore/tests/functional/conftest.py similarity index 100% rename from pulpcore/tests/conftest.py rename to pulpcore/tests/functional/conftest.py diff --git a/pulpcore/tests/conftest_pulp_file.py 
b/pulpcore/tests/functional/conftest_pulp_file.py similarity index 100% rename from pulpcore/tests/conftest_pulp_file.py rename to pulpcore/tests/functional/conftest_pulp_file.py diff --git a/pulpcore/tests/fixtures/basic/1.iso b/pulpcore/tests/functional/fixtures/basic/1.iso similarity index 100% rename from pulpcore/tests/fixtures/basic/1.iso rename to pulpcore/tests/functional/fixtures/basic/1.iso diff --git a/pulpcore/tests/fixtures/basic/2.iso b/pulpcore/tests/functional/fixtures/basic/2.iso similarity index 100% rename from pulpcore/tests/fixtures/basic/2.iso rename to pulpcore/tests/functional/fixtures/basic/2.iso diff --git a/pulpcore/tests/fixtures/basic/3.iso b/pulpcore/tests/functional/fixtures/basic/3.iso similarity index 100% rename from pulpcore/tests/fixtures/basic/3.iso rename to pulpcore/tests/functional/fixtures/basic/3.iso diff --git a/pulpcore/tests/fixtures/basic/PULP_MANIFEST b/pulpcore/tests/functional/fixtures/basic/PULP_MANIFEST similarity index 100% rename from pulpcore/tests/fixtures/basic/PULP_MANIFEST rename to pulpcore/tests/functional/fixtures/basic/PULP_MANIFEST diff --git a/pulpcore/tests/unit/serializers/test_content.py b/pulpcore/tests/unit/serializers/test_content.py deleted file mode 100644 index 0aad81dbdc..0000000000 --- a/pulpcore/tests/unit/serializers/test_content.py +++ /dev/null @@ -1,37 +0,0 @@ -from unittest import TestCase - -import mock -from pulpcore.app.models import Artifact -from pulpcore.app.serializers import ArtifactSerializer -from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS -from rest_framework import serializers - - -class TestArtifactSerializer(TestCase): - def test_validate_file_checksum(self): - mock_file = mock.MagicMock(size=42) - mock_file.hashers.__getitem__.return_value.hexdigest.return_value = "asdf" - - data = {"file": mock_file} - serializer = ArtifactSerializer(data=data) - self.assertTrue(serializer.is_valid()) - new_data = serializer.validated_data - self.assertEqual(new_data["file"], 
mock_file) - self.assertEqual(new_data["size"], 42) - for csum in Artifact.DIGEST_FIELDS: - self.assertEqual(new_data[csum], "asdf") - - for csum in ALL_KNOWN_CONTENT_CHECKSUMS.difference(Artifact.DIGEST_FIELDS): - self.assertFalse(csum in new_data, f"Found forbidden checksum {csum}") - - # This part of the test will only fire if the system-under-test has forbidden - # use of 'md5' - if "md5" not in Artifact.DIGEST_FIELDS: - data = {"file": mock_file, "md5": "asdf"} - with self.assertRaises(serializers.ValidationError) as cm: # noqa - serializer.validate(data) - - def test_emtpy_data(self): - data = {} - serializer = ArtifactSerializer(data=data) - self.assertFalse(serializer.is_valid()) diff --git a/pulpcore/tests/unit/serializers/test_repository.py b/pulpcore/tests/unit/serializers/test_repository.py index 4c661a20e2..927d92b3e2 100644 --- a/pulpcore/tests/unit/serializers/test_repository.py +++ b/pulpcore/tests/unit/serializers/test_repository.py @@ -4,9 +4,7 @@ import mock from rest_framework import serializers -from pulpcore.app.models import Distribution from pulpcore.app.serializers import ( - DistributionSerializer, PublicationSerializer, RemoteSerializer, ) @@ -15,38 +13,6 @@ class TestRemoteSerializer(TestCase): minimal_data = {"name": "test", "url": "http://whatever"} - def test_minimal_data(self): - data = {} - data.update(self.minimal_data) - serializer = RemoteSerializer(data=data) - serializer.is_valid(raise_exception=True) - - def test_validate_proxy(self): - data = {"proxy_url": "http://whatever"} - data.update(self.minimal_data) - serializer = RemoteSerializer(data=data) - serializer.is_valid(raise_exception=True) - - def test_validate_proxy_invalid(self): - data = {"proxy_url": "http://user:pass@whatever"} - data.update(self.minimal_data) - serializer = RemoteSerializer(data=data) - with self.assertRaises(serializers.ValidationError): - serializer.is_valid(raise_exception=True) - - def test_validate_proxy_creds(self): - data = {"proxy_url": 
"http://whatever", "proxy_username": "user", "proxy_password": "pass"} - data.update(self.minimal_data) - serializer = RemoteSerializer(data=data) - serializer.is_valid(raise_exception=True) - - def test_validate_proxy_creds_invalid(self): - data = {"proxy_url": "http://whatever", "proxy_username": "user"} - data.update(self.minimal_data) - serializer = RemoteSerializer(data=data) - with self.assertRaises(serializers.ValidationError): - serializer.is_valid(raise_exception=True) - def test_validate_proxy_creds_update(self): Remote = SimpleNamespace( proxy_url="http://whatever", @@ -115,58 +81,3 @@ def test_validate_repository_version_only_unknown_field(self): serializer = PublicationSerializer(data=data) with self.assertRaises(serializers.ValidationError): serializer.validate(data) - - -class TestDistributionPath(TestCase): - def test_overlap(self): - Distribution.objects.create(base_path="foo/bar", name="foobar") - overlap_errors = {"base_path": ["Overlaps with existing distribution 'foobar'"]} - - # test that the new distribution cannot be nested in an existing path - data = {"name": "foobarbaz", "base_path": "foo/bar/baz"} - serializer = DistributionSerializer(data=data) - self.assertFalse(serializer.is_valid()) - self.assertDictEqual(overlap_errors, serializer.errors) - - # test that the new distribution cannot nest an existing path - data = {"name": "foo", "base_path": "foo"} - serializer = DistributionSerializer(data=data) - self.assertFalse(serializer.is_valid()) - self.assertDictEqual(overlap_errors, serializer.errors) - - def test_no_overlap(self): - Distribution.objects.create(base_path="fu/bar", name="fubar") - - # different path - data = {"name": "fufu", "base_path": "fubar"} - serializer = DistributionSerializer(data=data) - self.assertTrue(serializer.is_valid()) - self.assertDictEqual({}, serializer.errors) - - # common base path but different path - data = {"name": "fufu", "base_path": "fu/baz"} - serializer = DistributionSerializer(data=data) - 
self.assertTrue(serializer.is_valid()) - self.assertDictEqual({}, serializer.errors) - - def test_slashes(self): - overlap_errors = {"base_path": ["Relative path cannot begin or end with slashes."]} - - data = {"name": "fefe", "base_path": "fefe/"} - serializer = DistributionSerializer(data=data) - self.assertFalse(serializer.is_valid()) - self.assertDictEqual(overlap_errors, serializer.errors) - - data = {"name": "fefe", "base_path": "/fefe/foo"} - serializer = DistributionSerializer(data=data) - self.assertFalse(serializer.is_valid()) - self.assertDictEqual(overlap_errors, serializer.errors) - - def test_uniqueness(self): - Distribution.objects.create(base_path="fizz/buzz", name="fizzbuzz") - data = {"name": "feefee", "base_path": "fizz/buzz"} - overlap_errors = {"base_path": ["This field must be unique."]} - - serializer = DistributionSerializer(data=data) - self.assertFalse(serializer.is_valid()) - self.assertDictEqual(overlap_errors, serializer.errors) diff --git a/unittest_requirements.txt b/unittest_requirements.txt index 4c0775506b..ed1dc5f387 100644 --- a/unittest_requirements.txt +++ b/unittest_requirements.txt @@ -1,3 +1,4 @@ # Unit test requirements asynctest mock +pytest-django
zestedesavoir__zds-site-5586
SEO et signature : <a rel="nofollow" /> Dans la signature il faudrait voir si on peut facilement ajouter un attribut `rel="nofollow"` pour préserver notre SEO. https://github.com/zestedesavoir/zmarkdown/blob/1dded309a2670689a4a3353f9e38b80624c6df1a/packages/zmarkdown/server/handlers.js#L139 > limitez les liens en signatures à des no follow or lien interne. c’est pas mal (:evil) de partager un lien, mais si A-312 répond 4 fois dans la même page, il renvoie 4 fois du jus sur son compte twitter, 4 coding game, … ca a plusieurs effet négatifs Source: https://zestedesavoir.com/forums/sujet/12099/seo-et-spam/?page=1#p199005
[ { "content": "import re\nimport json\nimport logging\nfrom requests import post, HTTPError\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy ...
[ { "content": "import re\nimport json\nimport logging\nfrom requests import post, HTTPError\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy ...
diff --git a/zds/utils/templatetags/emarkdown.py b/zds/utils/templatetags/emarkdown.py index e933290542..b403e746d2 100644 --- a/zds/utils/templatetags/emarkdown.py +++ b/zds/utils/templatetags/emarkdown.py @@ -178,7 +178,7 @@ def emarkdown_inline(text): :rtype: str """ rendered = emarkdown(text, inline=True) - return rendered + return mark_safe(rendered.replace('<a href=', '<a rel="nofollow" href=')) def sub_hd(match, count): diff --git a/zds/utils/tests/tests_emarkdown.py b/zds/utils/tests/tests_emarkdown.py index 416798c150..4b172ba0f7 100644 --- a/zds/utils/tests/tests_emarkdown.py +++ b/zds/utils/tests/tests_emarkdown.py @@ -40,7 +40,13 @@ def test_emarkdown_inline(self): self.assertEqual(tr, expected) - # Todo: Find a way to force parsing crash or simulate it. + def test_emarkdown_inline_with_link(self): + # The goal is not to test zmarkdown but test that template tag correctly call it + self.context['content'] = '[zds](zestedesavoir.com)' + tr = Template('{% load emarkdown %}{{ content | emarkdown_inline}}').render(self.context) + + expected = '<p><a rel="nofollow" href="zestedesavoir.com">zds</a></p>' + self.assertEqual(tr, expected) def test_shift_heading(self): tr = Template('{% load emarkdown %}{{ content | shift_heading_1}}').render(self.context)
TileDB-Inc__TileDB-Py-151
Reading dense array doesn't free memory Hi, I'm wondering if this is expected behavior or if you have any tips to fix. On Ubuntu 16, Python 3.7, and _tiledb_ 0.4.1: Create toy array: ``` x = np.ones(10000000) ctx = tiledb.Ctx() path = 'test_tile_db' d1 = tiledb.Dim( 'test_domain', domain=(0, x.shape[0] - 1), tile=10000, dtype="uint32" ) domain = tiledb.Domain(d1) v = tiledb.Attr( 'test_value', dtype="float32", ) schema = tiledb.ArraySchema( domain=domain, attrs=(v,), cell_order="row-major", tile_order="row-major" ) A = tiledb.DenseArray.create(path, schema) values = x.astype(np.float32) with tiledb.DenseArray(path, mode="w", ctx=ctx) as A: A[:] = {'test_value': values} ``` Read from array: ``` for i in range(10): with tiledb.DenseArray(path, mode='r') as data: data[:] print('Gigs:', round(psutil.virtual_memory().used / (10**9), 2)) Gigs: 0.84 Gigs: 0.89 Gigs: 0.93 Gigs: 0.97 Gigs: 1.01 Gigs: 1.05 Gigs: 1.1 Gigs: 1.14 Gigs: 1.18 Gigs: 1.22 ``` Basically memory never seems to get released even when I don't assign the `data[:]` to any variable. I've tried playing around with garbage collection (`import gc; gc.collect()`) but it seems Python is not aware. Have also tried doing some explicit closing of the DenseArray. Eventually have to reset Jupyter notebook to get memory to free. In my real use case I am iterating over several tileDBs and pulling full array data out from each, doing some transforms, and writing new tileDBs with transformed data. Works okay except every read call adds around 2GBs to the used memory and never releases it, causing the machine to eventually run out of memory. Current work around is to spin up new process for every iteration. Thanks!
[ { "content": "from __future__ import absolute_import, print_function\n\nimport multiprocessing\nimport os\nimport shutil\nimport subprocess\nimport zipfile\nimport platform\nfrom distutils.sysconfig import get_config_var\nfrom distutils.version import LooseVersion\n\n\ntry:\n # For Python 3\n from urllib....
[ { "content": "from __future__ import absolute_import, print_function\n\nimport multiprocessing\nimport os\nimport shutil\nimport subprocess\nimport zipfile\nimport platform\nfrom distutils.sysconfig import get_config_var\nfrom distutils.version import LooseVersion\n\n\ntry:\n # For Python 3\n from urllib....
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index cfbf0ade25..35e00249b0 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,7 +27,7 @@ steps: architecture: 'x64' - script: | - python -m pip install --upgrade pip setuptools wheel numpy tox setuptools-scm cython + python -m pip install --upgrade pip setuptools wheel numpy tox setuptools-scm cython psutil displayName: 'Install dependencies' - script: | diff --git a/requirements.txt b/requirements.txt index 8c34793759..6db5ef6f8c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ numpy>=1.7.2 setuptools>=18.0.1 setuptools-scm>=1.5.4 wheel>=0.30.0 +psutil diff --git a/requirements_dev.txt b/requirements_dev.txt index b0e1dc77cc..130724f89b 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -6,3 +6,4 @@ setuptools==40.8.0 setuptools-scm==1.5.4 wheel==0.30.0 tox==3.0.0 +psutil diff --git a/setup.py b/setup.py index 44def48710..1e0652eaf9 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ def get_zipfile(url): from sys import version_info as ver # Target branch -TILEDB_VERSION = "dev" +TILEDB_VERSION = "1.5.1" # Use `setup.py [] --debug` for a debug build of libtiledb TILEDB_DEBUG_BUILD = False diff --git a/tiledb/libtiledb.pyx b/tiledb/libtiledb.pyx index fef27d6196..937d0bb9c2 100644 --- a/tiledb/libtiledb.pyx +++ b/tiledb/libtiledb.pyx @@ -11,9 +11,6 @@ from cpython.bytes cimport (PyBytes_GET_SIZE, PyBytes_FromString, PyBytes_FromStringAndSize) -from cpython.mem cimport (PyMem_Malloc, - PyMem_Realloc, - PyMem_Free) from cpython.ref cimport (Py_INCREF, Py_DECREF, PyTypeObject) @@ -52,6 +49,10 @@ cdef extern from "numpy/arrayobject.h": object obj) # Steals a reference to dtype, need to incref the dtype object PyArray_Scalar(void* ptr, np.dtype descr, object itemsize) + void PyArray_ENABLEFLAGS(np.ndarray arr, int flags) + void* PyDataMem_NEW(size_t nbytes) + void* PyDataMem_RENEW(void* data, size_t nbytes) + void PyDataMem_FREE(void* data) import sys from 
os.path import abspath @@ -72,7 +73,8 @@ _MB = 1024 * _KB # The native int type for this platform IntType = np.dtype(np.int_) -# Numpy initialization code +# Numpy initialization code (critical) +# https://docs.scipy.org/doc/numpy/reference/c-api.array.html#c.import_array np.import_array() def version(): @@ -2375,7 +2377,7 @@ cdef class KV(object): @staticmethod def create(uri, KVSchema schema, key=None, Ctx ctx=default_ctx()): - """Creates a persistent KV at the given URI, returns a KV class instance + """Creates a persistent KV at the given URI """ cdef tiledb_ctx_t* ctx_ptr = ctx.ptr cdef bytes buri = unicode_path(uri) @@ -2402,7 +2404,7 @@ cdef class KV(object): if rc != TILEDB_OK: _raise_ctx_err(ctx_ptr, rc) - return KV(uri, key=key, ctx=ctx) + return def __init__(self, uri, mode='r', key=None, timestamp=None, Ctx ctx=default_ctx()): cdef tiledb_ctx_t* ctx_ptr = ctx.ptr @@ -3403,6 +3405,7 @@ cdef class Array(object): cdef uint64_t _timestamp = 0 if timestamp is not None: _timestamp = <uint64_t> timestamp + # allocate and then open the array cdef tiledb_array_t* array_ptr = NULL cdef int rc = TILEDB_OK @@ -3429,9 +3432,12 @@ cdef class Array(object): # view on a single attribute if attr and not any(attr == schema.attr(i).name for i in range(schema.nattr)): + tiledb_array_close(ctx_ptr, array_ptr) + tiledb_array_free(&array_ptr) raise KeyError("No attribute matching '{}'".format(attr)) else: self.view_attr = unicode(attr) if (attr is not None) else None + self.ctx = ctx self.uri = unicode(uri) self.mode = unicode(mode) @@ -3817,11 +3823,11 @@ cdef class Array(object): # note: must not divide by itemsize for a string, because it may be zero (e.g 'S0') dims[0] = el_bytelen / el_dtype.base.itemsize newobj = \ - PyArray_NewFromDescr( + np.copy(PyArray_NewFromDescr( <PyTypeObject*> np.ndarray, el_dtype.base, 1, dims, NULL, el_ptr, - np.NPY_ENSURECOPY, <object> NULL) + 0, <object> NULL)) # set the output object out_flat[el] = newobj @@ -3840,7 +3846,6 @@ cdef class 
ReadQuery(object): @property def _offsets(self): return self._offsets - def __init__(self, Array array, np.ndarray subarray, list attr_names, tiledb_layout_t layout): self._buffers = dict() self._offsets = dict() @@ -3854,8 +3859,10 @@ cdef class ReadQuery(object): cdef: vector [void*] buffer_ptrs vector [uint64_t*] offsets_ptrs + void* tmp_ptr = NULL void* subarray_ptr = NULL np.npy_intp dims[1] + np.ndarray tmparray bytes battr_name Py_ssize_t nattr = len(attr_names) @@ -3880,11 +3887,13 @@ cdef class ReadQuery(object): tiledb_query_free(&query_ptr) _raise_ctx_err(ctx_ptr, rc) - cdef uint64_t* buffer_sizes_ptr = <uint64_t*> PyMem_Malloc(nattr * sizeof(uint64_t)) + # lifetime: free in finally clause + cdef uint64_t* buffer_sizes_ptr = <uint64_t*> PyDataMem_NEW(nattr * sizeof(uint64_t)) if buffer_sizes_ptr == NULL: tiledb_query_free(&query_ptr) raise MemoryError() - cdef uint64_t* offsets_sizes_ptr = <uint64_t*> PyMem_Malloc(nattr * sizeof(uint64_t)) + # lifetime: free in finally clause + cdef uint64_t* offsets_sizes_ptr = <uint64_t*> PyDataMem_NEW(nattr * sizeof(uint64_t)) if offsets_sizes_ptr == NULL: tiledb_query_free(&query_ptr) raise MemoryError() @@ -3911,19 +3920,31 @@ cdef class ReadQuery(object): # allocate buffer to hold offsets for var-length attribute # NOTE offsets_sizes is in BYTES - offsets_ptrs.push_back(<uint64_t*> PyMem_Malloc(<size_t>(offsets_sizes_ptr[i]))) - #self._offsets[name] = np.empty(offsets_sizes_ptr[i], dtype=np.uint8) + + # lifetime: + # - free on exception + # - otherwise, ownership transferred to NumPy + tmp_ptr = PyDataMem_NEW(<size_t>(offsets_sizes_ptr[i])) + if tmp_ptr == NULL: + raise MemoryError() + offsets_ptrs.push_back(<uint64_t*> tmp_ptr) + tmp_ptr = NULL else: rc = tiledb_array_max_buffer_size(ctx_ptr, array_ptr, battr_name, subarray_ptr, &(buffer_sizes_ptr[i])) - if rc != TILEDB_OK: _raise_ctx_err(ctx_ptr, rc) offsets_ptrs.push_back(NULL) - buffer_ptrs.push_back(<void*> PyMem_Malloc(<size_t>(buffer_sizes_ptr[i]))) - 
#self._buffers[name] = np.empty(buffer_sizes_ptr[i], dtype=np.uint8) + # lifetime: + # - free on exception + # - otherwise, ownership transferred to NumPy + tmp_ptr = PyDataMem_NEW(<size_t>(buffer_sizes_ptr[i])) + if tmp_ptr == NULL: + raise MemoryError() + buffer_ptrs.push_back(tmp_ptr) + tmp_ptr = NULL # set the query buffers for i in range(nattr): @@ -3956,39 +3977,34 @@ cdef class ReadQuery(object): for i in range(nattr): name = attr_names[i] - dtype = np.dtype('uint8') - # Note: we don't know the actual read size until *after* the query executes # so the realloc below is very important as consumers of this buffer # rely on the size corresponding to actual bytes read. if name != "coords" and schema.attr(name).isvar: dims[0] = offsets_sizes_ptr[i] - Py_INCREF(dtype) + tmp_ptr = PyDataMem_RENEW(offsets_ptrs[i], <size_t>(offsets_sizes_ptr[i])) self._offsets[name] = \ - PyArray_NewFromDescr( - <PyTypeObject*> np.ndarray, - dtype, 1, dims, NULL, - PyMem_Realloc(offsets_ptrs[i], <size_t>(offsets_sizes_ptr[i])), - np.NPY_OWNDATA, <object> NULL) + np.PyArray_SimpleNewFromData(1, dims, np.NPY_UINT8, tmp_ptr) + PyArray_ENABLEFLAGS(self._offsets[name], np.NPY_OWNDATA) dims[0] = buffer_sizes_ptr[i] - Py_INCREF(dtype) + tmp_ptr = PyDataMem_RENEW(buffer_ptrs[i], <size_t>(buffer_sizes_ptr[i])) self._buffers[name] = \ - PyArray_NewFromDescr( - <PyTypeObject*> np.ndarray, - dtype, 1, dims, NULL, - PyMem_Realloc(buffer_ptrs[i], <size_t>(buffer_sizes_ptr[i])), - np.NPY_OWNDATA, <object> NULL) + np.PyArray_SimpleNewFromData(1, dims, np.NPY_UINT8, tmp_ptr) + PyArray_ENABLEFLAGS(self._buffers[name], np.NPY_OWNDATA) + except: + # we only free the PyDataMem_NEW'd buffers on exception, + # otherwise NumPy manages them for i in range(nattr): if buffer_ptrs[i] != NULL: - PyMem_Free(buffer_ptrs[i]) + PyDataMem_FREE(buffer_ptrs[i]) if offsets_ptrs[i] != NULL: - PyMem_Free(offsets_ptrs[i]) + PyDataMem_FREE(offsets_ptrs[i]) raise finally: - PyMem_Free(buffer_sizes_ptr) - 
PyMem_Free(offsets_sizes_ptr) + PyDataMem_FREE(buffer_sizes_ptr) + PyDataMem_FREE(offsets_sizes_ptr) tiledb_query_free(&query_ptr) diff --git a/tiledb/tests/common.py b/tiledb/tests/common.py index af594cf91e..c2acaf85e6 100644 --- a/tiledb/tests/common.py +++ b/tiledb/tests/common.py @@ -4,9 +4,11 @@ import os import shutil import tempfile +import traceback from unittest import TestCase class DiskTestCase(TestCase): + pathmap = dict() def setUp(self): prefix = 'tiledb-' + self.__class__.__name__ @@ -20,8 +22,14 @@ def tearDown(self): except OSError as exc: print("test '{}' error deleting '{}'".format(self.__class__.__name__, dirpath)) - raise + print("registered paths and originating functions:") + for path,frame in self.pathmap.items(): + print(" '{}' <- '{}'".format(path,frame)) + raise exc def path(self, path): - return os.path.abspath(os.path.join(self.rootdir, path)) + out = os.path.abspath(os.path.join(self.rootdir, path)) + frame = traceback.extract_stack(limit=2)[-2][2] + self.pathmap[out] = frame + return out diff --git a/tiledb/tests/test_libtiledb.py b/tiledb/tests/test_libtiledb.py index 84d5144ddb..bead3d02cc 100644 --- a/tiledb/tests/test_libtiledb.py +++ b/tiledb/tests/test_libtiledb.py @@ -1046,14 +1046,13 @@ def test_varlen_write_floats(self): att = tiledb.Attr(dtype=np.float64, var=True, ctx=ctx) schema = tiledb.ArraySchema(dom, (att,), ctx=ctx) - tiledb.DenseArray.create(self.path("foo"), schema) with tiledb.DenseArray(self.path("foo"), mode='w', ctx=ctx) as T: T[:] = A with tiledb.DenseArray(self.path("foo"), mode='r', ctx=ctx) as T: T_ = T[:] - self.assertEqual(len(A), len(T)) + self.assertEqual(len(A), len(T_)) # can't use assert_array_equal w/ np.object array self.assertTrue(all(np.array_equal(x,A[i]) for i,x in enumerate(T_))) @@ -1560,9 +1559,9 @@ def test_pickle_roundtrip(self): with io.BytesIO() as buf, tiledb.DenseArray(uri) as V: pickle.dump(V, buf) buf.seek(0) - V2 = pickle.load(buf) - # make sure anonymous view pickles and 
round-trips - assert_array_equal(V, V2) + with pickle.load(buf) as V2: + # make sure anonymous view pickles and round-trips + assert_array_equal(V, V2) def test_pickle_with_config(self): import io, pickle @@ -1606,13 +1605,13 @@ def test_view_multiattr(self): anon_ar = np.random.rand(3, 3) named_ar = np.random.rand(3, 3) - with tiledb.DenseArray(uri, 'w') as T: + with tiledb.DenseArray(uri, 'w', ctx=ctx) as T: T[:] = {'': anon_ar, 'named': named_ar} with self.assertRaises(KeyError): - T = tiledb.DenseArray(uri, 'r', attr="foo111") + T = tiledb.DenseArray(uri, 'r', attr="foo111", ctx=ctx) - with tiledb.DenseArray(uri, 'r', attr="named") as T: + with tiledb.DenseArray(uri, 'r', attr="named", ctx=ctx) as T: assert_array_equal(T, named_ar) # make sure each attr view can pickle and round-trip with io.BytesIO() as buf: @@ -1621,7 +1620,7 @@ def test_view_multiattr(self): with pickle.load(buf) as T_rt: assert_array_equal(T, T_rt) - with tiledb.DenseArray(uri, 'r', attr="") as T: + with tiledb.DenseArray(uri, 'r', attr="", ctx=ctx) as T: assert_array_equal(T, anon_ar) with io.BytesIO() as buf: @@ -1632,10 +1631,10 @@ def test_view_multiattr(self): # set subarray on multi-attribute range_ar = np.arange(0,9).reshape(3,3) - with tiledb.DenseArray(uri, 'w', attr='named') as V_named: + with tiledb.DenseArray(uri, 'w', attr='named', ctx=ctx) as V_named: V_named[1:3,1:3] = range_ar[1:3,1:3] - with tiledb.DenseArray(uri, 'r', attr='named') as V_named: + with tiledb.DenseArray(uri, 'r', attr='named', ctx=ctx) as V_named: assert_array_equal(V_named[1:3,1:3], range_ar[1:3,1:3]) @@ -1749,8 +1748,7 @@ def test_kv_write_schema_load(self): a1 = tiledb.Attr("value", dtype=bytes, ctx=ctx) schema = tiledb.KVSchema(ctx, attrs=(a1,)) # persist kv schema - kv = tiledb.KV.create(self.path("foo"), schema, ctx=ctx) - self.assertNotEqual(kv, None) + tiledb.KV.create(self.path("foo"), schema, ctx=ctx) self.assertEqual(tiledb.KVSchema.load(self.path("foo"), ctx=ctx), schema) def 
test_kv_contains(self): @@ -1798,8 +1796,7 @@ def test_kv_write_consolidate(self): schema = tiledb.KVSchema(attrs=(a1,), ctx=ctx) # persist kv schema - kv = tiledb.KV.create(self.path("foo1"), schema, ctx=ctx) - kv.close() + tiledb.KV.create(self.path("foo1"), schema, ctx=ctx) def append_kv(path, k, v): kv = tiledb.KV(path, mode='w', ctx=ctx) @@ -1850,18 +1847,19 @@ def test_kv_write_load_read_encrypted(self): def test_kv_update_reload(self): # create a kv array - ctx = tiledb.Ctx() - a1 = tiledb.Attr("val", ctx=ctx, dtype=bytes) + ctx1 = tiledb.Ctx() + ctx2 = tiledb.Ctx() + a1 = tiledb.Attr("val", ctx=ctx1, dtype=bytes) # persist kv schema - schema = tiledb.KVSchema(attrs=(a1,), ctx=ctx) - tiledb.KV.create(self.path("foo"), schema, ctx=ctx) + schema = tiledb.KVSchema(attrs=(a1,), ctx=ctx1) + tiledb.KV.create(self.path("foo"), schema, ctx=ctx1) # load kv array - with tiledb.KV(self.path("foo"), mode='w', ctx=ctx) as kv1: + with tiledb.KV(self.path("foo"), mode='w', ctx=ctx1) as kv1: kv1['foo'] = 'bar' kv1.flush() - with tiledb.KV(self.path("foo"), mode='r', ctx=ctx) as kv2: + with tiledb.KV(self.path("foo"), mode='r', ctx=ctx2) as kv2: self.assertTrue('foo' in kv2) kv1['bar'] = 'baz' kv1.flush() @@ -2113,6 +2111,75 @@ def test_io(self): self.assertEqual(io.readall(), b"") +class MemoryTest(DiskTestCase): + # sanity check that memory usage doesn't increase more than 10% reading 40MB 100x + # https://github.com/TileDB-Inc/TileDB-Py/issues/150 + + def setUp(self): + super(MemoryTest, self).setUp() + import sys + if not sys.platform.startswith("linux"): + self.skipTest("Only run MemoryTest on linux") + + @staticmethod + def use_many_buffers(path): + import psutil, os + # https://stackoverflow.com/questions/938733/total-memory-used-by-python-process + process = psutil.Process(os.getpid()) + + x = np.ones(10000000, dtype=np.float32) + ctx = tiledb.Ctx() + d1 = tiledb.Dim( + 'test_domain', domain=(0, x.shape[0] - 1), tile=10000, dtype="uint32") + domain = 
tiledb.Domain(d1) + v = tiledb.Attr( + 'test_value', + dtype="float32") + + schema = tiledb.ArraySchema( + domain=domain, attrs=(v,), cell_order="row-major", tile_order="row-major") + + A = tiledb.DenseArray.create(path, schema) + + with tiledb.DenseArray(path, mode="w", ctx=ctx) as A: + A[:] = {'test_value': x} + + with tiledb.DenseArray(path, mode='r') as data: + data[:] + initial = process.memory_info().rss + print(" initial RSS: {}".format(round(initial / (10 ** 6)), 2)) + for i in range(100): + # read but don't store: this memory should be freed + data[:] + + if i % 10 == 0: + print(' read iter {}, RSS (MB): {}'.format( + i, round(process.memory_info().rss / (10 ** 6), 2))) + + return initial + + def test_memory_cleanup(self): + import tiledb, numpy as np + import psutil, os + + # run function which reads 100x from a 40MB test array + # TODO: RSS is too loose to do this end-to-end, so should use instrumentation. + print("Starting TileDB-Py memory test:") + initial = self.use_many_buffers(self.path('test_memory_cleanup')) + + process = psutil.Process(os.getpid()) + final = process.memory_info().rss + print(" final RSS: {}".format(round(final / (10 ** 6)), 2)) + + import gc + gc.collect() + + final_gc = process.memory_info().rss + print(" final RSS after forced GC: {}".format(round(final_gc / (10 ** 6)), 2)) + + self.assertTrue((final - initial) < (.1 * initial)) + + #if __name__ == '__main__': # # run a single example for in-process debugging # # better to use `pytest --gdb` if available
AUTOMATIC1111__stable-diffusion-webui-7353
[Bug]: thumbnail cards are not loading the preview image ### Is there an existing issue for this? - [X] I have searched the existing issues and checked the recent builds/commits ### What happened? just getting black image, and if I try to update an image, it goes black too. It was working before checkpoints were added, I don't know if that's related. ### Steps to reproduce the problem 1. Go to .... 2. Press .... 3. ... ### What should have happened? should see the preview images ### Commit where the problem happens 0a8515085ef258d4b76fdc000f7ed9d55751d6b8 ### What platforms do you use to access the UI ? _No response_ ### What browsers do you use to access the UI ? _No response_ ### Command Line Arguments ```Shell --api --cors-allow-origins http://localhost:5173 --administrator --no-half-vae --no-half --disable-safe-unpickle --force-cpu --xformers ``` ### List of extensions all of them ### Console logs ```Shell ERROR: Exception in ASGI application Traceback (most recent call last): File "D:\stable-diffusion-webui\venv\lib\site-packages\anyio\streams\memory.py", line 94, in receive return self.receive_nowait() File "D:\stable-diffusion-webui\venv\lib\site-packages\anyio\streams\memory.py", line 89, in receive_nowait raise WouldBlock anyio.WouldBlock During handling of the above exception, another exception occurred: Traceback (most recent call last): File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\base.py", line 77, in call_next message = await recv_stream.receive() File "D:\stable-diffusion-webui\venv\lib\site-packages\anyio\streams\memory.py", line 114, in receive raise EndOfStream anyio.EndOfStream During handling of the above exception, another exception occurred: Traceback (most recent call last): File "D:\stable-diffusion-webui\venv\lib\site-packages\uvicorn\protocols\http\h11_impl.py", line 407, in run_asgi result = await app( # type: ignore[func-returns-value] File 
"D:\stable-diffusion-webui\venv\lib\site-packages\uvicorn\middleware\proxy_headers.py", line 78, in __call__ return await self.app(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\fastapi\applications.py", line 270, in __call__ await super().__call__(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\applications.py", line 124, in __call__ await self.middleware_stack(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\errors.py", line 184, in __call__ raise exc File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\errors.py", line 162, in __call__ await self.app(scope, receive, _send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\base.py", line 106, in __call__ response = await self.dispatch_func(request, call_next) File "D:\stable-diffusion-webui\extensions\auto-sd-paint-ext\backend\app.py", line 391, in app_encryption_middleware res: StreamingResponse = await call_next(req) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\base.py", line 80, in call_next raise app_exc File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\base.py", line 69, in coro await self.app(scope, receive_or_disconnect, send_no_error) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\base.py", line 106, in __call__ response = await self.dispatch_func(request, call_next) File "D:\stable-diffusion-webui\modules\api\api.py", line 96, in log_and_time res: Response = await call_next(req) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\base.py", line 80, in call_next raise app_exc File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\base.py", line 69, in coro await self.app(scope, receive_or_disconnect, send_no_error) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\gzip.py", line 24, in 
__call__ await responder(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\gzip.py", line 43, in __call__ await self.app(scope, receive, self.send_with_gzip) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\cors.py", line 84, in __call__ await self.app(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\exceptions.py", line 79, in __call__ raise exc File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\middleware\exceptions.py", line 68, in __call__ await self.app(scope, receive, sender) File "D:\stable-diffusion-webui\venv\lib\site-packages\fastapi\middleware\asyncexitstack.py", line 21, in __call__ raise e File "D:\stable-diffusion-webui\venv\lib\site-packages\fastapi\middleware\asyncexitstack.py", line 18, in __call__ await self.app(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\routing.py", line 706, in __call__ await route.handle(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\routing.py", line 276, in handle await self.app(scope, receive, send) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\routing.py", line 66, in app response = await func(request) File "D:\stable-diffusion-webui\venv\lib\site-packages\fastapi\routing.py", line 235, in app raw_response = await run_endpoint_function( File "D:\stable-diffusion-webui\venv\lib\site-packages\fastapi\routing.py", line 163, in run_endpoint_function return await run_in_threadpool(dependant.call, **values) File "D:\stable-diffusion-webui\venv\lib\site-packages\starlette\concurrency.py", line 41, in run_in_threadpool return await anyio.to_thread.run_sync(func, *args) File "D:\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 31, in run_sync return await get_asynclib().run_sync_in_worker_thread( File 
"D:\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 937, in run_sync_in_worker_thread return await future File "D:\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 867, in run result = context.run(func, *args) File "D:\stable-diffusion-webui\modules\ui_extra_networks.py", line 28, in fetch_file if not any([Path(x).resolve() in Path(filename).resolve().parents for x in allowed_dirs]): File "D:\stable-diffusion-webui\modules\ui_extra_networks.py", line 28, in <listcomp> if not any([Path(x).resolve() in Path(filename).resolve().parents for x in allowed_dirs]): File "D:\Python\Python310\lib\pathlib.py", line 960, in __new__ self = cls._from_parts(args) File "D:\Python\Python310\lib\pathlib.py", line 594, in _from_parts drv, root, parts = self._parse_args(args) File "D:\Python\Python310\lib\pathlib.py", line 578, in _parse_args a = os.fspath(a) TypeError: expected str, bytes or os.PathLike object, not NoneType ``` ### Additional information _No response_
[ { "content": "import html\r\nimport json\r\nimport os\r\nimport urllib.parse\r\n\r\nfrom modules import shared, ui_extra_networks, sd_models\r\n\r\n\r\nclass ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):\r\n def __init__(self):\r\n super().__init__('Checkpoints')\r\n\r\n def re...
[ { "content": "import html\r\nimport json\r\nimport os\r\nimport urllib.parse\r\n\r\nfrom modules import shared, ui_extra_networks, sd_models\r\n\r\n\r\nclass ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):\r\n def __init__(self):\r\n super().__init__('Checkpoints')\r\n\r\n def re...
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index c66cb8307ad..5b471671a09 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -34,5 +34,5 @@ def list_items(self): } def allowed_directories_for_previews(self): - return [shared.cmd_opts.ckpt_dir, sd_models.model_path] + return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
praw-dev__praw-1304
Sphinx stops emitting warnings if it encounters only one **Describe the bug** <!-- A clear and concise description of what the bug is. --> When running pre_push, if Sphinx runs into an warning, it does does print any more. When there are lots of warnings, it takes a lot of time to re-run pre_push per warning I recommend adding the command line argument `--keep-going`. This will cause it to print all warnings. **System Info** - PRAW Version: Latest
[ { "content": "#!/usr/bin/env python3\n\"\"\"Run static analysis on the project.\"\"\"\n\nimport argparse\nimport sys\nfrom os import path\nfrom shutil import rmtree\nfrom subprocess import CalledProcessError, check_call\nfrom tempfile import mkdtemp\n\ncurrent_directory = path.abspath(path.join(__file__, \"..\"...
[ { "content": "#!/usr/bin/env python3\n\"\"\"Run static analysis on the project.\"\"\"\n\nimport argparse\nimport sys\nfrom os import path\nfrom shutil import rmtree\nfrom subprocess import CalledProcessError, check_call\nfrom tempfile import mkdtemp\n\ncurrent_directory = path.abspath(path.join(__file__, \"..\"...
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dcb104835..cf5962247 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: - name: Run pydocstyle run: pydocstyle praw - name: Run sphinx - run: sphinx-build -W docs/ /tmp/foo + run: sphinx-build -W --keep-going docs/ /tmp/foo strategy: matrix: os: [macOS-latest, ubuntu-latest, windows-latest] @@ -69,7 +69,7 @@ jobs: - name: Run pydocstyle run: pydocstyle praw - name: Run sphinx - run: sphinx-build -W docs/ /tmp/foo + run: sphinx-build -W --keep-going docs/ /tmp/foo strategy: matrix: python-version: [3.6, 3.7, 3.8] diff --git a/pre_push.py b/pre_push.py index 3be3c1937..49d13522f 100755 --- a/pre_push.py +++ b/pre_push.py @@ -53,7 +53,9 @@ def run_static(): tmp_dir = mkdtemp() try: - success &= do_process(["sphinx-build", "-W", "docs", tmp_dir]) + success &= do_process( + ["sphinx-build", "-W", "--keep-going", "docs", tmp_dir] + ) finally: rmtree(tmp_dir)
python-pillow__Pillow-399
Image opened twice if imagemagick and xv are installed If ImageMagick and xv are both installed and you call Image.show() it will open the image twice, once with display and once with xv. This is probably related to the for loop over registered viewers. Not sure why the return 1 isn't working though.
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# im.show() drivers\n#\n# History:\n# 2008-04-06 fl Created\n#\n# Copyright (c) Secret Labs AB 2008.\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom __future__ import print_function\n\nfrom PIL import Image\nimport os...
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# im.show() drivers\n#\n# History:\n# 2008-04-06 fl Created\n#\n# Copyright (c) Secret Labs AB 2008.\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom __future__ import print_function\n\nfrom PIL import Image\nimport os...
diff --git a/PIL/ImageShow.py b/PIL/ImageShow.py index 7e3d63ba3cd..78bc210f3d6 100644 --- a/PIL/ImageShow.py +++ b/PIL/ImageShow.py @@ -65,7 +65,7 @@ def show(self, image, **options): if base != image.mode and image.mode != "1": image = image.convert(base) - self.show_image(image, **options) + return self.show_image(image, **options) # hook methods
nilearn__nilearn-2792
`FirstLevelModel._get_voxelwise_model_attribute` only returns first design matrix's attribute <!--Provide a brief description of the bug.--> The FirstLevelModel attributes which use `_get_voxelwise_model_attribute()` only return the img for the first design matrix, rather than all of the design matrices' associated imgs. <!--Please fill in the following information, to the best of your ability.--> Nilearn version: ~0.7.1 (`master` at c4839dd) ### Expected behavior Accessing one of the voxelwise attributes which rely on `FirstLevelModel._get_voxelwise_model_attribute()`, such as `FirstLevelModel.residuals`, `FirstLevelModel.predicted`, or `FirstLevelModel.r_square`, should return a list of Nifti1Image objects with the same length as `FirstLevelModel.design_matrices_`. ### Actual behavior The attributes are lists with only one item. ### The associated code https://github.com/nilearn/nilearn/blob/c4839ddfe68ddf15775def1fc0ce9ea23544a527/nilearn/glm/first_level/first_level.py#L668-L686 ### The solution Unindenting line 686 should fix it, I think. There should also be at least one test to make sure that the length of the attribute list is the same as the length of `model.design_matrices`.
[ { "content": "\"\"\"\nThis module presents an interface to use the glm implemented in\nnistats.regression.\n\nIt contains the GLM and contrast classes that are meant to be the main objects\nof fMRI data analyses.\n\nAuthor: Bertrand Thirion, Martin Perez-Guevara, 2016\n\n\"\"\"\nimport glob\nimport json\nimport...
[ { "content": "\"\"\"\nThis module presents an interface to use the glm implemented in\nnistats.regression.\n\nIt contains the GLM and contrast classes that are meant to be the main objects\nof fMRI data analyses.\n\nAuthor: Bertrand Thirion, Martin Perez-Guevara, 2016\n\n\"\"\"\nimport glob\nimport json\nimport...
diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 79546fe1bc..df432c18c0 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -11,6 +11,9 @@ Fixes in :func:`nilearn.signal.clean`, so that these operations are applied in the same order as for the signals, i.e., first detrending and then temporal filtering (https://github.com/nilearn/nilearn/issues/2730). +- Fix number of attributes returned by the + :func:`nilearn.glm.first_level.FirstLevelModel._get_voxelwise_model_attribute` method in the first level model. + It used to return only the first attribute, and now returns as many attributes as design matrices. Enhancements diff --git a/nilearn/glm/first_level/first_level.py b/nilearn/glm/first_level/first_level.py index a26d9e084b..f7dfbb44c1 100644 --- a/nilearn/glm/first_level/first_level.py +++ b/nilearn/glm/first_level/first_level.py @@ -683,7 +683,7 @@ def _get_voxelwise_model_attribute(self, attribute, output.append(self.masker_.inverse_transform(voxelwise_attribute)) - return output + return output @auto_attr def residuals(self): diff --git a/nilearn/glm/tests/test_first_level.py b/nilearn/glm/tests/test_first_level.py index 8cafb03686..136afdfb1c 100644 --- a/nilearn/glm/tests/test_first_level.py +++ b/nilearn/glm/tests/test_first_level.py @@ -286,6 +286,7 @@ def test_compute_contrast_num_contrasts(): with pytest.warns(UserWarning, match='One contrast given, assuming it for all 3 runs'): multi_session_model.compute_contrast([np.eye(rk)[1]]) + def test_run_glm(): rng = np.random.RandomState(42) n, p, q = 100, 80, 10 @@ -703,6 +704,24 @@ def test_first_level_residuals(): assert_array_almost_equal(mean_residuals, 0) +@pytest.mark.parametrize("shapes", [ + [(10, 10, 10, 25)], + [(10, 10, 10, 25), (10, 10, 10, 100)], +]) +def test_get_voxelwise_attributes_should_return_as_many_as_design_matrices(shapes): + mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes) + + for i in range(len(design_matrices)): + design_matrices[i].iloc[:, 
0] = 1 + + model = FirstLevelModel(mask_img=mask, minimize_memory=False, + noise_model='ols') + model.fit(fmri_data, design_matrices=design_matrices) + + # Check that length of outputs is the same as the number of design matrices + assert len(model._get_voxelwise_model_attribute("resid", True)) == len(shapes) + + def test_first_level_predictions_r_square(): shapes, rk = [(10, 10, 10, 25)], 3 mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk) @@ -726,4 +745,4 @@ def test_first_level_predictions_r_square(): assert_almost_equal(np.mean(y_predicted - y_measured), 0) r_square_2d = model.masker_.transform(r_square_3d) - assert_array_less(0., r_square_2d) \ No newline at end of file + assert_array_less(0., r_square_2d)
e-valuation__EvaP-817
+x on update.sh, earlier apache restart update_production.sh is missing the x bit, also because of the cache clearing the apache is restarted 2min after the code has changed.
[ { "content": "from django.core.management.base import BaseCommand\nfrom django.core.serializers.base import ProgressBar\nfrom django.core.cache import cache\n\nfrom evap.evaluation.models import Course\nfrom evap.evaluation.tools import calculate_results\n\n\nclass Command(BaseCommand):\n args = ''\n help...
[ { "content": "from django.core.management.base import BaseCommand\nfrom django.core.serializers.base import ProgressBar\nfrom django.core.cache import cache\n\nfrom evap.evaluation.models import Course\nfrom evap.evaluation.tools import calculate_results\n\n\nclass Command(BaseCommand):\n args = ''\n help...
diff --git a/deployment/update_production.sh b/deployment/update_production.sh old mode 100644 new mode 100755 index 44c3c4f261..9cc89634a8 --- a/deployment/update_production.sh +++ b/deployment/update_production.sh @@ -6,13 +6,16 @@ set -x # print executed commands sudo -u evap git fetch sudo -u evap git checkout origin/release sudo pip3 install -r requirements.txt -sudo -u evap ./manage.py migrate -sudo -u evap ./manage.py collectstatic --noinput sudo -u evap ./manage.py compilemessages +sudo -u evap ./manage.py collectstatic --noinput sudo -u evap ./manage.py compress --verbosity=0 +sudo -u evap ./manage.py migrate +# reload only after static files are updated, so the new code finds all the files it expects. +# also, reload after migrations happened. see https://github.com/fsr-itse/EvaP/pull/817 for a discussion. +sudo service apache2 reload +# update caches. this can take minutes but doesn't need a reload. sudo -u evap ./manage.py clear_cache sudo -u evap ./manage.py refresh_results_cache -sudo service apache2 restart { set +x; } 2>/dev/null # don't print the echo command, and don't print the 'set +x' itself diff --git a/evap/evaluation/management/commands/refresh_results_cache.py b/evap/evaluation/management/commands/refresh_results_cache.py index 300b802123..3aaecd26e4 100644 --- a/evap/evaluation/management/commands/refresh_results_cache.py +++ b/evap/evaluation/management/commands/refresh_results_cache.py @@ -24,4 +24,4 @@ def handle(self, *args, **options): progress_bar.update(counter + 1) calculate_results(course) - self.stdout.write("Done with updating cache.\n") + self.stdout.write("Results cache has been refreshed.\n") diff --git a/evap/evaluation/tests/test_commands.py b/evap/evaluation/tests/test_commands.py index 5d3a856840..abc1671cd3 100644 --- a/evap/evaluation/tests/test_commands.py +++ b/evap/evaluation/tests/test_commands.py @@ -3,7 +3,7 @@ from unittest.mock import patch from django.conf import settings -from django.utils.six import StringIO 
+from io import StringIO from django.core import management, mail from django.test import TestCase from django.test.utils import override_settings diff --git a/evap/evaluation/tests/test_misc.py b/evap/evaluation/tests/test_misc.py index bc12d0765b..9fe8344e12 100644 --- a/evap/evaluation/tests/test_misc.py +++ b/evap/evaluation/tests/test_misc.py @@ -1,4 +1,5 @@ import os.path +from io import StringIO from django.conf import settings from django.contrib.auth.models import Group @@ -59,3 +60,15 @@ def load_test_data(self): call_command("loaddata", "test_data", verbosity=0) except Exception: self.fail("Test data failed to load.") + + +class TestMissingMigrations(TestCase): + def test_for_missing_migrations(self): + output = StringIO() + try: + call_command('makemigrations', interactive=False, dry_run=True, exit_code=True, stdout=output) + except SystemExit as e: + # The exit code will be 1 when there are no missing migrations + self.assertEqual(str(e), '1') + else: + self.fail("There are missing migrations:\n %s" % output.getvalue())
lnbits__lnbits-1183
[BUG] LNDhub extension return unusable `getinfo` response **Describe the bug** The [getinfo call](https://github.com/lnbits/lnbits/blob/main/lnbits/extensions/lndhub/views_api.py#L22) simply returns `bad auth` everytime, which breaks integrations like for us in BTCPay Server (see btcpayserver/btcpayserver#4414). **Expected behavior** Return [valid information](https://github.com/BlueWallet/LndHub/blob/master/doc/Send-requirements.md#get-getinfo), which we can use to connect. For us that would mean having a list of `uris` and a `block_height` being set.
[ { "content": "import asyncio\nimport time\nfrom base64 import urlsafe_b64encode\nfrom http import HTTPStatus\n\nfrom fastapi.param_functions import Query\nfrom fastapi.params import Depends\nfrom pydantic import BaseModel\nfrom starlette.exceptions import HTTPException\n\nfrom lnbits import bolt11\nfrom lnbits....
[ { "content": "import asyncio\nimport time\nfrom base64 import urlsafe_b64encode\nfrom http import HTTPStatus\n\nfrom fastapi.param_functions import Query\nfrom fastapi.params import Depends\nfrom pydantic import BaseModel\nfrom starlette.exceptions import HTTPException\n\nfrom lnbits import bolt11\nfrom lnbits....
diff --git a/lnbits/extensions/lndhub/views_api.py b/lnbits/extensions/lndhub/views_api.py index 8cbe5a6bfd..2acdc4ec93 100644 --- a/lnbits/extensions/lndhub/views_api.py +++ b/lnbits/extensions/lndhub/views_api.py @@ -21,7 +21,7 @@ @lndhub_ext.get("/ext/getinfo") async def lndhub_getinfo(): - raise HTTPException(status_code=HTTPStatus.UNAUTHORIZED, detail="bad auth") + return {"alias": LNBITS_SITE_TITLE} class AuthData(BaseModel):
boto__boto-2166
Invalid path check in euca-bundle-image The -i option uses convert_file in boto/roboto/param.py to verify that the path passed is, indeed, a file. This fails unless the path specified is a boring old file which is not necessary. Indeed it not being necessary is sort of the whole point in unix having a /dev in the first place. Everything is a file. The code calls os.path.isfile(value) in convert_file(). It should call os.path.exists(value) and not os.path.isdir(value). Directories are the only types of files which need to be considered special in the normal course of events.
[ { "content": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without re...
[ { "content": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without re...
diff --git a/boto/roboto/param.py b/boto/roboto/param.py index ed3e6be9b9..35a25b4af5 100644 --- a/boto/roboto/param.py +++ b/boto/roboto/param.py @@ -46,7 +46,7 @@ def convert_boolean(cls, param, value): @classmethod def convert_file(cls, param, value): - if os.path.isfile(value): + if os.path.exists(value) and not os.path.isdir(value): return value raise ValueError