in_source_id stringlengths 13 58 | issue stringlengths 3 241k | before_files listlengths 0 3 | after_files listlengths 0 3 | pr_diff stringlengths 109 107M ⌀ |
|---|---|---|---|---|
mit-ll-responsible-ai__hydra-zen-355 | `validates_with_beartype` considers `Partial` as `NoneType`
Hi @rsokl. I was having a blast using this fascinating library. But It seems when used with `hydra_zen.third_party.validates_with_beartype`, it casts `hydra_zen.typing.Partial` as `NoneType`.
```python
from hydra_zen.typing import Partial
from hydra_zen.third_party.beartype import validates_with_beartype
def f(x: Partial[list]):
return x
val_f = validates_with_beartype(f)
val_f(3)
```
It raises the following error. Can you take a look?
```bash
beartype.roar.BeartypeCallHintParamViolation: @beartyped __main__.f()
parameter x=3 violates type hint None, as int 3 not instance of <class "builtins.NoneType">.
```
| [
{
"content": "# Copyright (c) 2022 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\n# pyright: strict\n\nimport sys\nimport types\nfrom enum import Enum\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ByteString,\n Callable,\n... | [
{
"content": "# Copyright (c) 2022 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\n# pyright: strict\n\nimport sys\nimport types\nfrom enum import Enum\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ByteString,\n Callable,\n... | diff --git a/.github/workflows/tox_run.yml b/.github/workflows/tox_run.yml
index fe7d517c6..6d7f016b0 100644
--- a/.github/workflows/tox_run.yml
+++ b/.github/workflows/tox_run.yml
@@ -91,10 +91,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3.9
uses: actions/setup-python@v4
with:
- python-version: 3.8
+ python-version: 3.9
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/docs/source/changes.rst b/docs/source/changes.rst
index 514ba78bf..4429b8636 100644
--- a/docs/source/changes.rst
+++ b/docs/source/changes.rst
@@ -11,7 +11,7 @@ chronological order. All previous releases should still be available on pip.
.. _v0.9.0:
---------------------
-0.9.0rc4 - 2022-11-21
+0.9.0rc5 - 2022-12-09
---------------------
.. note:: This is documentation for an unreleased version of hydra-zen. You can try out this pre-release version using `pip install --pre hydra-zen`
@@ -123,6 +123,10 @@ Improvements
- :func:`~hydra_zen.hydrated_dataclass` will now produce a pickle-compatible dataclass type. See :pull:`338`.
- All documentation code blocks are scanned by pyright as part of our CI process. Several errors in the documentation were fixed. See :pull:`343` and :pull:`344`.
+Bug Fixes
+---------
+- :pull:`355` fixes an issue where the parameterized generic `hydra_zen.typing.Partial[<...>]` would return `None` for Python versions 3.9+. This prevented this annotation from being used by runtime type checkers.
+
Compatibility-Breaking Changes
------------------------------
- Previously, any class decorated by :func:`~hydra_zen.hydrated_dataclass` would have a `__module__` attribute set to `typing`. Now the class's `__module__` will reflect the module where its static definition resides. This enables pickle-compatibility (:pull:`338`). This is unlikely to cause any issues for users.
diff --git a/pyproject.toml b/pyproject.toml
index 99780bc3a..784ab0968 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -203,7 +203,7 @@ commands = pytest -n auto tests/test_docs_typecheck.py -vv
[testenv:third-party]
install_command = pip install --upgrade --upgrade-strategy eager {opts} {packages}
-basepython = python3.8
+basepython = python3.9
deps = {[testenv]deps}
torch
pytorch-lightning
diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py
index 330742397..eccfc1540 100644
--- a/src/hydra_zen/typing/_implementations.py
+++ b/src/hydra_zen/typing/_implementations.py
@@ -108,7 +108,7 @@ def __new__(
) -> Self:
...
- if sys.version_info >= (3, 9): # pragma: no cover
+ if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover
def __class_getitem__(cls, item: Any) -> types.GenericAlias:
...
diff --git a/tests/test_protocols.py b/tests/test_protocols.py
index 35db0c2b5..d29b570be 100644
--- a/tests/test_protocols.py
+++ b/tests/test_protocols.py
@@ -146,3 +146,8 @@ def test_protocol_checkers(x, yes_builds, yes_just, yes_partial):
def test_partial_protocol():
assert isinstance(partial(int), Partial)
assert not isinstance(print, Partial)
+
+
+def test_parameterized_partial_regression():
+ # https://github.com/mit-ll-responsible-ai/hydra-zen/issues/352
+ assert Partial[int].__origin__ is Partial # type: ignore
diff --git a/tests/test_py39.py b/tests/test_py39.py
index b2ad85ebc..ff17efdad 100644
--- a/tests/test_py39.py
+++ b/tests/test_py39.py
@@ -20,7 +20,8 @@
)
def test_sanitized_type_expected_behavior(in_type, expected_type):
# tests collections-as-generics introduced in py39
- assert sanitized_type(in_type) is expected_type
+ actual = sanitized_type(in_type)
+ assert actual is expected_type or actual == expected_type
@dataclass
class Tmp:
diff --git a/tests/test_third_party/test_using_beartype.py b/tests/test_third_party/test_using_beartype.py
index 94aec6458..c8e2d6d5f 100644
--- a/tests/test_third_party/test_using_beartype.py
+++ b/tests/test_third_party/test_using_beartype.py
@@ -1,12 +1,21 @@
# Copyright (c) 2022 Massachusetts Institute of Technology
# SPDX-License-Identifier: MIT
+from functools import partial
+from typing import Type
+
import pytest
from beartype.cave import RegexTypes
from beartype.vale import Is
from typing_extensions import Annotated
+from hydra_zen import builds
from hydra_zen.third_party.beartype import validates_with_beartype
+from hydra_zen.typing import Builds, Partial
+
+
+def func(x: int) -> float:
+ ...
@pytest.mark.parametrize(
@@ -14,6 +23,8 @@
[
(RegexTypes, "abc+", 22),
(Annotated[str, Is[lambda text: 2 == len(text)]], "hi", "bye"),
+ (Partial[float], partial(func), func),
+ (Builds[Type[float]], builds(func), func),
],
)
def test_beartype_specific_fields(custom_type, good_val, bad_val):
|
netbox-community__netbox-6029 | Virtual chassis search displays duplicate entries
### NetBox version
v2.10.6
### Python version
3.8
### Steps to Reproduce
1. Create a device named test-vc-1-1
2. Create a device named test-vc-1-2
3. Create a virtual chassis named test-vc-1 and bundle the two device created in 1. and 2.
4. In the virtual chassis view perform a search by name using test-vc-1
### Expected Behavior
The list view should display only one entry for the virtual chassis (check the screenshot of the search using Nebox 2.9 release)
### Observed Behavior
The list view displays twice the same entry for the virtual chassis, one per member (check the screenshot of the search using Nebox 2.10.6 release)
### Expected behaviour in netbox 2.9

### Observed behaviour in netbox 2.10

### Solution hint
After some code investigation it seems that the `VirtualChassisFilterSet.search` method in `dcim/filter.py` has something wrong with the [forged queryset](https://github.com/netbox-community/netbox/blob/91fe80f73c12bb4182ee892ca612252e9a30126b/netbox/dcim/filters.py#L1078).
```python
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = (
Q(name__icontains=value) |
Q(members__name__icontains=value) |
Q(domain__icontains=value)
)
return queryset.filter(qs_filter)
```
When you modify the queryset with the following code, the entry is not duplicated anymore in the WebUI.
```python
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = (
Q(name__icontains=value) |
Q(members__name__icontains=value) |
Q(domain__icontains=value)
)
return queryset.filter(qs_filter).distinct() # adding distinct de-duplicate the VC
```
| [
{
"content": "import django_filters\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\n\nfrom extras.filters import CustomFieldModelFilterSet, LocalConfigContextFilterSet, CreatedUpdatedFilterSet\nfrom tenancy.filters import TenancyFilterSet\nfrom tenancy.models import Tenant\nfro... | [
{
"content": "import django_filters\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\n\nfrom extras.filters import CustomFieldModelFilterSet, LocalConfigContextFilterSet, CreatedUpdatedFilterSet\nfrom tenancy.filters import TenancyFilterSet\nfrom tenancy.models import Tenant\nfro... | diff --git a/netbox/dcim/filters.py b/netbox/dcim/filters.py
index 41363c26119..548f401c04f 100644
--- a/netbox/dcim/filters.py
+++ b/netbox/dcim/filters.py
@@ -1075,7 +1075,7 @@ def search(self, queryset, name, value):
Q(members__name__icontains=value) |
Q(domain__icontains=value)
)
- return queryset.filter(qs_filter)
+ return queryset.filter(qs_filter).distinct()
class CableFilterSet(BaseFilterSet):
|
unionai-oss__pandera-909 | Implicit ignore_na=True behaviour causes custom dataframe checks to be ignored by default
**Describe the bug**
When using custom dataframe checks returning boolean series, `SchemaModel.validate()` does not report validation errors generated by a custom check if the record in question (or is it the entire dataframe?) contains null values. This behaviour is deafult and can be disabled by setting `ignore_na` to `False` (either via `@pa.dataframe_check(ignore_na=False)` or a Config entry such as `check_name = {"ignore_na": False}`). This default behaviour is surprising, and the root cause of my issue was discovered only after a lengthy debugging process with much help from @cosmicBboy. Perhaps this default behaviour could be changed or at least better documented, as Niels mentions that this has come up more than once previously.
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [x] (optional) I have confirmed this bug exists on the master branch of pandera.
#### Code Sample, a copy-pastable example
CSV (DataFrame):
```
field0,field1,field2
,foo,foo
```
```python
@pa.dataframe_check
def field1_does_not_equal_field2(cls, df: pd.DataFrame) -> Series[bool]:
return df["field1"] != df["field2"]
# Even though field1 == field2, validation passes because field0 is null
```
#### Expected behavior
Create a custom Pandera dataframe check returning a boolean series that accepts a CSV containing null values.
| [
{
"content": "\"\"\"Utility functions for validation.\"\"\"\n\nfrom functools import lru_cache\nfrom typing import NamedTuple, Optional, Tuple, Union\n\nimport pandas as pd\n\nSupportedTypes = NamedTuple(\n \"SupportedTypes\",\n (\n (\"table_types\", Tuple[type, ...]),\n (\"field_types\", Tu... | [
{
"content": "\"\"\"Utility functions for validation.\"\"\"\n\nfrom functools import lru_cache\nfrom typing import NamedTuple, Optional, Tuple, Union\n\nimport pandas as pd\n\nSupportedTypes = NamedTuple(\n \"SupportedTypes\",\n (\n (\"table_types\", Tuple[type, ...]),\n (\"field_types\", Tu... | diff --git a/pandera/check_utils.py b/pandera/check_utils.py
index f6bc56b8b..eca859db0 100644
--- a/pandera/check_utils.py
+++ b/pandera/check_utils.py
@@ -105,7 +105,7 @@ def prepare_series_check_output(
"""
if ignore_na:
isna = (
- check_obj.isna().any(axis="columns")
+ check_obj.isna().all(axis="columns")
if isinstance(check_obj, pd.DataFrame)
else check_obj.isna()
)
diff --git a/tests/core/test_checks.py b/tests/core/test_checks.py
index d3d6cbcf1..587c3354f 100644
--- a/tests/core/test_checks.py
+++ b/tests/core/test_checks.py
@@ -458,3 +458,19 @@ def test_dataframe_check_schema_error() -> None:
"index == 3 & column == 'b'"
).failure_case.iloc[0]
)
+
+
+def test_prepare_series_check_output_df_level():
+ """Test that dataframe-level checks only ignore rows where all values are null."""
+ df = pd.DataFrame(
+ {
+ "a": [1, 1, 2, 2, 3, 3, None],
+ "b": [2, 1, 4, 3, 6, 5, None],
+ "c": [None] * 7,
+ }
+ )
+ check = Check(lambda df: df["b"] == df["a"] * 2, ignore_na=True)
+ # The last record should evaluate to True, since all values are null
+ expected_output = [True, False, True, False, True, False, True]
+ result = check(df)
+ assert result.check_output.tolist() == expected_output
|
openfun__marsha-1250 | Allow instructors to publish their video publicly
## Feature Request
**Is your feature request related to a problem or unsupported use case? Please describe.**
We want instructors to be able to publish their video publicly.
At the moment, the flag is only accessible via the Django admin interface.
**Describe the solution you'd like**
expose the "is_public" field to the video API endpoint, allowing only instructors to update its value.
| [
{
"content": "\"\"\"Structure of Video related models API responses with Django Rest Framework serializers.\"\"\"\nfrom datetime import timedelta\nfrom urllib.parse import quote_plus\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django... | [
{
"content": "\"\"\"Structure of Video related models API responses with Django Rest Framework serializers.\"\"\"\nfrom datetime import timedelta\nfrom urllib.parse import quote_plus\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django... | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 59da34e4d4..81348b263b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@ Versioning](https://semver.org/spec/v2.0.0.html).
- Add API endpoints to pair an external device to Jitsi live videos
- Add a store in the frontend to control live layout
- Add frontend components to pair an external device to Jitsi live videos
+- Add public availability to video api
### Changed
diff --git a/src/backend/marsha/core/serializers/video.py b/src/backend/marsha/core/serializers/video.py
index 822fccdc3b..d494a98709 100644
--- a/src/backend/marsha/core/serializers/video.py
+++ b/src/backend/marsha/core/serializers/video.py
@@ -548,6 +548,7 @@ class Meta: # noqa
"active_stamp",
"description",
"id",
+ "is_public",
"is_ready_to_show",
"is_scheduled",
"timed_text_tracks",
diff --git a/src/backend/marsha/core/tests/test_api_video.py b/src/backend/marsha/core/tests/test_api_video.py
index cafc752ae0..535a01778f 100644
--- a/src/backend/marsha/core/tests/test_api_video.py
+++ b/src/backend/marsha/core/tests/test_api_video.py
@@ -226,6 +226,7 @@ def test_api_video_read_detail_token_user(self):
"id": str(video.id),
"title": video.title,
"active_stamp": "1533686400",
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -337,6 +338,7 @@ def test_api_video_read_detail_token_user_no_active_stamp(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -389,6 +391,7 @@ def test_api_video_read_detail_token_user_not_sucessfully_uploaded(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -528,6 +531,7 @@ def test_api_video_read_detail_by_organization_admin(self):
"description": video.description,
"has_transcript": False,
"id": str(video.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -602,6 +606,7 @@ def test_api_video_read_detail_by_playlist_admin(self):
"description": video.description,
"has_transcript": False,
"id": str(video.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -726,6 +731,7 @@ def test_api_video_read_list_user_with_playlist_access(self):
"description": video.description,
"has_transcript": False,
"id": str(video.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -803,6 +809,7 @@ def test_api_video_read_list_user_with_organization_access(self):
"description": video_1.description,
"has_transcript": False,
"id": str(video_1.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -828,6 +835,7 @@ def test_api_video_read_list_user_with_organization_access(self):
"description": video_2.description,
"has_transcript": False,
"id": str(video_2.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -919,6 +927,7 @@ def test_api_video_read_list_by_playlist_user_with_playlist_access(self):
"description": video.description,
"has_transcript": False,
"id": str(video.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -985,6 +994,7 @@ def test_api_video_read_list_by_playlist_user_with_org_access(self):
"description": video.description,
"has_transcript": False,
"id": str(video.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -1080,6 +1090,7 @@ def test_api_video_read_list_by_org_user_with_playlist_access(self):
"description": video.description,
"has_transcript": False,
"id": str(video.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -1145,6 +1156,7 @@ def test_api_video_read_list_by_org_user_with_org_access(self):
"description": video_1.description,
"has_transcript": False,
"id": str(video_1.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -1170,6 +1182,7 @@ def test_api_video_read_list_by_org_user_with_org_access(self):
"description": video_2.description,
"has_transcript": False,
"id": str(video_2.id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -1284,6 +1297,20 @@ def test_api_video_create_token_user_playlist_preexists(self):
self.assertEqual(response.status_code, 401)
self.assertFalse(models.Video.objects.exists())
+ def test_api_video_create_student(self):
+ """Student users should not be able to create videos."""
+ video = factories.VideoFactory()
+ jwt_token = AccessToken()
+ jwt_token.payload["resource_id"] = str(video.id)
+ jwt_token.payload["roles"] = ["student"]
+ jwt_token.payload["permissions"] = {"can_update": False}
+ response = self.client.post(
+ "/api/videos/",
+ HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
+ )
+ self.assertEqual(response.status_code, 403)
+ self.assertEqual(models.Video.objects.count(), 1)
+
def test_api_video_create_staff_or_user(self):
"""Users authenticated via a session should not be able to create videos."""
for user in [factories.UserFactory(), factories.UserFactory(is_staff=True)]:
@@ -1333,6 +1360,7 @@ def test_api_video_create_by_playlist_admin(self):
"description": "",
"has_transcript": False,
"id": str(models.Video.objects.get().id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -1493,6 +1521,7 @@ def test_api_video_create_by_organization_admin(self):
"description": "",
"has_transcript": False,
"id": str(models.Video.objects.get().id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -1559,6 +1588,7 @@ def test_api_video_create_with_scheduled_date_gets_ignored(self):
"description": "",
"has_transcript": False,
"id": str(models.Video.objects.get().id),
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"live_info": {},
@@ -1630,6 +1660,28 @@ def test_api_video_update_detail_anonymous(self):
video.refresh_from_db()
self.assertEqual(video.title, "my title")
+ def test_api_video_update_detail_student(self):
+ """Student users should not be allowed to update a video through the API."""
+ video = factories.VideoFactory(title="my title")
+ jwt_token = AccessToken()
+ jwt_token.payload["resource_id"] = str(video.id)
+ jwt_token.payload["roles"] = ["student"]
+
+ data = {"title": "my new title"}
+ response = self.client.put(
+ f"/api/videos/{video.id}/",
+ data,
+ content_type="application/json",
+ HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
+ )
+ self.assertEqual(response.status_code, 403)
+ content = json.loads(response.content)
+ self.assertEqual(
+ content, {"detail": "You do not have permission to perform this action."}
+ )
+ video.refresh_from_db()
+ self.assertEqual(video.title, "my title")
+
def test_api_video_update_detail_token_user_title(self):
"""Token users should be able to update the title of their video through the API."""
video = factories.VideoFactory(title="my title")
@@ -1886,6 +1938,41 @@ def test_api_video_instructor_update_video_in_read_only(self):
)
self.assertEqual(response.status_code, 403)
+ def test_api_video_patch_video_anonymous(self):
+ """Anonymous users should not be allowed to patch a video through the API."""
+ video = factories.VideoFactory(title="my title")
+ data = {"title": "my new title"}
+ response = self.client.patch(
+ f"/api/videos/{video.id}/",
+ data,
+ content_type="application/json",
+ )
+ self.assertEqual(response.status_code, 401)
+ video.refresh_from_db()
+ self.assertEqual(video.title, "my title")
+
+ def test_api_video_patch_video_student(self):
+ """Student users should not be allowed to patch a video through the API."""
+ video = factories.VideoFactory(title="my title")
+ jwt_token = AccessToken()
+ jwt_token.payload["resource_id"] = str(video.id)
+ jwt_token.payload["roles"] = ["student"]
+
+ data = {"title": "my new title"}
+ response = self.client.patch(
+ f"/api/videos/{video.id}/",
+ data,
+ content_type="application/json",
+ HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
+ )
+ self.assertEqual(response.status_code, 403)
+ content = json.loads(response.content)
+ self.assertEqual(
+ content, {"detail": "You do not have permission to perform this action."}
+ )
+ video.refresh_from_db()
+ self.assertEqual(video.title, "my title")
+
def test_api_video_instructor_patch_video_in_read_only(self):
"""An instructor with read_only set to true should not be able to patch the video."""
video = factories.VideoFactory()
@@ -1996,6 +2083,25 @@ def test_api_video_patch_detail_token_user_description(self):
video.refresh_from_db()
self.assertEqual(video.description, "my new description")
+ def test_api_video_patch_detail_token_user_is_public(self):
+ """Instructors and administrators should be able to
+ patch the public flag of their video through the API."""
+ video = factories.VideoFactory(is_public=False)
+ jwt_token = AccessToken()
+ jwt_token.payload["resource_id"] = str(video.id)
+ jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
+ jwt_token.payload["permissions"] = {"can_update": True}
+ data = {"is_public": True}
+ response = self.client.patch(
+ f"/api/videos/{video.id}/",
+ data,
+ HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
+ content_type="application/json",
+ )
+ self.assertEqual(response.status_code, 200)
+ video.refresh_from_db()
+ self.assertTrue(video.is_public)
+
def test_api_video_patch_by_organization_instructor(self):
"""Organization instructors cannot patch videos on the API."""
user = factories.UserFactory()
@@ -2472,6 +2578,24 @@ def test_api_video_delete_detail_token_user(self):
self.assertEqual(response.status_code, 403)
self.assertTrue(models.Video.objects.filter(id=video.id).exists())
+ def test_api_video_delete_detail_student(self):
+ """Student users should not be able to delete a video."""
+ video = factories.VideoFactory()
+ jwt_token = AccessToken()
+ jwt_token.payload["resource_id"] = str(video.id)
+ jwt_token.payload["roles"] = ["student"]
+
+ response = self.client.delete(
+ f"/api/videos/{video.id}/",
+ HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
+ )
+
+ self.assertEqual(response.status_code, 403)
+ content = json.loads(response.content)
+ self.assertEqual(
+ content, {"detail": "You do not have permission to perform this action."}
+ )
+
def test_api_video_delete_detail_staff_or_user(self):
"""Users authenticated via a session should not be able to delete a video."""
video = factories.VideoFactory()
@@ -3076,6 +3200,7 @@ def test_api_video_instructor_initiate_live(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -3125,6 +3250,7 @@ def test_api_video_instructor_initiate_jitsi_live(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -3226,6 +3352,7 @@ def test_api_instructor_start_non_created_live(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -3345,6 +3472,7 @@ def test_api_instructor_start_already_created_live(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -3547,6 +3675,7 @@ def test_api_video_instructor_stop_live(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -3710,6 +3839,7 @@ def test_api_video_instructor_end_idle_live(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -3791,6 +3921,7 @@ def test_api_video_instructor_end_paused_live(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -3895,6 +4026,7 @@ def test_api_video_instructor_end_paused_live_missing_manifest(self):
"id": str(video.id),
"title": video.title,
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
diff --git a/src/backend/marsha/core/tests/test_views_lti_development.py b/src/backend/marsha/core/tests/test_views_lti_development.py
index 76b8896c55..516f2ff90c 100644
--- a/src/backend/marsha/core/tests/test_views_lti_development.py
+++ b/src/backend/marsha/core/tests/test_views_lti_development.py
@@ -144,6 +144,7 @@ def test_views_lti_development_post_bypass_lti_instructor(self):
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -216,6 +217,7 @@ def test_views_lti_development_post_bypass_lti_instructor_no_video(self):
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
diff --git a/src/backend/marsha/core/tests/test_views_lti_video.py b/src/backend/marsha/core/tests/test_views_lti_video.py
index c08c1f983e..f6484770c4 100644
--- a/src/backend/marsha/core/tests/test_views_lti_video.py
+++ b/src/backend/marsha/core/tests/test_views_lti_video.py
@@ -105,6 +105,7 @@ def test_views_lti_video_post_instructor(self, mock_get_consumer_site, mock_veri
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -227,6 +228,7 @@ def test_views_lti_video_instructor_live_mode_on(
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -376,6 +378,7 @@ def test_views_lti_video_instructor_live_mode_and_chat_on(
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -517,6 +520,7 @@ def test_views_lti_video_student_live_mode_on(
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -640,6 +644,7 @@ def test_views_lti_video_post_administrator(
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -720,6 +725,7 @@ def test_views_lti_video_read_other_playlist(
context.get("resource"),
{
"active_stamp": "1569309880",
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -848,6 +854,7 @@ def test_views_lti_video_restricted_resolutions_list(
context.get("resource"),
{
"active_stamp": "1569309880",
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -964,6 +971,7 @@ def test_views_lti_video_harvested_upload_state(
context.get("resource"),
{
"active_stamp": "1569309880",
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -1082,6 +1090,7 @@ def test_views_lti_video_post_student_with_video(
context.get("resource"),
{
"active_stamp": "1569309880",
+ "is_public": False,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -1388,6 +1397,7 @@ def test_views_lti_video_has_transcript(self, mock_get_consumer_site, mock_verif
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
@@ -1478,6 +1488,7 @@ def test_views_lti_video_has_transcript_false(
context.get("resource"),
{
"active_stamp": None,
+ "is_public": False,
"is_ready_to_show": False,
"is_scheduled": False,
"show_download": True,
diff --git a/src/backend/marsha/core/tests/test_views_public_video.py b/src/backend/marsha/core/tests/test_views_public_video.py
index e0cc80b9e6..093dc505b8 100644
--- a/src/backend/marsha/core/tests/test_views_public_video.py
+++ b/src/backend/marsha/core/tests/test_views_public_video.py
@@ -60,6 +60,7 @@ def test_video_publicly_accessible(self):
context.get("resource"),
{
"active_stamp": "1569309880",
+ "is_public": True,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
@@ -235,6 +236,7 @@ def test_video_live_publicly_available(self):
context.get("resource"),
{
"active_stamp": "1569309880",
+ "is_public": True,
"is_ready_to_show": True,
"is_scheduled": False,
"show_download": True,
|
Mailu__Mailu-840 | Document the new setup utility
Title says all
| [
{
"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\n\n\nversion = os.getenv(\"this_version\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url... | [
{
"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__... | diff --git a/docs/compose/.env b/docs/compose/.env
index cf906b580..218b94d23 100644
--- a/docs/compose/.env
+++ b/docs/compose/.env
@@ -1,3 +1,5 @@
+# WARNING: this file is being deprecated over the new setup utility, found at https://setup.mailu.io
+
# Mailu main configuration file
## Most configuration variables can be modified through the Web interface,
# these few settings must however be configured before starting the mail
diff --git a/docs/compose/docker-compose.yml b/docs/compose/docker-compose.yml
index 2cff9608e..2686ee279 100644
--- a/docs/compose/docker-compose.yml
+++ b/docs/compose/docker-compose.yml
@@ -1,3 +1,5 @@
+# WARNING: this file is being deprecated over the new setup utility, found at https://setup.mailu.io
+
version: '2'
services:
diff --git a/docs/compose/setup.rst b/docs/compose/setup.rst
index 3ff1f6787..c1a620e6a 100644
--- a/docs/compose/setup.rst
+++ b/docs/compose/setup.rst
@@ -12,34 +12,22 @@ Mailu will store all of its persistent data in a path of your choice
mkdir /mailu
cd /mailu
-Download the initial configuration file
----------------------------------------
+Create the configuration files
+------------------------------
-Docker Compose configuration is stored in a file named
-:download:`docker-compose.yml`. Additionally, Mailu
-relies on a :download:`.env` file for various settings. Download
-the proper template files from the git repository. To download the configuration
-for the ``VERSION_TAG`` branch, use:
+Docker Compose configuration is stored in a file named ``docker-compose.yml``.
+Additionally, Mailu relies on a ``mailu.env`` file for various settings.
+Both files can be generated by the `mailu setup utility`_. The setup utility
+is mostly self-explanatory, with some more additional information in this section.
-.. code-block:: bash
-
- wget https://mailu.io/VERSION_TAG/_downloads/docker-compose.yml
- wget https://mailu.io/VERSION_TAG/_downloads/.env
-
-Important configuration variables
----------------------------------
-
-Open the ``.env`` file and review the following variable settings:
+.. _`mailu setup utility`: https://setup.mailu.io
-- Change ``ROOT`` if you have your setup directory in a different location then ``/mailu``.
-- Check ``VERSION`` to reflect the version you picked. (``master`` or ``1.5``).
-
-Make sure to read the comments in the file and instructions from the :ref:`common_cfg` section.
+.. _tls_flavor:
TLS certificates
````````````````
-Set the ``TLS_FLAVOR`` to one of the following
+Sets the ``TLS_FLAVOR`` to one of the following
values:
- ``cert`` is the default and requires certificates to be setup manually;
@@ -59,7 +47,7 @@ values:
Bind address
````````````
-Modify ``BIND_ADDRESS4`` and ``BIND_ADDRESS6`` to match the public IP addresses assigned to your server. For IPv6 you will need the ``<global>`` scope address.
+The bind addresses need to match the public IP addresses assigned to your server. For IPv6 you will need the ``<global>`` scope address.
You can find those addresses by running the following:
@@ -81,56 +69,17 @@ you would simply like the server to listen on all interfaces, use ``0.0.0.0`` an
.. _issues: https://github.com/Mailu/Mailu/issues/641
-Enable optional features
-------------------------
-
-Some of Mailu features are not used by every user and are thus not enabled in a
-default configuration.
-
-A Webmail is a Web interface exposing an email client. Mailu webmails are
-bound to the internal IMAP and SMTP server for users to access their mailbox through
-the Web. By exposing a complex application such as a Webmail, you should be aware of
-the security implications caused by such an increase of attack surface. The ``WEBMAIL``
-configuration option must be one of the following:
-
-- ``none`` is the default value, no Webmail service will be exposed;
-- ``roundcube`` will run the popular Roundcube Webmail;
-- ``rainloop`` will run the popular Rainloop Webmail.
-
-The administration interface is not exposed on the public address by default,
-you will need to set the ``ADMIN`` variable accordingly:
-
-- ``true`` will expose the admin interface in ``/admin``;
-- ``false`` (or any other value) will disable this behaviour.
-
-A Webdav server exposes a Dav interface over HTTP so that clients can store
-contacts or calendars using the mail account. This can be enabled using the `WEBDAV`
-setting. The configuration option must be one of the following:
-
-- ``none`` is the default value, no webdav service will be exposed;
-- ``radicale`` exposes the radicale Webdav service.
-
-An antivirus server helps fighting large scale virus spreading campaigns
-that leverage e-mail for initial infection. This can be setup using the ``ANTIVIRUS``
-setting. The configuration option must be one of the following:
-
-- ``none`` disables antivirus checks;
-- ``clamav`` is the default values, the popular ClamAV antivirus is enabled.
-
-Make sure that you have at least 1GB of memory for ClamAV to load its signature
-database.
+Review configuration variables
+------------------------------
-If you run Mailu behind a reverse proxy you can use ``REAL_IP_HEADER`` and
-``REAL_IP_FROM`` to set the values of respective the Nginx directives
-``real_ip_header`` and ``set_real_ip_from``. The ``REAL_IP_FROM`` configuration
-option is a comma-separated list of IPs (or CIDRs) of which for each a
-``set_real_ip_from`` directive is added in the Nginx configuration file.
+After downloading the files, open ``mailu.env`` and review the variable settings.
+Make sure to read the comments in the file and instructions from the :ref:`common_cfg` page.
Finish setting up TLS
---------------------
Mailu relies heavily on TLS and must have a key pair and a certificate
-available, at least for the hostname configured in the ``.env`` file.
+available, at least for the hostname configured in the ``mailu.env`` file.
If you set ``TLS_FLAVOR`` to ``cert`` or ``mail`` then you must create a ``certs`` directory
in your root path and setup a key-certificate pair there:
@@ -155,4 +104,4 @@ Finally, you must create the initial admin user account:
This will create a user named ``me@example.net`` with password ``password`` and administration privileges. Connect to the Web admin interface and change the password to a strong one.
- .. note:: It is vitally important that either a user with the same email as ``POSTMASTER`` in your ``.env`` exists, or you remember to create an alias with this name after you log in. All kinds of strange errors will occur as a result of not doing so!
+ .. note:: It is vitally important that either a user with the same email as ``POSTMASTER`` in your ``mailu.env`` exists, or you remember to create an alias with this name after you log in. All kinds of strange errors will occur as a result of not doing so!
diff --git a/docs/configuration.rst b/docs/configuration.rst
index ec114c979..e7dfa2af8 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -1,5 +1,9 @@
-Mailu configuration settings
-============================
+Configuration reference
+=======================
+
+This page explains the variables found in ``mailu.env``.
+In most cases ``mailu.env`` is setup correctly by the setup utility and can be left as-is.
+However, some advanced settings or modifications can be done by modifying this file.
.. _common_cfg:
@@ -37,6 +41,9 @@ The ``AUTH_RATELIMIT`` holds a security setting for fighting attackers that
try to guess user passwords. The value is the limit of requests that a single
IP address can perform against IMAP, POP and SMTP authentication endpoints.
+The ``TLS_FLAVOR`` sets how Mailu handles TLS connections. Setting this value to
+``notls`` will cause Mailu not to server any web content! More on :ref:`tls_flavor`.
+
Mail settings
-------------
diff --git a/docs/docker-compose.yml b/docs/docker-compose.yml
index b7026564b..9c5d24731 100644
--- a/docs/docker-compose.yml
+++ b/docs/docker-compose.yml
@@ -1,28 +1,10 @@
-version: '3'
+# This file is used to test the mailu/docs website
+# Deployment files can be found on github.com/mailu/infra
+version: '3'
services:
- docs_master:
- image: mailu/docs:master
- networks:
- - web
- labels:
- - traefik.enable=true
- - traefik.port=80
- - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/master/
-
- docs_15:
- image: mailu/docs:1.5
- networks:
- - web
- labels:
- - traefik.enable=true
- - traefik.port=80
- - traefik.root.frontend.redirect.regex=.*
- - traefik.root.frontend.redirect.replacement=/1.5/
- - traefik.root.frontend.rule=Host:${ADDRESS};PathPrefix:/
- - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/1.5/
-
-networks:
- web:
- external: true
+ docs:
+ image: ${DOCKER_ORG:-mailu}/docs:${MAILU_VERSION:-master}
+ ports:
+ - 127.0.0.1:8000:80
diff --git a/docs/index.rst b/docs/index.rst
index 98825ab6b..0808010c2 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -53,10 +53,10 @@ the version of Mailu that you are running.
:caption: Setup
setup
- configuration
compose/requirements
compose/setup
kubernetes/mailu/index
+ configuration
dns
reverse
database
diff --git a/setup/docker-compose.yml b/setup/docker-compose.yml
index 6d14153af..9c93fd6fe 100644
--- a/setup/docker-compose.yml
+++ b/setup/docker-compose.yml
@@ -1,50 +1,16 @@
-# This file is used to run the mailu/setup utility
+# This file is used to test the mailu/setup utility
+# Deployment files can be found on github.com/mailu/infra
version: '3.6'
services:
redis:
image: redis:alpine
- networks:
- - default
- setup_master:
- image: mailu/setup:master
- networks:
- - web
- - default
+ setup:
+ image: ${DOCKER_ORG:-mailu}/setup:${MAILU_VERSION:-master}
env_file: .env
- environment:
- this_version: "master"
- labels:
- - traefik.enable=true
- - traefik.port=80
- - traefik.docker.network=web
- - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/master/
depends_on:
- redis
-
- setup_release:
- image: mailu/setup:${RELEASE}
- networks:
- - web
- - default
- env_file: .env
- environment:
- this_version: ${RELEASE}
- labels:
- - traefik.enable=true
- - traefik.port=80
- - traefik.docker.network=web
- - traefik.root.frontend.redirect.regex=.*
- - traefik.root.frontend.redirect.replacement=/${RELEASE}/
- - traefik.root.frontend.rule=Host:${ADDRESS};PathPrefix:/
- - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/${RELEASE}/
- depends_on:
- - redis
-
-networks:
- web:
- external: true
- default:
- external: false
+ ports:
+ - 127.0.0.1:8001:80
diff --git a/setup/server.py b/setup/server.py
index fea27ead1..556d4b3a4 100644
--- a/setup/server.py
+++ b/setup/server.py
@@ -11,7 +11,7 @@
import hashlib
-version = os.getenv("this_version")
+version = os.getenv("this_version", "master")
static_url_path = "/" + version + "/static"
app = flask.Flask(__name__, static_url_path=static_url_path)
flask_bootstrap.Bootstrap(app)
diff --git a/setup/templates/steps/compose/02_services.html b/setup/templates/steps/compose/02_services.html
index 11e7a14e0..a78a3f620 100644
--- a/setup/templates/steps/compose/02_services.html
+++ b/setup/templates/steps/compose/02_services.html
@@ -1,13 +1,13 @@
{% call macros.panel("info", "Step 3 - pick some features") %}
<p>Mailu comes with multiple base features, including a specific admin
-interface, Web email clients (webmails), antispam, antivirus, etc. If you
-wish to disable some of these features, you are free to do so.</p>
-
-<p>Emails will be available through IMAP and POP3. You may also enable a Web
-email client. These do add some complexity but provide an easier way of
-accessing messages for beginner users.</p>
+interface, Web email clients, antispam, antivirus, etc.
+In this section you can enable the services to you liking.</p>
<!-- Switched from radio buttons to dropdown menu in order to remove the checkbox -->
+<p>A Webmail is a Web interface exposing an email client. Mailu webmails are
+bound to the internal IMAP and SMTP server for users to access their mailbox through
+the Web. By exposing a complex application such as a Webmail, you should be aware of
+the security implications caused by such an increase of attack surface.<p>
<div class="form-group">
<label>Enable Web email client (and path to the Web email client)</label>
<!-- <div class="radio"> -->
@@ -26,10 +26,9 @@
</div>
</div>
-<p>Email filtering is a really important features. You can still disable it, which
-will prevent Mailu from doing spam filtering, virus filtering, and from applying
-white and blacklists that you may configure in the admin interface. You may
-also disable the antivirus if required (it does use aroung 1GB of ram).</p>
+<p>An antivirus server helps fighting large scale virus spreading campaigns that leverage
+e-mail for initial infection. Make sure that you have at least 1GB of memory for ClamAV to
+load its signature database.</p>
<div class="form-check form-check-inline">
<label class="form-check-label">
@@ -38,6 +37,9 @@
</label>
</div>
+<p>A Webdav server exposes a Dav interface over HTTP so that clients can store
+contacts or calendars using the mail account.</p>
+
<div class="form-check form-check-inline">
<label class="form-check-label">
<input class="form-check-input" type="checkbox" name="webdav_enabled" value="radicale">
@@ -45,6 +47,8 @@
</label>
</div>
+<p>Fetchmail allows to download mails over IMAP/POP3 and uploads it your Mailu mailbox.</p>
+
<div class="form-check form-check-inline">
<label class="form-check-label">
<input class="form-check-input" type="checkbox" name="fetchmail_enabled" value="true">
diff --git a/setup/templates/steps/compose/03_expose.html b/setup/templates/steps/compose/03_expose.html
index 0c9127784..837b7bba2 100644
--- a/setup/templates/steps/compose/03_expose.html
+++ b/setup/templates/steps/compose/03_expose.html
@@ -10,7 +10,8 @@
an IPv4 or an IPv6 address if you wish to access Mailu.</p>
<p><span class="label label-warning">Warning</span> You must use specific addresses, please
-avoid generic all-interfaces addresses like <code>0.0.0.0</code> or <code>::</code>.</p>
+avoid generic all-interfaces addresses like <code>0.0.0.0</code> or <code>::</code>.
+<a href="https://mailu.io/{{ version }}/compose/setup.html#bind-address">How to find these addresses.</a></p>
<div class="form-group">
<label>IPv4 listen address</label>
@@ -26,13 +27,14 @@
pattern="^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$">
</div>
+<p>The unbound resolver enables Mailu to do DNSsec verification, DNS root lookups and caching. This also helps the antispam service not to get blocked by the public or ISP DNS servers.</p>
<div class="form-check form-check-inline">
<label class="form-check-label">
<input class="form-check-input" type="checkbox" name="resolver_enabled" value="true">
Enable unbound resolver
</label>
</div>
-
+<br><br>
<div class="form-group">
<label>Subnet of the docker network. This should not conflict with any networks to which your system is connected. (Internal and external!)</label>
<input class="form-control" type="text" name="subnet" required pattern="^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))$"
diff --git a/setup/templates/steps/config.html b/setup/templates/steps/config.html
index d843d684e..330e008f3 100644
--- a/setup/templates/steps/config.html
+++ b/setup/templates/steps/config.html
@@ -1,8 +1,17 @@
+{% if flavor == "stack" %}
+{% call macros.panel("danger", "Docker stack / swarm is experimental") %}
+Setup is capable of generating a somewhat decent docker-compose.yml,
+for the docker stack flavor. However its usage is for advanced users an is experimental.
+Expect many challenges is shared mail storage and fail-over scenarios! Some user experiences
+have been <a href="https://github.com/Mailu/Mailu/blob/master/docs/swarm/master/README.md">shared on GitHub.</a>
+{% endcall %}
+{% endif %}
+
{% call macros.panel("info", "Step 2 - Initial configuration") %}
<p>Before starting some variables must be set</p>
<div class="form-group">
- <label>Root path: </label>
+ <label>Mailu storage path: </label>
<!-- Validates path -->
<input class="form-control" type="text" name="root" value="/mailu" required pattern="^/[-_A-Za-z0-9]+(/[-_A-Za-z0-9]*)*">
</div>
@@ -27,7 +36,7 @@
</div>
<div class="form-group">
- <label>Choose how you wish to handle security (TLS) certificates</label>
+ <label>Choose how you wish to handle security <a href="https://mailu.io/{{ version }}/compose/setup.html#tls-certificates">TLS certificates</a></label>
<br/>
<select class="btn btn-primary dropdown-toggle" name="tls_flavor">
{% for tlsflavor in ["letsencrypt", "cert", "notls", "mail", "mail-letsencrypt"] %}
@@ -61,7 +70,7 @@
<label>Linked Website URL</label>
<!-- Validates url with or without https:// -->
<input class="form-control" type="url" name="website" value="https://mailu.io" required
- pattern="^(https?://)?([a-zA-Z0-9]([a-zA-ZäöüÄÖÜ0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}$">
+ pattern="^(https?://)?([a-zA-Z0-9]([a-zA-ZäöüÄÖÜ0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$">
</div>
<p>The admin interface is the main Mailu-specific bit, it provides tools to
diff --git a/setup/templates/steps/flavor.html b/setup/templates/steps/flavor.html
index 64aa0158b..a77b0215b 100644
--- a/setup/templates/steps/flavor.html
+++ b/setup/templates/steps/flavor.html
@@ -9,8 +9,5 @@
<div class="radio">
{{ macros.radio("flavor", "compose", "Compose", "simply using Docker Compose manager", flavor) }}
{{ macros.radio("flavor", "stack", "Stack", "using stack deployments in a Swarm cluster", flavor) }}
- {{ macros.radio("flavor", "rancher", "Rancher", "on top of the Rancher container manager", flavor) }}
- {{ macros.radio("flavor", "kubernetes", "Kubernetes", "on top of the Kubernetes container manager", flavor) }}
</div>
-
{% endcall %}
diff --git a/setup/templates/steps/stack/02_services.html b/setup/templates/steps/stack/02_services.html
index 36493e05d..4f50bb400 100644
--- a/setup/templates/steps/stack/02_services.html
+++ b/setup/templates/steps/stack/02_services.html
@@ -1,19 +1,15 @@
{% call macros.panel("info", "Step 3 - pick some features") %}
<p>Mailu comes with multiple base features, including a specific admin
-interface, Web email clients (webmails), antispam, antivirus, etc. If you
-wish to disable some of these features, you are free to do so.</p>
-
-<p>Emails will be available through IMAP and POP3. You may also enable a Web
-email client. These do add some complexity but provide an easier way of
-accessing messages for beginner users.</p>
+interface, Web email clients, antispam, antivirus, etc.
+In this section you can enable the services to you liking.</p>
<!-- Switched from radio buttons to dropdown menu in order to remove the checkbox -->
+<p>A Webmail is a Web interface exposing an email client. Mailu webmails are
+bound to the internal IMAP and SMTP server for users to access their mailbox through
+the Web. By exposing a complex application such as a Webmail, you should be aware of
+the security implications caused by such an increase of attack surface.<p>
<div class="form-group">
<label>Enable Web email client (and path to the Web email client)</label>
-<!-- <div class="radio"> -->
-<!-- {{ macros.radio("webmail_type", "roundcube", "RoundCube", "popular Webmail running on top of PHP") }} -->
-<!-- {{ macros.radio("webmail_type", "rainloop", "Rainloop", "lightweight Webmail based on PHP, no database") }} -->
-<!-- </div> -->
<br/>
<select class="btn btn-primary dropdown-toggle" name="webmail_type" id="webmail">
{% for webmailtype in ["none", "roundcube", "rainloop"] %}
@@ -26,10 +22,9 @@
</div>
</div>
-<p>Email filtering is a really important features. You can still disable it, which
-will prevent Mailu from doing spam filtering, virus filtering, and from applying
-white and blacklists that you may configure in the admin interface. You may
-also disable the antivirus if required (it does use aroung 1GB of ram).</p>
+<p>An antivirus server helps fighting large scale virus spreading campaigns that leverage
+e-mail for initial infection. Make sure that you have at least 1GB of memory for ClamAV to
+load its signature database.</p>
<div class="form-check form-check-inline">
<label class="form-check-label">
@@ -38,6 +33,9 @@
</label>
</div>
+<p>A Webdav server exposes a Dav interface over HTTP so that clients can store
+contacts or calendars using the mail account.</p>
+
<div class="form-check form-check-inline">
<label class="form-check-label">
<input class="form-check-input" type="checkbox" name="webdav_enabled" value="radicale">
@@ -45,6 +43,8 @@
</label>
</div>
+<p>Fetchmail allows to download mails over IMAP/POP3 and uploads it your Mailu mailbox.</p>
+
<div class="form-check form-check-inline">
<label class="form-check-label">
<input class="form-check-input" type="checkbox" name="fetchmail_enabled" value="true">
|
ansible__ansible-25551 | wait_for missing import for os: "global name 'os' is not defined"
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
wait_for
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.4.0 (devel 416d9774ce) last updated 2017/06/09 10:50:57 (GMT -400)
config file = /etc/ansible/ansible.cfg
configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /root/tmp/ansible/lib/ansible
executable location = /root/tmp/ansible/bin/ansible
python version = 2.7.5 (default, Aug 2 2016, 04:20:16) [GCC 4.8.5 20150623 (Red Hat 4.8.5-4)]
```
##### CONFIGURATION
<!---
Mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
##### OS / ENVIRONMENT
N/A
##### SUMMARY
When running the wait_for task to check for the existence of a file that is present, ansible reports a module failure due to `global name 'os' is not defined`.
##### STEPS TO REPRODUCE
```
cat << EOF > inventory
[local]
localhost ansible_connection=local
EOF
cat << EOF > playbook.yml
---
- hosts: local
tasks:
- wait_for:
path: '/tmp/test_file'
timeout: 1
EOF
touch /tmp/test_file
ansible -i inventory playbook.yml
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
`wait_for` successfully confirms file exists
##### ACTUAL RESULTS
```
Traceback (most recent call last):
File "/tmp/ansible_SksI7U/ansible_module_wait_for.py", line 606, in <module>
main()
File "/tmp/ansible_SksI7U/ansible_module_wait_for.py", line 505, in main
os.stat(path)
NameError: global name 'os' is not defined
fatal: [localhost]: FAILED! => {
"changed": false,
"failed": true,
"module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_SksI7U/ansible_module_wait_for.py\", line 606, in <module>\n main()\n File \"/tmp/ansible_SksI7U/ansible_module_wait_for.py\", line 505, in main\n os.stat(path)\nNameError: global name 'os' is not defined\n",
"module_stdout": "",
"msg": "MODULE FAILURE",
"rc": 0
}
```
Full output:
https://gist.github.com/jladdjr/bd5506b56d3bb66d975de278eb61c207#file-gistfile1-txt-L114
| [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation,... | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation,... | diff --git a/lib/ansible/modules/utilities/logic/wait_for.py b/lib/ansible/modules/utilities/logic/wait_for.py
index 9fe7bcbbd416a0..083e672bfe1206 100644
--- a/lib/ansible/modules/utilities/logic/wait_for.py
+++ b/lib/ansible/modules/utilities/logic/wait_for.py
@@ -183,6 +183,7 @@
from ansible.module_utils.basic import AnsibleModule, load_platform_subclass
from ansible.module_utils._text import to_native
+from ansible.module_utils.pycompat24 import get_exception
HAS_PSUTIL = False
|
ktbyers__netmiko-2037 | ssh_autodetect.py fails to detect Dell OS9 devices
I found issues with Netmiko ssh_autodetect.py feature with Dell OS9 (or dell_force10) switches but this same issues might appear with other vendor OSs as well. I'm asking for the comments and ideas for the best possible implementation.
The first issue is that the ssh_autodetect.py detects only one Dell hardware type, S4048-ON, instead of detecting the running OS. For example, it is also possible to run Dell OS10 on that specific hardware type. It would be better to match on the line 'Networking OS Version : 9.14(0.1)' on the output of 'show version' command and it would be simple to fix.
The other, more complex, issue is that there is 'show system' command in 'SSH_MAPPER_BASE' which is valid for Dell OS9 switches but it returns paginated output and therefore breaks the detection.
I tested this with python3.6 in which dictionaries are insertion ordered. The code loops through the items in SSH_MAPPER_BASE and the cmds are checked in order ‘show system’, ‘show version’, ‘show system’, ‘show version’, ‘show version’ etc against the corresponding search patterns.
Here's the output of the 'show system' command
```
Stack MAC : 00:00:00:00:00:00
Reload-Type : normal-reload [Next boot : normal-reload]
-- Unit 1 --
Unit Type : Management Unit
Status : online
Next Boot : online
Required Type : S3048-ON - 52-port GE/TE (SG-ON)
Current Type : S3048-ON - 52-port GE/TE (SG-ON)
Master priority : 0
Hardware Rev : 0.0
Num Ports : 52
Up Time : 22 wk, 1 day, 21 hr, 54 min
Networking OS Version : 9.14(0.1)
Jumbo Capable : yes
POE Capable : no
FIPS Mode : disabled
Burned In MAC : 00:00:00:00:00:00
No Of MACs : 3
-- Power Supplies --
--More--
```
and then the next command entered to the cli is ‘how version’ as the first character, ‘s’, just ‘exists’ from the previous output.
```
sw1#how version
^
% Error: Invalid input at "^" marker.
sw1#
```
I came up with couple of options how this could be solved;
1. Use OrderedDict for SSH_MAPPER_BASE and change the order of the commands
Currently items in SSH_MAPPER_BASE are in alphabetical order based on vendor name. There would be option to change the order of items in ‘SSH_MAPPER_BASE’ (as an ordered dict) so that the order of commands sent to the devices would be in the order of frequency in ‘SSH_MAPPER_BASE’ i.e.
'show version' -> appeares 11 times
'show system' -> appears 2 times
rest of the commands -> only once
This order would be more optimal as most of the devices can be identified based on output of 'show version'.
1. Change the commands to include only the matched line on the output
This would also solve the issue but there would be more commands to be sent to the devices which is not optimal
'show version | i ASA'
'show version | i Networking OS Version'
etc
1. Add the support for the paginated output
I suppose this would be rather complicated as the OS and the corresponding command is unknown.
Any other ideas, recommendations, comments etc?
| [
{
"content": "\"\"\"\nThe ssh_autodetect module is used to auto-detect the netmiko device_type to use to further initiate\na new SSH connection with a remote host. This auto-detection is based on a unique class called\n**SSHDetect**.\n\nNotes\n-----\n\nThe **SSHDetect** class is instantiated using the same para... | [
{
"content": "\"\"\"\nThe ssh_autodetect module is used to auto-detect the netmiko device_type to use to further initiate\na new SSH connection with a remote host. This auto-detection is based on a unique class called\n**SSHDetect**.\n\nNotes\n-----\n\nThe **SSHDetect** class is instantiated using the same para... | diff --git a/netmiko/ssh_autodetect.py b/netmiko/ssh_autodetect.py
index 9b1f96d26..971570244 100644
--- a/netmiko/ssh_autodetect.py
+++ b/netmiko/ssh_autodetect.py
@@ -101,7 +101,7 @@
},
"dell_force10": {
"cmd": "show version",
- "search_patterns": [r"S4048-ON"],
+ "search_patterns": [r"Real Time Operating System Software"],
"priority": 99,
"dispatch": "_autodetect_std",
},
|
chainer__chainer-3204 | VariableNode.shape is None although Parameter.initialize is called.
The following code returns `None` with the latest version of Chainer. (3.0.0b1, 034c4c596)
```py
a = chainer.Parameter()
a.initialize((1, 1))
print(a.node.shape) # returns None
```
It causes an internal error by running the following code.
```py
import chainer
from chainer import computational_graph as c
def main():
a = chainer.Parameter()
b = chainer.Parameter()
a.initialize((1, 1))
b.initialize((1, 1))
vs = a + b
g = c.build_computational_graph(vs)
g.dump()
if __name__ == '__main__':
main()
```
```
Traceback (most recent call last):
File "poc.py", line 16, in <module>
main()
File "poc.py", line 12, in main
g.dump()
File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/computational_graph.py", line 164, in dump
return self._to_dot()
File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/computational_graph.py", line 120, in _to_dot
node, self.variable_style, self.show_name).label
File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/computational_graph.py", line 29, in __init__
self.attribute = {'label': node.label}
File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/variable.py", line 280, in label
return '(%s), %s' % (', '.join(map(str, self.shape)),
TypeError: 'NoneType' object is not iterable
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at ipython-dev@python.org
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
%config Application.verbose_crash=True
```
| [
{
"content": "import collections\nimport copy\nimport heapq\nimport traceback\nimport warnings\nimport weakref\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer.initializers import constant\nfrom chainer.utils import argument\n\n\ndef _check_grad_type(fu... | [
{
"content": "import collections\nimport copy\nimport heapq\nimport traceback\nimport warnings\nimport weakref\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer.initializers import constant\nfrom chainer.utils import argument\n\n\ndef _check_grad_type(fu... | diff --git a/chainer/variable.py b/chainer/variable.py
index e0050d20c3ac..680b810d3aa1 100644
--- a/chainer/variable.py
+++ b/chainer/variable.py
@@ -1189,7 +1189,7 @@ def initialize(self, shape):
grad = None if ginit is None else initializers.generate_array(
ginit, shape, xp)
- self._data[0] = data
+ self.data = data
self.grad = grad
def update(self):
diff --git a/tests/chainer_tests/test_variable.py b/tests/chainer_tests/test_variable.py
index 75b9fa1511e7..feb66a15914f 100644
--- a/tests/chainer_tests/test_variable.py
+++ b/tests/chainer_tests/test_variable.py
@@ -757,6 +757,13 @@ def test_initialize_dtype(self):
self.assertEqual(x.data.dtype, np.float64)
self.assertEqual(x.grad.dtype, np.float64)
+ def test_initialize_node(self):
+ initializer = initializers.Zero(np.float64)
+ x = chainer.Parameter(initializer=initializer)
+ x.initialize((2, 3))
+ self.assertEqual(x.node.shape, (2, 3))
+ self.assertEqual(x.node.dtype, np.float64)
+
@attr.gpu
def test_initialize_to_gpu(self):
x = chainer.Parameter(initializer=initializers.Constant(self.a))
|
certbot__certbot-9561 | docs: generated man pages sticks everything under NAME
## My operating system is (include version):
FreeBSD
## I installed Certbot with (snap, OS package manager, pip, certbot-auto, etc):
i didn't, really, i was just looking for a man page on https://man.freebsd.org when I noticed this
## I ran this command and it produced this output:
apropos certbot
https://man.freebsd.org/cgi/man.cgi?query=certbot&apropos=1&sektion=0&manpath=FreeBSD+13.1-RELEASE+and+Ports&arch=default&format=html
## Certbot's behavior differed from what I expected because:
this should only show the first line, but it has the entire man page. That means that the source must be formated wrong
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Certbot documentation build configuration file, created by\n# sphinx-quickstart on Sun Nov 23 20:35:21 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in th... | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Certbot documentation build configuration file, created by\n# sphinx-quickstart on Sun Nov 23 20:35:21 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in th... | diff --git a/certbot/docs/conf.py b/certbot/docs/conf.py
index de9e287fc03..5102e7a1e39 100644
--- a/certbot/docs/conf.py
+++ b/certbot/docs/conf.py
@@ -286,7 +286,7 @@
man_pages = [
('index', 'certbot', u'Certbot Documentation',
[project], 7),
- ('man/certbot', 'certbot', u'certbot script documentation',
+ ('man/certbot', 'certbot', u"Automatically configure HTTPS using Let's Encrypt",
[project], 1),
]
diff --git a/certbot/docs/man/certbot.rst b/certbot/docs/man/certbot.rst
index 2f25504b04e..cc690d2da1d 100644
--- a/certbot/docs/man/certbot.rst
+++ b/certbot/docs/man/certbot.rst
@@ -1,3 +1,23 @@
:orphan:
-.. literalinclude:: ../cli-help.txt
+=======
+certbot
+=======
+
+Synopsis
+========
+The objective of Certbot, Let's Encrypt, and the ACME (Automated Certificate Management
+Environment) protocol is to make it possible to set up an HTTPS server and have it automatically
+obtain a browser-trusted certificate, without any human intervention. This is accomplished by
+running a certificate management agent on the web server.
+
+This agent is used to:
+
+- Automatically prove to the Let's Encrypt CA that you control the website
+- Obtain a browser-trusted certificate and set it up on your web server
+- Keep track of when your certificate is going to expire, and renew it
+- Help you revoke the certificate if that ever becomes necessary.
+
+Options
+=======
+.. literalinclude:: ../cli-help.txt
\ No newline at end of file
|
django-cms__django-filer-486 | [Django>=1.7] FilerFolderField admin widget hidden
If you use Django 1.7 and have a FilerFolderField in a model, the corresponding admin widget will not show up.
That’s happening since django/django@dc3d2ac98c1bcfad74d3e9523caf07e7e9fb15aa. In `fieldset.html`, a `hidden` class is added to a form row if all the contained widgets have `is_hidden = True`.
Setting `is_hidden` to `False` works fine. I’m not sure whether this attribute is useful somewhere else. I found it used 7 times in Django, and always refer to showing the field and its label. Since it’s not really a hidden field, I suggest we do that change.
| [
{
"content": "#-*- coding: utf-8 -*-\nfrom django.template.loader import render_to_string\nimport inspect\nimport warnings\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget\nfrom django.contrib.admin.sites import site\nfrom django.core.ur... | [
{
"content": "#-*- coding: utf-8 -*-\nfrom django.template.loader import render_to_string\nimport inspect\nimport warnings\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget\nfrom django.contrib.admin.sites import site\nfrom django.core.ur... | diff --git a/filer/fields/folder.py b/filer/fields/folder.py
index fdd3c7f0f..ac5b5050d 100644
--- a/filer/fields/folder.py
+++ b/filer/fields/folder.py
@@ -18,7 +18,6 @@
class AdminFolderWidget(ForeignKeyRawIdWidget):
choices = None
input_type = 'hidden'
- is_hidden = True
def render(self, name, value, attrs=None):
obj = self.obj_for_value(value)
|
interlegis__sapl-1513 | Problema ao apagar dados legados de ExpedienteSessaoPlenaria
O campo txt_expediente pode possuir valores com tags em HTML, o que prejudica a formatação da string sql de delete por contas das aspas duplas.
| [
{
"content": "import re\nfrom datetime import date\nfrom functools import lru_cache\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contr... | [
{
"content": "import re\nfrom datetime import date\nfrom functools import lru_cache\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contr... | diff --git a/sapl/legacy/migration.py b/sapl/legacy/migration.py
index d34cf935a..e9d4f73b1 100644
--- a/sapl/legacy/migration.py
+++ b/sapl/legacy/migration.py
@@ -259,6 +259,9 @@ def excluir_registrovotacao_duplicados():
def delete_old(legacy_model, cols_values):
+ # ajuste necessário por conta de cósigos html em txt_expediente
+ if legacy_model.__name__ == 'ExpedienteSessaoPlenaria':
+ cols_values.pop('txt_expediente')
def eq_clause(col, value):
if value is None:
|
iterative__dvc-2364 | status: change nothing to reproduce message
If I use DVC only to version data/models and don't care about pipelines, this message:
`Pipelines are up to date. Nothing to reproduce.`
looks really strange.
Let's change it to something more generic:
`Data and pipelines are up to date.`
or something similar
| [
{
"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.command.data_sync import CmdDataBase\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n UP_TO_DATE_MSG = \"Pi... | [
{
"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.command.data_sync import CmdDataBase\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n UP_TO_DATE_MSG = \"Da... | diff --git a/dvc/command/status.py b/dvc/command/status.py
index ecb6082f25..9f6c90ab0a 100644
--- a/dvc/command/status.py
+++ b/dvc/command/status.py
@@ -12,7 +12,7 @@
class CmdDataStatus(CmdDataBase):
STATUS_LEN = 20
STATUS_INDENT = "\t"
- UP_TO_DATE_MSG = "Pipelines are up to date. Nothing to reproduce."
+ UP_TO_DATE_MSG = "Data and pipelines are up to date."
def _normalize(self, s):
s += ":"
|
pytorch__vision-6883 | deepcopying retinanet fails
### 🐛 Describe the bug
Deepcoping retinanet fails
```py
from torchvision.models.detection.retinanet import retinanet_resnet50_fpn
from torchvision.models.resnet import ResNet50_Weights
from copy import deepcopy
from torch import nn
class RetinaNet(nn.Module):
def __init__(self):
super().__init__()
self.weights_backbone = ResNet50_Weights.IMAGENET1K_V1
self.model = retinanet_resnet50_fpn(weights=None, weights_backbone=self.weights_backbone)
if __name__ == '__main__':
deepcopy(RetinaNet())
```
Error:
```console
le "/Users/goku/Desktop/work/repos/lightning-bolts/build/tmp2.py", line 15, in <module>
deepcopy(RetinaNet())
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 270, in _reconstruct
state = deepcopy(state, memo)
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 230, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 264, in _reconstruct
y = func(*args)
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/enum.py", line 384, in __call__
return cls.__new__(cls, value)
File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/enum.py", line 702, in __new__
raise ve_exc
ValueError: Weights(url='https://download.pytorch.org/models/resnet50-0676ba61.pth', transforms=functools.partial(<class 'torchvision.transforms._presets.ImageClassification'>, crop_size=224), meta={'min_size': (1, 1), 'categories': ['tench', 'goldfish', 'great white shark', ...}}, '_docs': 'These weights reproduce closely the results of the paper using a simple training recipe.'}) is not a valid ResNet50_Weights
```
In short this fails:
```python
from copy import deepcopy
from torchvision.models.resnet import ResNet50_Weights
deepcopy(ResNet50_Weights.IMAGENET1K_V1)
```
### Versions
```console
Collecting environment information...
PyTorch version: 1.13.0
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 11.6 (x86_64)
GCC version: Could not collect
Clang version: 13.0.0 (clang-1300.0.29.3)
CMake version: version 3.21.3
Libc version: N/A
Python version: 3.9.13 (main, Oct 13 2022, 16:12:30) [Clang 12.0.0 ] (64-
Python platform: macOS-10.16-x86_64-i386-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.23.4
[pip3] pytorch-lightning==1.8.0rc1
[pip3] torch==1.13.0
[pip3] torchmetrics==0.10.1
[pip3] torchvision==0.14.0
[conda] numpy 1.23.4 pypi_0 pypi
[conda] pytorch-lightning 1.8.0rc1 pypi_0 pypi
[conda] torch 1.13.0 pypi_0 pypi
[conda] torchmetrics 0.10.1 pypi_0 pypi
[conda] torchvision 0.14.0 pypi_0 pypi
```
| [
{
"content": "import importlib\nimport inspect\nimport sys\nfrom dataclasses import dataclass, fields\nfrom inspect import signature\nfrom types import ModuleType\nfrom typing import Any, Callable, cast, Dict, List, Mapping, Optional, TypeVar, Union\n\nfrom torch import nn\n\nfrom torchvision._utils import StrE... | [
{
"content": "import importlib\nimport inspect\nimport sys\nfrom dataclasses import dataclass, fields\nfrom inspect import signature\nfrom types import ModuleType\nfrom typing import Any, Callable, cast, Dict, List, Mapping, Optional, TypeVar, Union\n\nfrom torch import nn\n\nfrom torchvision._utils import StrE... | diff --git a/test/test_extended_models.py b/test/test_extended_models.py
index c467564c9c4..2cd8a568113 100644
--- a/test/test_extended_models.py
+++ b/test/test_extended_models.py
@@ -1,3 +1,4 @@
+import copy
import os
import pytest
@@ -59,6 +60,25 @@ def test_get_model_weights(name, weight):
assert models.get_model_weights(name) == weight
+@pytest.mark.parametrize("copy_fn", [copy.copy, copy.deepcopy])
+@pytest.mark.parametrize(
+ "name",
+ [
+ "resnet50",
+ "retinanet_resnet50_fpn_v2",
+ "raft_large",
+ "quantized_resnet50",
+ "lraspp_mobilenet_v3_large",
+ "mvit_v1_b",
+ ],
+)
+def test_weights_copyable(copy_fn, name):
+ model_weights = models.get_model_weights(name)
+ for weights in list(model_weights):
+ copied_weights = copy_fn(weights)
+ assert copied_weights is weights
+
+
@pytest.mark.parametrize(
"module", [models, models.detection, models.quantization, models.segmentation, models.video, models.optical_flow]
)
diff --git a/torchvision/models/_api.py b/torchvision/models/_api.py
index 52ac070e6d3..d550594c5b7 100644
--- a/torchvision/models/_api.py
+++ b/torchvision/models/_api.py
@@ -75,6 +75,9 @@ def __getattr__(self, name):
return object.__getattribute__(self.value, name)
return super().__getattr__(name)
+ def __deepcopy__(self, memodict=None):
+ return self
+
def get_weight(name: str) -> WeightsEnum:
"""
|
ManimCommunity__manim-907 | Sound in manimce
I want to add sound to my video in manimce like manim by 3b1b
I use the following code -:
`self.add_sound(" sound file name in assets folder", gain = value)`
But it's giving an error in manimce

I think there will be some other syntax in manimce.
Please resolve this!!
| [
{
"content": "\"\"\"Basic canvas for animations.\"\"\"\n\n\n__all__ = [\"Scene\"]\n\n\nimport inspect\nimport random\nimport warnings\nimport platform\nimport copy\nimport string\nimport types\n\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom .. import config, logger\nfrom ..animation.animation import Animat... | [
{
"content": "\"\"\"Basic canvas for animations.\"\"\"\n\n\n__all__ = [\"Scene\"]\n\n\nimport inspect\nimport random\nimport warnings\nimport platform\nimport copy\nimport string\nimport types\n\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom .. import config, logger\nfrom ..animation.animation import Animat... | diff --git a/manim/scene/scene.py b/manim/scene/scene.py
index 9110b54fd1..11e42c8de4 100644
--- a/manim/scene/scene.py
+++ b/manim/scene/scene.py
@@ -880,5 +880,5 @@ def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):
"""
if self.renderer.skip_animations:
return
- time = self.time + time_offset
+ time = self.renderer.time + time_offset
self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)
diff --git a/tests/test_sound.py b/tests/test_sound.py
new file mode 100644
index 0000000000..d7614eca65
--- /dev/null
+++ b/tests/test_sound.py
@@ -0,0 +1,20 @@
+import os, struct, wave
+
+from manim import Scene
+
+
+def test_add_sound():
+ # create sound file
+ f = wave.open("noise.wav", "w")
+ f.setparams((2, 2, 44100, 0, "NONE", "not compressed"))
+ for _ in range(22050): # half a second of sound
+ packed_value = struct.pack("h", 14242)
+ f.writeframes(packed_value)
+ f.writeframes(packed_value)
+
+ f.close()
+
+ scene = Scene()
+ scene.add_sound("noise.wav")
+
+ os.remove("noise.wav")
|
benoitc__gunicorn-1654 | Access log not emitted when using `logconfig_dict`
Using the unreleased version from `master`, HTTP requests do not create log records in the logger `gunicorn.access` when using the new `logconfig_dict`.
See relevant snippet from `glogging.py`:
```python
def access(self, resp, req, environ, request_time):
""" See http://httpd.apache.org/docs/2.0/logs.html#combined
for format details
"""
if not (self.cfg.accesslog or self.cfg.logconfig or
(self.cfg.syslog and not self.cfg.disable_access_log_redirection)):
return
```
| [
{
"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport base64\nimport binascii\nimport time\nimport logging\nlogging.Logger.manager.emittedNoHandlerWarning = 1\nfrom logging.config import fileConfig\ntry:\n fro... | [
{
"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport base64\nimport binascii\nimport time\nimport logging\nlogging.Logger.manager.emittedNoHandlerWarning = 1\nfrom logging.config import fileConfig\ntry:\n fro... | diff --git a/gunicorn/glogging.py b/gunicorn/glogging.py
index f5d4cfd0d..88f0c1336 100644
--- a/gunicorn/glogging.py
+++ b/gunicorn/glogging.py
@@ -339,6 +339,7 @@ def access(self, resp, req, environ, request_time):
"""
if not (self.cfg.accesslog or self.cfg.logconfig or
+ self.cfg.logconfig_dict or
(self.cfg.syslog and not self.cfg.disable_access_log_redirection)):
return
|
cisagov__manage.get.gov-959 | Fix for checkbox accessibility no longer working
### Current Behavior
Checkboxes in django admin superuser no longer generated with an associated label.
### Expected Behavior
Expect to see accessible checkboxes in django admin, no missing columns in either superuser or staff views.
### Steps to Reproduce
1. Log in as superuser
2. Go to list view on a model
3. Run ANDI or inspect checkboxes
### Environment
_No response_
### Additional Context
Traced this to the fix for missing columns in staff view. The check {% if results.0.form %} did not work and failed silently. Have a fix for this.
Will prioritize implementation and deployment to staging since we have some accessibility testing in progress.
### Issue Links
_No response_
| [
{
"content": "from django import template\nimport re\n\nregister = template.Library()\n\n\n@register.filter(name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\n@register.filter\ndef ... | [
{
"content": "from django import template\nimport re\n\nregister = template.Library()\n\n\n@register.filter(name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\n@register.filter\ndef ... | diff --git a/src/registrar/templates/admin/change_list_results.html b/src/registrar/templates/admin/change_list_results.html
index 9ee3f9f59..831350888 100644
--- a/src/registrar/templates/admin/change_list_results.html
+++ b/src/registrar/templates/admin/change_list_results.html
@@ -17,7 +17,7 @@
<thead>
<tr>
-{% if results.0.form %}
+{% if results.0|contains_checkbox %}
{# .gov - hardcode the select all checkbox #}
<th scope="col" class="action-checkbox-column" title="Toggle all">
<div class="text">
diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py
index f16408bf8..3614db18e 100644
--- a/src/registrar/templatetags/custom_filters.py
+++ b/src/registrar/templatetags/custom_filters.py
@@ -40,3 +40,11 @@ def slice_after(value, substring):
result = value[index + len(substring) :]
return result
return value
+
+
+@register.filter
+def contains_checkbox(html_list):
+ for html_string in html_list:
+ if re.search(r'<input[^>]*type="checkbox"', html_string):
+ return True
+ return False
diff --git a/src/registrar/tests/test_templatetags.py b/src/registrar/tests/test_templatetags.py
index 36325ab5d..d5f8523c8 100644
--- a/src/registrar/tests/test_templatetags.py
+++ b/src/registrar/tests/test_templatetags.py
@@ -8,6 +8,7 @@
extract_a_text,
find_index,
slice_after,
+ contains_checkbox,
)
@@ -83,3 +84,21 @@ def test_slice_after(self):
self.assertEqual(
result, value
) # Should return the original value if substring not found
+
+ def test_contains_checkbox_with_checkbox(self):
+ # Test the filter when HTML list contains a checkbox
+ html_list = [
+ '<input type="checkbox" name="_selected_action">',
+ "<div>Some other HTML content</div>",
+ ]
+ result = contains_checkbox(html_list)
+ self.assertTrue(result) # Expecting True
+
+ def test_contains_checkbox_without_checkbox(self):
+ # Test the filter when HTML list does not contain a checkbox
+ html_list = [
+ "<div>Some HTML content without checkbox</div>",
+ "<p>More HTML content</p>",
+ ]
+ result = contains_checkbox(html_list)
+ self.assertFalse(result) # Expecting False
|
pymedusa__Medusa-9537 | Error message "Failed parsing provider" & "list index out of range"
**Describe the bug**
Error message:
`2020-12-05 07:52:25 ERROR SEARCHQUEUE-BACKLOG-260586 :: [Beyond-HD] :: [ce68da5] Failed parsing provider.
Traceback (most recent call last):
File "/Applications/Medusa/medusa/providers/torrent/html/beyondhd.py", line 128, in parse
download_url = urljoin(self.url, cells[2].find('a')['href'])
IndexError: list index out of range`
**Medusa (please complete the following information):**
- OS: macOS Catalina 10.15,.7
- Branch: master
- Commit: ce68da57b3878591f77c21bb2acf28e6a58269fa
- Python version: 3.8.5
- Database version: 44.17
| [
{
"content": "# coding=utf-8\n\n\"\"\"Provider code for Beyond-hd.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medu... | [
{
"content": "# coding=utf-8\n\n\"\"\"Provider code for Beyond-hd.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medu... | diff --git a/medusa/providers/torrent/html/beyondhd.py b/medusa/providers/torrent/html/beyondhd.py
index 47be8c1a10..3978d9ef4c 100644
--- a/medusa/providers/torrent/html/beyondhd.py
+++ b/medusa/providers/torrent/html/beyondhd.py
@@ -121,7 +121,7 @@ def parse(self, data, mode):
cells = result('td')
try:
- if len(cells) < 2:
+ if len(cells) < 3:
continue
link = cells[1].find('a')
|
pyca__cryptography-8318 | Incorrect docstrings in x25519 and x448 `.public_key()` methods
See:
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64
In both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.
| [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.prim... | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.prim... | diff --git a/src/cryptography/hazmat/primitives/asymmetric/x25519.py b/src/cryptography/hazmat/primitives/asymmetric/x25519.py
index 690af78c2152..d1347b883f37 100644
--- a/src/cryptography/hazmat/primitives/asymmetric/x25519.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/x25519.py
@@ -60,7 +60,7 @@ def from_private_bytes(cls, data: bytes) -> "X25519PrivateKey":
@abc.abstractmethod
def public_key(self) -> X25519PublicKey:
"""
- The serialized bytes of the public key.
+ Returns the public key assosciated with this private key
"""
@abc.abstractmethod
|
huggingface__text-generation-inference-1617 | Incorrectly multiplied timeout by 60 in the asynchronous client
### System Info
I'm testing TGI using Docker. Below is the exact command I'm utilizing:
```console
docker run --gpus '"device=1,2"' --shm-size 1g -p 8000:80 -v ~/tgi-test:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id mistralai/Mistral-7B-v0.1 --max-input-length 8000 --max-total-tokens 8001
```
### Information
- [ ] Docker
- [ ] The CLI directly
### Tasks
- [ ] An officially supported command
- [ ] My own modifications
### Reproduction
Given the generation request:
```python
async def test():
start = time.time()
try:
response = await client.generate('1' * 6_000, max_new_tokens=1_800)
except Exception as ex:
pass
print(time.time() - start)
```
And this async client definition:
```python
client = AsyncClient('http://localhost:8000', timeout=1)
```
It doesn't timeout after 1 second:
```python
>>> await test()
60.88534379005432
```
But if we create a client with a timeout of 2/60:
```python
client = AsyncClient('http://localhost:8000', timeout=(2/60))
```
It does timeout after 2 seconds:
```python
>>> await test()
2.0035104751586914
```
### Expected behavior
The function should have timed out after 1 second with this client definition:
```python
client = AsyncClient('http://localhost:8000', timeout=1)
```
| [
{
"content": "import json\nimport requests\n\nfrom aiohttp import ClientSession, ClientTimeout\nfrom pydantic import ValidationError\nfrom typing import Dict, Optional, List, AsyncIterator, Iterator, Union\n\nfrom text_generation.types import (\n StreamResponse,\n Response,\n Request,\n Parameters,\... | [
{
"content": "import json\nimport requests\n\nfrom aiohttp import ClientSession, ClientTimeout\nfrom pydantic import ValidationError\nfrom typing import Dict, Optional, List, AsyncIterator, Iterator, Union\n\nfrom text_generation.types import (\n StreamResponse,\n Response,\n Request,\n Parameters,\... | diff --git a/clients/python/text_generation/client.py b/clients/python/text_generation/client.py
index 09660de3cd9..e1de253b780 100644
--- a/clients/python/text_generation/client.py
+++ b/clients/python/text_generation/client.py
@@ -424,7 +424,7 @@ def __init__(
self.base_url = base_url
self.headers = headers
self.cookies = cookies
- self.timeout = ClientTimeout(timeout * 60)
+ self.timeout = ClientTimeout(timeout)
async def chat(
self,
|
mkdocs__mkdocs-1998 | Fix simple typo: seperate -> separate
# Issue Type
[x] Bug (Typo)
# Steps to Replicate
1. Examine mkdocs/commands/serve.py.
2. Search for `seperate`.
# Expected Behaviour
1. Should read `separate`.
| [
{
"content": "import logging\nimport shutil\nimport tempfile\nimport sys\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _init_asyncio_patch():\n \"\"\"\n Select compatible event loop for Torn... | [
{
"content": "import logging\nimport shutil\nimport tempfile\nimport sys\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _init_asyncio_patch():\n \"\"\"\n Select compatible event loop for Torn... | diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py
index ba716776a8..21b7ca6c1e 100644
--- a/mkdocs/commands/serve.py
+++ b/mkdocs/commands/serve.py
@@ -80,7 +80,7 @@ def get_web_handlers(self, script):
def _static_server(host, port, site_dir):
- # Importing here to seperate the code paths from the --livereload
+ # Importing here to separate the code paths from the --livereload
# alternative.
_init_asyncio_patch()
from tornado import ioloop
|
scrapy__scrapy-4585 | Downloadable documentation is missing for versions 2.0 and 2.1 on readthedocs.org
For some reason downloadable documentation on https://readthedocs.org/projects/scrapy/downloads/ is available only up to version 1.8.
That's a minor issue, but I think that I'm not the only one who prefers to read technical papers in the pdf format (to be able to take notes).
| [
{
"content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickle... | [
{
"content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickle... | diff --git a/.readthedocs.yml b/.readthedocs.yml
index 17eba34f337..e4d3f02cc3f 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -1,4 +1,5 @@
version: 2
+formats: all
sphinx:
configuration: docs/conf.py
fail_on_warning: true
diff --git a/docs/conf.py b/docs/conf.py
index 8ab38a090c3..468c1d1901b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -100,6 +100,9 @@
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
+# List of Sphinx warnings that will not be raised
+suppress_warnings = ['epub.unknown_project_files']
+
# Options for HTML output
# -----------------------
|
pre-commit__pre-commit-1713 | BUG - hooks not working on windows 10, when user account name contains non-ascii characters
When user account name contains non-ascii characters such as 'š', such that python executable ends up for example in C:\Users\john.š\\.cache\pre-commit\repo\py_env-python3.8\Scripts\python.exe, when committing to the git repository following message appears:
An unexpected error has occurred: AssertionError: BUG: expected environment for python to be healthy() immediately after install, please open an issue describing your environment.
PS: fucntion os.path.isfile() in parse_shebang.normexe() returns False, even though the executable exists there and is a file.
| [
{
"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.e... | [
{
"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.e... | diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
index 65f521cdc..43b728082 100644
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -36,7 +36,7 @@ def _version_info(exe: str) -> str:
def _read_pyvenv_cfg(filename: str) -> Dict[str, str]:
ret = {}
- with open(filename) as f:
+ with open(filename, encoding='UTF-8') as f:
for line in f:
try:
k, v = line.split('=')
diff --git a/tests/languages/python_test.py b/tests/languages/python_test.py
index cfe14834f..90d1036a3 100644
--- a/tests/languages/python_test.py
+++ b/tests/languages/python_test.py
@@ -23,6 +23,13 @@ def test_read_pyvenv_cfg(tmpdir):
assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
+def test_read_pyvenv_cfg_non_utf8(tmpdir):
+ pyvenv_cfg = tmpdir.join('pyvenv_cfg')
+ pyvenv_cfg.write_binary('hello = hello john.š\n'.encode())
+ expected = {'hello': 'hello john.š'}
+ assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
+
+
def test_norm_version_expanduser():
home = os.path.expanduser('~')
if os.name == 'nt': # pragma: nt cover
|
zigpy__zha-device-handlers-342 | Update zigpy version to use the new (old module) name for zigpy?
@dmulcahey Ready to update zigpy version to use new (old) module name without -homeassistant suffix?
@Adminiuga in the PR https://github.com/zigpy/zigpy/pull/363 changed the zigpy module name back to just "zigpy" (from "zigpy-homeassistant")
https://github.com/zigpy/zigpy/pull/363/commits/6c9e0e9412a322d4b9558977decf50ca4dfb5ffd
From https://pypi.org/project/zigpy-homeassistant/ back to https://pypi.org/project/zigpy/
| [
{
"content": "\"\"\"Setup module for ZHAQuirks.\"\"\"\n\nfrom setuptools import find_packages, setup\n\nVERSION = \"0.0.38\"\n\n\ndef readme():\n \"\"\"Print long description.\"\"\"\n with open(\"README.md\") as f:\n return f.read()\n\n\nsetup(\n name=\"zha-quirks\",\n version=VERSION,\n d... | [
{
"content": "\"\"\"Setup module for ZHAQuirks.\"\"\"\n\nfrom setuptools import find_packages, setup\n\nVERSION = \"0.0.38\"\n\n\ndef readme():\n \"\"\"Print long description.\"\"\"\n with open(\"README.md\") as f:\n return f.read()\n\n\nsetup(\n name=\"zha-quirks\",\n version=VERSION,\n d... | diff --git a/setup.py b/setup.py
index 99bd58aeea..1c5e45900e 100644
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,6 @@ def readme():
keywords="zha quirks homeassistant hass",
packages=find_packages(exclude=["*.tests"]),
python_requires=">=3",
- install_requires=["zigpy-homeassistant>=0.18.1"],
+ install_requires=["zigpy>=0.20.0"],
tests_require=["pytest"],
)
|
emissary-ingress__emissary-515 | 'unicode' is an undefined name in Python 3
flake8 testing of https://github.com/datawire/ambassador on Python 3.6.3
2.28s$ time flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics__
```
./ambassador/ambassador_diag/envoy.py:223:45: F821 undefined name 'active_cluster_map'
if True or (cluster_name in active_cluster_map):
^
./scripts/versioner.py:103:16: F821 undefined name 'unicode'
return unicode(self)
^
2 F821 undefined name 'active_cluster_map'
2
```
| [
{
"content": "#!/usr/bin/env python\n\nimport sys\n\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\n\nfrom semantic_version import Version\nfrom git import Repo\n\ndry_run = True\n\nclass VersionedBranch (object):\n \"\"\" A branch for which we're going to wrangle versions based on tag... | [
{
"content": "#!/usr/bin/env python\n\nimport sys\n\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\n\nfrom semantic_version import Version\nfrom git import Repo\n\ndry_run = True\n\nclass VersionedBranch (object):\n \"\"\" A branch for which we're going to wrangle versions based on tag... | diff --git a/scripts/versioner.py b/scripts/versioner.py
index 52c80c0091..bc796e10e3 100755
--- a/scripts/versioner.py
+++ b/scripts/versioner.py
@@ -100,7 +100,7 @@ def __unicode__(self):
self.version, str(self.versioned_commit)[0:8]))
def __str__(self):
- return unicode(self)
+ return str(self)
def recent_commits(self, since_tag=None):
if not since_tag:
|
ManageIQ__integration_tests-8533 | is_displayed for catalog all page is not working as expected
Currently the is_display of catalog all page returns True even if the view is on Add catalog page.
| [
{
"content": "import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import CandidateNotFound\nfrom widgetastic_patternfl... | [
{
"content": "import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import CandidateNotFound\nfrom widgetastic_patternfl... | diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py
index 6367d52997..f248e20199 100644
--- a/cfme/services/catalogs/catalog.py
+++ b/cfme/services/catalogs/catalog.py
@@ -49,6 +49,7 @@ def is_displayed(self):
return (
self.in_explorer and
self.catalogs.is_opened and
+ self.title.text == "All Catalogs" and
self.catalogs.tree.currently_selected == ["All Catalogs"])
|
flairNLP__flair-2170 | Close log_handler after training is complete.
**Describe the bug**
We are removing the log_handler [here](https://github.com/flairNLP/flair/blob/master/flair/trainers/trainer.py#L633) but not closing the handler leading to `ResourceWarning: unclosed file <_io.BufferedReader name='training.log`. Hence we are not able to programatically access the training.log (For ex - Unable to upload the file to s3 using botocore)
**To Reproduce**
Training just any model leads to this Warning.
**Expected behavior**
The `log_handler` to be closed before training exits
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment (please complete the following information):**
- MacOS 10.15.6
- Version: flair-0.8
| [
{
"content": "import copy\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union\nimport time\nimport datetime\nimport sys\nimport inspect\n\nimport torch\nfrom torch.optim.sgd import SGD\nfrom torch.utils.data.dataset import ConcatDataset\n\ntry:\n from apex import amp\nexcept ImportError... | [
{
"content": "import copy\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union\nimport time\nimport datetime\nimport sys\nimport inspect\n\nimport torch\nfrom torch.optim.sgd import SGD\nfrom torch.utils.data.dataset import ConcatDataset\n\ntry:\n from apex import amp\nexcept ImportError... | diff --git a/flair/trainers/trainer.py b/flair/trainers/trainer.py
index 021ac53633..3b22478fbc 100644
--- a/flair/trainers/trainer.py
+++ b/flair/trainers/trainer.py
@@ -630,6 +630,8 @@ def train(
final_score = 0
log.info("Test data not provided setting final score to 0")
+ log_handler.close()
+
log.removeHandler(log_handler)
if self.use_tensorboard:
|
ansible-collections__amazon.aws-1332 | aws_rds inventory plugin does not return any results due to regression in 478022695b333043857a6929b350a2a3c07ae567
### Summary
aws_rds inventory plugin does not return any results due to regression in 478022695b333043857a6929b350a2a3c07ae567.
This commit, ostensibly named "linting", has actually removed the following line in plugins/inventory/aws_rds.py:
```
config_data = self._read_config_data(path)
```
This causes the inventory plugin to return no data. Restoring this line fixes the expected output.
### Issue Type
Bug Report
### Component Name
aws_rds inventory plugin
### Ansible Version
```
ansible [core 2.14.1]
config file = /home/ansible/ansible/ansible.cfg
configured module search path = ['/home/ansible/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.9/dist-packages/ansible
ansible collection location = /home/ansible/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] (/usr/bin/python3)
jinja version = 3.1.2
libyaml = True
```
### Collection Versions
```
# /usr/local/lib/python3.9/dist-packages/ansible_collections
Collection Version
----------------------------- -------
amazon.aws 5.1.0
ansible.netcommon 4.1.0
ansible.posix 1.4.0
ansible.utils 2.8.0
ansible.windows 1.12.0
arista.eos 6.0.0
awx.awx 21.10.0
azure.azcollection 1.14.0
check_point.mgmt 4.0.0
chocolatey.chocolatey 1.3.1
cisco.aci 2.3.0
cisco.asa 4.0.0
cisco.dnac 6.6.1
cisco.intersight 1.0.22
cisco.ios 4.0.0
cisco.iosxr 4.0.3
cisco.ise 2.5.9
cisco.meraki 2.13.0
cisco.mso 2.1.0
cisco.nso 1.0.3
cisco.nxos 4.0.1
cisco.ucs 1.8.0
cloud.common 2.1.2
cloudscale_ch.cloud 2.2.3
community.aws 5.0.0
community.azure 2.0.0
community.ciscosmb 1.0.5
community.crypto 2.9.0
community.digitalocean 1.22.0
community.dns 2.4.2
community.docker 3.3.1
community.fortios 1.0.0
community.general 6.1.0
community.google 1.0.0
community.grafana 1.5.3
community.hashi_vault 4.0.0
community.hrobot 1.6.0
community.libvirt 1.2.0
community.mongodb 1.4.2
community.mysql 3.5.1
community.network 5.0.0
community.okd 2.2.0
community.postgresql 2.3.1
community.proxysql 1.4.0
community.rabbitmq 1.2.3
community.routeros 2.5.0
community.sap 1.0.0
community.sap_libs 1.4.0
community.skydive 1.0.0
community.sops 1.5.0
community.vmware 3.2.0
community.windows 1.11.1
community.zabbix 1.9.0
containers.podman 1.10.1
cyberark.conjur 1.2.0
cyberark.pas 1.0.14
dellemc.enterprise_sonic 2.0.0
dellemc.openmanage 6.3.0
dellemc.os10 1.1.1
dellemc.os6 1.0.7
dellemc.os9 1.0.4
f5networks.f5_modules 1.21.0
fortinet.fortimanager 2.1.7
fortinet.fortios 2.2.1
frr.frr 2.0.0
gluster.gluster 1.0.2
google.cloud 1.0.2
grafana.grafana 1.1.0
hetzner.hcloud 1.9.0
hpe.nimble 1.1.4
ibm.qradar 2.1.0
ibm.spectrum_virtualize 1.10.0
infinidat.infinibox 1.3.12
infoblox.nios_modules 1.4.1
inspur.ispim 1.2.0
inspur.sm 2.3.0
junipernetworks.junos 4.1.0
kubernetes.core 2.3.2
lowlydba.sqlserver 1.2.1
mellanox.onyx 1.0.0
netapp.aws 21.7.0
netapp.azure 21.10.0
netapp.cloudmanager 21.21.0
netapp.elementsw 21.7.0
netapp.ontap 22.0.1
netapp.storagegrid 21.11.1
netapp.um_info 21.8.0
netapp_eseries.santricity 1.3.1
netbox.netbox 3.9.0
ngine_io.cloudstack 2.3.0
ngine_io.exoscale 1.0.0
ngine_io.vultr 1.1.2
openstack.cloud 1.10.0
openvswitch.openvswitch 2.1.0
ovirt.ovirt 2.4.1
purestorage.flasharray 1.15.0
purestorage.flashblade 1.10.0
purestorage.fusion 1.2.0
sensu.sensu_go 1.13.1
splunk.es 2.1.0
t_systems_mms.icinga_director 1.31.4
theforeman.foreman 3.7.0
vmware.vmware_rest 2.2.0
vultr.cloud 1.3.1
vyos.vyos 4.0.0
wti.remote 1.0.4
# /home/ansible/.ansible/collections/ansible_collections
Collection Version
----------------- -------
amazon.aws 5.1.0
ansible.posix 1.4.0
community.general 6.1.0
community.mysql 3.5.1
mafalb.squid 0.2.0
```
### AWS SDK versions
```
Name: boto
Version: 2.49.0
Summary: Amazon Web Services Library
Home-page: https://github.com/boto/boto/
Author: Mitch Garnaat
Author-email: mitch@garnaat.com
License: MIT
Location: /usr/lib/python3/dist-packages
Requires:
Required-by: cloudwatchmon
---
Name: boto3
Version: 1.26.32
Summary: The AWS SDK for Python
Home-page: https://github.com/boto/boto3
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.9/dist-packages
Requires: jmespath, botocore, s3transfer
Required-by:
---
Name: botocore
Version: 1.29.32
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.9/dist-packages
Requires: urllib3, python-dateutil, jmespath
Required-by: s3transfer, boto3
```
### Configuration
```
plugin: aws_rds
regions:
- us-east-1
keyed_groups:
- prefix: tag
key: tags
compose:
ansible_host: endpoint.address
```
### OS / Environment
_No response_
### Steps to Reproduce
```
ansible-inventory -vvvvvvvv -i inventory/aws_rds.yaml --list
```
### Expected Results
I expected some inventory results to appear
### Actual Results
```
ansible-inventory [core 2.14.1]
config file = /home/ansible/ansible/ansible.cfg
configured module search path = ['/home/ansible/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.9/dist-packages/ansible
ansible collection location = /home/ansible/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible-inventory
python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] (/usr/bin/python3)
jinja version = 3.1.2
libyaml = True
Using /home/ansible/ansible/ansible.cfg as config file
Reading vault password file: /home/ansible/ansible/.vault_pass
setting up inventory plugins
redirecting (type: inventory) ansible.builtin.aws_ec2 to amazon.aws.aws_ec2
Loading collection amazon.aws from /home/ansible/.ansible/collections/ansible_collections/amazon/aws
redirecting (type: inventory) ansible.builtin.aws_rds to amazon.aws.aws_rds
ansible_collections.amazon.aws.plugins.inventory.aws_ec2 declined parsing /home/ansible/ansible/inventory/aws_rds.yaml as it did not pass its verify_file() method
Parsed /home/ansible/ansible/inventory/aws_rds.yaml inventory source with ansible_collections.amazon.aws.plugins.inventory.aws_rds plugin
{
"_meta": {
"hostvars": {}
},
"all": {
"children": [
"aws_rds",
"ungrouped"
]
}
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
| [
{
"content": "# Copyright (c) 2018 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nname: aws_rds\nshort_description: RDS instance inve... | [
{
"content": "# Copyright (c) 2018 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nname: aws_rds\nshort_description: RDS instance inve... | diff --git a/changelogs/fragments/1304-aws_rds-config.yml b/changelogs/fragments/1304-aws_rds-config.yml
new file mode 100644
index 00000000000..bd32f37f82b
--- /dev/null
+++ b/changelogs/fragments/1304-aws_rds-config.yml
@@ -0,0 +1,2 @@
+bugfixes:
+- aws_rds - fixes bug in RDS inventory plugin where config file was ignored (https://github.com/ansible-collections/amazon.aws/issues/1304).
diff --git a/plugins/inventory/aws_rds.py b/plugins/inventory/aws_rds.py
index e03464168f7..02f86073a0a 100644
--- a/plugins/inventory/aws_rds.py
+++ b/plugins/inventory/aws_rds.py
@@ -360,6 +360,7 @@ def parse(self, inventory, loader, path, cache=True):
if not HAS_BOTO3:
raise AnsibleError(missing_required_lib('botocore and boto3'))
+ self._read_config_data(path)
self._set_credentials()
# get user specifications
|
PlasmaPy__PlasmaPy-2506 | Fix linkcheck GitHub Action along with minor updates to workflows
#2490 ended up disabling the workflow dispatch option for doing a linkcheck. This PR reverts some of #2490 so that the linkcheck workflow does not attempt to run in PRs, and instead adds a workflow step for CI that does the linkcheck.
I also took a moment to make the formatting of workflow files a bit more consistent, as well as some other minor updates.
| [
{
"content": "\"\"\"\nVarious decorators to validate input/output arguments to functions.\n\"\"\"\n\n__all__ = [\"validate_class_attributes\", \"validate_quantities\", \"ValidateQuantities\"]\n\nimport functools\nimport inspect\nimport warnings\nfrom collections.abc import Iterable\nfrom typing import Any\n\nim... | [
{
"content": "\"\"\"\nVarious decorators to validate input/output arguments to functions.\n\"\"\"\n\n__all__ = [\"validate_class_attributes\", \"validate_quantities\", \"ValidateQuantities\"]\n\nimport functools\nimport inspect\nimport warnings\nfrom collections.abc import Iterable\nfrom typing import Any\n\nim... | diff --git a/changelog/2506.bugfix.rst b/changelog/2506.bugfix.rst
new file mode 100644
index 0000000000..ed7538f243
--- /dev/null
+++ b/changelog/2506.bugfix.rst
@@ -0,0 +1,2 @@
+- Enabled |validate_quantities| to be compatible with postponed evaluation of
+ annotations (see :pep:`563`). (:pr:`2479`)
diff --git a/src/plasmapy/utils/decorators/validators.py b/src/plasmapy/utils/decorators/validators.py
index 94b89c35df..a5ab247bc7 100644
--- a/src/plasmapy/utils/decorators/validators.py
+++ b/src/plasmapy/utils/decorators/validators.py
@@ -177,7 +177,7 @@ def __call__(self, f):
wrapped function of ``f``
"""
self.f = f
- wrapped_sign = inspect.signature(f)
+ wrapped_sign = inspect.signature(f, eval_str=True)
@preserve_signature
@functools.wraps(f)
diff --git a/tests/utils/decorators/test_validate_quantities_annotations.py b/tests/utils/decorators/test_validate_quantities_annotations.py
new file mode 100644
index 0000000000..d821718a89
--- /dev/null
+++ b/tests/utils/decorators/test_validate_quantities_annotations.py
@@ -0,0 +1,17 @@
+"""Test that @validate_quantities works with postponed evaluation of annotations."""
+
+from __future__ import annotations
+
+import astropy.units as u
+
+from plasmapy.utils.decorators.validators import validate_quantities
+
+
+@validate_quantities # type: ignore[misc]
+def annotated_function(mass: u.Quantity[u.g]) -> u.Quantity[u.kg]:
+ return mass
+
+
+def test_validate_quantities_postponed_annotations() -> None:
+ result = annotated_function(1 * u.g)
+ assert result.unit == u.kg
|
getmoto__moto-1286 | Resources in mentioned in cloud formation template is not getting created.
Hi,
I am creating security group through cloud formation template and then trying to retrieve that through boto client but it says that security group does not exists. If i create security group through command line then i am able to fetch it.
It seems like resources in cloud formation does not get created when we deploy it.
| [
{
"content": "from __future__ import unicode_literals\n\nfrom collections import defaultdict\n\nimport string\nimport random\nimport uuid\nfrom jinja2 import Template\n\nfrom moto.core import BaseBackend, BaseModel\n\n\nROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits\n\n\ndef create_route53_zone_id()... | [
{
"content": "from __future__ import unicode_literals\n\nfrom collections import defaultdict\n\nimport string\nimport random\nimport uuid\nfrom jinja2 import Template\n\nfrom moto.core import BaseBackend, BaseModel\n\n\nROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits\n\n\ndef create_route53_zone_id()... | diff --git a/moto/route53/models.py b/moto/route53/models.py
index d12f4ee7a803..f0e52086d508 100644
--- a/moto/route53/models.py
+++ b/moto/route53/models.py
@@ -209,7 +209,7 @@ def get_record_sets(self, type_filter, name_filter):
@property
def physical_resource_id(self):
- return self.name
+ return self.id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
diff --git a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py
index 5e66bbd86959..43a11104b3a5 100644
--- a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py
+++ b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py
@@ -1,6 +1,13 @@
from __future__ import unicode_literals
template = {
+ "Parameters": {
+ "R53ZoneName": {
+ "Type": "String",
+ "Default": "my_zone"
+ }
+ },
+
"Resources": {
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
@@ -13,20 +20,20 @@
"HostedZone": {
"Type": "AWS::Route53::HostedZone",
"Properties": {
- "Name": "my_zone"
+ "Name": {"Ref": "R53ZoneName"}
}
},
"myDNSRecord": {
"Type": "AWS::Route53::RecordSet",
"Properties": {
- "HostedZoneName": {"Ref": "HostedZone"},
+ "HostedZoneId": {"Ref": "HostedZone"},
"Comment": "DNS name for my instance.",
"Name": {
"Fn::Join": ["", [
{"Ref": "Ec2Instance"}, ".",
{"Ref": "AWS::Region"}, ".",
- {"Ref": "HostedZone"}, "."
+ {"Ref": "R53ZoneName"}, "."
]]
},
"Type": "A",
diff --git a/tests/test_cloudformation/fixtures/route53_health_check.py b/tests/test_cloudformation/fixtures/route53_health_check.py
index f6a2c9b8e36c..420cd38ba84c 100644
--- a/tests/test_cloudformation/fixtures/route53_health_check.py
+++ b/tests/test_cloudformation/fixtures/route53_health_check.py
@@ -26,7 +26,7 @@
"myDNSRecord": {
"Type": "AWS::Route53::RecordSet",
"Properties": {
- "HostedZoneName": {"Ref": "HostedZone"},
+ "HostedZoneId": {"Ref": "HostedZone"},
"Comment": "DNS name for my instance.",
"Name": "my_record_set",
"Type": "A",
diff --git a/tests/test_cloudformation/fixtures/route53_roundrobin.py b/tests/test_cloudformation/fixtures/route53_roundrobin.py
index da4fecd4d17b..199e3e0886ad 100644
--- a/tests/test_cloudformation/fixtures/route53_roundrobin.py
+++ b/tests/test_cloudformation/fixtures/route53_roundrobin.py
@@ -5,30 +5,37 @@
"Description": "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. You will be billed for the AWS resources used if you create a stack from this template.",
+ "Parameters": {
+ "R53ZoneName": {
+ "Type": "String",
+ "Default": "my_zone"
+ }
+ },
+
"Resources": {
"MyZone": {
"Type": "AWS::Route53::HostedZone",
"Properties": {
- "Name": "my_zone"
+ "Name": {"Ref": "R53ZoneName"}
}
},
"MyDNSRecord": {
"Type": "AWS::Route53::RecordSetGroup",
"Properties": {
- "HostedZoneName": {"Ref": "MyZone"},
+ "HostedZoneId": {"Ref": "MyZone"},
"Comment": "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.",
"RecordSets": [{
"SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "AWS"]]},
- "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]},
+ "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]},
"Type": "CNAME",
"TTL": "900",
"ResourceRecords": ["aws.amazon.com"],
"Weight": "3"
}, {
"SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "Amazon"]]},
- "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]},
+ "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]},
"Type": "CNAME",
"TTL": "900",
"ResourceRecords": ["www.amazon.com"],
|
googleapis__google-api-python-client-1125 | Incorrect logging level for `oauth2client.contrib.locked_file` (ref #427)
This is - as per request in the original #427 - a follow-up issue.
The function documentation says:
> Detects an appropriate cache module and returns it. Returns `googleapiclient.discovery_cache.base.Cache`, a cache object which is auto detected, or `None` if no cache object is available.
Exceptions in this context only provide a pythonic way to do control flow, and do not indicate anomalies or malfunctioning of the code. `None` is a perfectly fine, as-per-specifications behaviour. The correct level should probably be `INFO`.
Also, even if you would stand by the `WARNING` level, you should tidy up the messy log message. This is what the user is met with:
```WARNING:googleapiclient.discovery_cache:file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 36, in autodetect
from google.appengine.api import memcache
ModuleNotFoundError: No module named 'google.appengine'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 33, in <module>
from oauth2client.contrib.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.contrib.locked_file'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 37, in <module>
from oauth2client.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.locked_file'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 42, in autodetect
from . import file_cache
File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 40, in <module>
raise ImportError(
ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
```
A saner way to to communicate with the user could potentially be:
`WARNING - No caching available (file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth)`
| [
{
"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unles... | [
{
"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unles... | diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py
index 455ff6224f2..197f6bc0a1a 100644
--- a/googleapiclient/discovery_cache/__init__.py
+++ b/googleapiclient/discovery_cache/__init__.py
@@ -44,6 +44,7 @@ def autodetect():
from . import file_cache
return file_cache.cache
- except Exception as e:
- LOGGER.warning(e, exc_info=True)
+ except Exception:
+ LOGGER.info("file_cache is only supported with oauth2client<4.0.0",
+ exc_info=False)
return None
|
bokeh__bokeh-4792 | docstring of ``add_tools`` not correct.
| [
{
"content": "\"\"\" Models for representing top-level plot objects.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six import string_types\nimport warnings\n\nfrom ..core.query import find\nfrom ..core import validation\nfrom ..core.validation.errors import REQUIRED_RANGE\nfrom ..core.validation.war... | [
{
"content": "\"\"\" Models for representing top-level plot objects.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six import string_types\nimport warnings\n\nfrom ..core.query import find\nfrom ..core import validation\nfrom ..core.validation.errors import REQUIRED_RANGE\nfrom ..core.validation.war... | diff --git a/bokeh/models/plots.py b/bokeh/models/plots.py
index 838d29ac46b..86a04895ba0 100644
--- a/bokeh/models/plots.py
+++ b/bokeh/models/plots.py
@@ -246,7 +246,7 @@ def add_layout(self, obj, place='center'):
getattr(self, place).append(obj)
def add_tools(self, *tools):
- ''' Adds an tools to the plot.
+ ''' Adds tools to the plot.
Args:
*tools (Tool) : the tools to add to the Plot
|
huggingface__accelerate-811 | Error when pickling accelerated optimizers with PyTorch 1.13
### System Info
```Shell
$ pip list
Package Version
------------------------ ----------
accelerate 0.13.2
numpy 1.23.4
nvidia-cublas-cu11 11.10.3.66
nvidia-cuda-nvrtc-cu11 11.7.99
nvidia-cuda-runtime-cu11 11.7.99
nvidia-cudnn-cu11 8.5.0.96
packaging 21.3
pip 22.3
psutil 5.9.3
pyparsing 3.0.9
PyYAML 6.0
setuptools 65.5.0
torch 1.13.0
typing_extensions 4.4.0
wheel 0.37.1
```
Same issue with torch-cpu.
OS: Ubuntu 20.04
### Information
- [ ] The official example scripts
- [X] My own modified scripts
### Tasks
- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [X] My own task or dataset (give details below)
### Reproduction
```python
import pickle
import accelerate
import torch
model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), 0.1)
pickle.loads(pickle.dumps(optimizer)) # works
accelerator = accelerate.Accelerator(mixed_precision='fp16')
optimizer2 = accelerator.prepare(optimizer)
pickle.loads(pickle.dumps(optimizer2)) # fails
```
### Expected behavior
I ran into a problem with accelerate and PyTorch 1.13. It appears that optimizers cannot be pickled anymore after being accelerated. When running the attached script, I get:
```
Traceback (most recent call last):
File ".../skorch/foo.py", line 12, in <module>
pickle.loads(pickle.dumps(optimizer2))
File ".../torch/optim/optimizer.py", line 84, in __setstate__
self.defaults.setdefault('differentiable', False)
File ".../accelerate/optimizer.py", line 90, in defaults
return self.optimizer.defaults
AttributeError: 'AcceleratedOptimizer' object has no attribute 'optimizer'
```
The offending addition on PyTorch seems to be this line:
https://github.com/pytorch/pytorch/blob/23fe6c8ca15ec2cf6ea74f93aa91cae343ea534f/torch/optim/optimizer.py#L84
which was not present in PyTorch 1.12. At object creation time, PyTorch now tries to access the `defaults` attribute, which in turn calls the `defaults` property in accelerate, which requires the `optimizer` attribute, which doesn't exist and thus errors. At first glance, it looks like `AcceleratedOptimizer` might need its own `__getstate__` and `__setstate__` to solve this but I'm not sure.
| [
{
"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#... | [
{
"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#... | diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
index 4fad12c724e..d5eeef99a02 100644
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -157,3 +157,9 @@ def is_overflow(self):
def step_was_skipped(self):
"""Whether or not the optimizer step was skipped."""
return self._is_overflow
+
+ def __getstate__(self):
+ return self.__dict__.copy()
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
new file mode 100644
index 00000000000..15a095bf798
--- /dev/null
+++ b/tests/test_optimizer.py
@@ -0,0 +1,36 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pickle
+import unittest
+
+import torch
+
+from accelerate import Accelerator
+from accelerate.state import AcceleratorState
+from accelerate.test_utils import require_cpu
+
+
+@require_cpu
+class OptimizerTester(unittest.TestCase):
+ def test_accelerated_optimizer_pickling(self):
+ model = torch.nn.Linear(10, 10)
+ optimizer = torch.optim.SGD(model.parameters(), 0.1)
+ accelerator = Accelerator()
+ optimizer = accelerator.prepare(optimizer)
+ try:
+ pickle.loads(pickle.dumps(optimizer))
+ except Exception as e:
+ self.fail(f"Accelerated optimizer pickling failed with {e}")
+ AcceleratorState._reset_state()
|
conan-io__conan-center-index-18494 | [package] clickhouse-cpp/*: fPIC option is not respected
In the recipe file fPIC option is always removed during configure stage, which can lead to not working static library.
| [
{
"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.build import check_min_cppstd\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.scm import Version\nimport os\n\nr... | [
{
"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.build import check_min_cppstd\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.scm import Version\nimport os\n\nr... | diff --git a/recipes/clickhouse-cpp/all/conanfile.py b/recipes/clickhouse-cpp/all/conanfile.py
index 9586c0240cbc1..442d24123399e 100644
--- a/recipes/clickhouse-cpp/all/conanfile.py
+++ b/recipes/clickhouse-cpp/all/conanfile.py
@@ -75,7 +75,8 @@ def config_options(self):
del self.options.fPIC
def configure(self):
- self.options.rm_safe("fPIC")
+ if self.options.shared:
+ self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
|
pyscript__pyscript-1064 | Python Plugin Methods are Executed Twice
The Python plugin methods appear to be getting called twice each. To recreate, run `make test-integration ARGS='-k test_execution_hooks --headed` and look at the console log. You'll see both `afterSetup` and `afterStartup` are logged twice. (The test passes because it checks that each is present at least once).
This is not just in testing - if you load the PyMarkdown plugin and look at the dev console, you'll see `runtime Received: [object Object]` present twice.
Tested on unstable, on Chromium.
| [
{
"content": "import ast\nimport asyncio\nimport base64\nimport html\nimport io\nimport re\nimport time\nfrom collections import namedtuple\nfrom textwrap import dedent\n\nimport js\n\ntry:\n from pyodide import create_proxy\nexcept ImportError:\n from pyodide.ffi import create_proxy\n\nloop = asyncio.get... | [
{
"content": "import ast\nimport asyncio\nimport base64\nimport html\nimport io\nimport re\nimport time\nfrom collections import namedtuple\nfrom textwrap import dedent\n\nimport js\n\ntry:\n from pyodide import create_proxy\nexcept ImportError:\n from pyodide.ffi import create_proxy\n\nloop = asyncio.get... | diff --git a/pyscriptjs/src/python/pyscript.py b/pyscriptjs/src/python/pyscript.py
index ec7dd5a8489..5c946c00745 100644
--- a/pyscriptjs/src/python/pyscript.py
+++ b/pyscriptjs/src/python/pyscript.py
@@ -497,7 +497,6 @@ def __init__(self, name=None):
def init(self, app):
self.app = app
- self.app.plugins.addPythonPlugin(create_proxy(self))
def register_custom_element(self, tag):
# TODO: Ideally would be better to use the logger.
diff --git a/pyscriptjs/tests/integration/test_plugins.py b/pyscriptjs/tests/integration/test_plugins.py
index 04ed6eeb30f..3299ca2d49b 100644
--- a/pyscriptjs/tests/integration/test_plugins.py
+++ b/pyscriptjs/tests/integration/test_plugins.py
@@ -164,7 +164,7 @@ def test_execution_hooks(self):
# EXPECT it to log the correct logs for the events it intercepts
log_lines = self.console.log.lines
for method in hooks_available:
- assert f"{method} called" in log_lines
+ assert log_lines.count(f"{method} called") == 1
# EXPECT it to NOT be called (hence not log anything) the events that happen
# before it's ready, hence is not called
|
mozilla__pontoon-3003 | GetText check fails incorrectly on newline
https://pontoon.mozilla.org/en-GB/all-projects/all-resources/?string=286055
If you copy the source string, an extra line is added at the back, and that fails the checks for GetText.
| [
{
"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n)\n\n\nclass CreateTranslationForm(forms.Form):\n \"\"\"\n Form for parameters to the `entities` view.\n \"\"\"\n\n entity = forms.Intege... | [
{
"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n)\n\n\nclass CreateTranslationForm(forms.Form):\n \"\"\"\n Form for parameters to the `entities` view.\n \"\"\"\n\n entity = forms.Intege... | diff --git a/pontoon/translations/forms.py b/pontoon/translations/forms.py
index bc2371e765..125c8e4122 100644
--- a/pontoon/translations/forms.py
+++ b/pontoon/translations/forms.py
@@ -51,5 +51,8 @@ def clean_plural_form(self):
return None
return self.cleaned_data["plural_form"]
+ def clean_original(self):
+ return self.data.get("original", "")
+
def clean_translation(self):
return self.data.get("translation", "")
|
conda__conda-build-862 | Metadata parse failure when building apsw pypi package
To generate the output below, I added some print output to `conda_build.metadata.yamlize`:
``` python
@memoized
def yamlize(data):
print(72*'*')
print(data)
try:
return yaml.load(data, Loader=BaseLoader)
```
Here is the build failure:
```
bash-3.2$ conda build apsw
************************************************************************
package:
name: uninitialized
************************************************************************
package:
name: apsw
version: "3.9.2-r1"
source:
fn: apsw-3.9.2-r1.tar.gz
url: https://pypi.python.org/packages/source/a/apsw/apsw-3.9.2-r1.tar.gz
md5: 8cfdf9fea2904e3cc4c212ab41760fdd
requirements:
build:
- python
run:
- python
about:
home: https://github.com/rogerbinns/apsw/
license: OSI Approved ::
summary: 'Another Python SQLite Wrapper'
Traceback (most recent call last):
File "/Users/alx/anaconda/bin/conda-build", line 5, in <module>
sys.exit(main())
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/main_build.py", line 208, in main
args_func(args, p)
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/main_build.py", line 493, in args_func
args.func(args, p)
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/main_build.py", line 385, in execute
m = MetaData(recipe_dir)
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 347, in __init__
self.parse_again(permit_undefined_jinja=True)
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 358, in parse_again
self.meta = parse(self._get_contents(permit_undefined_jinja))
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 146, in parse
res = yamlize(data)
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda/utils.py", line 118, in __call__
value = self.func(*args, **kw)
File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 101, in yamlize
return yaml.load(data, Loader=BaseLoader)
File "/Users/alx/anaconda/lib/python2.7/site-packages/yaml/__init__.py", line 71, in load
return loader.get_single_data()
File "/Users/alx/anaconda/lib/python2.7/site-packages/yaml/constructor.py", line 37, in get_single_data
node = self.get_single_node()
File "_yaml.pyx", line 707, in _yaml.CParser.get_single_node (ext/_yaml.c:8308)
File "_yaml.pyx", line 725, in _yaml.CParser._compose_document (ext/_yaml.c:8581)
File "_yaml.pyx", line 776, in _yaml.CParser._compose_node (ext/_yaml.c:9306)
File "_yaml.pyx", line 890, in _yaml.CParser._compose_mapping_node (ext/_yaml.c:10838)
File "_yaml.pyx", line 776, in _yaml.CParser._compose_node (ext/_yaml.c:9306)
File "_yaml.pyx", line 892, in _yaml.CParser._compose_mapping_node (ext/_yaml.c:10868)
File "_yaml.pyx", line 905, in _yaml.CParser._parse_next_event (ext/_yaml.c:11045)
yaml.scanner.ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 27, column 26
bash-3.2$
```
| [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom conda.compat import iteritems, PY3, text_type\nfrom conda.utils import memoized, md5_file\nimport conda.config as cc\nfrom conda.resolve import Matc... | [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom conda.compat import iteritems, PY3, text_type\nfrom conda.utils import memoized, md5_file\nimport conda.config as cc\nfrom conda.resolve import Matc... | diff --git a/conda_build/metadata.py b/conda_build/metadata.py
index cd8329a021..d737e6d6c4 100644
--- a/conda_build/metadata.py
+++ b/conda_build/metadata.py
@@ -97,7 +97,7 @@ def select_lines(data, namespace):
def yamlize(data):
try:
return yaml.load(data, Loader=BaseLoader)
- except yaml.parser.ParserError as e:
+ except yaml.error.YAMLError as e:
if '{{' in data:
try:
import jinja2
|
Nitrate__Nitrate-337 | Upgrade django-tinymce to 2.7.0
As per subject.
| [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\... | [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\... | diff --git a/requirements.txt b/requirements.txt
index 631796a5..ae527117 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,63 +1,67 @@
-# Frozen on May 26, 2018
-alabaster==0.7.10
-amqp==2.2.2
-atomicwrites==1.1.5
-attrs==18.1.0
-Babel==2.5.3
-beautifulsoup4==4.6.0
-billiard==3.5.0.3
+alabaster==0.7.12
+amqp==2.3.2
+atomicwrites==1.2.1
+attrs==18.2.0
+Babel==2.6.0
+beautifulsoup4==4.6.3
+billiard==3.5.0.4
celery==4.1.0
-certifi==2018.4.16
+certifi==2018.10.15
chardet==3.0.4
-coverage==4.5.1
-Django==1.11.13
+coverage==4.5.2
+Django==1.11.16
django-contrib-comments==1.8.0
django-debug-toolbar==1.7
-django-extensions==2.0.7
-django-tinymce==2.6.0
+django-extensions==2.1.3
+django-tinymce==2.7.0
django-uuslug==1.1.8
docutils==0.14
factory-boy==2.11.1
-Faker==0.8.15
-flake8==3.5.0
+Faker==1.0.0
+filelock==3.0.10
+flake8==3.6.0
+future-breakpoint==1.0.1
html2text==2018.1.9
-idna==2.6
-imagesize==1.0.0
+idna==2.7
+imagesize==1.1.0
Jinja2==2.10
+kerberos==1.2.5
kobo==0.7.0
-kombu==4.2.0
-MarkupSafe==1.0
+kombu==4.2.1
+MarkupSafe==1.1.0
mccabe==0.6.1
mock==2.0.0
-more-itertools==4.2.0
+more-itertools==4.3.0
+nitrate==4.1
odfpy==1.3.6
-packaging==17.1
-pbr==4.0.3
-pluggy==0.6.0
-py==1.5.3
-pycodestyle==2.3.1
-pyflakes==1.6.0
+packaging==18.0
+pbr==5.1.1
+pluggy==0.8.0
+py==1.7.0
+pycodestyle==2.4.0
+pyflakes==2.0.0
Pygments==2.2.0
-pygraphviz==1.3.1
+pygraphviz==1.5
PyMySQL==0.7.11
-pyparsing==2.2.0
-pytest==3.6.0
-pytest-cov==2.5.1
-pytest-django==3.2.1
-python-dateutil==2.7.3
-python-slugify==1.2.5
-pytz==2018.4
-requests==2.18.4
+pyparsing==2.3.0
+pytest==4.0.0
+pytest-cov==2.6.0
+pytest-django==3.4.4
+python-dateutil==2.7.5
+python-slugify==1.2.6
+pytz==2018.7
+requests==2.20.1
six==1.11.0
snowballstemmer==1.2.1
-Sphinx==1.7.4
-sphinx-rtd-theme==0.3.1
-sphinxcontrib-websupport==1.0.1
+Sphinx==1.8.2
+sphinx-rtd-theme==0.4.2
+sphinxcontrib-websupport==1.1.0
sqlparse==0.2.4
text-unidecode==1.2
-tox==3.0.0
+toml==0.10.0
+tox==3.5.3
Unidecode==1.0.22
-urllib3==1.22
+urllib3==1.24.1
vine==1.1.4
-virtualenv==16.0.0
+virtualenv==16.1.0
xmltodict==0.11.0
diff --git a/setup.py b/setup.py
index a3d1ed2c..59252f48 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ def get_long_description():
'beautifulsoup4 >= 4.1.1',
'celery == 4.1.0',
'django-contrib-comments == 1.8.0',
- 'django-tinymce == 2.6.0',
+ 'django-tinymce == 2.7.0',
'django-uuslug == 1.1.8',
'django >= 1.10,<2.0',
'html2text',
|
qtile__qtile-1604 | libqtile utils.py:safe_import():L192 Unmet dependencies for optional Widget: '.widget.launchbar.LaunchBar', No module named 'xdg.IconTheme'
It seems there's confusion about the `xdg` dependency that is used. The code expects [PyXDG](https://freedesktop.org/wiki/Software/pyxdg/) while the actual installed version is [xdg](https://pypi.org/project/xdg/). The latter does not have an `IconTheme` submodule, explaining the message.
The distribution name for `pyxdg` is `pyxdg` (not `xdg`).
https://github.com/qtile/qtile/blob/0d8b6e5de1cacb9827c4b30ce7ed8da4bb686f26/libqtile/widget/launchbar.py#L49
| [
{
"content": "# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2014 dequis\n# Copyright (c) 2014-2015 Joseph Razik\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2015 reus\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation fil... | [
{
"content": "# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2014 dequis\n# Copyright (c) 2014-2015 Joseph Razik\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2015 reus\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation fil... | diff --git a/libqtile/widget/launchbar.py b/libqtile/widget/launchbar.py
index 838126a58f..cda36dc686 100644
--- a/libqtile/widget/launchbar.py
+++ b/libqtile/widget/launchbar.py
@@ -52,6 +52,10 @@
class LaunchBar(base._Widget):
"""A widget that display icons to launch the associated command
+ Widget requirements: pyxdg_.
+
+ .. _pyxdg: https://freedesktop.org/wiki/Software/pyxdg/
+
Parameters
==========
progs :
|
cython__cython-6246 | [BUG] Limited API: cythonize() method drops py_limited_api=True
### Describe the bug
In a setup.py file, if you use an explicit setuptools Extension with `py_limited_api=True`, then pass this to the cythonize() method, the resulting .so files incorrectly include a `.cpython-38-x86_64-linux-gnu.so` suffix (or similar), whereas the expected suffix is `.abi3.so`.
As far as I can tell, this is because the `py_limitd_api` field is popped from the kwarg blob by setuptools [here](https://github.com/pypa/setuptools/blob/main/setuptools/extension.py#L129), and as a result when cython attempts to create a new Extension object [here](https://github.com/cython/cython/blob/d455d51bf31379f47c074e40517e24857d4d9cc0/Cython/Build/Dependencies.py#L866) the field isn't included. It kinda looks like the fix is probably to differentiate between *Extension_distutils* and *Extension_setuptools* and in the latter case read *py_limitd_api*.
The issue can be worked around by explicitly using Cython.Distutils.extension.Extension.
### Code to reproduce the behaviour:
_No response_
### Expected behaviour
_No response_
### OS
_No response_
### Python version
_No response_
### Cython version
_No response_
### Additional context
_No response_
| [
{
"content": "import cython\n\nimport collections\nimport os\nimport re, sys, time\nfrom glob import iglob\nfrom io import StringIO\nfrom os.path import relpath as _relpath\nfrom .Cache import Cache, FingerprintFlags\n\nfrom collections.abc import Iterable\n\ntry:\n import pythran\nexcept:\n pythran = Non... | [
{
"content": "import cython\n\nimport collections\nimport os\nimport re, sys, time\nfrom glob import iglob\nfrom io import StringIO\nfrom os.path import relpath as _relpath\nfrom .Cache import Cache, FingerprintFlags\n\nfrom collections.abc import Iterable\n\ntry:\n import pythran\nexcept:\n pythran = Non... | diff --git a/Cython/Build/Dependencies.py b/Cython/Build/Dependencies.py
index d75a103b25f..a791fc4b2a1 100644
--- a/Cython/Build/Dependencies.py
+++ b/Cython/Build/Dependencies.py
@@ -705,6 +705,8 @@ def default_create_extension(template, kwds):
t = template.__class__
ext = t(**kwds)
+ if hasattr(template, "py_limited_api"):
+ ext.py_limited_api = template.py_limited_api
metadata = dict(distutils=kwds, module_name=kwds['name'])
return (ext, metadata)
diff --git a/docs/src/userguide/source_files_and_compilation.rst b/docs/src/userguide/source_files_and_compilation.rst
index 9241bbf0a8c..64e8f904f36 100644
--- a/docs/src/userguide/source_files_and_compilation.rst
+++ b/docs/src/userguide/source_files_and_compilation.rst
@@ -276,6 +276,8 @@ The default function (defined in ``Cython.Build.Dependencies``) is::
t = template.__class__
ext = t(**kwds)
+ if hasattr(template, "py_limited_api"):
+ ext.py_limited_api = template.py_limited_api
metadata = dict(distutils=kwds, module_name=kwds['name'])
return ext, metadata
|
ipython__ipython-2280 | SSH passwordless check with OpenSSH checks for the wrong thing
The pattern passed to pexpect is '[Ppassword]:', which looks for any of those letters followed by ':', and not, as the intention seems to be, for 'Password:'. The correct pattern is '[Pp]assword:'.
This is at IPython/external/ssh/tunnel.py:100.
| [
{
"content": "\"\"\"Basic ssh tunnel utilities, and convenience functions for tunneling\nzeromq connections.\n\nAuthors\n-------\n* Min RK\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under ... | [
{
"content": "\"\"\"Basic ssh tunnel utilities, and convenience functions for tunneling\nzeromq connections.\n\nAuthors\n-------\n* Min RK\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under ... | diff --git a/IPython/external/ssh/tunnel.py b/IPython/external/ssh/tunnel.py
index 4fce68c008b..9ae2311fa69 100644
--- a/IPython/external/ssh/tunnel.py
+++ b/IPython/external/ssh/tunnel.py
@@ -97,7 +97,7 @@ def _try_passwordless_openssh(server, keyfile):
p = pexpect.spawn(cmd)
while True:
try:
- p.expect('[Ppassword]:', timeout=.1)
+ p.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
|
google__mobly-578 | yaml.load_all is unsafe with default loader
tests/mobly/output_test.py::OutputTest::test_teardown_class_output
T:\src\github\mobly\tests\mobly\output_test.py:258: YAMLLoadWarning:
*** Calling yaml.load_all() without Loader=... is deprecated.
*** The default Loader is unsafe.
*** Please read https://msg.pyyaml.org/load for full details.
for entry in yaml.load_all(f):
tests/mobly/records_test.py::RecordsTest::test_summary_user_data
T:\src\github\mobly\tests\mobly\records_test.py:401: YAMLLoadWarning:
*** Calling yaml.load_all() without Loader=... is deprecated.
*** The default Loader is unsafe.
*** Please read https://msg.pyyaml.org/load for full details.
for c in yaml.load_all(f):
tests/mobly/records_test.py::RecordsTest::test_summary_write_dump
T:\src\github\mobly\tests\mobly\records_test.py:364: YAMLLoadWarning:
*** Calling yaml.load() without Loader=... is deprecated.
*** The default Loader is unsafe.
*** Please read https://msg.pyyaml.org/load for full details.
content = yaml.load(f)
tests/mobly/records_test.py::RecordsTest::test_summary_write_dump_with_unicode
T:\src\github\mobly\tests\mobly\records_test.py:383: YAMLLoadWarning:
*** Calling yaml.load() without Loader=... is deprecated.
*** The default Loader is unsafe.
*** Please read https://msg.pyyaml.org/load for full details.
content = yaml.load(f)
tests/mobly/test_runner_test.py::TestRunnerTest::test_summary_file_entries
T:\src\github\mobly\tests\mobly\test_runner_test.py:135: YAMLLoadWarning:
*** Calling yaml.load_all() without Loader=... is deprecated.
*** The default Loader is unsafe.
*** Please read https://msg.pyyaml.org/load for full details.
summary_entries = list(yaml.load_all(f))
| [
{
"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab... | [
{
"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab... | diff --git a/mobly/config_parser.py b/mobly/config_parser.py
index 2278ca6a..d26f34ba 100644
--- a/mobly/config_parser.py
+++ b/mobly/config_parser.py
@@ -152,7 +152,7 @@ def _load_config_file(path):
A dict that represents info in the config file.
"""
with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:
- conf = yaml.load(f)
+ conf = yaml.safe_load(f)
return conf
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 43951f4b..abe3a13e 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -148,8 +148,8 @@ def test_never(self):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
expected_msg = (
- 'Test method name not_a_test_something does not follow '
- 'naming convention test_\*, abort.')
+ r'Test method name not_a_test_something does not follow '
+ r'naming convention test_\*, abort.')
with self.assertRaisesRegex(base_test.Error, expected_msg):
bt_cls.run()
@@ -186,8 +186,8 @@ def test_never(self):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
expected_msg = (
- 'Test method name not_a_test_something does not follow '
- 'naming convention test_\*, abort.')
+ r'Test method name not_a_test_something does not follow '
+ r'naming convention test_\*, abort.')
with self.assertRaisesRegex(base_test.Error, expected_msg):
bt_cls.run(test_names=["not_a_test_something"])
@@ -1994,7 +1994,7 @@ def test_something(self):
self.assertEqual(actual_record.test_name, "test_something")
hit = False
with io.open(self.summary_file, 'r', encoding='utf-8') as f:
- for c in yaml.load_all(f):
+ for c in yaml.safe_load_all(f):
if c['Type'] != records.TestSummaryEntryType.USER_DATA.value:
continue
hit = True
diff --git a/tests/mobly/output_test.py b/tests/mobly/output_test.py
index 8a879028..6caf9b81 100755
--- a/tests/mobly/output_test.py
+++ b/tests/mobly/output_test.py
@@ -232,7 +232,7 @@ def test_basic_output(self):
info_log_path) = self.assert_output_logs_exist(output_dir)
summary_entries = []
with io.open(summary_file_path, 'r', encoding='utf-8') as f:
- for entry in yaml.load_all(f):
+ for entry in yaml.safe_load_all(f):
self.assertTrue(entry['Type'])
summary_entries.append(entry)
self.assert_log_contents(debug_log_path, whitelist=['DEBUG', 'INFO'])
@@ -255,7 +255,7 @@ def test_teardown_class_output(self):
with io.open(summary_file_path, 'r', encoding='utf-8') as f:
raw_content = f.read()
f.seek(0)
- for entry in yaml.load_all(f):
+ for entry in yaml.safe_load_all(f):
if (entry['Type'] == 'Record'
and entry[records.TestResultEnums.RECORD_NAME] ==
'teardown_class'):
diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py
index ba25a690..a374347d 100755
--- a/tests/mobly/records_test.py
+++ b/tests/mobly/records_test.py
@@ -361,7 +361,7 @@ def test_summary_write_dump(self):
writer = records.TestSummaryWriter(dump_path)
writer.dump(record1.to_dict(), records.TestSummaryEntryType.RECORD)
with io.open(dump_path, 'r', encoding='utf-8') as f:
- content = yaml.load(f)
+ content = yaml.safe_load(f)
self.assertEqual(content['Type'],
records.TestSummaryEntryType.RECORD.value)
self.assertEqual(content[records.TestResultEnums.RECORD_DETAILS],
@@ -380,7 +380,7 @@ def test_summary_write_dump_with_unicode(self):
writer = records.TestSummaryWriter(dump_path)
writer.dump(record1.to_dict(), records.TestSummaryEntryType.RECORD)
with io.open(dump_path, 'r', encoding='utf-8') as f:
- content = yaml.load(f)
+ content = yaml.safe_load(f)
self.assertEqual(content['Type'],
records.TestSummaryEntryType.RECORD.value)
self.assertEqual(content[records.TestResultEnums.RECORD_DETAILS],
@@ -398,7 +398,7 @@ def test_summary_user_data(self):
writer.dump(data, records.TestSummaryEntryType.USER_DATA)
with io.open(dump_path, 'r', encoding='utf-8') as f:
contents = []
- for c in yaml.load_all(f):
+ for c in yaml.safe_load_all(f):
contents.append(c)
for content in contents:
self.assertEqual(content['Type'],
diff --git a/tests/mobly/test_runner_test.py b/tests/mobly/test_runner_test.py
index 0d282ba4..26af2eee 100755
--- a/tests/mobly/test_runner_test.py
+++ b/tests/mobly/test_runner_test.py
@@ -132,7 +132,7 @@ def test_summary_file_entries(self):
summary_path = os.path.join(logging.log_path,
records.OUTPUT_FILE_SUMMARY)
with io.open(summary_path, 'r', encoding='utf-8') as f:
- summary_entries = list(yaml.load_all(f))
+ summary_entries = list(yaml.safe_load_all(f))
self.assertEqual(len(summary_entries), 4)
# Verify the first entry is the list of test names.
self.assertEqual(summary_entries[0]['Type'],
|
kivy__python-for-android-1351 | Python2 Build fails with make: *** [Makefile:426: sharedmods] Error 139
# Python version: 3.6
# OS: Arch Linux
# python-for-android version: 0.6.0
The command I use to build is:
`
p4a apk --private ~/Projects/Python/Mobile_Apps/BeerApp/ --package=org.drink.recommendations --name "Drink Recommendations" --version 0.2 --bootstrap=sdl2 --requirements=python2,kivy --ndk_version r9c
`
The error is:
`
make: *** [Makefile:426: sharedmods] Error 139
`
The build logs are in the following file.
[p4a_errors.txt](https://github.com/kivy/python-for-android/files/2091833/p4a_errors.txt)
Initally I thought that this was a buildozer issue, as I attempted it that way first. So, I opened an issue on their github page and multiple users pointed out that they too were experiencing this issue. I've tried with both python3 and python2, the out come is the same. There is absolutely no unicode in any of my source files, I've also attempted the build with pygame instead of sdl2 (for python 2). There are also multiple simillar SO threads open about this.
Does anyone have any sugesstions or ideas as to why this is happening and how to go about fixing it?
It's also worth noting that if I use the kivy buildozer vm, I can use buildozer to carry out a successful build. Just not on any other machine using either buildozer or p4a, using the same source and build commands.
The buildozer issue is here: https://github.com/kivy/buildozer/issues/673
The output from the dump file is:
`
Reading symbols from /home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python...done.
[New LWP 28854]
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/usr/lib/libthread_db.so.1".
Core was generated by ./python -E ./setup.py -q build.
Program terminated with signal SIGSEGV, Segmentation fault.
#0 0x000055731803eb2a in PyInstance_NewRaw (klass=klass@entry=0x7f7cbf1d1c18, dict=0x557319325210, dict@entry=0x0) at Objects/classobject.c:534
534 inst->in_dict = dict;
File "/home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python-gdb.py", line 55
Py_TPFLAGS_HEAPTYPE = (1L << 9)
^
SyntaxError: invalid syntax
`
| [
{
"content": "\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory, info, warning\nfrom os.path import join, exists\nimport os\nimport sh\n\n\nclass Hostpython2Recipe(Recipe):\n version = '2.7.2'\n url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'\n name = ... | [
{
"content": "\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory, info, warning\nfrom os.path import join, exists\nfrom os import chdir\nimport os\nimport sh\n\n\nclass Hostpython2Recipe(Recipe):\n version = '2.7.2'\n url = 'https://python.org/ftp/python/{version}/Python-{version}... | diff --git a/pythonforandroid/recipes/hostpython2/__init__.py b/pythonforandroid/recipes/hostpython2/__init__.py
index 5a5b362f59..dc1ccb4cbc 100644
--- a/pythonforandroid/recipes/hostpython2/__init__.py
+++ b/pythonforandroid/recipes/hostpython2/__init__.py
@@ -10,6 +10,7 @@ class Hostpython2Recipe(Recipe):
version = '2.7.2'
url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'
name = 'hostpython2'
+ patches = ['fix-segfault-pygchead.patch']
conflicts = ['hostpython3']
diff --git a/pythonforandroid/recipes/hostpython2/fix-segfault-pygchead.patch b/pythonforandroid/recipes/hostpython2/fix-segfault-pygchead.patch
new file mode 100644
index 0000000000..25d4599cb0
--- /dev/null
+++ b/pythonforandroid/recipes/hostpython2/fix-segfault-pygchead.patch
@@ -0,0 +1,12 @@
+diff -Naur Python-2.7.2.orig/Include/objimpl.h Python-2.7.2/Include/objimpl.h
+--- Python-2.7.2.orig/Include/objimpl.h 2011-06-11 17:46:23.000000000 +0200
++++ Python-2.7.2/Include/objimpl.h 2018-09-04 17:33:09.254654565 +0200
+@@ -255,7 +255,7 @@
+ union _gc_head *gc_prev;
+ Py_ssize_t gc_refs;
+ } gc;
+- long double dummy; /* force worst-case alignment */
++ double dummy; /* force worst-case alignment */
+ } PyGC_Head;
+
+ extern PyGC_Head *_PyGC_generation0;
|
DistrictDataLabs__yellowbrick-1162 | pytest-runner is deprecated
pytest-runner is deprecated: https://github.com/pytest-dev/pytest-runner/#deprecation-notice
If I find time, then I can make a PR, but I thought I'd let you know in the meantime.
| [
{
"content": "#!/usr/bin/env python\n# setup\n# Setup script for installing yellowbrick\n#\n# Author: Benjamin Bengfort\n# Created: Wed May 18 14:33:26 2016 -0400\n#\n# Copyright (C) 2016 The scikit-yb developers\n# For license information, see LICENSE.txt and NOTICE.md\n#\n# ID: setup.py [c4f3ba7] benjamin@... | [
{
"content": "#!/usr/bin/env python\n# setup\n# Setup script for installing yellowbrick\n#\n# Author: Benjamin Bengfort\n# Created: Wed May 18 14:33:26 2016 -0400\n#\n# Copyright (C) 2016 The scikit-yb developers\n# For license information, see LICENSE.txt and NOTICE.md\n#\n# ID: setup.py [c4f3ba7] benjamin@... | diff --git a/Makefile b/Makefile
index d8ba922f9..4cc479eff 100644
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@ clean:
# Targets for testing
test:
- python setup.py test
+ pytest
# Publish to gh-pages
publish:
@@ -48,3 +48,4 @@ install:
deploy:
python setup.py register
twine upload dist/*
+
diff --git a/setup.py b/setup.py
index d47d5f66e..6ed128329 100755
--- a/setup.py
+++ b/setup.py
@@ -163,9 +163,7 @@ def get_description_type(path=PKG_DESCRIBE):
"zip_safe": False,
"entry_points": {"console_scripts": []},
"install_requires": list(get_requires()),
- "python_requires": ">=3.4, <4",
- "setup_requires": ["pytest-runner"],
- "tests_require": ["pytest"],
+ "python_requires": ">=3.4, <4"
}
|
agconti__cookiecutter-django-rest-250 | Update Managers to Admins in `common.py`
| [
{
"content": "import os\nfrom os.path import join\n\nfrom configurations import Configuration, values\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass Common(Configuration):\n\n INSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'd... | [
{
"content": "import os\nfrom os.path import join\n\nfrom configurations import Configuration, values\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass Common(Configuration):\n\n INSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'd... | diff --git a/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py b/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py
index 9df3857eb..3b7996df6 100755
--- a/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py
+++ b/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py
@@ -48,7 +48,7 @@ class Common(Configuration):
# Email
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
- MANAGERS = (
+ ADMINS = (
('Author', '{{cookiecutter.email}}'),
)
|
RedHatInsights__insights-core-2890 | first_file in insights_archive isn't bound to the right context
The first_file helper in [insights_archive.py](https://github.com/RedHatInsights/insights-core/blob/master/insights/specs/insights_archive.py#L7) isn't bound to the `HostArchiveContext`, so it will try to fire for any context that has a filesystem root.
| [
{
"content": "from insights.core.spec_factory import glob_file, simple_file, head, first_file\nfrom functools import partial\nfrom insights.core.context import HostArchiveContext\nfrom insights.specs import Specs\n\nsimple_file = partial(simple_file, context=HostArchiveContext)\nglob_file = partial(glob_file, c... | [
{
"content": "from insights.core.spec_factory import glob_file, simple_file, head, first_file\nfrom functools import partial\nfrom insights.core.context import HostArchiveContext\nfrom insights.specs import Specs\n\nsimple_file = partial(simple_file, context=HostArchiveContext)\nglob_file = partial(glob_file, c... | diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index 068cf5af35..2e88c7ac4d 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -5,6 +5,7 @@
simple_file = partial(simple_file, context=HostArchiveContext)
glob_file = partial(glob_file, context=HostArchiveContext)
+first_file = partial(first_file, context=HostArchiveContext)
class InsightsArchiveSpecs(Specs):
|
matrix-org__synapse-13326 | Ubuntu 21.10 (Impish Indri) has reached end of life as of July 14, 2022
See https://lists.ubuntu.com/archives/ubuntu-announce/2022-July/000281.html
I don't think we have good docs for removing a distribution, but should be the opposite of [gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution](https://gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution).
| [
{
"content": "#!/usr/bin/env python3\n\n# Build the Debian packages using Docker images.\n#\n# This script builds the Docker images and then executes them sequentially, each\n# one building a Debian package for the targeted operating system. It is\n# designed to be a \"single command\" to produce all the images... | [
{
"content": "#!/usr/bin/env python3\n\n# Build the Debian packages using Docker images.\n#\n# This script builds the Docker images and then executes them sequentially, each\n# one building a Debian package for the targeted operating system. It is\n# designed to be a \"single command\" to produce all the images... | diff --git a/changelog.d/13326.removal b/changelog.d/13326.removal
new file mode 100644
index 000000000000..8112286671d7
--- /dev/null
+++ b/changelog.d/13326.removal
@@ -0,0 +1 @@
+Stop builindg `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life.
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index 38564893e95b..cd2e64b75f9d 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -26,7 +26,6 @@
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
- "ubuntu:impish", # 21.10 (EOL 2022-07)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
)
|
twisted__twisted-11838 | twisted.web.template._flattenElement spends a significant amount of runtime in typing.py
**Describe the incorrect behavior you saw**
`_flattenElement` defines a closure `keepGoing` which is used to recursively call `_flattenElement`. `keepGoing`'s type definition includes multiple `Callable[…]`s.
Since the file does not include `from __future__ import annotations` the type definitions are evaluated at function definition time, which is on every call to `_flattenElement`.
`typing.Callable.__getitem__` does multiple `isinstance` checks before deferring to the `@_tp_cache`ed implementation of `Callable.__getitem_inner__`. This causes evaluating the types for the closure to make up a significant portion of `_flattenElement`'s runtime.
**Describe how to cause this behavior**
This behavior was discovered while profiling a documentation build using pydoctor under the austin profiler.
Speedscope URL for the profile is here: https://www.speedscope.app/#profileURL=https%3A%2F%2Fgist.githubusercontent.com%2Fdreid%2F197566471f39a96523f5065d19d0bf7f%2Fraw%2F3e7ec92a17bc82d40acceb1e2efcaa3ef7c8ef07%2Ftwisted-austin-trunk.speedscope
In the profile you can see that the `inner` function in `_tp_cache` accounts for 1m25s of the total runtime, and the `Callable.__getitem__` accounts for 25s of total runtime.


**Describe the correct behavior you'd like to see**
A clear and concise description of what you expected to happen, or what you believe should be happening instead.
**Testing environment**
- Operating System and Version; paste the output of these commands:
- on Linux, `uname -a ; cat /etc/lsb-release`
- on Windows, `systeminfo | Findstr /i "OS"`
- on macOS, `sw_vers`
- Twisted version [e.g. 22.2.0]
- please paste the output of `twist --version` and `pip --freeze`
- Reactor [e.g. select, iocp]
**Additional context**
Add any other context about the problem here.
| [
{
"content": "# -*- test-case-name: twisted.web.test.test_flatten,twisted.web.test.test_template -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nContext-free flattener/serializer for rendering Python objects, possibly\ncomplex or arbitrarily nested, as strings.\n\"\"\"\n... | [
{
"content": "# -*- test-case-name: twisted.web.test.test_flatten,twisted.web.test.test_template -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nContext-free flattener/serializer for rendering Python objects, possibly\ncomplex or arbitrarily nested, as strings.\n\"\"\"\n... | diff --git a/src/twisted/newsfragments/11835.bugfix b/src/twisted/newsfragments/11835.bugfix
new file mode 100644
index 00000000000..1dc8f6d0af7
--- /dev/null
+++ b/src/twisted/newsfragments/11835.bugfix
@@ -0,0 +1 @@
+`twisted.web.template` now avoids some unecessary evaluation of type annotations and is faster.
diff --git a/src/twisted/web/_flatten.py b/src/twisted/web/_flatten.py
index 276c6455083..e5eba2b6cac 100644
--- a/src/twisted/web/_flatten.py
+++ b/src/twisted/web/_flatten.py
@@ -6,6 +6,7 @@
Context-free flattener/serializer for rendering Python objects, possibly
complex or arbitrarily nested, as strings.
"""
+from __future__ import annotations
from inspect import iscoroutine
from io import BytesIO
|
liqd__a4-meinberlin-2082 | dashboard: district "gesamtstädtisch" ist ---
in dashboard the default district is "---" and should be changed to "Gesamtstädtisch"
| [
{
"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom adhocracy4.ma... | [
{
"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom adhocracy4.ma... | diff --git a/meinberlin/apps/projects/forms.py b/meinberlin/apps/projects/forms.py
index 969995993c..f45b56634c 100644
--- a/meinberlin/apps/projects/forms.py
+++ b/meinberlin/apps/projects/forms.py
@@ -92,3 +92,7 @@ class Meta:
'point': maps_widgets.MapChoosePointWidget(
polygon=settings.BERLIN_POLYGON)
}
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.fields['administrative_district'].empty_label = _('City wide')
|
tiangolo__fastapi-681 | Async class method dependency raises a ValueError
**Describe the bug**
If you use an async class method as a dependency, a `ValueError` is thrown. It doesn't happen for a non-async method.
Complete error: `ValueError: [KeyError(<class 'coroutine'>), TypeError("'coroutine' object is not iterable"), TypeError('vars() argument must have __dict__ attribute')]` (at `fastapi/encoders.py:106`)
**To Reproduce**
```py
from fastapi import Depends, FastAPI
from starlette.requests import Request
class DependencyClass:
async def async_dep(self, request: Request):
return True
def sync_dep(self, request: Request):
return True
app = FastAPI()
dependency = DependencyClass()
# Error
@app.get('/async-dep')
def async_dep(r=Depends(dependency.async_dep)):
return r
# Everything is fine
@app.get('/sync-dep')
def sync_dep(r=Depends(dependency.sync_dep)):
return r
```
**Expected behavior**
The async class method dependency should be called and its return value injected.
**Environment:**
- OS: macOS
- FastAPI Version: 0.42.0
- Python version: 3.7.2
**Additional context**
I believe the issue comes from here:
https://github.com/tiangolo/fastapi/blob/65536cbf63318d111bf608960378d651b6c1596a/fastapi/dependencies/utils.py#L353-L359
Indeed, `inspect.isfunction(call)` will return `False` in case of a class method. Hence, it is [sent to `run_in_threadpool`](https://github.com/tiangolo/fastapi/blob/65536cbf63318d111bf608960378d651b6c1596a/fastapi/dependencies/utils.py#L453-L456), which never awaits the coroutine, and we end up trying to serialize it instead of its result (hence the `ValueError`).
Changing the check by:
```py
if inspect.isfunction(call) or inspect.ismethod(call):
```
solves the issue. I can make a PR with the fix and unit tests if it helps.
| [
{
"content": "import asyncio\nimport inspect\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom fastapi import params\nfrom fas... | [
{
"content": "import asyncio\nimport inspect\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom fastapi import params\nfrom fas... | diff --git a/fastapi/dependencies/utils.py b/fastapi/dependencies/utils.py
index 4745f173f0d6e..2cda78a9e9a6f 100644
--- a/fastapi/dependencies/utils.py
+++ b/fastapi/dependencies/utils.py
@@ -351,7 +351,7 @@ def add_param_to_fields(*, field: Field, dependant: Dependant) -> None:
def is_coroutine_callable(call: Callable) -> bool:
- if inspect.isfunction(call):
+ if inspect.isroutine(call):
return asyncio.iscoroutinefunction(call)
if inspect.isclass(call):
return False
diff --git a/tests/test_dependency_class.py b/tests/test_dependency_class.py
new file mode 100644
index 0000000000000..db1f5cc8fe70a
--- /dev/null
+++ b/tests/test_dependency_class.py
@@ -0,0 +1,70 @@
+import pytest
+from fastapi import Depends, FastAPI
+from starlette.testclient import TestClient
+
+app = FastAPI()
+
+
+class CallableDependency:
+ def __call__(self, value: str) -> str:
+ return value
+
+
+class AsyncCallableDependency:
+ async def __call__(self, value: str) -> str:
+ return value
+
+
+class MethodsDependency:
+ def synchronous(self, value: str) -> str:
+ return value
+
+ async def asynchronous(self, value: str) -> str:
+ return value
+
+
+callable_dependency = CallableDependency()
+async_callable_dependency = AsyncCallableDependency()
+methods_dependency = MethodsDependency()
+
+
+@app.get("/callable-dependency")
+async def get_callable_dependency(value: str = Depends(callable_dependency)):
+ return value
+
+
+@app.get("/async-callable-dependency")
+async def get_callable_dependency(value: str = Depends(async_callable_dependency)):
+ return value
+
+
+@app.get("/synchronous-method-dependency")
+async def get_synchronous_method_dependency(
+ value: str = Depends(methods_dependency.synchronous),
+):
+ return value
+
+
+@app.get("/asynchronous-method-dependency")
+async def get_asynchronous_method_dependency(
+ value: str = Depends(methods_dependency.asynchronous),
+):
+ return value
+
+
+client = TestClient(app)
+
+
+@pytest.mark.parametrize(
+ "route,value",
+ [
+ ("/callable-dependency", "callable-dependency"),
+ ("/async-callable-dependency", "async-callable-dependency"),
+ ("/synchronous-method-dependency", "synchronous-method-dependency"),
+ ("/asynchronous-method-dependency", "asynchronous-method-dependency"),
+ ],
+)
+def test_class_dependency(route, value):
+ response = client.get(route, params={"value": value})
+ assert response.status_code == 200
+ assert response.json() == value
|
kivy__python-for-android-1163 | Openssl recipe crashes on x86 arch
p4a branch: stable
buildozer: 0.33
bootstrap: sdl2
kivy: 1.10.0
Error message i get:
```
arm_arch.h:46:6: error: #error "unsupported ARM architecture"
```
| [
{
"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return ... | [
{
"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return ... | diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py
index 5be1cdd445..355e6f539d 100644
--- a/pythonforandroid/recipes/openssl/__init__.py
+++ b/pythonforandroid/recipes/openssl/__init__.py
@@ -36,6 +36,8 @@ def select_build_arch(self, arch):
return 'android-armv7'
if 'arm' in aname:
return 'android'
+ if 'x86' in aname:
+ return 'android-x86'
return 'linux-armv4'
def build_arch(self, arch):
|
WordPress__openverse-api-556 | Sound category mismatch
## Description
<!-- Concisely describe the bug. -->
The `sound` category for audio doesn't work on the front-end.
There seems to be a mismatch between the `audio` category of `sound_effect`:
If you go to `https://api.openverse.engineering/v1/audio/?q=cat&categories=sound`, you will get a 400 response:
```
HTTP 400 Bad Request
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept
{
"detail": {
"categories": [
"Invalid category: sound. Available options: {'music', 'audiobook', 'podcast', 'news', 'sound_effect'}"
]
}
}
```
However, if you access a single audio result, you will see that it returns `sound` for the category:
https://api.openverse.engineering/v1/audio/1bb94f50-009c-4371-a605-dd289562a9f5/
## Expectation
<!-- Concisely describe what you expected to happen. -->
Both the query category parameter and the result category property for sound effect should have the same name.
## Additional context
The catalog sets the category as `sound`, so that is the value we get from the database:
https://github.com/WordPress/openverse-catalog/blob/cb19f839e96de7ae1a55e8b7dc82a7d2bf5588e8/openverse_catalog/dags/providers/provider_api_scripts/freesound.py#L33-L34
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
| [
{
"content": "from catalog.api.controllers.search_controller import get_sources\nfrom catalog.api.docs.media_docs import fields_to_md\nfrom catalog.api.models import AudioReport\nfrom catalog.api.models.audio import Audio\nfrom catalog.api.serializers.media_serializers import (\n MediaSearchRequestSerializer... | [
{
"content": "from catalog.api.controllers.search_controller import get_sources\nfrom catalog.api.docs.media_docs import fields_to_md\nfrom catalog.api.models import AudioReport\nfrom catalog.api.models.audio import Audio\nfrom catalog.api.serializers.media_serializers import (\n MediaSearchRequestSerializer... | diff --git a/api/catalog/api/serializers/audio_serializers.py b/api/catalog/api/serializers/audio_serializers.py
index 26525c079..de8b0c25b 100644
--- a/api/catalog/api/serializers/audio_serializers.py
+++ b/api/catalog/api/serializers/audio_serializers.py
@@ -88,6 +88,7 @@ def validate_categories(value):
"podcast",
"news",
"audiobook",
+ "pronunciation",
}
_validate_enum("category", valid_categories, value)
return value.lower()
|
google__clusterfuzz-1169 | _update_issue_metadata in progression task fails on OSS-Fuzz
This is due to use of untrusted runner on OSS-Fuzz. Is this even needed there, currently causing exceptions. Should this be a simple bailout ?
| [
{
"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab... | [
{
"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicab... | diff --git a/src/python/bot/tasks/progression_task.py b/src/python/bot/tasks/progression_task.py
index ad61f2d903..0a93c13b37 100644
--- a/src/python/bot/tasks/progression_task.py
+++ b/src/python/bot/tasks/progression_task.py
@@ -140,6 +140,10 @@ def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
def _update_issue_metadata(testcase):
"""Update issue metadata."""
+ if environment.is_trusted_host():
+ # Not applicable.
+ return
+
fuzz_target = testcase.get_fuzz_target()
if not fuzz_target:
return
|
AUTOMATIC1111__stable-diffusion-webui-1326 | New samplers are not showing up
I just updated my version to try out the new samplers but they are not showing up. I deleted repositories/k-diffusion as a test but they still dont show up.
Someone on reddit mentioned to do "source venv/bin/activate/" and then to do a pip uninstall k-diffusion, but I have no idea what it means.
How can I get the new samplers to show up in the UI?
Edit: They dont show up in the img2img Tab
| [
{
"content": "from collections import namedtuple\r\nimport numpy as np\r\nimport torch\r\nimport tqdm\r\nfrom PIL import Image\r\nimport inspect\r\n\r\nimport k_diffusion.sampling\r\nimport ldm.models.diffusion.ddim\r\nimport ldm.models.diffusion.plms\r\nfrom modules import prompt_parser\r\n\r\nfrom modules.sha... | [
{
"content": "from collections import namedtuple\r\nimport numpy as np\r\nimport torch\r\nimport tqdm\r\nfrom PIL import Image\r\nimport inspect\r\nfrom modules.paths import paths\r\nsys.path.insert(0, paths[\"k_diffusion\"])\r\nimport k_diffusion.sampling\r\nimport ldm.models.diffusion.ddim\r\nimport ldm.model... | diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 5642b870cdc..5e60e494b90 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -4,7 +4,8 @@
import tqdm
from PIL import Image
import inspect
-
+from modules.paths import paths
+sys.path.insert(0, paths["k_diffusion"])
import k_diffusion.sampling
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
|
jazzband__pip-tools-488 | Providing a source file which does not have an extension writes to a .txt file in current dir
If you happen to use an extensionless filename as a source of requirements in, pip-compile will deduce the wrong output filename and generate a ".txt" file (relative file, thus at current working dir).
##### Environment Versions
1. OS Type Should be cross platform, but observed on Windows
1. Python version: `$ python -V` 2.7.8
1. pip version: `$ pip --version` 9.0.1
1. pip-tools version: `$ pip-compile --version` 1.8.1rc2
##### Steps to replicate
1. echo "request" > no_extension
2. pip-compile no_extension
##### Expected result
We should error out because it is difficult to deduce a new name if there is no extension to remove.
##### Actual result
a .txt file is generated at the current directory.
| [
{
"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. import click\nfrom ..except... | [
{
"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. import click\nfrom ..except... | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 902b16430..8522af6f4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
# 1.9.0 (Unreleased)
+- Fixed the default output file name when the source file has no extension ([#470](https://github.com/jazzband/pip-tools/issues/470))
- Added a `--max-rounds` argument to the pip-compile command to allow for solving large requirement sets ([#472](https://github.com/jazzband/pip-tools/pull/472))
- Exclude unsafe packages' dependencies when `--allow-unsafe` is not in use (#445)
- Exclude irrelevant pip constraints ([#471](https://github.com/jazzband/pip-tools/pull/471))
diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py
index d88a72963..850e4a503 100755
--- a/piptools/scripts/compile.py
+++ b/piptools/scripts/compile.py
@@ -92,7 +92,7 @@ def cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,
if output_file:
dst_file = output_file
else:
- base_name, _, _ = src_files[0].rpartition('.')
+ base_name = src_files[0].rsplit('.', 1)[0]
dst_file = base_name + '.txt'
if upgrade and upgrade_packages:
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 36178fe71..0976ecfda 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -219,3 +219,21 @@ def test_editable_package(tmpdir):
assert out.exit_code == 0
assert fake_package_dir in out.output
assert 'six==1.10.0' in out.output
+
+
+def test_input_file_without_extension(tmpdir):
+ """
+ piptools can compile a file without an extension,
+ and add .txt as the defaut output file extension.
+ """
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ with open('requirements', 'w') as req_in:
+ req_in.write('six==1.10.0')
+
+ out = runner.invoke(cli, ['-n', 'requirements'])
+
+ print(out.output)
+ assert out.exit_code == 0
+ assert '--output-file requirements.txt' in out.output
+ assert 'six==1.10.0' in out.output
|
microsoft__playwright-python-145 | DEBUG outputs won't get forwarded
| [
{
"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by ap... | [
{
"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by ap... | diff --git a/playwright/main.py b/playwright/main.py
index 6a8451c0a..4726ef4bf 100644
--- a/playwright/main.py
+++ b/playwright/main.py
@@ -49,7 +49,7 @@ async def run_driver_async() -> Connection:
str(driver_executable),
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE,
+ stderr=sys.stderr,
limit=32768,
)
assert proc.stdout
|
urllib3__urllib3-2424 | Remove integration tests for Botocore with Python 2.7
Botocore dropped support for Python 2.7 in July so we don't have to do integration testing with Botocore+Python 2.7 on the 1.26.x branch any longer.
Reference: https://github.com/urllib3/urllib3/pull/2422
| [
{
"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(session, extras=\"socks,secure,brotli\"):\n # Install deps and the package itself.\n... | [
{
"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(session, extras=\"socks,secure,brotli\"):\n # Install deps and the package itself.\n... | diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index e421d8f3af..94c447d36f 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -12,6 +12,11 @@ jobs:
matrix:
python-version: [2.7, 3.9]
downstream: [botocore, requests]
+ exclude:
+ # excludes botocore in Python 2.7
+ - python-version: 2.7
+ downstream: botocore
+
runs-on: ubuntu-18.04
steps:
diff --git a/noxfile.py b/noxfile.py
index f317e3b97e..7b27a97cd1 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -88,7 +88,7 @@ def git_clone(session, git_url):
session.run("git", "clone", "--depth", "1", git_url, external=True)
-@nox.session(python=["2.7", "3.9"])
+@nox.session(python=["3.9"])
def downstream_botocore(session):
root = os.getcwd()
tmp_dir = session.create_tmp()
|
flairNLP__flair-300 | Update torch version to 1.0.0
torch version 1.0.0 is available. Use torch version 1.0.0 in Flair.
| [
{
"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.3.2',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author=... | [
{
"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.3.2',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author=... | diff --git a/requirements.txt b/requirements.txt
index 71da5a8102..e1aef4f572 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-torch==0.4.1
+torch==1.0.0
gensim==3.4.0
typing==3.6.4
pytest==3.6.4
diff --git a/setup.py b/setup.py
index 90749487f3..4234e19ade 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
packages=find_packages(exclude='test'), # same as name
license='MIT',
install_requires=[
- 'torch==0.4.1',
+ 'torch==1.0.0',
'gensim==3.4.0',
'typing==3.6.4',
'tqdm==4.26.0',
|
Kaggle__docker-python-1326 | NameError: name 'io' is not defined
## 🐛 Bug
I am trying to run my scripts on GPU notebook, and I keep getting the following error.
```shell
Traceback (most recent call last):
File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_init.py", line 1172, in init
getcaller()
File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_init.py", line 846, in getcaller
src, line, func, stack = logger.findCaller(stack_info=True)
File "/root/.local/lib/python3.10/site-packages/log.py", line 42, in findCaller
sio = io.StringIO()
NameError: name 'io' is not defined
```
In addition, I found that there is no import `io` package in [this](https://github.com/Kaggle/docker-python/blob/main/patches/log.py) code.
### To Reproduce
### Expected behavior
### Additional context
<!-- Add any other context about the problem here. -->
| [
{
"content": "import logging\nimport os\n\nimport google.auth\n\n\n_LOG_TO_FILE_ENV = os.getenv(\"KAGGLE_LOG_TO_FILE\")\n\n\nclass _LogFormatter(logging.Formatter):\n \"\"\"A logging formatter which truncates long messages.\"\"\"\n\n _MAX_LOG_LENGTH = 10000 # Be generous, not to truncate long backtraces.... | [
{
"content": "import io\nimport logging\nimport os\n\nimport google.auth\n\n\n_LOG_TO_FILE_ENV = os.getenv(\"KAGGLE_LOG_TO_FILE\")\n\n\nclass _LogFormatter(logging.Formatter):\n \"\"\"A logging formatter which truncates long messages.\"\"\"\n\n _MAX_LOG_LENGTH = 10000 # Be generous, not to truncate long ... | diff --git a/patches/log.py b/patches/log.py
index 2da5993c..59a07c8c 100644
--- a/patches/log.py
+++ b/patches/log.py
@@ -1,3 +1,4 @@
+import io
import logging
import os
@@ -129,4 +130,4 @@ def _static_init():
logging.basicConfig(level=logging.INFO, handlers=[handler])
Log._initialized = True
-Log._static_init()
\ No newline at end of file
+Log._static_init()
|
readthedocs__readthedocs.org-4676 | Improve Intro and Getting Started documentation
I think the [introduction](https://docs.readthedocs.io/en/latest/) and [getting started guide](https://docs.readthedocs.io/en/latest/getting_started.html) could use a few improvements to make it easier for brand new users who may not already know about Sphinx/MkDocs/Markdown/RestructuredText and are just looking for a guide on how to write some docs.
I also think our introduction could stand some improvements to point users in the right direction. We have a lot of docs, but a few layout and explanation improvements will help users find the right section for them.
Here are some specific goals and improvements:
- Make it easier to start a brand new docs project
* Have a getting started guide for Sphinx
* Have a getting started guide for Sphinx with commonmark
* Have a getting started guide for MkDocs
* Explain the why between the above technologies
- Improve the intro paragraphs ("Read the Docs hosts documentation for...") on the index page to explain RTD's value proposition and why somebody should choose Read the Docs.
- Full sentence/paragraph descriptions on different sections (eg. User documentation) rather than just a big toctree.
| [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.se... | [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.se... | diff --git a/docs/_static/images/first-steps/import-a-repository.png b/docs/_static/images/first-steps/import-a-repository.png
new file mode 100644
index 00000000000..252f69834df
Binary files /dev/null and b/docs/_static/images/first-steps/import-a-repository.png differ
diff --git a/docs/_static/images/first-steps/mkdocs-hello-world.png b/docs/_static/images/first-steps/mkdocs-hello-world.png
new file mode 100644
index 00000000000..0c1bde0bf17
Binary files /dev/null and b/docs/_static/images/first-steps/mkdocs-hello-world.png differ
diff --git a/docs/_static/images/first-steps/sphinx-hello-world.png b/docs/_static/images/first-steps/sphinx-hello-world.png
new file mode 100644
index 00000000000..cc7639664cf
Binary files /dev/null and b/docs/_static/images/first-steps/sphinx-hello-world.png differ
diff --git a/docs/conf.py b/docs/conf.py
index 71d66d0a881..1cb99137f38 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -71,7 +71,7 @@
gettext_compact = False
html_theme = 'sphinx_rtd_theme'
-# html_static_path = ['_static']
+html_static_path = ['_static']
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = 'img/logo.svg'
html_theme_options = {
diff --git a/docs/connected-accounts.rst b/docs/connected-accounts.rst
new file mode 100644
index 00000000000..3db7ed6ca51
--- /dev/null
+++ b/docs/connected-accounts.rst
@@ -0,0 +1,20 @@
+Connecting Your Account
+-----------------------
+
+If you are going to import repositories from GitHub, Bitbucket, or GitLab,
+you should connect your Read the Docs account to your repository host first.
+Connecting your account allows for:
+
+* Easier importing of your repositories
+* Automatically configure your repository :doc:`webhooks`
+ which allow Read the Docs to build your docs on every change to your repository
+* Log into Read the Docs with your GitHub, Bitbucket, or GitLab credentials
+
+If you signed up or logged in to Read the Docs with your GitHub, Bitbucket, or GitLab
+credentials, you're all done. Your account is connected.
+
+To connect your unconnected account, go to your *Settings* dashboard
+and select `Connected Services <https://readthedocs.org/accounts/social/connections/>`_.
+From here, you'll be able to connect to your GitHub, Bitbucket or GitLab
+account. This process will ask you to authorize a connection to Read the Docs,
+that allows us to read information about and clone your repositories.
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
deleted file mode 100644
index 3eb2330a9bd..00000000000
--- a/docs/getting_started.rst
+++ /dev/null
@@ -1,151 +0,0 @@
-Getting Started
-===============
-
-This document will show you how to get up and running with Read the Docs.
-You will have your docs imported on Read the Docs in 5 minutes,
-displayed beautifully for the world.
-
-If you are already using Sphinx or Markdown for your docs, skip ahead to
-:ref:`getting_started:Import Your Docs`.
-
-Write Your Docs
----------------
-
-You have two options for formatting your documentation:
-
-* `In reStructuredText`_
-* `In Markdown`_
-
-In reStructuredText
-~~~~~~~~~~~~~~~~~~~
-
-There is `a screencast`_ that will help you get started if you prefer.
-
-Sphinx_ is a tool that makes it easy to create beautiful documentation.
-Assuming you have Python_ already, `install Sphinx`_::
-
- $ pip install sphinx sphinx-autobuild
-
-Create a directory inside your project to hold your docs::
-
- $ cd /path/to/project
- $ mkdir docs
-
-Run ``sphinx-quickstart`` in there::
-
- $ cd docs
- $ sphinx-quickstart
-
-This quick start will walk you through creating the basic configuration; in most cases, you
-can just accept the defaults. When it's done, you'll have an ``index.rst``, a
-``conf.py`` and some other files. Add these to revision control.
-
-Now, edit your ``index.rst`` and add some information about your project.
-Include as much detail as you like (refer to the reStructuredText_ syntax
-or `this template`_ if you need help). Build them to see how they look::
-
- $ make html
-
-.. note:: You can use ``sphinx-autobuild`` to auto-reload your docs. Run ``sphinx-autobuild . _build/html`` instead.
-
-Edit your files and rebuild until you like what you see, then commit your changes and push to your public repository.
-Once you have Sphinx documentation in a public repository, you can start using Read the Docs.
-
-In Markdown
-~~~~~~~~~~~
-
-You can use Markdown and reStructuredText in the same Sphinx project.
-We support this natively on Read the Docs, and you can do it locally::
-
- $ pip install recommonmark
-
-Then in your ``conf.py``:
-
-.. code-block:: python
-
- from recommonmark.parser import CommonMarkParser
-
- source_parsers = {
- '.md': CommonMarkParser,
- }
-
- source_suffix = ['.rst', '.md']
-
-.. note:: Markdown doesn't support a lot of the features of Sphinx,
- like inline markup and directives. However, it works for
- basic prose content. reStructuredText is the preferred
- format for technical documentation, please read `this blog post`_
- for motivation.
-
-.. _this blog post: http://ericholscher.com/blog/2016/mar/15/dont-use-markdown-for-technical-docs/
-
-Sign Up and Connect an External Account
----------------------------------------
-
-If you are going to import a repository from GitHub, Bitbucket or GitLab, you should
-connect your account to your provider first. Connecting your account allows for
-easier importing and enables Read the Docs to configure your repository webhooks
-automatically.
-
-To connect your account, go to your *Settings* dashboard and select *Connected
-Services*. From here, you'll be able to connect to your GitHub, Bitbucket or GitLab
-account. This process will ask you to authorize a connection to Read the Docs,
-that allows us to read information about and clone your repositories.
-
-Import Your Docs
-----------------
-
-To import a repository, visit your dashboard_ and click Import_.
-
-If you have a connected account, you will see a list of your repositories that
-we are able to import. To import one of these projects, just click the import
-icon next to the repository you'd like to import. This will bring up a form that
-is already filled with your project's information. Feel free to edit any of
-these properties, and the click **Next** to build your documentation.
-
-Manually Import Your Docs
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you do not have a connected account, you will need select **Import Manually**
-and enter the information for your repository yourself. You will also need to
-manually configure the webhook for your repository as well. When importing your
-project, you will be asked for the repository URL, along with some other
-information for you new project. The URL is normally the URL or path name you'd
-use to checkout, clone, or branch your repository. Some examples:
-
-* Git: ``http://github.com/ericholscher/django-kong.git``
-* Mercurial: ``https://bitbucket.org/ianb/pip``
-* Subversion: ``http://varnish-cache.org/svn/trunk``
-* Bazaar: ``lp:pasta``
-
-Add an optional homepage URL and some tags, and then click **Next**.
-
-Once your project is created, you'll need to manually configure the repository
-webhook if you would like to have new changesets to trigger builds for your
-project on Read the Docs. Go to your project's **Integrations** page to
-configure a new webhook, or see :ref:`our steps for webhook creation <webhooks:Webhook Creation>`
-for more information on this process.
-
-Within a few seconds your code will automatically be fetched from your public repository,
-and the documentation will be built.
-Check out our :doc:`builds` page to learn more about how we build your docs,
-and to troubleshoot any issues that arise.
-
-Read the Docs will host multiple versions of your code. You can read more about
-how to use this well on our :doc:`versions` page.
-
-If you have any more trouble, don't hesitate to reach out to us. The :doc:`support` page has more information on getting in touch.
-
-.. _a screencast: https://youtu.be/oJsUvBQyHBs
-.. _Python: https://www.python.org/
-.. _Sphinx: http://sphinx-doc.org/
-.. _Markdown: http://daringfireball.net/projects/markdown/syntax
-.. _Mkdocs: http://www.mkdocs.org/
-.. _install Sphinx: http://sphinx-doc.org/latest/install.html
-.. _install Mkdocs: http://www.mkdocs.org/#installation
-.. _reStructuredText: http://sphinx-doc.org/rest.html
-.. _this template: https://www.writethedocs.org/guide/writing/beginners-guide-to-docs/#id1
-.. _Sign up: https://readthedocs.org/accounts/signup
-.. _log in: https://readthedocs.org/accounts/login
-.. _dashboard: https://readthedocs.org/dashboard
-.. _Import: https://readthedocs.org/dashboard/import
diff --git a/docs/index.rst b/docs/index.rst
index 4258087a11a..536ac4c516d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,34 +1,53 @@
-Welcome to Read The Docs
-========================
+Read the Docs: Documentation Simplified
+=======================================
-`Read the Docs`_ hosts documentation for the open source community.
-We support Sphinx_ docs written with reStructuredText_ and `CommonMark`_.
-We pull your code from your Subversion_, Bazaar_, Git_, and Mercurial_ repositories.
-Then we build documentation and host it for you.
+`Read the Docs`_ simplifies software documentation
+by automating building, versioning, and hosting of your docs for you.
Think of it as *Continuous Documentation*.
-The code is open source, and `available on GitHub`_.
+Never out of sync
+ Whenever you push code to your favorite version control system,
+ whether that is Git, Mercurial, Bazaar, or Subversion,
+ Read the Docs will automatically build your docs
+ so your code and documentation are always up-to-date.
+
+Multiple versions
+ Read the Docs can host and build multiple versions of your docs
+ so having a 1.0 version of your docs and a 2.0 version
+ of your docs is as easy as having a separate branch or tag in your version control system.
+
+Free and open source
+ Read the Docs is free and open source and hosts documentation
+ for nearly 100,000 large and small open source projects
+ in almost every human and computer language.
.. _Read the docs: http://readthedocs.org/
-.. _Sphinx: http://sphinx.pocoo.org/
-.. _reStructuredText: http://sphinx.pocoo.org/rest.html
-.. _CommonMark: http://commonmark.org/
-.. _Subversion: http://subversion.tigris.org/
-.. _Bazaar: http://bazaar.canonical.com/
-.. _Git: http://git-scm.com/
-.. _Mercurial: https://www.mercurial-scm.org/
-.. _available on GitHub: http://github.com/rtfd/readthedocs.org
-The main documentation for the site is organized into a couple sections:
-* :ref:`user-docs`
-* :ref:`feature-docs`
-* :ref:`about-docs`
+First steps
+-----------
+
+Are you new to software documentation
+or are you looking to use your existing docs with Read the Docs?
+Learn about documentation authoring tools such as Sphinx and MkDocs
+to help you create fantastic documentation for your project.
+
+* **Getting started**:
+ :doc:`With Sphinx <intro/getting-started-with-sphinx>` |
+ :doc:`With MkDocs <intro/getting-started-with-mkdocs>`
+
+* **Importing your existing documentation**:
+ :doc:`Import guide <intro/import-guide>`
+
+.. toctree::
+ :maxdepth: 2
+ :hidden:
+ :caption: First Steps
-Information about development is also available:
+ intro/getting-started-with-sphinx
+ intro/getting-started-with-mkdocs
+ intro/import-guide
-* :ref:`dev-docs`
-* :ref:`design-docs`
.. _user-docs:
@@ -36,10 +55,10 @@ Information about development is also available:
:maxdepth: 2
:caption: User Documentation
- getting_started
versions
builds
features
+ connected-accounts
support
faq
yaml-config
@@ -47,25 +66,6 @@ Information about development is also available:
api/index
embed
-.. _about-docs:
-
-.. toctree::
- :maxdepth: 2
- :caption: About Read the Docs
-
- contribute
- roadmap
- team
- gsoc
- code-of-conduct
- privacy-policy
- advertising/index
- sponsors
- open-source-philosophy
- story
- abandoned-projects
- dmca/index
-
.. _feature-docs:
.. toctree::
@@ -87,11 +87,29 @@ Information about development is also available:
automatic-redirects
features/*
+.. _about-docs:
+
+.. toctree::
+ :maxdepth: 1
+ :caption: About Read the Docs
+
+ contribute
+ roadmap
+ team
+ gsoc
+ code-of-conduct
+ privacy-policy
+ advertising/index
+ sponsors
+ open-source-philosophy
+ story
+ abandoned-projects
+ dmca/index
.. _dev-docs:
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
:caption: Developer Documentation
changelog
@@ -107,6 +125,8 @@ Information about development is also available:
i18n
issue-labels
security
+ design
+ RTD Theme <https://sphinx-rtd-theme.readthedocs.io/en/latest/>
.. _business-docs:
@@ -123,13 +143,3 @@ Information about development is also available:
:caption: Custom Install Documentation
custom_installs/index
-
-.. _design-docs:
-
-.. toctree::
- :maxdepth: 2
- :caption: Designer Documentation
-
- design
- Theme <https://sphinx-rtd-theme.readthedocs.io/en/latest/>
-
diff --git a/docs/install.rst b/docs/install.rst
index 68c335882db..5aa2cbd1125 100644
--- a/docs/install.rst
+++ b/docs/install.rst
@@ -151,4 +151,4 @@ Importing your docs
One of the goals of readthedocs.org is to make it easy for any open source developer to get high quality hosted docs with great visibility!
Simply provide us with the clone URL to your repo, we'll pull your code, extract your docs, and build them!
We make available a post-commit webhook that can be configured to update the docs whenever you commit to your repo.
-See our :doc:`getting_started` page to learn more.
+See :doc:`/intro/import-guide` to learn more.
diff --git a/docs/intro/getting-started-with-mkdocs.rst b/docs/intro/getting-started-with-mkdocs.rst
new file mode 100644
index 00000000000..04b09f945f7
--- /dev/null
+++ b/docs/intro/getting-started-with-mkdocs.rst
@@ -0,0 +1,73 @@
+Getting Started with MkDocs
+===========================
+
+MkDocs is a documentation generator that focuses on speed and simplicity.
+It has many great features including:
+
+* Preview your documentation as you write it
+* Easy customization with themes and extensions
+* Writing documentation with Markdown
+
+.. note::
+
+ MkDocs is a great choice for building technical documentation.
+ However, Read the Docs also supports :doc:`Sphinx </intro/getting-started-with-sphinx>`,
+ another tool for writing and building documentation.
+
+
+Quick start
+-----------
+
+Assuming you have Python already, `install MkDocs`_:
+
+.. sourcecode:: bash
+
+ $ pip install mkdocs
+
+Setup your MkDocs project:
+
+.. sourcecode:: bash
+
+ $ mkdocs new .
+
+This command creates ``mkdocs.yml`` which holds your MkDocs configuration,
+and ``docs/index.md`` which is the Markdown file
+that is the entry point for your documentation.
+
+You can edit this ``index.md`` file to add more details about your project
+and then you can build your documentation:
+
+.. sourcecode:: bash
+
+ $ mkdocs serve
+
+This command builds your Markdown files into HTML
+and starts a development server to browse your documentation.
+Open up http://127.0.0.1:8000/ in your web browser to see your documentation.
+You can make changes to your Markdown files and your docs will automatically rebuild.
+
+.. figure:: ../_static/images/first-steps/mkdocs-hello-world.png
+ :align: right
+ :figwidth: 300px
+ :target: ../_static/images/first-steps/mkdocs-hello-world.png
+
+ Your MkDocs project is built
+
+Once you have your documentation in a public repository such as GitHub, Bitbucket, or GitLab,
+you can start using Read the Docs by :doc:`importing your docs </intro/import-guide>`.
+
+.. _install MkDocs: https://www.mkdocs.org/#installation
+
+
+External resources
+------------------
+
+Here are some external resources to help you learn more about MkDocs.
+
+* `MkDocs documentation`_
+* `Markdown syntax guide`_
+* `Writing your docs with MkDocs`_
+
+.. _MkDocs documentation: https://www.mkdocs.org/
+.. _Markdown syntax guide: http://daringfireball.net/projects/markdown/syntax
+.. _Writing your docs with MkDocs: https://www.mkdocs.org/user-guide/writing-your-docs/
diff --git a/docs/intro/getting-started-with-sphinx.rst b/docs/intro/getting-started-with-sphinx.rst
new file mode 100644
index 00000000000..782cdd39b8a
--- /dev/null
+++ b/docs/intro/getting-started-with-sphinx.rst
@@ -0,0 +1,127 @@
+Getting Started with Sphinx
+===========================
+
+Sphinx is a powerful documentation generator that
+has many great features for writing technical documentation including:
+
+* Generate web pages, printable PDFs, documents for e-readers (ePub),
+ and more all from the same sources
+* You can use reStructuredText or :ref:`Markdown <intro/getting-started-with-sphinx:Using Markdown with Sphinx>`
+ to write documentation
+* An extensive system of cross-referencing code and documentation
+* Syntax highlighted code samples
+* A vibrant ecosystem of first and third-party extensions_
+
+.. _extensions: http://www.sphinx-doc.org/en/master/ext/builtins.html#builtin-sphinx-extensions
+
+
+Quick start video
+-----------------
+
+This screencast will help you get started or you can
+:ref:`read our guide below <intro/getting-started-with-sphinx:Quick start>`.
+
+.. raw:: html
+
+ <div style="text-align: center; margin-bottom: 2em;">
+ <iframe width="100%" height="350" src="https://www.youtube.com/embed/oJsUvBQyHBs?rel=0" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
+ </div>
+
+
+Quick start
+-----------
+
+Assuming you have Python already, `install Sphinx`_:
+
+.. sourcecode:: bash
+
+ $ pip install sphinx
+
+Create a directory inside your project to hold your docs:
+
+.. sourcecode:: bash
+
+ $ cd /path/to/project
+ $ mkdir docs
+
+Run ``sphinx-quickstart`` in there:
+
+.. sourcecode:: bash
+
+ $ cd docs
+ $ sphinx-quickstart
+
+This quick start will walk you through creating the basic configuration; in most cases, you
+can just accept the defaults. When it's done, you'll have an ``index.rst``, a
+``conf.py`` and some other files. Add these to revision control.
+
+Now, edit your ``index.rst`` and add some information about your project.
+Include as much detail as you like (refer to the reStructuredText_ syntax
+or `this template`_ if you need help). Build them to see how they look:
+
+.. sourcecode:: bash
+
+ $ make html
+
+Your ``index.rst`` has been built into ``index.html``
+in your documentation output directory (typically ``_build/html/index.html``).
+Open this file in your web browser to see your docs.
+
+.. figure:: ../_static/images/first-steps/sphinx-hello-world.png
+ :align: right
+ :figwidth: 300px
+ :target: ../_static/images/first-steps/sphinx-hello-world.png
+
+ Your Sphinx project is built
+
+Edit your files and rebuild until you like what you see, then commit your changes and push to your public repository.
+Once you have Sphinx documentation in a public repository, you can start using Read the Docs
+by :doc:`importing your docs </intro/import-guide>`.
+
+.. _install Sphinx: http://sphinx-doc.org/latest/install.html
+.. _reStructuredText: http://sphinx-doc.org/rest.html
+.. _this template: https://www.writethedocs.org/guide/writing/beginners-guide-to-docs/#id1
+
+Using Markdown with Sphinx
+--------------------------
+
+You can use Markdown and reStructuredText in the same Sphinx project.
+We support this natively on Read the Docs, and you can do it locally:
+
+.. sourcecode:: bash
+
+ $ pip install recommonmark
+
+Then in your ``conf.py``:
+
+.. code-block:: python
+
+ from recommonmark.parser import CommonMarkParser
+
+ source_parsers = {
+ '.md': CommonMarkParser,
+ }
+
+ source_suffix = ['.rst', '.md']
+
+.. warning:: Markdown doesn't support a lot of the features of Sphinx,
+ like inline markup and directives. However, it works for
+ basic prose content. reStructuredText is the preferred
+ format for technical documentation, please read `this blog post`_
+ for motivation.
+
+.. _this blog post: http://ericholscher.com/blog/2016/mar/15/dont-use-markdown-for-technical-docs/
+
+
+External resources
+------------------
+
+Here are some external resources to help you learn more about Sphinx.
+
+* `Sphinx documentation`_
+* `RestructuredText primer`_
+* `An introduction to Sphinx and Read the Docs for technical writers`_
+
+.. _Sphinx documentation: http://www.sphinx-doc.org/
+.. _RestructuredText primer: http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html
+.. _An introduction to Sphinx and Read the Docs for technical writers: http://ericholscher.com/blog/2016/jul/1/sphinx-and-rtd-for-writers/
diff --git a/docs/intro/import-guide.rst b/docs/intro/import-guide.rst
new file mode 100644
index 00000000000..b44cdfe4d67
--- /dev/null
+++ b/docs/intro/import-guide.rst
@@ -0,0 +1,68 @@
+Importing Your Documentation
+============================
+
+To import a documentation repository, visit your `Read the Docs dashboard`_ and click Import_.
+
+If you have :doc:`connected your Read the Docs account <../connected-accounts>` to GitHub, Bitbucket, or GitLab,
+you will see a list of your repositories that we are able to import.
+To import one of these projects, just click the import
+icon next to the repository you'd like to import. This will bring up a form that
+is already filled with your project's information. Feel free to edit any of
+these properties, and the click **Next** to
+:ref:`build your documentation <intro/import-guide:Building your documentation>`.
+
+.. _Read the Docs dashboard: https://readthedocs.org/dashboard
+.. _Import: https://readthedocs.org/dashboard/import
+
+
+.. figure:: ../_static/images/first-steps/import-a-repository.png
+ :align: right
+ :figwidth: 300px
+ :target: ../_static/images/first-steps/import-a-repository.png
+
+ Importing a repository
+
+
+Manually import your docs
+-------------------------
+
+If you do not have a connected account, you will need select **Import Manually**
+and enter the information for your repository yourself. You will also need to
+manually configure the webhook for your repository as well. When importing your
+project, you will be asked for the repository URL, along with some other
+information for you new project. The URL is normally the URL or path name you'd
+use to checkout, clone, or branch your repository. Some examples:
+
+* Git: ``https://github.com/ericholscher/django-kong.git``
+* Mercurial: ``https://bitbucket.org/ianb/pip``
+* Subversion: ``http://varnish-cache.org/svn/trunk``
+* Bazaar: ``lp:pasta``
+
+Add an optional homepage URL and some tags, and then click **Next**.
+
+Once your project is created, you'll need to manually configure the repository
+webhook if you would like to have new changes trigger builds for your
+project on Read the Docs. Go to your project's **Integrations** page to
+configure a new webhook, or see :ref:`our steps for webhook creation <webhooks:Webhook Creation>`
+for more information on this process.
+
+
+Building your documentation
+---------------------------
+
+Within a few seconds of completing the import process,
+your code will automatically be fetched from your public repository,
+and the documentation will be built.
+Check out our :doc:`/builds` page to learn more about how Read the Docs builds your docs,
+and to troubleshoot any issues that arise.
+
+Some documentation projects require additional configuration to build
+such as specifying a certain version of Python or installing additional dependencies.
+You can configure these settings in a ``readthedocs.yml`` file.
+See our :doc:`/yaml-config` docs for more details.
+
+Read the Docs will host multiple versions of your code. You can read more about
+how to use this well on our :doc:`/versions` page.
+
+If you have any more trouble, don't hesitate to reach out to us.
+The :doc:`/support` page has more information on getting in touch.
diff --git a/docs/webhooks.rst b/docs/webhooks.rst
index 47b2e0e4aee..ecf18611b50 100644
--- a/docs/webhooks.rst
+++ b/docs/webhooks.rst
@@ -23,7 +23,7 @@ repository provider such as GitHub, GitLab, or Bitbucket.
Webhook Creation
----------------
-If you import a project using a :ref:`connected account <getting_started:Sign Up and Connect an External Account>`,
+If you have :doc:`connected your Read the Docs account </connected-accounts>` to GitHub, Bitbucket, or GitLab,
a webhook will be set up automatically for your repository. However, if your
project was not imported through a connected account, you may need to
manually configure a webhook for your project.
|
huggingface__accelerate-127 | Error when loading optimizer state
Thanks for this awesome product!!
When I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..!
https://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86
Error when loading optimizer state
Thanks for this awesome product!!
When I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..!
https://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86
| [
{
"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#... | [
{
"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#... | diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
index 30fd8314eb0..57ea686f91c 100644
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -52,6 +52,7 @@ def __init__(self, optimizer, device_placement=True, scaler=None):
self.optimizer = optimizer
self.scaler = scaler
self.state = AcceleratorState()
+ self.device_placement = device_placement
# Handle device placement
if device_placement:
|
NVIDIA__NVFlare-191 | The "show_stats" command got broken
The "show_stats server" and "show_stats client" command got the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162):
> show_stats server
Error: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict'
Done [7269 usecs] 2022-02-08 17:26:12.865006
>
| [
{
"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICEN... | [
{
"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICEN... | diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py
index cb5dacd40e..5d8b9a1c18 100644
--- a/nvflare/fuel/hci/client/api_spec.py
+++ b/nvflare/fuel/hci/client/api_spec.py
@@ -38,6 +38,9 @@ def process_error(self, api: AdminAPISpec, err: str):
def process_table(self, api: AdminAPISpec, table: Table):
pass
+ def process_dict(self, api: AdminAPISpec, data: dict):
+ pass
+
def process_shutdown(self, api: AdminAPISpec, msg: str):
pass
|
holoviz__holoviews-3427 | Bokeh streams callback fails with convert_timestamp in Python 2.7
Discovered this while writing a BoundsX to review dates of a selected data points in a time series. It fails out and kills the stream.
https://github.com/ioam/holoviews/blob/9a6a630b727c8827a8bd6fbe77bf31e1f35a7e5a/holoviews/plotting/bokeh/util.py#L89
The output from browser console:
```
Python failed with the following traceback:
~/conda/lib/python2.7/site-packages/pyviz_comms/__init__.py _handle_msg L296
~/conda/lib/python2.7/site-packages/holoviews/plotting/bokeh/callbacks.py on_msg L121
~/conda/lib/python2.7/site-packages/holoviews/plotting/bokeh/callbacks.py _process_msg L821
~/conda/lib/python2.7/site-packages/holoviews/plotting/bokeh/util.py convert_timestamp L89
AttributeError: 'module' object has no attribute 'timezone'
```
| [
{
"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport re\nimport time\nimport sys\nimport calendar\nimport datetime as dt\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport param\nimport bokeh\nimport numpy as np\n\nfrom bokeh.core.json_e... | [
{
"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport re\nimport time\nimport sys\nimport calendar\nimport datetime as dt\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport param\nimport bokeh\nimport numpy as np\n\nfrom bokeh.core.json_e... | diff --git a/holoviews/plotting/bokeh/util.py b/holoviews/plotting/bokeh/util.py
index 4edb0e259c..36cd7f1795 100644
--- a/holoviews/plotting/bokeh/util.py
+++ b/holoviews/plotting/bokeh/util.py
@@ -86,7 +86,7 @@ def convert_timestamp(timestamp):
"""
Converts bokehJS timestamp to datetime64.
"""
- datetime = dt.datetime.fromtimestamp(timestamp/1000., dt.timezone.utc)
+ datetime = dt.datetime.utcfromtimestamp(timestamp/1000.)
return np.datetime64(datetime.replace(tzinfo=None))
|
carpentries__amy-690 | No reverse match for rest_framework namespace
The error for a very strange reason shows when accessing these URLs:
https://github.com/swcarpentry/amy/blob/develop/api/urls.py#L57
I wasn't able to get rid of it; it's not being used at all, so maybe it should be removed…?
| [
{
"content": "from django.conf.urls import url, include\nfrom rest_framework_nested import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\n# new in Django 1.9: this defines a namespace for URLs; there's no need for\n# `namespace='api'` in the include()\napp_name ... | [
{
"content": "from django.conf.urls import url, include\nfrom rest_framework_nested import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\n# new in Django 1.9: this defines a namespace for URLs; there's no need for\n# `namespace='api'` in the include()\napp_name ... | diff --git a/api/urls.py b/api/urls.py
index 4eecbc2b7..ad8aa1df3 100644
--- a/api/urls.py
+++ b/api/urls.py
@@ -52,10 +52,4 @@
url('^', include(todos_router.urls)),
]
-# for login-logout functionality
-urlpatterns += [
- url(r'^api-auth/',
- include('rest_framework.urls', namespace='rest_framework')),
-]
-
urlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format
|
hpcaitech__ColossalAI-5442 | [tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
| [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nContinual Pre-training/Supervised fine-tuning of Colossal-LLaMA-2 developed by Colossal-AI Team\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport resource\nfrom contextlib import nullcontext\n\nimport torch\nimport torch.distributed... | [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nContinual Pre-training/Supervised fine-tuning of Colossal-LLaMA-2 developed by Colossal-AI Team\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport resource\nfrom contextlib import nullcontext\n\nimport torch\nimport torch.distributed... | diff --git a/applications/Colossal-LLaMA-2/train.py b/applications/Colossal-LLaMA-2/train.py
index 2e4bab75a085..d97da61e4dc8 100644
--- a/applications/Colossal-LLaMA-2/train.py
+++ b/applications/Colossal-LLaMA-2/train.py
@@ -56,6 +56,7 @@ def format_numel_str(numel: int) -> str:
def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
dist.all_reduce(tensor=tensor, op=dist.ReduceOp.SUM)
+ tensor = tensor.data
tensor.div_(dist.get_world_size())
return tensor
|
Mailu__Mailu-2513 | Include start and end dates in the auto-reply period
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `1.9`
## Description
The administration interface provides an [auto-reply](https://mailu.io/master/webadministration.html#auto-reply) page where automatic replies can be configured with start and end dates. Unfortunately both the start date and the end date are not included in the auto-reply period (i.e. no auto replies are being sent on these two days). To work around this issue you have to insert the day before your vacation as start date and the day after your vacation as end date. This is not intuitive.
## Replication Steps
Activate the auto-reply feature, insert subject and body text and the current date as "start of vacation" ("end of vacation" has to be a date in the future). Then send an email from another email account (external) to your email address hosted on Mailu.
## Observed behaviour
No auto reply message received by the sender.
## Expected behaviour
Auto reply message received by the sender.
To verify this behaviour you can add yesterday's date as "start of vacation" date and send another email to your Mailu account ... the sender will receive an auto reply message ...
The same applies to the "end of vacation" date.
## Logs
n/a
| [
{
"content": "\"\"\" Mailu config storage model\n\"\"\"\n\nimport os\nimport smtplib\nimport json\n\nfrom datetime import date\nfrom email.mime import text\nfrom itertools import chain\n\nimport flask_sqlalchemy\nimport sqlalchemy\nimport passlib.context\nimport passlib.hash\nimport passlib.registry\nimport tim... | [
{
"content": "\"\"\" Mailu config storage model\n\"\"\"\n\nimport os\nimport smtplib\nimport json\n\nfrom datetime import date\nfrom email.mime import text\nfrom itertools import chain\n\nimport flask_sqlalchemy\nimport sqlalchemy\nimport passlib.context\nimport passlib.hash\nimport passlib.registry\nimport tim... | diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
index f30ef3870..48ce8b33f 100644
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -546,8 +546,8 @@ def reply_active(self):
now = date.today()
return (
self.reply_enabled and
- self.reply_startdate < now and
- self.reply_enddate > now
+ self.reply_startdate <= now and
+ self.reply_enddate >= now
)
@property
diff --git a/towncrier/newsfragments/2512.bugfix b/towncrier/newsfragments/2512.bugfix
new file mode 100644
index 000000000..b1b6aa99b
--- /dev/null
+++ b/towncrier/newsfragments/2512.bugfix
@@ -0,0 +1 @@
+Fix: include start and end dates in the auto-reply period
\ No newline at end of file
|
adamchainz__django-mysql-398 | MySQLCache.set_many should return a list of failing keys
This new behaviour in Django 2.0 ( django/django#7520 ) should be implemented.
| [
{
"content": "# -*- coding:utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nimport re\nimport zlib\nfrom random import random\nfrom textwrap import dedent\nfrom time import time\n\nfrom django.core.cache.backends.base import (\n DEFAULT_TIMEOUT, BaseC... | [
{
"content": "# -*- coding:utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nimport re\nimport zlib\nfrom random import random\nfrom textwrap import dedent\nfrom time import time\n\nfrom django.core.cache.backends.base import (\n DEFAULT_TIMEOUT, BaseC... | diff --git a/HISTORY.rst b/HISTORY.rst
index 671dd6cf..44452077 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -12,6 +12,8 @@ Pending
------------------
* Changed subprocess imports for compatibility with Google App Engine.
+* (Insert new release notes below this line)
+* Made ``MySQLCache.set_many`` return a list as per Django 2.0.
2.1.0 (2017-06-11)
------------------
diff --git a/django_mysql/cache.py b/django_mysql/cache.py
index 77096f4a..eadae2d7 100644
--- a/django_mysql/cache.py
+++ b/django_mysql/cache.py
@@ -289,6 +289,7 @@ def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
with connections[db].cursor() as cursor:
cursor.execute(query, params)
+ return []
def delete(self, key, version=None):
key = self.make_key(key, version=version)
diff --git a/tests/testapp/test_cache.py b/tests/testapp/test_cache.py
index fb6adb20..be9af6b6 100644
--- a/tests/testapp/test_cache.py
+++ b/tests/testapp/test_cache.py
@@ -846,11 +846,16 @@ def test_set_many(self):
caches['no_cull'].get('nonexistent')
with self.assertNumQueries(1):
- caches['no_cull'].set_many({"key1": "spam"})
+ result = caches['no_cull'].set_many({"key1": "spam"})
+ assert result == []
# Multiple keys can be set using set_many
with self.assertNumQueries(1):
- caches['no_cull'].set_many({"key1": "spam", "key2": "eggs"})
+ result = caches['no_cull'].set_many({
+ 'key1': 'spam',
+ 'key2': 'eggs',
+ })
+ assert result == []
assert cache.get("key1") == "spam"
assert cache.get("key2") == "eggs"
|
python__python-docs-es-1201 | readthedocs: 'extensions' is not defined
Por alguna razón, hemos encontrado https://github.com/UPC/ravada/issues/890 en la CI de readthedocs, y actualmente los builds tienen el siguiente error:
```
% python -m sphinx -T -j auto -E -b html -d _build/doctrees -D language=es . _build/html
Running Sphinx v2.2.0
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
execfile_(filename, namespace)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
exec(code, _globals)
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
from conf import *
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
if extensions:
NameError: name 'extensions' is not defined
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/cmd/build.py", line 272, in build_main
app = Sphinx(args.sourcedir, args.confdir, args.outputdir,
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/application.py", line 210, in __init__
self.config = Config.read(self.confdir, confoverrides or {}, self.tags)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 196, in read
namespace = eval_config_file(filename, tags)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 371, in eval_config_file
raise ConfigError(msg % traceback.format_exc())
sphinx.errors.ConfigError: There is a programmable error in your configuration file:
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
execfile_(filename, namespace)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
exec(code, _globals)
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
from conf import *
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
if extensions:
NameError: name 'extensions' is not defined
Configuration error:
There is a programmable error in your configuration file:
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
execfile_(filename, namespace)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
exec(code, _globals)
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
from conf import *
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
if extensions:
NameError: name 'extensions' is not defined
```
Localmente `extensions` está definido, pero por alguna razón no en el CI de readthedocs.
| [
{
"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git... | [
{
"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git... | diff --git a/conf.py b/conf.py
index 8ecc6c9d56..8956b12c0a 100644
--- a/conf.py
+++ b/conf.py
@@ -69,10 +69,16 @@
_stdauthor, 'manual'),
]
-extensions.extend([
- 'sphinx_tabs.tabs',
- 'sphinxemoji.sphinxemoji',
-])
+try:
+ extensions.extend([
+ 'sphinx_tabs.tabs',
+ 'sphinxemoji.sphinxemoji',
+ ])
+except NameError:
+ extensions = [
+ 'sphinx_tabs.tabs',
+ 'sphinxemoji.sphinxemoji',
+ ]
def setup(app):
|
chainer__chainer-1319 | Is it possible to import caffe model on Python 3?
As stated in the documentation, `chainer.functions.caffe.CaffeFunction` only supports Python 2.7. However in the "Install Chainer" section, it says
```
Caffe model support
Protocol Buffers
protobuf>=3.0.0 is required for Py3
```
Thus I am curious whether caffe model import is supported in Python 3.
Thank you very much for your help.
| [
{
"content": "import collections\nimport pkg_resources\nimport sys\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import functions\nfrom chainer import link\nfrom chainer import links\n\n\ndef _protobuf3():\n ws = pkg_resources.WorkingSet()\n try:\n ws.require('protobuf>=3.0.0a')\n ... | [
{
"content": "import collections\nimport pkg_resources\nimport sys\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import functions\nfrom chainer import link\nfrom chainer import links\n\n\ndef _protobuf3():\n ws = pkg_resources.WorkingSet()\n try:\n ws.require('protobuf>=3.0.0a')\n ... | diff --git a/chainer/links/caffe/caffe_function.py b/chainer/links/caffe/caffe_function.py
index 69628f28f61d..7a1fdef31226 100644
--- a/chainer/links/caffe/caffe_function.py
+++ b/chainer/links/caffe/caffe_function.py
@@ -60,9 +60,8 @@ class CaffeFunction(link.Chain):
.. note::
- This class only supports Python 2.7, since the compiled module for
- protocol buffers only supports Python 2. The ``__init__`` function
- raises an exception in Python 3.
+ protobuf>=3.0.0 is required if you use Python 3 because protobuf 2 is
+ not supported on Python 3.
.. note::
|
modin-project__modin-1782 | ClusterError class should implement its own version of __str__ method
<!--
General questions should be asked on the mailing list modin-dev@googlegroups.com.
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **Modin installed from (source or binary)**:
- **Modin version**:
- **Python version**:
- **Exact command to reproduce**:
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
`ClusterError` includes the `cause` field that is not printed. This makes it difficult to understand the problems that cause exceptions.
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
| [
{
"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the ... | [
{
"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the ... | diff --git a/modin/experimental/cloud/base.py b/modin/experimental/cloud/base.py
index e73d4b4bf9b..f78ddf30c7d 100644
--- a/modin/experimental/cloud/base.py
+++ b/modin/experimental/cloud/base.py
@@ -26,6 +26,11 @@ def __init__(self, *args, cause: BaseException = None, traceback: str = None, **
self.traceback = traceback
super().__init__(*args, **kw)
+ def __str__(self):
+ if self.clause:
+ return f"clause: {self.cause}\n{super()}"
+ return str(super())
+
class CannotSpawnCluster(ClusterError):
"""
|
rasterio__rasterio-1390 | 1.0 RC 1
Hey all, if there aren't any reports of show-stopping bugs in 1.0b4, I'd like to put out a release candidate on Wednesday 6/27.
| [
{
"content": "\"\"\"Rasterio\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nimport logging\nimport warnings\n\ntry:\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n class Path:\n pass\n\ntry:\n fr... | [
{
"content": "\"\"\"Rasterio\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nimport logging\nimport warnings\n\ntry:\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n class Path:\n pass\n\ntry:\n fr... | diff --git a/CHANGES.txt b/CHANGES.txt
index 4d7f55abf..8f5e2755e 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,13 +1,16 @@
Changes
=======
-Next
-------------------
+1.0rc1 (2018-06-27)
+-------------------
Bug fixes:
- Internal Env() in `rasterio.open` has been replaced with an environment
- ensuring decorator (#1009).
+ ensuring decorator (#1009). The same decorator ensures that credentials are
+ obtained when functions from `rasterio.shutils` are called.
+- Input file arguments for all CLI commands are now parsed and validated in
+ a uniform manner (#999).
- Local loggers have all been changed to `getLogger(__name__)` in rasterio.rio
module (#1328).
diff --git a/rasterio/__init__.py b/rasterio/__init__.py
index a09236d42..5129bb605 100644
--- a/rasterio/__init__.py
+++ b/rasterio/__init__.py
@@ -42,8 +42,8 @@ def emit(self, record):
import rasterio.path
-__all__ = ['band', 'open', 'pad']
-__version__ = "1.0b4"
+__all__ = ['band', 'open', 'pad', 'Env']
+__version__ = "1.0rc1"
__gdal_version__ = gdal_version()
# Rasterio attaches NullHandler to the 'rasterio' logger and its
diff --git a/tests/test_env.py b/tests/test_env.py
index e4fed81c7..5159d39ed 100644
--- a/tests/test_env.py
+++ b/tests/test_env.py
@@ -9,7 +9,7 @@
import rasterio
from rasterio._env import del_gdal_config, get_gdal_config, set_gdal_config
-from rasterio.env import Env, defenv, delenv, getenv, setenv, ensure_env
+from rasterio.env import Env, defenv, delenv, getenv, setenv, ensure_env, ensure_env_credentialled
from rasterio.env import GDALVersion, require_gdal_version
from rasterio.errors import EnvError, RasterioIOError, GDALVersionError
from rasterio.rio.main import main_group
@@ -113,6 +113,24 @@ def f():
assert f() is True
+def test_ensure_env_credentialled_decorator(monkeypatch, gdalenv):
+ """Credentialization is ensured by wrapper"""
+ monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'id')
+ monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'key')
+ monkeypatch.setenv('AWS_SESSION_TOKEN', 'token')
+
+ @ensure_env_credentialled
+ def f(path):
+ return getenv()
+
+ config = f('s3://foo/bar')
+ assert config['AWS_ACCESS_KEY_ID'] == 'id'
+ assert config['AWS_SECRET_ACCESS_KEY'] == 'key'
+ assert config['AWS_SESSION_TOKEN'] == 'token'
+
+ monkeypatch.undo()
+
+
def test_no_aws_gdal_config(gdalenv):
"""Trying to set AWS-specific GDAL config options fails."""
with pytest.raises(EnvError):
|
cloud-custodian__cloud-custodian-3075 | GCP: Firewalls resource policy fails with no resource attribute 'Firewall'
When running this policy custodian fails:
- policies:
- name: firewall-test
resource: gcp.firewall
The error returned is:
AttributeError: 'Resource' object has no attribute 'firewall'
| [
{
"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required b... | [
{
"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required b... | diff --git a/tools/c7n_gcp/c7n_gcp/resources/network.py b/tools/c7n_gcp/c7n_gcp/resources/network.py
index b023e0d1fe2..aa22a7502f7 100644
--- a/tools/c7n_gcp/c7n_gcp/resources/network.py
+++ b/tools/c7n_gcp/c7n_gcp/resources/network.py
@@ -43,8 +43,7 @@ class Firewall(QueryResourceManager):
class resource_type(TypeInfo):
service = 'compute'
version = 'v1'
- component = 'firewall'
- scope_template = "projects/{}/global/firewalls"
+ component = 'firewalls'
@resources.register('router')
|
huggingface__text-generation-inference-794 | small typo in galactica model loading
https://github.com/huggingface/text-generation-inference/blob/1fdc88ee908beb8ae0afe17810a17b9b4d8848e2/server/text_generation_server/models/__init__.py#L92
should be trust_remote_code
| [
{
"content": "import os\nimport torch\n\nfrom loguru import logger\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.models.auto import modeling_auto\nfrom typing import Optional\n\nfrom text_generation_server.models.model import Model\nfrom text_generation_server.models.causal_l... | [
{
"content": "import os\nimport torch\n\nfrom loguru import logger\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.models.auto import modeling_auto\nfrom typing import Optional\n\nfrom text_generation_server.models.model import Model\nfrom text_generation_server.models.causal_l... | diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py
index 71efcab745b..621652e8b62 100644
--- a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -89,7 +89,7 @@ def get_model(
revision,
quantize=quantize,
dtype=dtype,
- dtypetrust_remote_code=trust_remote_code,
+ trust_remote_code=trust_remote_code,
)
if model_id.startswith("bigcode/"):
|
dbt-labs__dbt-core-1743 | Support for Snowflake Secure Views
### Adding support for Secure View in Snowflake
When using the Materialize feature where setting the type of materialization, adding secure-view to the {{ config(materialized='secure-view') }} would be beneficial.
### Current Work-around
Currently the solution for Snowflake secure views is running post-hook events to set the targeted views as secure, example: `alter view sv_mySecureTest set secure;`
This works, and each view that needs to be secured will need to be added to the post-hook event.
### Affects only Snowflake
This feature is specific to the Snowflake Cloud Data warehouse.
[https://docs.snowflake.net/manuals/user-guide/views-secure.html](url)
### This will help DBT Snowflake Developer / Non Developers
When creating a secure view in Snowflake, a developer can use 2 syntax commands
1. CREATE OR REPLACE SECURE VIEW...
2. Alter view <view_name> Set Secure
The first method will allow non-dbt user to render the DDL with the secure declaration as part of the DDL, the second statement is added to the end of the generated DDL however it may be ignored by developers unfamiliar with Snowflake Syntax, causing possible security issues, allowing unauthorized access to the View DDL by Read-Only roles in Snowflake.
| [
{
"content": "from dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n ConnectionManager =... | [
{
"content": "from dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n ConnectionManager =... | diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py
index 31600a8a236..4e3d3d7793e 100644
--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py
@@ -9,7 +9,7 @@ class SnowflakeAdapter(SQLAdapter):
ConnectionManager = SnowflakeConnectionManager
AdapterSpecificConfigs = frozenset(
- {"transient", "cluster_by", "automatic_clustering"}
+ {"transient", "cluster_by", "automatic_clustering", "secure"}
)
@classmethod
diff --git a/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql b/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql
index cb3b67efde2..ab3af5c4ceb 100644
--- a/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql
+++ b/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql
@@ -35,7 +35,10 @@
{% endmacro %}
{% macro snowflake__create_view_as(relation, sql) -%}
- create or replace view {{ relation }} as (
+ {%- set secure = config.get('secure', default=false) -%}
+ create or replace {% if secure -%}
+ secure
+ {%- endif %} view {{ relation }} as (
{{ sql }}
);
{% endmacro %}
|
django-extensions__django-extensions-1654 | RemovedInDjango41Warning: 'django_extensions' defines default_app_config
The following warning is emitted when using django-extenstions along with django 3.2
```
django.utils.deprecation.RemovedInDjango41Warning: 'django_extensions' defines default_app_config = 'django_extensions.apps.DjangoExtensionsConfig'. Django now detects this configuration automatically. You can remove default_app_config.
```
I suppose it is related to the change introduced by django 3.2: https://docs.djangoproject.com/en/3.2/releases/3.2/#what-s-new-in-django-3-2
Environment:
* python 3.8
* django 3.2
| [
{
"content": "# -*- coding: utf-8 -*-\nVERSION = (3, 1, 3, 'dev')\n\n\ndef get_version(version):\n \"\"\"Dynamically calculate the version based on VERSION tuple.\"\"\"\n if len(version) > 2 and version[2] is not None:\n if len(version) == 4:\n str_version = \"%s.%s.%s.%s\" % version\n ... | [
{
"content": "# -*- coding: utf-8 -*-\nVERSION = (3, 1, 3, 'dev')\n\n\ndef get_version(version):\n \"\"\"Dynamically calculate the version based on VERSION tuple.\"\"\"\n if len(version) > 2 and version[2] is not None:\n if len(version) == 4:\n str_version = \"%s.%s.%s.%s\" % version\n ... | diff --git a/django_extensions/__init__.py b/django_extensions/__init__.py
index 488c8e0d6..8e1d2201a 100644
--- a/django_extensions/__init__.py
+++ b/django_extensions/__init__.py
@@ -19,4 +19,11 @@ def get_version(version):
__version__ = get_version(VERSION)
-default_app_config = 'django_extensions.apps.DjangoExtensionsConfig'
+try:
+ import django
+
+ if django.VERSION < (3, 2):
+ default_app_config = 'django_extensions.apps.DjangoExtensionsConfig'
+except ModuleNotFoundError:
+ # this part is useful for allow setup.py to be used for version checks
+ pass
diff --git a/tests/management/commands/test_notes.py b/tests/management/commands/test_notes.py
index cfe7536e6..8536743c9 100644
--- a/tests/management/commands/test_notes.py
+++ b/tests/management/commands/test_notes.py
@@ -8,7 +8,7 @@ def test_without_args(capsys, settings):
call_command('notes')
out, err = capsys.readouterr()
- assert 'tests/testapp/__init__.py:\n * [ 4] TODO this is a test todo\n\n' in out
+ assert 'tests/testapp/__init__.py:\n * [ 8] TODO this is a test todo\n\n' in out
def test_with_utf8(capsys, settings):
diff --git a/tests/test_compatibility.py b/tests/test_compatibility.py
new file mode 100644
index 000000000..703fb8ee1
--- /dev/null
+++ b/tests/test_compatibility.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+"""Test Compatility between different django versions"""
+import django
+import pytest
+
+import django_extensions
+
+
+class TestDefaultAppConfigDefinition:
+ @pytest.mark.skipif(django.VERSION < (3, 2), reason='app config is automatically defined by django')
+ def test_app_config_not_defined(self):
+ assert hasattr(django_extensions, 'default_app_config') is False
+
+ @pytest.mark.skipif(django.VERSION >= (3, 2), reason='app config is not automatically defined by django')
+ def test_app_config_defined(self):
+ assert hasattr(django_extensions, 'default_app_config') is True
diff --git a/tests/testapp/__init__.py b/tests/testapp/__init__.py
index 0599a0784..b889f41b7 100644
--- a/tests/testapp/__init__.py
+++ b/tests/testapp/__init__.py
@@ -1,4 +1,8 @@
# -*- coding: utf-8 -*-
-default_app_config = 'tests.testapp.apps.TestAppConfig'
+import django
+
+
+if django.VERSION < (3, 2):
+ default_app_config = 'tests.testapp.apps.TestAppConfig'
# TODO: this is a test todo
|
fidals__shopelectro-200 | SE yml fix delivery 3k -> 5k
[origin trello task](https://trello.com/c/LyLVDakS/298-se-%D0%BF%D1%80%D0%B0%D0%B2%D1%8C-yml)
Внезапно обноружили, что через фид передается( в теге <sales_notes>) неверная инфа о доставке.
Исправь на 5к.
| [
{
"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/... | [
{
"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/... | diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py
index 0ed86524..3f3942b0 100644
--- a/shopelectro/settings/base.py
+++ b/shopelectro/settings/base.py
@@ -268,7 +268,7 @@
'cps_phone': '+78124163200',
'cps_email': 'info@shopelectro.ru',
'local_delivery_cost': 300,
- 'local_delivery_cost_threshold': 3000,
+ 'local_delivery_cost_threshold': 5000,
}
# used in data-migrations and tests
|
MycroftAI__mycroft-core-275 | Base media skill is loaded as a normal skill
At the moment, the load_skills function tries to load the media skill as if it is a normal skill.
| [
{
"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at ... | [
{
"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at ... | diff --git a/mycroft/skills/core.py b/mycroft/skills/core.py
index a61dce2f4d10..af32d10d6c4a 100644
--- a/mycroft/skills/core.py
+++ b/mycroft/skills/core.py
@@ -35,7 +35,7 @@
__author__ = 'seanfitz'
PRIMARY_SKILLS = ['intent', 'wake']
-BLACKLISTED_SKILLS = ["send_sms"]
+BLACKLISTED_SKILLS = ["send_sms", "media"]
SKILLS_BASEDIR = dirname(__file__)
THIRD_PARTY_SKILLS_DIR = "/opt/mycroft/third_party"
|
mdn__kuma-5855 | Escalated number of errors from Google Search Console: Breadcrumbs
[Google Search Console](https://search.google.com/search-console/breadcrumbs/drilldown?resource_id=https%3A%2F%2Fdeveloper.mozilla.org%2F&item_key=CgwICRoIcG9zaXRpb24QAxgP&hl=en) emailed us about a big increase in indexing "errors" about breadcrumbs.
<img width="1174" alt="Screen Shot 2019-09-20 at 1 47 54 PM" src="https://user-images.githubusercontent.com/26739/65347578-4a118c80-dbad-11e9-8bda-8df0bd7871de.png">
The code that produces our breadcrumbs (on the Wiki) [hasn't changed in years](https://github.com/mozilla/kuma/blame/master/kuma/wiki/jinja2/wiki/includes/document_macros.html).
| [
{
"content": "# -*- coding: utf-8 -*-\nimport difflib\nimport json\nimport re\n\nimport jinja2\nimport six\nfrom constance import config\nfrom cssselect.parser import SelectorSyntaxError\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.template import lo... | [
{
"content": "# -*- coding: utf-8 -*-\nimport difflib\nimport json\nimport re\n\nimport jinja2\nimport six\nfrom constance import config\nfrom cssselect.parser import SelectorSyntaxError\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.template import lo... | diff --git a/kuma/static/styles/components/wiki/crumbs.scss b/kuma/static/styles/components/wiki/crumbs.scss
index 5281e2404b7..427a3d58fca 100644
--- a/kuma/static/styles/components/wiki/crumbs.scss
+++ b/kuma/static/styles/components/wiki/crumbs.scss
@@ -35,6 +35,13 @@ $crumb-vertical-spacing-desktop: $grid-spacing / 4;
}
}
+ a.crumb-current-page {
+ &:link,
+ &:visited {
+ color: $text-color;
+ }
+ }
+
span {
display: inline-block;
position: relative;
diff --git a/kuma/wiki/jinja2/wiki/includes/document_macros.html b/kuma/wiki/jinja2/wiki/includes/document_macros.html
index be21351c6a5..5d3d2916637 100644
--- a/kuma/wiki/jinja2/wiki/includes/document_macros.html
+++ b/kuma/wiki/jinja2/wiki/includes/document_macros.html
@@ -11,7 +11,10 @@
</li>
{% endfor %}
<li property="itemListElement" typeof="ListItem" class="crumb">
- <span property="name" aria-current="page">{{ document.title }}</span>
+ <a href="{{ document.get_absolute_url() }}" class="crumb-current-page" property="item" typeof="WebPage">
+ <span property="name" aria-current="page">{{ document.title }}</span>
+ </a>
+ <meta property="position" content="{{ document.parents|length_plus_one }}">
</li>
</ol>
</nav>
diff --git a/kuma/wiki/templatetags/jinja_helpers.py b/kuma/wiki/templatetags/jinja_helpers.py
index 35075a29b98..ed5580e448c 100644
--- a/kuma/wiki/templatetags/jinja_helpers.py
+++ b/kuma/wiki/templatetags/jinja_helpers.py
@@ -262,3 +262,12 @@ def include_svg(path, title=None, title_id=None):
else:
svg_out = svg
return jinja2.Markup(svg_out)
+
+
+@library.filter
+def length_plus_one(lengthy):
+ """Useful when you want to do something like
+ `{{ somelist|length_plus_one }}` and you want it to output the
+ Python equivalent of `len(somelist) + 1`.
+ """
+ return len(lengthy) + 1
|
cloudtools__troposphere-120 | Update metadata to include property keys
Within the CloudFormation metadata object for auto scaling launch configurations, it'd be nice to be able to rely on the validators within the template to sanity check the allowed keys within the 'config' dictionary:
```
"Resources": {
"MyInstance": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"AWS::CloudFormation::Init" : {
"config" : {
"packages" : {
:
},
"groups" : {
:
},
"users" : {
:
},
"sources" : {
:
},
"files" : {
:
},
"commands" : {
:
},
"services" : {
:
}
}
}
},
"Properties": {
:
}
}
}
```
Currently the implementation of troposphere.cloudformation.InitConfig only contains one key which is 'files'
| [
{
"content": "# Copyright (c) 2013, Mark Peek <mark@peek.org>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n ... | [
{
"content": "# Copyright (c) 2013, Mark Peek <mark@peek.org>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n ... | diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py
index c79ff3904..069496741 100644
--- a/troposphere/cloudformation.py
+++ b/troposphere/cloudformation.py
@@ -67,7 +67,13 @@ def JSONrepr(self):
class InitConfig(AWSProperty):
props = {
- 'files': (dict, False)
+ 'groups': (dict, False),
+ 'users': (dict, False),
+ 'sources': (dict, False),
+ 'packages': (dict, False),
+ 'files': (dict, False),
+ 'commands': (dict, False),
+ 'services': (dict, False)
}
|
mdn__kuma-6598 | Possibly to prefill Reason textarea on $delete
If you're going to have some human-helped automation that deletes the 20% or so non-en-US documents that aren't actually translated, it would be nice if you don't have to type in the same reason every time.
<img width="989" alt="Screen Shot 2020-02-26 at 11 56 40 AM" src="https://user-images.githubusercontent.com/26739/75367987-1be85500-588f-11ea-8ba1-f49e0db69cc7.png">
Would be neat if you could control it with something like `?reason=Sample%20reason`
| [
{
"content": "from django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext\nfrom django.views.decorators.cache import never_cache\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n l... | [
{
"content": "from django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext\nfrom django.views.decorators.cache import never_cache\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n l... | diff --git a/kuma/wiki/views/delete.py b/kuma/wiki/views/delete.py
index cd80d192a5a..23ee6197d2c 100644
--- a/kuma/wiki/views/delete.py
+++ b/kuma/wiki/views/delete.py
@@ -100,7 +100,8 @@ def delete_document(request, document_slug, document_locale):
document.delete()
return redirect(document)
else:
- form = DocumentDeletionForm()
+
+ form = DocumentDeletionForm(initial={"reason": request.GET.get("reason", "")})
context = {
"document": document,
|
tornadoweb__tornado-2629 | When HttpResponse body is empty it returns a ValueError when it should not
In checkin https://github.com/tornadoweb/tornado/pull/2514/commits/7b846ea56bff1892a4d4d05206210b4d234e292b the code for httpclient.HttpResponse.body was changed to throw a ValueError when the body is empty.
But since the message body is optional throwing an ValueError seems not right because it is not an error. Can it be reverted back to the 5 behavior of just returning a None when the body is empty.
| [
{
"content": "\"\"\"Blocking and non-blocking HTTP client interfaces.\n\nThis module defines a common interface shared by two implementations,\n``simple_httpclient`` and ``curl_httpclient``. Applications may either\ninstantiate their chosen implementation class directly or use the\n`AsyncHTTPClient` class from... | [
{
"content": "\"\"\"Blocking and non-blocking HTTP client interfaces.\n\nThis module defines a common interface shared by two implementations,\n``simple_httpclient`` and ``curl_httpclient``. Applications may either\ninstantiate their chosen implementation class directly or use the\n`AsyncHTTPClient` class from... | diff --git a/tornado/httpclient.py b/tornado/httpclient.py
index 33abe2e16e..882600af82 100644
--- a/tornado/httpclient.py
+++ b/tornado/httpclient.py
@@ -665,7 +665,7 @@ def __init__(
@property
def body(self) -> bytes:
if self.buffer is None:
- raise ValueError("body not set")
+ return b""
elif self._body is None:
self._body = self.buffer.getvalue()
|
conan-io__conan-2921 | local cache inconsistent after enabling short_paths in a recipe
To help us debug your issue please explain:
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
Conan Version 1.3.3
Windows 10
With a package in local cache whose recipe does NOT have `short_paths=True`, modify in normal development folder the recipe and set `short_paths=True` and run conan create.
Folders in local cache become inconsistent showing both folders from previous conan create run and .conan_link files pointing to the short paths folders.
This seems no not affect conan tool behavior when running commands and works well if `short_paths` is removed once again.
| [
{
"content": "import os\nimport subprocess\n\nfrom conans.util.files import load, mkdir, save, rmdir\nimport tempfile\n\n\nCONAN_LINK = \".conan_link\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some ... | [
{
"content": "import os\nimport subprocess\n\nfrom conans.util.files import load, mkdir, save, rmdir\nimport tempfile\n\n\nCONAN_LINK = \".conan_link\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some ... | diff --git a/conans/test/functional/short_paths_test.py b/conans/test/functional/short_paths_test.py
new file mode 100644
index 00000000000..6d9f55b6832
--- /dev/null
+++ b/conans/test/functional/short_paths_test.py
@@ -0,0 +1,67 @@
+import os
+import platform
+import unittest
+
+from conans.model.ref import ConanFileReference
+from conans.test.utils.tools import TestClient
+
+
+class ShortPathsTest(unittest.TestCase):
+
+ @unittest.skipUnless(platform.system() == "Windows", "Requires Windows")
+ def inconsistent_cache_test(self):
+ conanfile = """
+import os
+from conans import ConanFile, tools
+
+
+class TestConan(ConanFile):
+ name = "test"
+ version = "1.0"
+ short_paths = {0}
+ exports_sources = "source_file.cpp"
+
+ def source(self):
+ for item in os.listdir(self.source_folder):
+ self.output.info("SOURCE: " + str(item))
+ def build(self):
+ tools.save(os.path.join(self.build_folder, "artifact"), "")
+ for item in os.listdir(self.build_folder):
+ self.output.info("BUILD: " + str(item))
+ def package(self):
+ self.copy("source_file.cpp")
+ self.copy("artifact")
+ for item in os.listdir(self.build_folder):
+ self.output.info("PACKAGE: " + str(item))
+"""
+
+ client = TestClient()
+ client.save({"conanfile.py": conanfile.format("False"),
+ "source_file.cpp": ""})
+ client.run("create . danimtb/testing")
+ conan_ref = ConanFileReference("test", "1.0", "danimtb", "testing")
+ source_folder = os.path.join(client.client_cache.conan(conan_ref), "source")
+ build_folder = os.path.join(client.client_cache.conan(conan_ref), "build",
+ "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
+ package_folder = os.path.join(client.client_cache.conan(conan_ref), "package",
+ "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
+ self.assertIn("SOURCE: source_file.cpp", client.out)
+ self.assertEqual(["source_file.cpp"], os.listdir(source_folder))
+ self.assertIn("BUILD: source_file.cpp", client.out)
+ self.assertIn("BUILD: artifact", client.out)
+ self.assertEqual(["artifact", "conanbuildinfo.txt", "conaninfo.txt", "source_file.cpp"],
+ os.listdir(build_folder))
+ self.assertIn("PACKAGE: source_file.cpp", client.out)
+ self.assertIn("PACKAGE: artifact", client.out)
+ self.assertEqual(["artifact", "conaninfo.txt", "conanmanifest.txt", "source_file.cpp"],
+ os.listdir(package_folder))
+ client.save({"conanfile.py": conanfile.format("True")})
+ client.run("create . danimtb/testing")
+ self.assertIn("SOURCE: source_file.cpp", client.out)
+ self.assertEqual([".conan_link"], os.listdir(source_folder))
+ self.assertIn("BUILD: source_file.cpp", client.out)
+ self.assertIn("BUILD: artifact", client.out)
+ self.assertEqual([".conan_link"], os.listdir(build_folder))
+ self.assertIn("PACKAGE: source_file.cpp", client.out)
+ self.assertIn("PACKAGE: artifact", client.out)
+ self.assertEqual([".conan_link"], os.listdir(package_folder))
diff --git a/conans/util/windows.py b/conans/util/windows.py
index 635597a8051..636fc672e87 100644
--- a/conans/util/windows.py
+++ b/conans/util/windows.py
@@ -45,6 +45,9 @@ def path_shortener(path, short_paths):
elif short_paths is None:
return path
+ if os.path.exists(path):
+ rmdir(path)
+
short_home = os.getenv("CONAN_USER_HOME_SHORT")
if not short_home:
drive = os.path.splitdrive(path)[0]
|
googleapis__google-cloud-python-5856 | Request to release GCS Python library
Hi,
Is it possible to release the Storage client library for Python?
I'd like the new method `get_service_account_email` to be available. Unless there exist concerns.
| [
{
"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicabl... | [
{
"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicabl... | diff --git a/storage/CHANGELOG.md b/storage/CHANGELOG.md
index 002e92db7c09..b1b0ef25782e 100644
--- a/storage/CHANGELOG.md
+++ b/storage/CHANGELOG.md
@@ -4,11 +4,32 @@
[1]: https://pypi.org/project/google-cloud-storage/#history
+## 1.11.0
+
+### Implementation Changes
+- Preserve message / args from an `InvalidResponse`. (#5492)
+- Fix generating signed urls for blobs with non-ascii names. (#5625)
+- Move bucket location specification to `Bucket.create`; deprecate `Bucket.location` setter (#5808)
+
+### New Features
+- Add `Client.get_service_account_email`. (#5765)
+
+### Documentation
+- Clarify `None` values for resource-backed properties. (#5509)
+- Elaborate docs for `{Bucket,Blob}.make_{public,private}`; note how to enable anonymous accesss to `Blob.public_url`. (#5767)
+
+### Internal / Testing Changes
+- Harden `create_bucket` systest against 429 responses. (#5535)
+- Add system test: signed URLs w/ non-ASCII blob name. (#5626)
+- Harden `tearDownModule` against 429 TooManyRequests. (#5701)
+- Retry `notification.create()` on `503 ServiceUnavailable`. (#5741)
+- Fix failing KMS system tests. (#5832, #5837, #5860)
+
## 1.10.0
### New Features
- Add support for KMS keys (#5259)
-- Add '{Blob,Bucket}make_private' method (#5336)
+- Add `{Blob,Bucket}make_private` method (#5336)
### Internal / Testing Changes
- Modify system tests to use prerelease versions of grpcio (#5304)
diff --git a/storage/setup.py b/storage/setup.py
index a0ce64d4aca7..ac046e924b5e 100644
--- a/storage/setup.py
+++ b/storage/setup.py
@@ -22,7 +22,7 @@
name = 'google-cloud-storage'
description = 'Google Cloud Storage API client library'
-version = '1.10.0'
+version = '1.11.0'
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
|
twisted__twisted-12069 | spawnProcess() passes incorrect environment to subprocess when env=None and posix_spawnp() is used
[Documentation on reactor.spawnProcess](https://docs.twisted.org/en/stable/api/twisted.internet.interfaces.IReactorProcess.html) says the following about env parameter:
```env is None: On POSIX: pass os.environ```
However, twisted has [this code](https://github.com/twisted/twisted/blob/68f112f1eecb4613a3b678314a5479464c184ab4/src/twisted/internet/process.py#L881) in the code path leading to a call to posix_spawnp().
```
if environment is None:
environment = {}
```
This leads to a subprocess being initialized with empty environment even though `os.environ` was expected.
**Describe how to cause this behavior**
There's a PR with automated tests added to Twisted.
**Describe the correct behavior you'd like to see**
Subprocess having parent process environment when invoked via `reactor.spawnProcess(..., env=None)`.
**Testing environment**
- Operating System and Version;
- Debian 12
- Twisted version: 23.10.0
- Reactor: default on Linux
**Additional context**
Probably a regression since 23.8.0 when posix_spawnp was enabled.
| [
{
"content": "# -*- test-case-name: twisted.test.test_process -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUNIX Process management.\n\nDo NOT use this module directly - use reactor.spawnProcess() instead.\n\nMaintainer: Itamar Shtull-Trauring\n\"\"\"\nfrom __future__ ... | [
{
"content": "# -*- test-case-name: twisted.test.test_process -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUNIX Process management.\n\nDo NOT use this module directly - use reactor.spawnProcess() instead.\n\nMaintainer: Itamar Shtull-Trauring\n\"\"\"\nfrom __future__ ... | diff --git a/src/twisted/internet/process.py b/src/twisted/internet/process.py
index ef3b88d9f19..ff7684e358b 100644
--- a/src/twisted/internet/process.py
+++ b/src/twisted/internet/process.py
@@ -879,7 +879,7 @@ def _trySpawnInsteadOfFork(
else:
fdState.append((eachFD, isCloseOnExec))
if environment is None:
- environment = {}
+ environment = os.environ
setSigDef = [
everySignal
diff --git a/src/twisted/internet/test/test_process.py b/src/twisted/internet/test/test_process.py
index 0b8cdee7500..d1d930cca39 100644
--- a/src/twisted/internet/test/test_process.py
+++ b/src/twisted/internet/test/test_process.py
@@ -29,6 +29,7 @@
from twisted.python.filepath import FilePath, _asFilesystemBytes
from twisted.python.log import err, msg
from twisted.python.runtime import platform
+from twisted.test.test_process import Accumulator
from twisted.trial.unittest import SynchronousTestCase, TestCase
# Get the current Python executable as a bytestring.
@@ -1001,6 +1002,132 @@ def launchProcessAndWait(reactor):
hamcrest.equal_to(["process already removed as desired"]),
)
+ def checkSpawnProcessEnvironment(self, spawnKwargs, expectedEnv, usePosixSpawnp):
+ """
+ Shared code for testing the environment variables
+ present in the spawned process.
+
+ The spawned process serializes its environ to stderr or stdout (depending on usePTY)
+ which is checked against os.environ of the calling process.
+ """
+ p = Accumulator()
+ d = p.endedDeferred = Deferred()
+
+ reactor = self.buildReactor()
+ reactor._neverUseSpawn = not usePosixSpawnp
+
+ reactor.callWhenRunning(
+ reactor.spawnProcess,
+ p,
+ pyExe,
+ [
+ pyExe,
+ b"-c",
+ networkString(
+ "import os, sys; "
+ "env = dict(os.environ); "
+ # LC_CTYPE is set by python, see https://peps.python.org/pep-0538/
+ 'env.pop("LC_CTYPE", None); '
+ 'env.pop("__CF_USER_TEXT_ENCODING", None); '
+ "sys.stderr.write(str(sorted(env.items())))"
+ ),
+ ],
+ usePTY=self.usePTY,
+ **spawnKwargs,
+ )
+
+ def shutdown(ign):
+ reactor.stop()
+
+ d.addBoth(shutdown)
+
+ self.runReactor(reactor)
+
+ expectedEnv.pop("LC_CTYPE", None)
+ expectedEnv.pop("__CF_USER_TEXT_ENCODING", None)
+ self.assertEqual(
+ bytes(str(sorted(expectedEnv.items())), "utf-8"),
+ p.outF.getvalue() if self.usePTY else p.errF.getvalue(),
+ )
+
+ def checkSpawnProcessEnvironmentWithPosixSpawnp(self, spawnKwargs, expectedEnv):
+ return self.checkSpawnProcessEnvironment(
+ spawnKwargs, expectedEnv, usePosixSpawnp=True
+ )
+
+ def checkSpawnProcessEnvironmentWithFork(self, spawnKwargs, expectedEnv):
+ return self.checkSpawnProcessEnvironment(
+ spawnKwargs, expectedEnv, usePosixSpawnp=False
+ )
+
+ @onlyOnPOSIX
+ def test_environmentPosixSpawnpEnvNotSet(self):
+ """
+ An empty environment is passed to the spawned process, when the default value of the C{env}
+ is used. That is, when the C{env} argument is not explicitly set.
+
+ In this case posix_spawnp is used as the backend for spawning processes.
+ """
+ return self.checkSpawnProcessEnvironmentWithPosixSpawnp({}, {})
+
+ @onlyOnPOSIX
+ def test_environmentForkEnvNotSet(self):
+ """
+ An empty environment is passed to the spawned process, when the default value of the C{env}
+ is used. That is, when the C{env} argument is not explicitly set.
+
+ In this case fork+execvpe is used as the backend for spawning processes.
+ """
+ return self.checkSpawnProcessEnvironmentWithFork({}, {})
+
+ @onlyOnPOSIX
+ def test_environmentPosixSpawnpEnvNone(self):
+ """
+ The parent process environment is passed to the spawned process, when C{env} is set to
+ C{None}.
+
+ In this case posix_spawnp is used as the backend for spawning processes.
+ """
+ return self.checkSpawnProcessEnvironmentWithPosixSpawnp(
+ {"env": None}, os.environ
+ )
+
+ @onlyOnPOSIX
+ def test_environmentForkEnvNone(self):
+ """
+ The parent process environment is passed to the spawned process, when C{env} is set to
+ C{None}.
+
+ In this case fork+execvpe is used as the backend for spawning processes.
+ """
+ return self.checkSpawnProcessEnvironmentWithFork({"env": None}, os.environ)
+
+ @onlyOnPOSIX
+ def test_environmentPosixSpawnpEnvCustom(self):
+ """
+ The user-specified environment without extra variables from parent process is passed to the
+ spawned process, when C{env} is set to a dictionary.
+
+ In this case posix_spawnp is used as the backend for spawning processes.
+ """
+ return self.checkSpawnProcessEnvironmentWithPosixSpawnp(
+ {"env": {"MYENV1": "myvalue1"}},
+ {"MYENV1": "myvalue1"},
+ )
+
+ @onlyOnPOSIX
+ def test_environmentForkEnvCustom(self):
+ """
+ The user-specified environment without extra variables from parent process is passed to the
+ spawned process, when C{env} is set to a dictionary.
+
+ In this case fork+execvpe is used as the backend for spawning processes.
+ """
+ return self.checkSpawnProcessEnvironmentWithFork(
+ {"env": {"MYENV1": "myvalue1"}},
+ {"MYENV1": "myvalue1"},
+ )
+
globals().update(ProcessTestsBuilder.makeTestCaseClasses())
diff --git a/src/twisted/newsfragments/12068.bugfix b/src/twisted/newsfragments/12068.bugfix
new file mode 100644
index 00000000000..584d3ed9443
--- /dev/null
+++ b/src/twisted/newsfragments/12068.bugfix
@@ -0,0 +1 @@
+twisted.internet.process.Process, used by ``reactor.spawnProcess``, now copies the parent environment when the `env=None` argument is passed on Posix systems and ``os.posix_spawnp`` is used internally.
|
vllm-project__vllm-1666 | Batch generation with long prompt generates incorrect number of outputs
When a prompt in a batch generation is too long for the model, `llm.generate` returns an unexpected number of outputs:
```python
In [11]: prompts = ["This is a short prompt", "This is a very long prompt " * 1000]
...: print(len(prompts))
2
In [12]: outputs = llm.generate(prompts, sampling_params=sampling_params, use_tqdm=False)
WARNING 11-14 04:11:47 scheduler.py:146] Input prompt (6002 tokens) is too long and exceeds limit of 4096
In [13]: print(len(outputs))
3
```
It appears the too-long prompt gets doubled up in the output:
```python
In [14]: prompts = ["This is a short prompt", "This is a very long prompt " * 1000, "Here's another short
...: prompt"]
...: print(len(prompts))
3
In [15]: outputs = llm.generate(prompts, sampling_params=sampling_params, use_tqdm=False)
WARNING 11-14 04:15:02 scheduler.py:146] Input prompt (6002 tokens) is too long and exceeds limit of 4096
In [16]: outputs[0].prompt[:100]
Out[16]: 'This is a short prompt'
In [17]: outputs[1].prompt[:100]
Out[17]: 'This is a very long prompt This is a very long prompt This is a very long prompt This is a very long'
In [18]: outputs[2].prompt[:100]
Out[18]: 'This is a very long prompt This is a very long prompt This is a very long prompt This is a very long'
In [19]: outputs[3].prompt[:100]
Out[19]: "Here's another short prompt"
```
We are using `zip` to recombine the `outputs` with input data after the generation, and this causes big problems since the zip is off-by-one after any prompt was encountered over the size limit.
Here's a minimum reproducible script:
```python
from vllm import LLM, SamplingParams
sampling_params = SamplingParams(temperature=0.01, top_p=0.1, max_tokens=256)
llm = LLM(model=f"meta-llama/Llama-2-7b-hf",
max_num_batched_tokens=4096,
tensor_parallel_size=1)
prompts = ["This is a short prompt", "This is a very long prompt " * 1000]
print(len(prompts))
outputs = llm.generate(prompts, sampling_params=sampling_params)
print(len(outputs))
```
Environment info:
```
(eb) kwood@kwood-lab:~$ cat /etc/issue
Ubuntu 22.04.3 LTS \n \l
(eb) kwood@kwood-lab:~$ pip freeze | grep vllm
vllm==0.2.1.post1
(eb) kwood@kwood-lab:~$ nvidia-smi
Tue Nov 14 04:22:19 2023
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.129.03 Driver Version: 535.129.03 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 4090 On | 00000000:2D:00.0 Off | Off |
| 0% 40C P8 36W / 450W | 3MiB / 24564MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| No running processes found |
+---------------------------------------------------------------------------------------+
```
| [
{
"content": "import copy\nimport time\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\nfrom vllm.core.scheduler import Scheduler, Schedu... | [
{
"content": "import copy\nimport time\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\nfrom vllm.core.scheduler import Scheduler, Schedu... | diff --git a/tests/test_regression.py b/tests/test_regression.py
new file mode 100644
index 00000000000..3bfb2b43f26
--- /dev/null
+++ b/tests/test_regression.py
@@ -0,0 +1,27 @@
+"""Containing tests that check for regressions in vLLM's behavior.
+
+It should include tests that are reported by users and making sure they
+will never happen again.
+
+"""
+from vllm import LLM, SamplingParams
+
+
+def test_duplicated_ignored_sequence_group():
+ """https://github.com/vllm-project/vllm/issues/1655"""
+
+ sampling_params = SamplingParams(temperature=0.01,
+ top_p=0.1,
+ max_tokens=256)
+ llm = LLM(model="facebook/opt-125m",
+ max_num_batched_tokens=4096,
+ tensor_parallel_size=1)
+ prompts = ["This is a short prompt", "This is a very long prompt " * 1000]
+ outputs = llm.generate(prompts, sampling_params=sampling_params)
+
+ assert len(prompts) == len(outputs)
+
+
+if __name__ == "__main__":
+ import pytest
+ pytest.main([__file__])
diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py
index c3752b11f56..20af3fb3e38 100644
--- a/vllm/engine/llm_engine.py
+++ b/vllm/engine/llm_engine.py
@@ -567,7 +567,7 @@ def step(self) -> List[RequestOutput]:
blocks_to_copy=scheduler_outputs.blocks_to_copy,
)
- return self._process_model_outputs(output, scheduler_outputs) + ignored
+ return self._process_model_outputs(output, scheduler_outputs)
def _log_system_stats(
self,
|
buildbot__buildbot-5588 | Can't use secret interpolation in p4poller.P4Source and steps.P4
I tried to use secrets for perforce passwords (using buildbot 1.0.0).
First I tried to use it in the P4 change source:
p4ChangeSource = p4poller.P4Source(p4port=p4Port,
p4user=p4User,
p4passwd=util.Secret('p4passwd'),
...)
which fails with:
... [-] P4 poll failed ..
...
exceptions.TypeError: Arguments contain a non-string value
Then I tried to use the secret in the P4 source checkout step:
factory.addStep(steps.P4(p4port=p4Port,
...,
p4passwd=util.Secret('p4passwd'),
....)
This fails with:
Upon execvpe p4 ['p4', ..., '-P', Unpersistable('Unpersistable data: instance of class buildbot.process.properties.Secret deemed insecure'), ...] in environment id 56699632
:Traceback (most recent call last):
File "/.../internet/process.py", line 445, in _fork environment)
File "/.../internet/process.py", line 523, in _execChild os.execvpe(executable, args, environment)
File "/.../lib/python2.7/os.py", line 353, in execvpe _execvpe(file, args, env)
File "/.../lib/python2.7/os.py", line 380, in _execvpe func(fullname, *argrest)
TypeError: coercing to Unicode: need string or buffer, instance found
I suppose, the secrets are still not implemented for perforce functions.
Or am I doing something wrong?
| [
{
"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n... | [
{
"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n... | diff --git a/master/buildbot/steps/source/p4.py b/master/buildbot/steps/source/p4.py
index b29e376dcbbf..5fdddd8abcb9 100644
--- a/master/buildbot/steps/source/p4.py
+++ b/master/buildbot/steps/source/p4.py
@@ -44,7 +44,7 @@ class P4(Source):
name = 'p4'
- renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch']
+ renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch', 'p4passwd']
possible_modes = ('incremental', 'full')
def __init__(self, mode='incremental',
|
internetarchive__openlibrary-5899 | Work search - Sort by first published displays work with no publication date first
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
When searching for a work and sorting by `First Published`, if some works match the search and have no first publication date, they appear first.
I expected to see works with known first published year first and not pages later.
### Evidence / Screenshot (if possible)
n/a
### Relevant url?
<!-- `https://openlibrary.org/...` -->
For example: https://openlibrary.org/search?q=calamity&mode=everything&sort=old
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Search for `calamity` in the search bar (https://openlibrary.org/search?q=calamity&mode=everything)
2. Sort by `First Published` (https://openlibrary.org/search?q=calamity&mode=everything&sort=old)
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual: First result is `The Mount Rushmore Calamity by Jeff Brown` with no publication date
* Expected: `A theatre wherein be represented as wel the miseries & calamities that follow the voluptuous worldlings by Noot, Jan van der`, first published in 1569 and only displayed on page 2 of the search
### Details
- **Logged in (Y/N)?** No
- **Browser type/version?**
- **Operating system?**
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
When sorting by publication date (`First Published` or `Most Recent`), works with no publication date should be the last results displayed.
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
| [
{
"content": "from datetime import datetime\nimport copy\nimport json\nimport logging\nimport random\nimport re\nimport string\nfrom typing import List, Tuple, Any, Union, Optional, Iterable, Dict\nfrom unicodedata import normalize\nfrom json import JSONDecodeError\nimport requests\nimport web\nfrom lxml.etree ... | [
{
"content": "from datetime import datetime\nimport copy\nimport json\nimport logging\nimport random\nimport re\nimport string\nfrom typing import List, Tuple, Any, Union, Optional, Iterable, Dict\nfrom unicodedata import normalize\nfrom json import JSONDecodeError\nimport requests\nimport web\nfrom lxml.etree ... | diff --git a/openlibrary/plugins/worksearch/code.py b/openlibrary/plugins/worksearch/code.py
index 7f1991f46bf..8f2f002034d 100644
--- a/openlibrary/plugins/worksearch/code.py
+++ b/openlibrary/plugins/worksearch/code.py
@@ -116,7 +116,7 @@
}
SORTS = {
'editions': 'edition_count desc',
- 'old': 'first_publish_year asc',
+ 'old': 'def(first_publish_year, 9999) asc',
'new': 'first_publish_year desc',
'scans': 'ia_count desc',
# Classifications
|
openstates__openstates-scrapers-2384 | MN failing since at least 2018-06-15
MN has been failing since 2018-06-15
Based on automated runs it appears that MN has not run successfully in 2 days (2018-06-15).
```
/opt/openstates/venv-pupa/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.
""")
02:01:53 CRITICAL pupa: Session(s) 91st Legislature, 2019-2020 were reported by Minnesota.get_session_list() but were not found in Minnesota.legislative_sessions or Minnesota.ignored_scraped_sessions.
loaded Open States pupa settings...
mn (scrape, import)
bills: {}
committees: {}
people: {}
vote_events: {}
```
Visit http://bobsled.openstates.org for more info.
| [
{
"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .bills import MNBillScraper\nfrom .committees import MNCommitteeScraper\nfrom .people import MNPersonScraper\nfrom .vote_events import MNVoteScraper\n# from .events import MNEventScraper\n\n\"\"\"\... | [
{
"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .bills import MNBillScraper\nfrom .committees import MNCommitteeScraper\nfrom .people import MNPersonScraper\nfrom .vote_events import MNVoteScraper\n# from .events import MNEventScraper\n\n\"\"\"\... | diff --git a/openstates/mn/__init__.py b/openstates/mn/__init__.py
index c6dadd14f7..f5a168769b 100644
--- a/openstates/mn/__init__.py
+++ b/openstates/mn/__init__.py
@@ -127,6 +127,7 @@ class Minnesota(Jurisdiction):
'79th Legislature, 1995-1996',
'79th Legislature, 1995 1st Special Session',
'89th Legislature, 2015-2016',
+ '91st Legislature, 2019-2020',
]
def get_organizations(self):
|
mlflow__mlflow-11463 | [DOC-FIX] Document that attribute RunInfo.lifecycle_stage is of type LifecycleStage
### Willingness to contribute
No. I cannot contribute a documentation fix at this time.
### URL(s) with the issue
https://mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.RunInfo.lifecycle_stage
### Description of proposal (what needs changing)
For [documentation on RunInfo](https://mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.RunInfo) class.
For the `RunInfo.lifecycle_stage` attribute we should mention that it's type is enum LifecycleStage. Analogous to the documentation for the `RunInfo.stage` attribute.
Should be
```
property lifecycle_stage[source]
One of the values in mlflow.entities.lifecycle_stage.LifecycleStage describing the lifecycle stage of the run.
```
similar to the existing
```
property status[source]
One of the values in mlflow.entities.RunStatus describing the status of the run.
```
| [
{
"content": "from mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.lifecycle_stage import LifecycleStage\nfrom mlflow.entities.run_status import RunStatus\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\nfrom mlflow.proto... | [
{
"content": "from mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.lifecycle_stage import LifecycleStage\nfrom mlflow.entities.run_status import RunStatus\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\nfrom mlflow.proto... | diff --git a/mlflow/entities/run_info.py b/mlflow/entities/run_info.py
index 1d22e402e754b..5fca3152ae814 100644
--- a/mlflow/entities/run_info.py
+++ b/mlflow/entities/run_info.py
@@ -138,6 +138,10 @@ def artifact_uri(self):
@property
def lifecycle_stage(self):
+ """
+ One of the values in :py:class:`mlflow.entities.lifecycle_stage.LifecycleStage`
+ describing the lifecycle stage of the run.
+ """
return self._lifecycle_stage
def to_proto(self):
|
ansible__ansible-17707 | lookup properties
<!--- Verify first that your issue/request is not already reported in GitHub -->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.2.0 (devel 4e369a31db) last updated 2016/07/02 15:01:01 (GMT +400)
lib/ansible/modules/core: (detached HEAD 1d0d5db97a) last updated 2016/07/02 15:01:12 (GMT +400)
lib/ansible/modules/extras: (detached HEAD 00b8b96906) last updated 2016/07/02 15:01:12 (GMT +400)
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
##### OS / ENVIRONMENT
"N/A"
##### SUMMARY
[commit](https://github.com/ansible/ansible/commit/4ba60d00c8d7e62912a37ec24f90f6e5d0770c4d)
this commit breaks `lookup` for `type=properties`
##### STEPS TO REPRODUCE
just try to read some properties through `lookup`
```
- name: lookup
vars:
property_value: "{{ lookup('ini', 'some.java.property type=properties file=config.properties') }}"
debug: msg="{{ property_value }}"
```
##### EXPECTED RESULTS
read correct value from property file
##### ACTUAL RESULTS
```
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 512, in _read
raise MissingSectionHeaderError(fpname, lineno, line)
MissingSectionHeaderError: File contains no section headers.
file: /config.properties, line: 3
'environment=dev\n'
fatal: [localhost]: FAILED! => {"failed": true, "msg": "Unexpected failure during module execution.", "stdout": ""}
NO MORE HOSTS LEFT *************************************************************
to retry, use: --limit @test.retry
PLAY RECAP *********************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=1
```
By the way, it would be great to implement 'autofill' properties
```
host=127.0.0.1
api.host=${host}
```
cc @jctanner
| [
{
"content": "# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or... | [
{
"content": "# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or... | diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py
index cbe0d13e6d5d83..11900c0d089523 100644
--- a/lib/ansible/plugins/lookup/ini.py
+++ b/lib/ansible/plugins/lookup/ini.py
@@ -36,7 +36,7 @@
def _parse_params(term):
'''Safely split parameter term to preserve spaces'''
- keys = ['key', 'section', 'file', 're']
+ keys = ['key', 'type', 'section', 'file', 're']
params = {}
for k in keys:
params[k] = ''
|
beetbox__beets-535 | mpdstats: last_played is documented but not implemented
As pointed out [on the mailing list](https://groups.google.com/d/msg/beets-users/VW0pxtCVZG4/sq9gGsNS9zEJ), the mpdstats plugin (paging @pscn and @kljohann) does not seem to set the `last_played` field, even though the field is described in [the plugin's docs](http://beets.readthedocs.org/en/v1.3.2/plugins/mpdstats.html). Grepping in mpdstats.py for "last_played" shows that doesn't seem to be implemented. We should probably either add it to the plugin or remove it from the docs.
| [
{
"content": "# coding=utf-8\n# This file is part of beets.\n# Copyright 2013, Peter Schnebel and Johann Klähn.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restric... | [
{
"content": "# coding=utf-8\n# This file is part of beets.\n# Copyright 2013, Peter Schnebel and Johann Klähn.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restric... | diff --git a/beetsplug/mpdstats.py b/beetsplug/mpdstats.py
index e2f990f447..04355fb31e 100644
--- a/beetsplug/mpdstats.py
+++ b/beetsplug/mpdstats.py
@@ -281,6 +281,9 @@ def on_play(self, status):
'beets_item': self.get_item(path),
}
+ self.update_item(self.now_playing['beets_item'],
+ 'last_played', value=int(time.time()))
+
def run(self):
self.mpd.connect()
events = ['player']
|
spotify__luigi-2679 | Is there a reason python-dateutil is pinned to v2.7.5?
In this [commit](https://github.com/spotify/luigi/commit/ca0aa9afedecda539339e51974ef38cecf180d4b), I can see that python-dateutil has been pinned to version 2.7.5 - is this strictly necessary? Version 2.8.0 was released a couple of weeks ago and It's causing `ContextualVersionConflict` errors for us.
| [
{
"content": "# Copyright (c) 2012 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicabl... | [
{
"content": "# Copyright (c) 2012 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicabl... | diff --git a/setup.py b/setup.py
index 03a22f485c..75b7fe13bc 100644
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@ def get_static_files(path):
'tornado>=4.0,<5',
# https://pagure.io/python-daemon/issue/18
'python-daemon<2.2.0',
- 'python-dateutil==2.7.5',
+ 'python-dateutil>=2.7.5,<3',
]
# Note: To support older versions of setuptools, we're explicitly not
|
dask__distributed-1170 | self.workers.remove(w) fails trying to remove worker.
Probably a nit, but this code:
```python
import dask.array as da
from distributed import (Client,
LocalCluster)
import numpy as np
cluster = LocalCluster()
client = Client(cluster.scheduler_address)
ones = da.ones(shape=(1000,1000), chunks=(100,100), dtype=np.float64)
A = client.persist(ones)
print A
f = client.compute(A)
print f.result()
client.shutdown()
cluster.close()
```
fails in the following way:
```bash
dask.array<wrapped, shape=(1000, 1000), dtype=float64, chunksize=(100, 100)>
[[ 1. 1. 1. ..., 1. 1. 1.]
[ 1. 1. 1. ..., 1. 1. 1.]
[ 1. 1. 1. ..., 1. 1. 1.]
...,
[ 1. 1. 1. ..., 1. 1. 1.]
[ 1. 1. 1. ..., 1. 1. 1.]
[ 1. 1. 1. ..., 1. 1. 1.]]
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b0f24c8>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0a012af8>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0a012db8>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b12d158>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0a021e68>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b09f70368>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b0f28e8>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b0f2d08>
Traceback (most recent call last):
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback
ret = callback()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result
future.result()
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker
self.workers.remove(w)
ValueError: list.remove(x): x not in list
```
| [
{
"content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nimport logging\nimport math\nfrom threading import Thread\nfrom time import sleep\nimport warnings\nimport weakref\n\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop\n\nfrom ..core import CommClosedError\... | [
{
"content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nimport logging\nimport math\nfrom threading import Thread\nfrom time import sleep\nimport warnings\nimport weakref\n\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop\n\nfrom ..core import CommClosedError\... | diff --git a/distributed/deploy/local.py b/distributed/deploy/local.py
index 182a0041df1..d1458c4f824 100644
--- a/distributed/deploy/local.py
+++ b/distributed/deploy/local.py
@@ -203,7 +203,8 @@ def start_worker(self, ncores=0, **kwargs):
@gen.coroutine
def _stop_worker(self, w):
yield w._close()
- self.workers.remove(w)
+ if w in self.workers:
+ self.workers.remove(w)
def stop_worker(self, w):
""" Stop a running worker
diff --git a/distributed/deploy/tests/test_local.py b/distributed/deploy/tests/test_local.py
index 99a148fce8e..c1a7784cf25 100644
--- a/distributed/deploy/tests/test_local.py
+++ b/distributed/deploy/tests/test_local.py
@@ -15,7 +15,8 @@
from distributed.deploy.local import LocalCluster
from distributed.metrics import time
from distributed.utils_test import (inc, loop, raises, gen_test, pristine_loop,
- assert_can_connect_locally_4, assert_can_connect_from_everywhere_4_6)
+ assert_can_connect_locally_4, assert_can_connect_from_everywhere_4_6,
+ captured_logger)
from distributed.utils import ignoring, sync
from distributed.worker import TOTAL_MEMORY, _ncores
@@ -32,6 +33,20 @@ def test_simple(loop):
assert any(w.data == {x.key: 2} for w in c.workers)
+def test_close_twice(loop):
+ cluster = LocalCluster()
+ with Client(cluster.scheduler_address) as client:
+ f = client.map(inc, range(100))
+ client.gather(f)
+ with captured_logger('tornado.application') as log:
+ cluster.close()
+ cluster.close()
+ sleep(0.5)
+ log = log.getvalue()
+ print(log)
+ assert not log
+
+
@pytest.mark.skipif('sys.version_info[0] == 2', reason='multi-loop')
def test_procs(loop):
with LocalCluster(2, scheduler_port=0, processes=False, threads_per_worker=3,
diff --git a/distributed/utils_test.py b/distributed/utils_test.py
index cdcfb4b28b6..9dad79e9d52 100644
--- a/distributed/utils_test.py
+++ b/distributed/utils_test.py
@@ -797,6 +797,8 @@ def assert_can_connect_locally_6(port, timeout=None, connection_args=None):
def captured_logger(logger):
"""Capture output from the given Logger.
"""
+ if isinstance(logger, str):
+ logger = logging.getLogger(logger)
orig_handlers = logger.handlers[:]
sio = six.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
|
ansible__ansible-modules-extras-3339 | Marker in blockinfile is a line prefix, not a whole line
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
`blockinfile`
##### ANSIBLE VERSION
```
ansible 2.1.0 (devel 2e529d7a51) last updated 2016/03/29 20:29:18 (GMT +100)
lib/ansible/modules/core: (detached HEAD 0268864211) last updated 2016/03/29 20:30:38 (GMT +100)
lib/ansible/modules/extras: (detached HEAD 6978984244) last updated 2016/03/29 20:30:38 (GMT +100)
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
N/A.
##### OS / ENVIRONMENT
N/A.
##### SUMMARY
The `blockinfile` documentation talks about `marker` being a "line template", but actually it doesn't match against a whole line, it looks for this marker at the start of the line. This causes trouble when one marker happens to be a leading substring of another marker.
##### STEPS TO REPRODUCE
Run the following play twice:
``` yaml
- hosts: localhost
tasks:
- blockinfile:
dest: /tmp/example
block: this is an example block to insert
create: true
- blockinfile:
dest: /tmp/example
block: this is a different block
marker: '# {mark} ANSIBLE MANAGED BLOCK: non-default marker'
```
##### EXPECTED RESULTS
File `/tmp/example` has the following contents:
```
# BEGIN ANSIBLE MANAGED BLOCK
this is an example block to insert
# END ANSIBLE MANAGED BLOCK
# BEGIN ANSIBLE MANAGED BLOCK: non-default marker
this is a different block
# END ANSIBLE MANAGED BLOCK: non-default marker
```
##### ACTUAL RESULTS
File `/tmp/example` has the following contents:
```
# BEGIN ANSIBLE MANAGED BLOCK
this is an example block to insert
# END ANSIBLE MANAGED BLOCK
# BEGIN ANSIBLE MANAGED BLOCK
this is an example block to insert
# END ANSIBLE MANAGED BLOCK
# BEGIN ANSIBLE MANAGED BLOCK: non-default marker
this is a different block
# END ANSIBLE MANAGED BLOCK: non-default marker
```
Here, on the second run, the first task has overwritten the second block, and the second task has reinserted the second block.
| [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Softwar... | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Softwar... | diff --git a/files/blockinfile.py b/files/blockinfile.py
index 96f430cf14a..ecee4800117 100755
--- a/files/blockinfile.py
+++ b/files/blockinfile.py
@@ -258,9 +258,9 @@ def main():
n0 = n1 = None
for i, line in enumerate(lines):
- if line.startswith(marker0):
+ if line == marker0:
n0 = i
- if line.startswith(marker1):
+ if line == marker1:
n1 = i
if None in (n0, n1):
|
pydantic__pydantic-738 | duplicated errors when validators raise ValidationError
# Bug
As a work around for #619 I tried the following
```py
from pydantic import VERSION, BaseModel, Union, validator
from typing_extensions import Literal
print('pydantic version:', VERSION)
class Foo(BaseModel):
model_type: Literal['foo']
f: int
class Bar(BaseModel):
model_type: Literal['bar']
b: int
class MyModel(BaseModel):
foobar: Union[Foo, Bar]
@validator('foobar', pre=True)
def check_action(cls, v):
if isinstance(v, dict):
model_type = v.get('model_type')
if model_type == 'foo':
return Foo(**v)
if model_type == 'var':
return Bar(**v)
return v
MyModel(foobar={'model_type': 'foo', 'f': 'x'})
```
Output:
```
pydantic version: 0.32.1
Traceback (most recent call last):
File "test.py", line 31, in <module>
MyModel(foobar={'model_type': 'foo', 'f': 'x'})
File "pydantic/main.py", line 275, in pydantic.main.BaseModel.__init__
File "pydantic/main.py", line 785, in pydantic.main.validate_model
pydantic.error_wrappers.ValidationError: 2 validation errors for MyModel
foobar -> f
value is not a valid integer (type=type_error.integer)
foobar -> f
value is not a valid integer (type=type_error.integer)
```
When validators raise `ValidationError` the errors are duplicated.
Won't be that common, but should be fixed.
Repeated error when validator raises an exception
# Bug
Please complete:
* OS: **Ubuntu**
* Python version `import sys; print(sys.version)`: **3.7.4**
* Pydantic version `import pydantic; print(pydantic.VERSION)`: **v0.32.1**
```py
from typing import Optional
from pydantic import BaseModel, validator
class Foobar(BaseModel):
foo: Optional[str] = None
@validator('foo', always=True)
def check_foo(cls, v):
if not v:
raise ValueError('custom error, foo is required')
return v
print(Foobar(foo='x'))
print(Foobar())
```
Outputs:
```
pydantic.error_wrappers.ValidationError: 2 validation errors for Foobar
foo
none is not an allowed value (type=type_error.none.not_allowed)
foo
custom error, foo is required (type=value_error)
```
If i add `pre=True`, the error is even weirder:
```
pydantic.error_wrappers.ValidationError: 2 validation errors for Foobar
foo
custom error, foo is required (type=value_error)
foo
custom error, foo is required (type=value_error)
```
| [
{
"content": "import json\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union\n\nif TYPE_CHECKING: # pragma: no cover\n from pydantic import BaseConfig # noqa: F401\n\n__all__ = ('ErrorWrapper', 'ValidationError')\n\n\nclass... | [
{
"content": "import json\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union\n\nif TYPE_CHECKING: # pragma: no cover\n from pydantic import BaseConfig # noqa: F401\n\n__all__ = ('ErrorWrapper', 'ValidationError')\n\n\nclass... | diff --git a/changes/738-samuelcolvin.rst b/changes/738-samuelcolvin.rst
new file mode 100644
index 00000000000..af8154f43a7
--- /dev/null
+++ b/changes/738-samuelcolvin.rst
@@ -0,0 +1 @@
+add ``__repr__`` method to ``ErrorWrapper``
diff --git a/pydantic/error_wrappers.py b/pydantic/error_wrappers.py
index f483fa6e605..3b83b67cdf6 100644
--- a/pydantic/error_wrappers.py
+++ b/pydantic/error_wrappers.py
@@ -42,6 +42,9 @@ def dict(self, *, loc_prefix: Optional[Tuple[str, ...]] = None) -> Dict[str, Any
return d
+ def __repr__(self) -> str:
+ return f'<ErrorWrapper {self.dict()}>'
+
# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]
# but recursive, therefore just use:
diff --git a/tests/test_errors.py b/tests/test_errors.py
index bf3a68b1717..6bf7ef4fe3c 100644
--- a/tests/test_errors.py
+++ b/tests/test_errors.py
@@ -1,6 +1,13 @@
+from typing import Optional, Union
+
import pytest
-from pydantic import PydanticTypeError
+from pydantic import BaseModel, PydanticTypeError, ValidationError, validator
+
+try:
+ from typing_extensions import Literal
+except ImportError:
+ Literal = None
def test_pydantic_error():
@@ -14,3 +21,61 @@ def __init__(self, *, test_ctx: int) -> None:
with pytest.raises(TestError) as exc_info:
raise TestError(test_ctx='test_value')
assert str(exc_info.value) == 'test message template "test_value"'
+
+
+@pytest.mark.skipif(not Literal, reason='typing_extensions not installed')
+def test_interval_validation_error():
+ class Foo(BaseModel):
+ model_type: Literal['foo']
+ f: int
+
+ class Bar(BaseModel):
+ model_type: Literal['bar']
+ b: int
+
+ class MyModel(BaseModel):
+ foobar: Union[Foo, Bar]
+
+ @validator('foobar', pre=True, whole=True)
+ def check_action(cls, v):
+ if isinstance(v, dict):
+ model_type = v.get('model_type')
+ if model_type == 'foo':
+ return Foo(**v)
+ if model_type == 'bar':
+ return Bar(**v)
+ raise ValueError('not valid Foo or Bar')
+
+ m1 = MyModel(foobar={'model_type': 'foo', 'f': '1'})
+ assert m1.foobar.f == 1
+ assert isinstance(m1.foobar, Foo)
+
+ m2 = MyModel(foobar={'model_type': 'bar', 'b': '2'})
+ assert m2.foobar.b == 2
+ assert isinstance(m2.foobar, BaseModel)
+
+ with pytest.raises(ValidationError) as exc_info:
+ MyModel(foobar={'model_type': 'foo', 'f': 'x'})
+ assert exc_info.value.errors() == [
+ {'loc': ('foobar', 'f'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
+ ]
+
+
+def test_error_on_optional():
+ class Foobar(BaseModel):
+ foo: Optional[str] = None
+
+ @validator('foo', always=True, whole=True)
+ def check_foo(cls, v):
+ raise ValueError('custom error')
+
+ with pytest.raises(ValidationError) as exc_info:
+ Foobar(foo='x')
+ assert exc_info.value.errors() == [{'loc': ('foo',), 'msg': 'custom error', 'type': 'value_error'}]
+ assert repr(exc_info.value.raw_errors[0]) == (
+ "<ErrorWrapper {'loc': ('foo',), 'msg': 'custom error', 'type': 'value_error'}>"
+ )
+
+ with pytest.raises(ValidationError) as exc_info:
+ Foobar(foo=None)
+ assert exc_info.value.errors() == [{'loc': ('foo',), 'msg': 'custom error', 'type': 'value_error'}]
|
DataBiosphere__toil-1535 | NonCachingFileStore doesn't have the jobID attribute
This makes NonCachingFileStore incompatible with dockerCall.
NonCachingFileStore doesn't have the jobID attribute
This makes NonCachingFileStore incompatible with dockerCall.
| [
{
"content": "\"\"\"\n Module for calling Docker. Assumes `docker` is on the PATH.\n\n Contains two user-facing functions: dockerCall and dockerCheckOutput\n\n Uses Toil's defer functionality to ensure containers are shutdown even in case of job or pipeline failure\n\n Example of using dockerCall in... | [
{
"content": "\"\"\"\n Module for calling Docker. Assumes `docker` is on the PATH.\n\n Contains two user-facing functions: dockerCall and dockerCheckOutput\n\n Uses Toil's defer functionality to ensure containers are shutdown even in case of job or pipeline failure\n\n Example of using dockerCall in... | diff --git a/src/toil/lib/docker.py b/src/toil/lib/docker.py
index c4b918c6ec..19d8a6f9b6 100644
--- a/src/toil/lib/docker.py
+++ b/src/toil/lib/docker.py
@@ -232,7 +232,6 @@ def _fixPermissions(tool, workDir):
def _getContainerName(job):
return '--'.join([str(job),
- job.fileStore.jobID,
base64.b64encode(os.urandom(9), '-_')]).replace("'", '').replace('_', '')
diff --git a/src/toil/test/lib/dockerTest.py b/src/toil/test/lib/dockerTest.py
index 1b951f9458..66537c26f4 100644
--- a/src/toil/test/lib/dockerTest.py
+++ b/src/toil/test/lib/dockerTest.py
@@ -30,7 +30,7 @@ class DockerTest(ToilTest):
def setUp(self):
self.tempDir = self._createTempDir(purpose='tempDir')
- def testDockerClean(self):
+ def testDockerClean(self, caching=True):
"""
Run the test container that creates a file in the work dir, and sleeps for 5 minutes. Ensure
that the calling job gets SIGKILLed after a minute, leaving behind the spooky/ghost/zombie
@@ -57,6 +57,8 @@ def testDockerClean(self):
options.logLevel = 'INFO'
options.workDir = work_dir
options.clean = 'always'
+ if not caching:
+ options.disableCaching = True
for rm in (True, False):
for detached in (True, False):
if detached and rm:
@@ -64,7 +66,6 @@ def testDockerClean(self):
for defer in (FORGO, STOP, RM, None):
# Not using base64 logic here since it might create a name starting with a `-`.
container_name = uuid.uuid4().hex
- print rm, detached, defer
A = Job.wrapJobFn(_testDockerCleanFn, data_dir, detached, rm, defer,
container_name)
try:
@@ -94,7 +95,7 @@ def testDockerClean(self):
_dockerKill(container_name, RM)
os.remove(test_file)
- def testDockerPipeChain(self):
+ def testDockerPipeChain(self, caching=True):
"""
Test for piping API for dockerCall(). Using this API (activated when list of
argument lists is given as parameters), commands a piped together into a chain
@@ -105,10 +106,18 @@ def testDockerPipeChain(self):
options.logLevel = 'INFO'
options.workDir = self.tempDir
options.clean = 'always'
+ if not caching:
+ options.disableCaching = True
A = Job.wrapJobFn(_testDockerPipeChainFn)
rv = Job.Runner.startToil(A, options)
assert rv.strip() == '2'
+ def testNonCachingDockerChain(self):
+ self.testDockerPipeChain(caching=False)
+
+ def testNonCachingDockerClean(self):
+ self.testDockerClean(caching=False)
+
def _testDockerCleanFn(job, workDir, detached=None, rm=None, defer=None, containerName=None):
"""
Test function for test docker_clean. Runs a container with given flags and then dies leaving
|
elastic__apm-agent-python-1397 | No module named 'elasticapm.metrics.sets.transactions'
fastapi: 0.70.0
elastic-apm: 6.6.2
Could not register elasticapm.metrics.sets.transactions.TransactionsMetricSet metricset: No module named 'elasticapm.metrics.sets.transactions'
```
File "elasticapm/contrib/starlette/__init__.py", line 70, in make_apm_client
return client_cls(config, **defaults)
File "elasticapm/base.py", line 199, in __init__
self._metrics.register(path)
File "elasticapm/metrics/base_metrics.py", line 72, in register
logger.warning("Could not register %s metricset: %s", class_path, compat.text_type(e))
File "__init__.py", line 1480, in warning
self._log(WARNING, msg, args, **kwargs)
File "__init__.py", line 1615, in _log
self.handle(record)
File "__init__.py", line 1625, in handle
self.callHandlers(record)
File "__init__.py", line 967, in handle
self.emit(record)
```
| [
{
"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following c... | [
{
"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following c... | diff --git a/elasticapm/conf/__init__.py b/elasticapm/conf/__init__.py
index 25f13b4ec..3c7c97326 100644
--- a/elasticapm/conf/__init__.py
+++ b/elasticapm/conf/__init__.py
@@ -550,7 +550,6 @@ class Config(_ConfigBase):
"METRICS_SETS",
default=[
"elasticapm.metrics.sets.cpu.CPUMetricSet",
- "elasticapm.metrics.sets.transactions.TransactionsMetricSet",
],
)
metrics_interval = _ConfigValue(
|
rasterio__rasterio-778 | Copy colormap when rasters are merged
I'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap?
I have an initial pass of this change at:
https://github.com/kapadia/rasterio/tree/rio-merge-colormap
| [
{
"content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\n@click.command(short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\n@options.output_opt\n@f... | [
{
"content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\n@click.command(short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\n@options.output_opt\n@f... | diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py
index d583bcbf1..d44c67138 100644
--- a/rasterio/rio/merge.py
+++ b/rasterio/rio/merge.py
@@ -67,3 +67,10 @@ def merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,
with rasterio.open(output, 'w', **profile) as dst:
dst.write(dest)
+
+ # uses the colormap in the first input raster.
+ try:
+ colormap = sources[0].colormap(1)
+ dst.write_colormap(1, colormap)
+ except ValueError:
+ pass
diff --git a/tests/test_rio_merge.py b/tests/test_rio_merge.py
index b2ce1da01..d643bb9b6 100644
--- a/tests/test_rio_merge.py
+++ b/tests/test_rio_merge.py
@@ -67,6 +67,26 @@ def test_data_dir_2(tmpdir):
return tmpdir
+def test_merge_with_colormap(test_data_dir_1):
+ outputname = str(test_data_dir_1.join('merged.tif'))
+ inputs = [str(x) for x in test_data_dir_1.listdir()]
+ inputs.sort()
+
+ # Add a colormap to the first input prior merge
+ with rasterio.open(inputs[0], 'r+') as src:
+ src.write_colormap(1, {0: (255, 0, 0, 255), 255: (0, 0, 0, 0)})
+
+ runner = CliRunner()
+ result = runner.invoke(merge, inputs + [outputname])
+ assert result.exit_code == 0
+ assert os.path.exists(outputname)
+
+ with rasterio.open(outputname) as out:
+ cmap = out.colormap(1)
+ assert cmap[0] == (255, 0, 0, 255)
+ assert cmap[255] == (0, 0, 0, 255)
+
+
def test_merge_with_nodata(test_data_dir_1):
outputname = str(test_data_dir_1.join('merged.tif'))
inputs = [str(x) for x in test_data_dir_1.listdir()]
|
docker__docker-py-1576 | Can't Create/Update an replicated service with replicas = 0, bug finded
I try to update a service with the following code:
```
service.update(
name=service.name,
mode=docker.types.ServiceMode('replicated', replicas=0),
networks=networks_list,
)
```
First, I want to update the replicas only but I have to put name and network or the api think I want to change them (?) then send me an error.
Second, the ServiceMode object created here don't work and looks like this : `{'replicated': {}}`
The reason for this is https://github.com/docker/docker-py/blob/master/docker/types/services.py#L410 where the if should be an if exist.
I would love to do a pull request but i'm a newbie and I'm already lost with my first one : #1568
Thx for everything
| [
{
"content": "import six\n\nfrom .. import errors\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom ..utils import check_resource, format_environment, split_command\n\n\nclass TaskTemplate(dict):\n \"\"\"\n Describe the task specification to be used when creating or updating a\n service.\n\n Args:\... | [
{
"content": "import six\n\nfrom .. import errors\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom ..utils import check_resource, format_environment, split_command\n\n\nclass TaskTemplate(dict):\n \"\"\"\n Describe the task specification to be used when creating or updating a\n service.\n\n Args:\... | diff --git a/docker/types/services.py b/docker/types/services.py
index 9291c9bd4..e7787ec81 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -407,7 +407,7 @@ def __init__(self, mode, replicas=None):
'replicas can only be used for replicated mode'
)
self[mode] = {}
- if replicas:
+ if replicas is not None:
self[mode]['Replicas'] = replicas
@property
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
index cb1d90ca2..160fabdd7 100644
--- a/tests/unit/dockertypes_test.py
+++ b/tests/unit/dockertypes_test.py
@@ -305,6 +305,12 @@ def test_replicated_replicas(self):
assert mode.mode == 'replicated'
assert mode.replicas == 21
+ def test_replicated_replicas_0(self):
+ mode = ServiceMode('replicated', 0)
+ assert mode == {'replicated': {'Replicas': 0}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas == 0
+
def test_invalid_mode(self):
with pytest.raises(InvalidArgument):
ServiceMode('foobar')
|
cisagov__manage.get.gov-1452 | DISCOVERY: Notification and change log for domain managers
### Issue description
As a domain manager,
I want an in-app log of all changes made to my domain
So that I can ensure that it is correct, and track any changes that have been made, avoiding and correcting errors.
### Acceptance criteria
TBD
### Additional context
Notifications about changes to domain info:
All users wanted to be notified of changes to their domain information–in particular, updates to name servers. Most users said they’d like an email notifications because they rarely visit the registrar. However, an in-app audit trail would be helpful, as well, for future reference or in case an email was missed. Need to do some discovery and design exploration around this.
Souirce: [User feedback](https://docs.google.com/document/d/1M5foXX34qPc7R_J1uhBACHWUhg8WHwX3bB6nurvNNWE/edit#bookmark=id.pa0k2x54vkx1)
### Links to other issues
_No response_
| [
{
"content": "from auditlog.registry import auditlog # type: ignore\nfrom .contact import Contact\nfrom .domain_application import DomainApplication\nfrom .domain_information import DomainInformation\nfrom .domain import Domain\nfrom .draft_domain import DraftDomain\nfrom .host_ip import HostIP\nfrom .host imp... | [
{
"content": "from auditlog.registry import auditlog # type: ignore\nfrom .contact import Contact\nfrom .domain_application import DomainApplication\nfrom .domain_information import DomainInformation\nfrom .domain import Domain\nfrom .draft_domain import DraftDomain\nfrom .host_ip import HostIP\nfrom .host imp... | diff --git a/src/registrar/models/__init__.py b/src/registrar/models/__init__.py
index 1d28c9e89..1203c7878 100644
--- a/src/registrar/models/__init__.py
+++ b/src/registrar/models/__init__.py
@@ -38,6 +38,7 @@
auditlog.register(Domain)
auditlog.register(DraftDomain)
auditlog.register(DomainInvitation)
+auditlog.register(DomainInformation)
auditlog.register(HostIP)
auditlog.register(Host)
auditlog.register(Nameserver)
|
graspologic-org__graspologic-207 | GClust bug
<img width="558" alt="Screen Shot 2019-06-22 at 3 46 06 PM" src="https://user-images.githubusercontent.com/25714207/59968259-eb346c80-9504-11e9-984c-8c13dff93a37.png">
should be `- self.min_components` rather than `- 1`
This causes an indexing error when `min_components` does not equal 1
| [
{
"content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless ... | [
{
"content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless ... | diff --git a/graspy/cluster/gclust.py b/graspy/cluster/gclust.py
index 29b6bc07f..78d950399 100644
--- a/graspy/cluster/gclust.py
+++ b/graspy/cluster/gclust.py
@@ -240,6 +240,6 @@ def fit(self, X, y=None):
self.n_components_ = best_component
self.covariance_type_ = best_covariance
- self.model_ = models[best_component - 1][best_covariance_idx]
+ self.model_ = models[best_component - self.min_components][best_covariance_idx]
return self
diff --git a/tests/cluster/test_gclust.py b/tests/cluster/test_gclust.py
index 646d6953e..f30756dc0 100644
--- a/tests/cluster/test_gclust.py
+++ b/tests/cluster/test_gclust.py
@@ -98,7 +98,7 @@ def test_no_y():
assert_equal(gclust.n_components_, 2)
-def test_outputs():
+def test_two_class():
"""
Easily separable two gaussian problem.
"""
@@ -127,7 +127,29 @@ def test_outputs():
assert_allclose(gclust.ari_.loc[n_components], 1)
-def test_bic():
+def test_five_class():
+ """
+ Easily separable five gaussian problem.
+ """
+ np.random.seed(10)
+
+ n = 100
+ mus = [[i * 5, 0] for i in range(5)]
+ cov = np.eye(2) # balls
+
+ num_sims = 10
+
+ for _ in range(num_sims):
+ X = np.vstack([np.random.multivariate_normal(mu, cov, n) for mu in mus])
+
+ gclust = GaussianCluster(
+ min_components=3, max_components=10, covariance_type="all"
+ )
+ gclust.fit(X)
+ assert_equal(gclust.n_components_, 5)
+
+
+def test_ase_three_blocks():
"""
Expect 3 clusters from a 3 block model
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.