title stringlengths 2 169 | diff stringlengths 235 19.5k | body stringlengths 0 30.5k | url stringlengths 48 84 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 | updated_at stringlengths 20 20 | diff_len float64 101 3.99k | repo_name stringclasses 83
values | __index_level_0__ int64 15 52.7k |
|---|---|---|---|---|---|---|---|---|---|---|
[gaskrank] Add new extractor | diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 5ba8efb0eaa..937356d9abb 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -330,6 +330,7 @@
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
+from .gaskrank import GaskrankIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
diff --git a/youtube_dl/extractor/gaskrank.py b/youtube_dl/extractor/gaskrank.py
new file mode 100644
index 00000000000..972b47bf2d1
--- /dev/null
+++ b/youtube_dl/extractor/gaskrank.py
@@ -0,0 +1,121 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+from .common import InfoExtractor
+from ..utils import float_or_none
+from ..utils import int_or_none
+from ..utils import js_to_json
+from ..utils import unified_strdate
+
+
+class GaskrankIE(InfoExtractor):
+ """InfoExtractor for gaskrank.tv"""
+ _VALID_URL = r'https?://(?:www\.)?gaskrank\.tv/tv/(?P<categories>[^/]+)/(?P<id>[^/]+)\.html?'
+ _TESTS = [
+ {
+ 'url': 'http://www.gaskrank.tv/tv/motorrad-fun/strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden.htm',
+ 'md5': '1ae88dbac97887d85ebd1157a95fc4f9',
+ 'info_dict': {
+ 'id': '201601/26955',
+ 'ext': 'mp4',
+ 'title': 'Strike! Einparken können nur Männer - Flurschaden hält sich in Grenzen *lol*',
+ 'thumbnail': r're:^https?://.*\.jpg$',
+ 'categories': ['motorrad-fun'],
+ 'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden',
+ 'uploader_id': 'Bikefun',
+ 'upload_date': '20170110',
+ 'uploader_url': None,
+ }
+ },
+ {
+ 'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm',
+ 'md5': 'c33ee32c711bc6c8224bfcbe62b23095',
+ 'info_dict': {
+ 'id': '201106/15920',
+ 'ext': 'mp4',
+ 'title': 'Isle of Man - Michael Dunlop vs Guy Martin - schwindelig kucken',
+ 'thumbnail': r're:^https?://.*\.jpg$',
+ 'categories': ['racing'],
+ 'display_id': 'isle-of-man-tt-2011-michael-du-15920',
+ 'uploader_id': 'IOM',
+ 'upload_date': '20160506',
+ 'uploader_url': 'www.iomtt.com',
+ }
+ }
+ ]
+
+ def _real_extract(self, url):
+ """extract information from gaskrank.tv"""
+ def fix_json(code):
+ """Removes trailing comma in json: {{},} --> {{}}"""
+ return re.sub(r',\s*}', r'}', js_to_json(code))
+
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ categories = [re.match(self._VALID_URL, url).group('categories')]
+ title = self._search_regex(
+ r'movieName\s*:\s*\'([^\']*)\'',
+ webpage, 'title')
+ thumbnail = self._search_regex(
+ r'poster\s*:\s*\'([^\']*)\'',
+ webpage, 'thumbnail', default=None)
+
+ mobj = re.search(
+ r'Video von:\s*(?P<uploader_id>[^|]*?)\s*\|\s*vom:\s*(?P<upload_date>[0-9][0-9]\.[0-9][0-9]\.[0-9][0-9][0-9][0-9])',
+ webpage)
+ if mobj is not None:
+ uploader_id = mobj.groupdict().get('uploader_id')
+ upload_date = unified_strdate(mobj.groupdict().get('upload_date'))
+
+ uploader_url = self._search_regex(
+ r'Homepage:\s*<[^>]*>(?P<uploader_url>[^<]*)',
+ webpage, 'uploader_url', default=None)
+ tags = re.findall(
+ r'/tv/tags/[^/]+/"\s*>(?P<tag>[^<]*?)<',
+ webpage)
+
+ view_count = self._search_regex(
+ r'class\s*=\s*"gkRight"(?:[^>]*>\s*<[^>]*)*icon-eye-open(?:[^>]*>\s*<[^>]*)*>\s*(?P<view_count>[0-9\.]*)',
+ webpage, 'view_count', default=None)
+ if view_count:
+ view_count = int_or_none(view_count.replace('.', ''))
+
+ average_rating = self._search_regex(
+ r'itemprop\s*=\s*"ratingValue"[^>]*>\s*(?P<average_rating>[0-9,]+)',
+ webpage, 'average_rating')
+ if average_rating:
+ average_rating = float_or_none(average_rating.replace(',', '.'))
+
+ playlist = self._parse_json(
+ self._search_regex(
+ r'playlist\s*:\s*\[([^\]]*)\]',
+ webpage, 'playlist', default='{}'),
+ display_id, transform_source=fix_json, fatal=False)
+
+ video_id = self._search_regex(
+ r'https?://movies\.gaskrank\.tv/([^-]*?)(-[^\.]*)?\.mp4',
+ playlist.get('0').get('src'), 'video id')
+
+ formats = []
+ for key in playlist:
+ formats.append({
+ 'url': playlist[key]['src'],
+ 'format_id': key,
+ 'quality': playlist[key].get('quality')})
+ self._sort_formats(formats, field_preference=['format_id'])
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ 'categories': categories,
+ 'display_id': display_id,
+ 'uploader_id': uploader_id,
+ 'upload_date': upload_date,
+ 'uploader_url': uploader_url,
+ 'tags': tags,
+ 'view_count': view_count,
+ 'average_rating': average_rating,
+ }
| - [x] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [x] New extractor
Added extractor for gaskrank.tv, a german-language motorcycle fun video site.
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/11685 | 2017-01-11T19:25:07Z | 2017-02-05T16:19:38Z | 2017-02-05T16:19:38Z | 2017-02-05T19:40:46Z | 1,634 | ytdl-org/youtube-dl | 49,704 |
Add missing `__eq__` and `__repr__ ` methods | diff --git a/gym/spaces/box.py b/gym/spaces/box.py
index f3ff2c73fa6..d0d41f27aeb 100644
--- a/gym/spaces/box.py
+++ b/gym/spaces/box.py
@@ -35,15 +35,18 @@ def __init__(self, low=None, high=None, shape=None, dtype=None):
def sample(self):
return gym.spaces.np_random.uniform(low=self.low, high=self.high + (0 if self.dtype.kind == 'f' else 1), size=self.low.shape).astype(self.dtype)
+
def contains(self, x):
return x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()
def to_jsonable(self, sample_n):
return np.array(sample_n).tolist()
+
def from_jsonable(self, sample_n):
return [np.asarray(sample) for sample in sample_n]
def __repr__(self):
return "Box" + str(self.shape)
+
def __eq__(self, other):
return np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
diff --git a/gym/spaces/dict_space.py b/gym/spaces/dict_space.py
index 669f2f848b8..56cb91a4e4e 100644
--- a/gym/spaces/dict_space.py
+++ b/gym/spaces/dict_space.py
@@ -71,3 +71,5 @@ def from_jsonable(self, sample_n):
ret.append(entry)
return ret
+ def __eq__(self, other):
+ return self.spaces == other.spaces
diff --git a/gym/spaces/discrete.py b/gym/spaces/discrete.py
index 72c2afa9c3f..c737640d80e 100644
--- a/gym/spaces/discrete.py
+++ b/gym/spaces/discrete.py
@@ -11,8 +11,10 @@ class Discrete(gym.Space):
def __init__(self, n):
self.n = n
gym.Space.__init__(self, (), np.int64)
+
def sample(self):
return gym.spaces.np_random.randint(self.n)
+
def contains(self, x):
if isinstance(x, int):
as_int = x
@@ -24,5 +26,6 @@ def contains(self, x):
def __repr__(self):
return "Discrete(%d)" % self.n
+
def __eq__(self, other):
return self.n == other.n
diff --git a/gym/spaces/multi_binary.py b/gym/spaces/multi_binary.py
index dd3f1d3649c..cfa3364c3ec 100644
--- a/gym/spaces/multi_binary.py
+++ b/gym/spaces/multi_binary.py
@@ -5,12 +5,21 @@ class MultiBinary(gym.Space):
def __init__(self, n):
self.n = n
gym.Space.__init__(self, (self.n,), np.int8)
+
def sample(self):
return gym.spaces.np_random.randint(low=0, high=2, size=self.n).astype(self.dtype)
+
def contains(self, x):
return ((x==0) | (x==1)).all()
def to_jsonable(self, sample_n):
return np.array(sample_n).tolist()
+
def from_jsonable(self, sample_n):
return [np.asarray(sample) for sample in sample_n]
+
+ def __repr__(self):
+ return "MultiBinary({})".format(self.n)
+
+ def __eq__(self, other):
+ return self.n == other.n
diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
index 16d79c5de60..ef921312520 100644
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -8,12 +8,21 @@ def __init__(self, nvec):
"""
self.nvec = np.asarray(nvec, dtype=np.int32)
gym.Space.__init__(self, (self.nvec.shape,), np.int8)
+
def sample(self):
return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)
+
def contains(self, x):
return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'
def to_jsonable(self, sample_n):
return [sample.tolist() for sample in sample_n]
+
def from_jsonable(self, sample_n):
return np.array(sample_n)
+
+ def __repr__(self):
+ return "MultiDiscrete({})".format(self.nvec)
+
+ def __eq__(self, other):
+ return np.all(self.nvec == other.nvec)
diff --git a/gym/spaces/tests/test_spaces.py b/gym/spaces/tests/test_spaces.py
index dec8ecdf1ce..cadc86ff06c 100644
--- a/gym/spaces/tests/test_spaces.py
+++ b/gym/spaces/tests/test_spaces.py
@@ -1,16 +1,19 @@
import json # note: ujson fails this test due to float equality
+from copy import copy
+
import numpy as np
import pytest
+
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict
@pytest.mark.parametrize("space", [
Discrete(3),
Tuple([Discrete(5), Discrete(10)]),
- Tuple([Discrete(5), Box(low=np.array([0,0]),high=np.array([1,5]))]),
+ Tuple([Discrete(5), Box(low=np.array([0, 0]),high=np.array([1, 5]))]),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
- MultiDiscrete([ 2, 2, 100]),
- Dict({"position": Discrete(5), "velocity": Box(low=np.array([0,0]),high=np.array([1,5]))}),
+ MultiDiscrete([2, 2, 100]),
+ Dict({"position": Discrete(5), "velocity": Box(low=np.array([0, 0]), high=np.array([1, 5]))}),
])
def test_roundtripping(space):
sample_1 = space.sample()
@@ -30,3 +33,34 @@ def test_roundtripping(space):
s2p = space.to_jsonable([sample_2_prime])
assert s1 == s1p, "Expected {} to equal {}".format(s1, s1p)
assert s2 == s2p, "Expected {} to equal {}".format(s2, s2p)
+
+
+@pytest.mark.parametrize("space", [
+ Discrete(3),
+ Box(low=np.array([-10, 0]),high=np.array([10, 10])),
+ Tuple([Discrete(5), Discrete(10)]),
+ Tuple([Discrete(5), Box(low=np.array([0, 0]),high=np.array([1, 5]))]),
+ Tuple((Discrete(5), Discrete(2), Discrete(2))),
+ MultiDiscrete([2, 2, 100]),
+ MultiBinary(6),
+ Dict({"position": Discrete(5), "velocity": Box(low=np.array([0, 0]), high=np.array([1, 5]))}),
+ ])
+def test_equality(space):
+ space1 = space
+ space2 = copy(space)
+ assert space1 == space2, "Expected {} to equal {}".format(space1, space2)
+
+
+@pytest.mark.parametrize("spaces", [
+ (Discrete(3), Discrete(4)),
+ (MultiDiscrete([2, 2, 100]), MultiDiscrete([2, 2, 8])),
+ (MultiBinary(8), MultiBinary(7)),
+ (Box(low=np.array([-10, 0]),high=np.array([10, 10])),
+ Box(low=np.array([-10, 0]),high=np.array([10, 9]))),
+ (Tuple([Discrete(5), Discrete(10)]), Tuple([Discrete(1), Discrete(10)])),
+ (Dict({"position": Discrete(5)}), Dict({"position": Discrete(4)})),
+ (Dict({"position": Discrete(5)}), Dict({"speed": Discrete(5)})),
+ ])
+def test_inequality(spaces):
+ space1, space2 = spaces
+ assert space1 != space2, "Expected {} != {}".format(space1, space2)
diff --git a/gym/spaces/tuple_space.py b/gym/spaces/tuple_space.py
index 453663781f4..473aa6529f4 100644
--- a/gym/spaces/tuple_space.py
+++ b/gym/spaces/tuple_space.py
@@ -30,3 +30,6 @@ def to_jsonable(self, sample_n):
def from_jsonable(self, sample_n):
return [sample for sample in zip(*[space.from_jsonable(sample_n[i]) for i, space in enumerate(self.spaces)])]
+
+ def __eq__(self, other):
+ return self.spaces == other.spaces
| Fixes #1171
- Add missing `__eq__` and `__repr__ ` methods
- Update tests | https://api.github.com/repos/openai/gym/pulls/1178 | 2018-09-23T19:34:25Z | 2018-09-24T18:11:04Z | 2018-09-24T18:11:03Z | 2018-09-24T18:53:21Z | 2,097 | openai/gym | 5,208 |
Add cover to Travis CI build matrix | diff --git a/.travis.yml b/.travis.yml
index 1a63f343f7d..c444081d9f8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,3 +20,4 @@ script: travis_retry tox
env:
- TOXENV=py${TRAVIS_PYTHON_VERSION//[.]/}
- TOXENV=lint
+ - TOXENV=cover
| https://api.github.com/repos/certbot/certbot/pulls/106 | 2014-11-30T02:33:19Z | 2014-11-30T05:33:20Z | 2014-11-30T05:33:20Z | 2016-05-06T19:21:26Z | 104 | certbot/certbot | 710 | |
Update __init__.py | diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py
index 19734d29b84..3df0b8172ff 100644
--- a/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py
+++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/__init__.py
@@ -161,8 +161,8 @@
zone option. The zone will be then visible in both zones with exactly the same content.
.. note::
- Order matters in BIND views, the ``in-view`` zone option must refer to a
- view defined preceeding it, it cannot refer to a view defined later in the configuration file.
+ Order matters in BIND views: the ``in-view`` zone option must refer to a
+ view defined preceeding it. It cannot refer to a view defined later in the configuration file.
.. code-block:: none
:caption: Split-view BIND configuration
| - A trivial doc change to avoid a writing nit commonly seen in newer American writers.
- do not list in credits. | https://api.github.com/repos/certbot/certbot/pulls/9653 | 2023-04-01T22:27:14Z | 2023-04-02T01:15:09Z | 2023-04-02T01:15:09Z | 2023-04-02T01:15:09Z | 251 | certbot/certbot | 823 |
Added example to persist graph | diff --git a/docs/understanding/storing/storing.md b/docs/understanding/storing/storing.md
index 85dae3f104163..77f530895fafe 100644
--- a/docs/understanding/storing/storing.md
+++ b/docs/understanding/storing/storing.md
@@ -10,6 +10,12 @@ The simplest way to store your indexed data is to use the built-in `.persist()`
index.storage_context.persist(persist_dir="<persist_dir>")
```
+Here is an example for Composable Graph:
+
+```python
+graph.root_index.storage_context.persist(persist_dir="<persist_dir>")
+```
+
You can then avoid re-loading and re-indexing your data by loading the persisted index like this:
```python
| # Description
Added example to store `Composable Graph`.
Fixes # (issue)
NA
## Type of Change
Please delete options that are not relevant.
- [x] This change requires a documentation update
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [x] I tested the code
# Suggested Checklist:
- [x] I have performed a self-review of my own code
- [x] I have commented my code, particularly in hard-to-understand areas
- [x] I have made corresponding changes to the documentation
| https://api.github.com/repos/run-llama/llama_index/pulls/8558 | 2023-10-29T13:52:05Z | 2023-10-30T03:31:23Z | 2023-10-30T03:31:22Z | 2023-10-30T03:31:26Z | 166 | run-llama/llama_index | 6,402 |
update prune infer model size | diff --git a/doc/doc_ch/models_list.md b/doc/doc_ch/models_list.md
index 1f8c65c107..d8c2b77c3e 100644
--- a/doc/doc_ch/models_list.md
+++ b/doc/doc_ch/models_list.md
@@ -31,7 +31,7 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训
|模型名称|模型简介|配置文件|推理模型大小|下载地址|
| --- | --- | --- | --- | --- |
-|ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|[ch_det_mv3_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| |[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar)|
+|ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|[ch_det_mv3_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar)|
|ch_ppocr_mobile_v2.0_det|原始超轻量模型,支持中英文、多语种文本检测|[ch_det_mv3_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)|3M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar)|
|ch_ppocr_server_v2.0_det|通用模型,支持中英文、多语种文本检测,比超轻量模型更大,但效果更好|[ch_det_res18_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml)|47M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar)|
diff --git a/doc/doc_en/models_list_en.md b/doc/doc_en/models_list_en.md
index 78f08fda1f..0719832817 100644
--- a/doc/doc_en/models_list_en.md
+++ b/doc/doc_en/models_list_en.md
@@ -27,7 +27,7 @@ Relationship of the above models is as follows.
|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
-|ch_ppocr_mobile_slim_v2.0_det|Slim pruned lightweight model, supporting Chinese, English, multilingual text detection|[ch_det_mv3_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| |[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar)|
+|ch_ppocr_mobile_slim_v2.0_det|Slim pruned lightweight model, supporting Chinese, English, multilingual text detection|[ch_det_mv3_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)|2.6M |[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar)|
|ch_ppocr_mobile_v2.0_det|Original lightweight model, supporting Chinese, English, multilingual text detection|[ch_det_mv3_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)|3M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar)|
|ch_ppocr_server_v2.0_det|General model, which is larger than the lightweight model, but achieved better performance|[ch_det_res18_db_v2.0.yml](../../configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml)|47M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) / [trained model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar)|
| https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/2893 | 2021-05-24T08:30:53Z | 2021-05-24T08:30:59Z | 2021-05-24T08:30:59Z | 2021-05-24T08:30:59Z | 1,218 | PaddlePaddle/PaddleOCR | 42,095 | |
Add SEC EDGAR data API to finance | diff --git a/README.md b/README.md
index 3547ed61b0..8f36290c3c 100644
--- a/README.md
+++ b/README.md
@@ -692,6 +692,7 @@ API | Description | Auth | HTTPS | CORS |
| [Polygon](https://polygon.io/) | Historical stock market data | `apiKey` | Yes | Unknown |
| [Razorpay IFSC](https://razorpay.com/docs/) | Indian Financial Systems Code (Bank Branch Codes) | No | Yes | Unknown |
| [Real Time Finance](https://github.com/Real-time-finance/finance-websocket-API/) | Websocket API to access realtime stock data | `apiKey` | No | Unknown |
+| [SEC EDGAR Data](https://www.sec.gov/edgar/sec-api-documentation) | API to access annual reports of public US companies | No | Yes | Yes |
| [SmartAPI](https://smartapi.angelbroking.com/) | Gain access to set of <SmartAPI> and create end-to-end broking services | `apiKey` | Yes | Unknown |
| [StockData](https://www.StockData.org) | Real-Time, Intraday & Historical Market Data, News and Sentiment API | `apiKey` | Yes | Yes |
| [Styvio](https://www.Styvio.com) | Realtime and historical stock data and current stock sentiment | No | Yes | Unknown |
| <!-- Thank you for taking the time to work on a Pull Request for this project! -->
<!-- To ensure your PR is dealt with swiftly please check the following: -->
- [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [x] My submission has a useful description
- [x] The description does not have more than 100 characters
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [x] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
| https://api.github.com/repos/public-apis/public-apis/pulls/2827 | 2021-10-30T18:45:16Z | 2021-10-30T19:37:43Z | 2021-10-30T19:37:43Z | 2021-10-30T19:37:43Z | 312 | public-apis/public-apis | 35,526 |
[extractor/discogs] Add extractor | diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py
index 69464b6f002..fcfce7f2895 100644
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -499,6 +499,7 @@
DeuxMNewsIE
)
from .digitalconcerthall import DigitalConcertHallIE
+from .discogs import DiscogsReleasePlaylistIE
from .discovery import DiscoveryIE
from .disney import DisneyIE
from .dispeak import DigitallySpeakingIE
diff --git a/yt_dlp/extractor/discogs.py b/yt_dlp/extractor/discogs.py
new file mode 100644
index 00000000000..048c62288ca
--- /dev/null
+++ b/yt_dlp/extractor/discogs.py
@@ -0,0 +1,35 @@
+from .common import InfoExtractor
+from .youtube import YoutubeIE
+from ..utils import traverse_obj
+
+
+class DiscogsReleasePlaylistIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?discogs\.com/(?P<type>release|master)/(?P<id>\d+)'
+ _TESTS = [{
+ 'url': 'https://www.discogs.com/release/1-The-Persuader-Stockholm',
+ 'info_dict': {
+ 'id': 'release1',
+ 'title': 'Stockholm',
+ },
+ 'playlist_mincount': 7,
+ }, {
+ 'url': 'https://www.discogs.com/master/113-Vince-Watson-Moments-In-Time',
+ 'info_dict': {
+ 'id': 'master113',
+ 'title': 'Moments In Time',
+ },
+ 'playlist_mincount': 53,
+ }]
+
+ def _real_extract(self, url):
+ playlist_id, playlist_type = self._match_valid_url(url).group('id', 'type')
+
+ display_id = f'{playlist_type}{playlist_id}'
+ response = self._download_json(
+ f'https://api.discogs.com/{playlist_type}s/{playlist_id}', display_id)
+
+ entries = [
+ self.url_result(video['uri'], YoutubeIE, video_title=video.get('title'))
+ for video in traverse_obj(response, ('videos', lambda _, v: YoutubeIE.suitable(v['uri'])))]
+
+ return self.playlist_result(entries, display_id, response.get('title'))
| **IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
Add an extractor for the playlists embedded in Release and Master Release pages on discogs.com (an online music database). The generic extractor used to be able to read these, but that stopped working a long time ago.
Example URLs:
- https://www.discogs.com/release/1-The-Persuader-Stockholm
- https://www.discogs.com/master/113-Vince-Watson-Moments-In-Time
<details open><summary>Template</summary> <!-- OPEN is intentional -->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Fix or improvement to an extractor (Make sure to add/update tests)
- [x] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
</details>
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/6624 | 2023-03-24T22:00:09Z | 2023-06-14T18:40:07Z | 2023-06-14T18:40:07Z | 2023-06-14T18:40:07Z | 579 | yt-dlp/yt-dlp | 7,896 |
Nest | diff --git a/Recursion Visulaizer/.recursionVisualizer.py.swp b/Recursion Visulaizer/.recursionVisualizer.py.swp
new file mode 100644
index 0000000000..872ad8254b
Binary files /dev/null and b/Recursion Visulaizer/.recursionVisualizer.py.swp differ
diff --git a/Recursion Visulaizer/Magneticfieldlines.PNG b/Recursion Visulaizer/Magneticfieldlines.PNG
new file mode 100644
index 0000000000..d259443833
Binary files /dev/null and b/Recursion Visulaizer/Magneticfieldlines.PNG differ
diff --git a/Recursion Visulaizer/Perfectnests.PNG b/Recursion Visulaizer/Perfectnests.PNG
new file mode 100644
index 0000000000..de638e9f4a
Binary files /dev/null and b/Recursion Visulaizer/Perfectnests.PNG differ
diff --git a/Recursion Visulaizer/Synergy.PNG b/Recursion Visulaizer/Synergy.PNG
new file mode 100644
index 0000000000..1be046ded3
Binary files /dev/null and b/Recursion Visulaizer/Synergy.PNG differ
diff --git a/Recursion Visulaizer/git b/Recursion Visulaizer/git
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/Recursion Visulaizer/logging.PNG b/Recursion Visulaizer/logging.PNG
new file mode 100644
index 0000000000..c6078e5486
Binary files /dev/null and b/Recursion Visulaizer/logging.PNG differ
diff --git a/Recursion Visulaizer/recursionVisualizer.py b/Recursion Visulaizer/recursionVisualizer.py
index 1cdae836a5..670d8baf16 100644
--- a/Recursion Visulaizer/recursionVisualizer.py
+++ b/Recursion Visulaizer/recursionVisualizer.py
@@ -1,19 +1,56 @@
import turtle
+import random
t = turtle.Turtle()
-t.left(90)
-t.speed(200)
+num=random.randint(1,100)
+t.right(num)
+t.speed(num)
+t.left(num)
def tree(i):
if i<10:
return
else:
- t.forward(i)
- t.left(30)
+ t.right(10)
tree(3*i/4)
- t.right(60)
+ t.backward(20)
+ tree(2*i/5)
+ tree(i/5)
+ t.forward(90)
+ t.left(20)
+ tree(i*2)
+ t.backward(100)
+ t.right(30)
+ tree(i/5)
+ t.forward(30)
+ t.left(40)
tree(3*i/4)
- t.left(30)
- t.backward(i)
+ t.forward(90)
+ tree(2*i/5)
+ print('tree execution complete')
-tree(100)
+def cycle(i):
+ if i<10:
+ return
+ else:
+ try:
+ tree(random.randint(1,i))
+ tree(random.randint(1,i*2))
+ except:
+ print('An exception occured')
+ else:
+ print('No Exception occured')
+ print('cycle loop complete')
+
+def fractal(i):
+ if i<10:
+ return
+ else:
+ cycle(random.randint(1,i+1))
+ cycle(random.randint(1,i))
+ cycle(random.randint(1,i-1))
+ cycle(random.randint(1,i-2))
+ print('fractal execution complete')
+
+fractal(random.randint(1,10000))
+print('Execution complete')
turtle.done()
| https://api.github.com/repos/geekcomputers/Python/pulls/1381 | 2021-09-18T16:52:58Z | 2021-09-26T19:17:29Z | 2021-09-26T19:17:29Z | 2021-09-26T19:17:29Z | 862 | geekcomputers/Python | 31,801 | |
declare Python 3.7 support in setup.py | diff --git a/setup.py b/setup.py
index 8c47f67ce35..bd666e93c47 100644
--- a/setup.py
+++ b/setup.py
@@ -56,6 +56,7 @@ def has_environment_marker_platform_impl_support():
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
| Scrapy supports 3.7 since #3326. | https://api.github.com/repos/scrapy/scrapy/pulls/3547 | 2018-12-26T08:19:46Z | 2018-12-26T14:09:08Z | 2018-12-26T14:09:08Z | 2019-01-30T10:33:13Z | 142 | scrapy/scrapy | 34,306 |
Fix regression in Cloudflare library | diff --git a/certbot-dns-cloudflare/certbot_dns_cloudflare/__init__.py b/certbot-dns-cloudflare/certbot_dns_cloudflare/__init__.py
index 81c053c04fa..b72f19f0816 100644
--- a/certbot-dns-cloudflare/certbot_dns_cloudflare/__init__.py
+++ b/certbot-dns-cloudflare/certbot_dns_cloudflare/__init__.py
@@ -39,7 +39,7 @@
zones you need certificates for.
Using Cloudflare Tokens also requires at least version 2.3.1 of the ``cloudflare``
-python module. If the version that automatically installed with this plugin is
+Python module. If the version that automatically installed with this plugin is
older than that, and you can't upgrade it on your system, you'll have to stick to
the Global key.
@@ -77,6 +77,18 @@
including for renewal, and cannot be silenced except by addressing the issue
(e.g., by using a command like ``chmod 600`` to restrict access to the file).
+.. note::
+ Please note that the ``cloudflare`` Python module used by the plugin has
+ additional methods of providing credentials to the module, e.g. environment
+ variables or the ``cloudflare.cfg`` configuration file. These methods are not
+ supported by Certbot. If any of those additional methods of providing
+ credentials is being used, they must provide the same credentials (i.e.,
+ email and API key *or* an API token) as the credentials file provided to
+ Certbot. If there is a discrepancy, the ``cloudflare`` Python module will
+ raise an error. Also note that the credentials provided to Certbot will take
+ precedence over any other method of providing credentials to the ``cloudflare``
+ Python module.
+
Examples
--------
diff --git a/certbot-dns-cloudflare/certbot_dns_cloudflare/_internal/dns_cloudflare.py b/certbot-dns-cloudflare/certbot_dns_cloudflare/_internal/dns_cloudflare.py
index eac29a85b9d..e8bf560c682 100644
--- a/certbot-dns-cloudflare/certbot_dns_cloudflare/_internal/dns_cloudflare.py
+++ b/certbot-dns-cloudflare/certbot_dns_cloudflare/_internal/dns_cloudflare.py
@@ -82,8 +82,9 @@ def _get_cloudflare_client(self) -> "_CloudflareClient":
if not self.credentials: # pragma: no cover
raise errors.Error("Plugin has not been prepared.")
if self.credentials.conf('api-token'):
- return _CloudflareClient(None, self.credentials.conf('api-token'))
- return _CloudflareClient(self.credentials.conf('email'), self.credentials.conf('api-key'))
+ return _CloudflareClient(api_token = self.credentials.conf('api-token'))
+ return _CloudflareClient(email = self.credentials.conf('email'),
+ api_key = self.credentials.conf('api-key'))
class _CloudflareClient:
@@ -91,8 +92,19 @@ class _CloudflareClient:
Encapsulates all communication with the Cloudflare API.
"""
- def __init__(self, email: Optional[str], api_key: str) -> None:
- self.cf = CloudFlare.CloudFlare(email, api_key)
+ def __init__(self, email: Optional[str] = None, api_key: Optional[str] = None,
+ api_token: Optional[str] = None) -> None:
+ if email:
+ # If an email was specified, we're using an email/key combination and not a token.
+ # We can't use named arguments in this case, as it would break compatibility with
+ # the Cloudflare library since version 2.10.1, as the `token` argument was used for
+ # tokens and keys alike and the `key` argument did not exist in earlier versions.
+ self.cf = CloudFlare.CloudFlare(email, api_key)
+ else:
+ # If no email was specified, we're using just a token. Let's use the named argument
+ # for simplicity, which is compatible with all (current) versions of the Cloudflare
+ # library.
+ self.cf = CloudFlare.CloudFlare(token=api_token)
def add_txt_record(self, domain: str, record_name: str, record_content: str,
record_ttl: int) -> None:
diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md
index 018eaa0491c..d02d2cdbaca 100644
--- a/certbot/CHANGELOG.md
+++ b/certbot/CHANGELOG.md
@@ -14,7 +14,10 @@ Certbot adheres to [Semantic Versioning](https://semver.org/).
### Fixed
-*
+* Fixed an incompatibility in the certbot-dns-cloudflare plugin and the Cloudflare library
+ which was introduced in the Cloudflare library version 2.10.1. The library would raise
+ an error if a token was specified in the Certbot `--dns-cloudflare-credentials` file as
+ well as the `cloudflare.cfg` configuration file of the Cloudflare library.
More details about these changes can be found on our GitHub repo.
| Fixes #9407
## Pull Request Checklist
- [x] The Certbot team has recently expressed interest in reviewing a PR for this. If not, this PR may be closed due our limited resources and need to prioritize how we spend them. (https://github.com/certbot/certbot/issues/9407#issuecomment-1257842294)
- [X] If the change being made is to a [distributed component](https://certbot.eff.org/docs/contributing.html#code-components-and-layout), edit the `master` section of `certbot/CHANGELOG.md` to include a description of the change being made.
- [X] Add or update any documentation as needed to support the changes in this PR. | https://api.github.com/repos/certbot/certbot/pulls/9417 | 2022-09-26T18:59:20Z | 2022-09-26T21:48:30Z | 2022-09-26T21:48:30Z | 2022-09-26T22:04:05Z | 1,177 | certbot/certbot | 1,114 |
Optimize preprocess for ragged batching | diff --git a/deepspeed/inference/v2/ragged/ragged_wrapper.py b/deepspeed/inference/v2/ragged/ragged_wrapper.py
index 15abd1987aa1..056ecfa2ac40 100644
--- a/deepspeed/inference/v2/ragged/ragged_wrapper.py
+++ b/deepspeed/inference/v2/ragged/ragged_wrapper.py
@@ -113,12 +113,23 @@ def __init__(self, config: DSStateManagerConfig) -> None:
# Default behavior should be no padding
self._is_padded = False
+ self._current_tokens = 0
+ self._current_sequences = 0
+ self._batch_tokens = []
+ self._inflight_seq_descriptors_shadow_buf = []
+ self._kv_blocks_ptr_buf = []
+ self._token_to_seq_storage_shadow_buf = []
+
def clear(self) -> None:
"""
Clear the ragged batch. This will reset the number of tokens and sequences to 0.
"""
- self._batch_metadata_storage_shadow[0] = 0
- self._batch_metadata_storage_shadow[1] = 0
+ self._current_tokens = 0
+ self._current_sequences = 0
+ self._batch_tokens = []
+ self._inflight_seq_descriptors_shadow_buf = []
+ self._kv_blocks_ptr_buf = []
+ self._token_to_seq_storage_shadow_buf = []
def insert_sequence(self, seq_descriptor: DSSequenceDescriptor, tokens: torch.Tensor, do_checks=True) -> None:
"""
@@ -140,18 +151,23 @@ def insert_sequence(self, seq_descriptor: DSSequenceDescriptor, tokens: torch.Te
if do_checks and self.current_tokens + seq_tokens > self._config.max_ragged_batch_size:
raise RuntimeError(f"Ragged batch is full due to capacity limit: {self._config.max_ragged_batch_size})")
- self._input_ids_shadow[self.current_tokens:self.current_tokens + seq_tokens].copy_(tokens)
- self._token_to_seq_storage_shadow[self.current_tokens:self.current_tokens + seq_tokens].fill_(
- self.current_sequences)
+ # The values in _inflight_seq_descriptors_shadow_buf, _token_to_seq_storage_shadow_buf, _kv_blocks_ptr_buf, etc.,
+ # are ultimately stored in PyTorch tensors: _inflight_seq_descriptors_shadow, _token_to_seq_storage_shadow, _kv_ptrs_shadow, etc.
+ # However, we found it inefficient to iterate over and substitute values into tensor slices or to use copy/fill calls for this purpose.
+ # Therefore, we initially store the values in Python lists or primitive data types and then copy them collectively in the finalize() method,
+ # instead of updating the tensors directly in each iteration.
+ self._batch_tokens.append(tokens)
+ self._inflight_seq_descriptors_shadow_buf.append(self.current_tokens)
+ self._inflight_seq_descriptors_shadow_buf.append(seq_tokens)
+ self._inflight_seq_descriptors_shadow_buf.append(seq_descriptor.seen_tokens)
+ self._inflight_seq_descriptors_shadow_buf.append(0) # alignment
- self._inflight_seq_descriptors_shadow[self.current_sequences][0] = self.current_tokens
- self._inflight_seq_descriptors_shadow[self.current_sequences][1] = seq_tokens
- self._inflight_seq_descriptors_shadow[self.current_sequences][2] = seq_descriptor.seen_tokens
+ self._token_to_seq_storage_shadow_buf.extend([self.current_sequences] * seq_tokens)
- self._kv_ptrs_shadow[self.current_sequences] = seq_descriptor.kv_blocks_ptr
+ self._kv_blocks_ptr_buf.append(seq_descriptor.kv_blocks_ptr)
- self._batch_metadata_storage_shadow[0] += seq_tokens
- self._batch_metadata_storage_shadow[1] += 1
+ self._current_tokens += seq_tokens
+ self._current_sequences += 1
@property
def tensor_toks(self) -> torch.Tensor:
@@ -171,6 +187,15 @@ def finalize(self, padding: Optional[bool] = False) -> None:
"""
cur_toks = self.current_tokens
+ # Batch-copy the values recorded in insert_sequence() into PyTorch tensors to enhance efficiency.
+ self._inflight_seq_descriptors_shadow.flatten()[:len(self._inflight_seq_descriptors_shadow_buf)].copy_(
+ torch.tensor(self._inflight_seq_descriptors_shadow_buf))
+ self._input_ids_shadow[:self.current_tokens].copy_(torch.cat(self._batch_tokens, dim=0))
+ self._token_to_seq_storage_shadow[:len(self._token_to_seq_storage_shadow_buf)].copy_(
+ torch.tensor(self._token_to_seq_storage_shadow_buf))
+ self._kv_ptrs_shadow[:len(self._kv_blocks_ptr_buf)].copy_(torch.tensor(self._kv_blocks_ptr_buf))
+ self._batch_metadata_storage_shadow.copy_(torch.tensor([cur_toks, self.current_sequences]))
+
if padding:
padded_toks = to_padded(cur_toks)
self._input_ids_shadow[cur_toks:padded_toks].fill_(-1)
@@ -256,7 +281,7 @@ def current_tokens(self) -> int:
The number of tokens in the in-flight ragged batch. This will not trigger
synchronization with the device.
"""
- return self._batch_metadata_storage_shadow[0].item()
+ return self._current_tokens
@property
def current_sequences(self) -> int:
@@ -264,4 +289,4 @@ def current_sequences(self) -> int:
The number of sequences in the in-flight ragged batch. This will not trigger
synchronization with the device.
"""
- return self._batch_metadata_storage_shadow[1].item()
+ return self._current_sequences
| This PR improves efficiency of preprocessing for ragged batching.
It is not efficient to iterate substituting values to tensor slices or copy/fill calls for small numbers of values. This PR records the values in python lists or primitives and copy them at once. | https://api.github.com/repos/microsoft/DeepSpeed/pulls/4942 | 2024-01-12T00:46:10Z | 2024-01-13T01:59:16Z | 2024-01-13T01:59:16Z | 2024-01-13T01:59:21Z | 1,267 | microsoft/DeepSpeed | 10,085 |
parallelize writing of layer checkpoint files across data parallel instances | diff --git a/deepspeed/runtime/config.py b/deepspeed/runtime/config.py
index 6d377e5dc6a2..b602d9d10958 100755
--- a/deepspeed/runtime/config.py
+++ b/deepspeed/runtime/config.py
@@ -669,6 +669,19 @@ def get_checkpoint_tag_validation_mode(checkpoint_params):
)
+def get_checkpoint_parallel_write_pipeline(checkpoint_params):
+ par_write_params = checkpoint_params.get(CHECKPOINT_PARALLEL_WRITE, {})
+ par_write_pipeline = par_write_params.get(
+ CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE,
+ CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT)
+ if par_write_pipeline in [True, False]:
+ return par_write_pipeline
+ else:
+ raise DeepSpeedConfigError(
+ "checkpoint::parallel_write::pipeline_stage "
+ f"value of '{par_write_pipeline}' is invalid, expecting: true or false")
+
+
def get_dataloader_drop_last(param_dict):
return get_scalar_param(param_dict,
DATALOADER_DROP_LAST,
@@ -887,6 +900,8 @@ def _initialize_params(self, param_dict):
self.load_universal_checkpoint = checkpoint_params.get(
LOAD_UNIVERSAL_CHECKPOINT,
LOAD_UNIVERSAL_CHECKPOINT_DEFAULT)
+ par_write_pipe = get_checkpoint_parallel_write_pipeline(checkpoint_params)
+ self.checkpoint_parallel_write_pipeline = par_write_pipe
self.aio_config = get_aio_config(param_dict)
diff --git a/deepspeed/runtime/constants.py b/deepspeed/runtime/constants.py
index da36a7199470..c6a257e77d73 100755
--- a/deepspeed/runtime/constants.py
+++ b/deepspeed/runtime/constants.py
@@ -367,6 +367,9 @@ class ValidationMode:
# "checkpoint": {
# tag_validation=["Ignore"|"Warn"|"Fail"]
# load_universal=false
+# parallel_write: {
+# pipeline_stage: [True|False]
+# }
# }
CHECKPOINT = "checkpoint"
CHECKPOINT_TAG_VALIDATION = "tag_validation"
@@ -380,6 +383,10 @@ class ValidationMode:
LOAD_UNIVERSAL_CHECKPOINT = "load_universal"
LOAD_UNIVERSAL_CHECKPOINT_DEFAULT = False
+CHECKPOINT_PARALLEL_WRITE = "parallel_write"
+CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE = "pipeline_stage"
+CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT = False
+
#########################################
# Drop the last incomplete Batch
# #########################################
diff --git a/deepspeed/runtime/engine.py b/deepspeed/runtime/engine.py
index 1f4331239f3b..0b54642abb0a 100644
--- a/deepspeed/runtime/engine.py
+++ b/deepspeed/runtime/engine.py
@@ -2924,7 +2924,11 @@ def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True)
self._create_checkpoint_file(save_dir, tag, False)
self._save_moe_checkpoint(save_dir, tag, client_state=client_state)
- if self.save_non_zero_checkpoint:
+ # We distribute the task of saving layer checkpoint files among
+ # data parallel instances, so all procs should call _save_checkpoint.
+ # All procs then call module_state_dict(), but only procs of data
+ # parallel rank 0 save the general model params.
+ if not self.has_moe_layers:
self._create_checkpoint_file(save_dir, tag, False)
self._save_checkpoint(save_dir, tag, client_state=client_state)
@@ -3091,12 +3095,18 @@ def _create_zero_checkpoint_files(self, save_dir, tag):
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
+
+ zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled()
+
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
- # then instead just returns None.
+ # then instead just returns None. The module_state_dict() implementation in
+ # PipelineEngine expects the save path to be set in self._curr_ckpt_path.
self._curr_ckpt_path = os.path.join(save_dir, tag)
- zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled()
- state = dict(module=self.module_state_dict(),
+ module = self.module_state_dict()
+ self._curr_ckpt_path = None
+
+ state = dict(module=module,
buffer_names=self._get_buffer_names(),
optimizer=self.optimizer.state_dict()
if self.optimizer and not zero_optimizer_state else None,
@@ -3114,9 +3124,9 @@ def _save_checkpoint(self, save_dir, tag, client_state={}):
ds_version=version)
state.update(client_state)
- log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1])
- self.checkpoint_engine.save(state, save_path)
- self._curr_save_path = None
+ if self.save_non_zero_checkpoint:
+ log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1])
+ self.checkpoint_engine.save(state, save_path)
def _get_buffer_names(self):
buffer_names = []
diff --git a/deepspeed/runtime/pipe/engine.py b/deepspeed/runtime/pipe/engine.py
index 33edc2db1a6a..1a57bb4e84a2 100644
--- a/deepspeed/runtime/pipe/engine.py
+++ b/deepspeed/runtime/pipe/engine.py
@@ -182,6 +182,8 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs):
self.module.activation_checkpoint_interval = self._config.pipeline[
'activation_checkpoint_interval']
+ self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline
+
if self.is_last_stage():
self.loss_model = self.module.loss_fn
diff --git a/deepspeed/runtime/pipe/module.py b/deepspeed/runtime/pipe/module.py
index 03e1c413c950..294db38b3bfb 100644
--- a/deepspeed/runtime/pipe/module.py
+++ b/deepspeed/runtime/pipe/module.py
@@ -562,13 +562,28 @@ def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx):
return ckpt_files
def save_state_dict(self, save_dir, checkpoint_engine):
- if self._grid.data_parallel_id != 0:
- return
+ # Processes having the same model parallel rank on different data parallel instances
+ # have identical layer weights. We can distribute the task of saving the layer weights
+ # among the data parallel ranks. For example, if a pipeline stage has 9 layers and
+ # if there are 2 data parallel instances, rank 0 will save the first 5 layers and
+ # rank 1 will save the last 4.
+ dp_rank = self._grid.data_parallel_id
+ dp_size = self._grid.data_parallel_size
+ num_layers = len(self.forward_funcs)
+ if self.checkpoint_parallel_write_pipeline:
+ # spread layers evenly across data parallel ranks
+ offsets = ds_utils.partition_uniform(num_layers, dp_size)
+ start, end = offsets[dp_rank], offsets[dp_rank + 1]
+ else:
+ # data parallel rank 0 writes all layers
+ if dp_rank != 0:
+ return
+ start, end = 0, num_layers
+ layer_list = self.forward_funcs[start:end]
os.makedirs(save_dir, exist_ok=True)
- layer_offset = self._local_start
- for idx, layer in enumerate(self.forward_funcs):
- model_ckpt_path = self.ckpt_layer_path(save_dir, idx)
+ for idx, layer in enumerate(layer_list):
+ model_ckpt_path = self.ckpt_layer_path(save_dir, start + idx)
if not hasattr(layer, 'state_dict'):
continue
# We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save()
| This is work in progress, but I wanted to open it early for discussion. Also, I wrote this before MOE was added, and it will need to be updated to account for that. I can help with that if this approach is approved.
In case a pipeline stage has multiple layers, this parallelizes the task of writing the layer checkpoint files across the data parallel group. For example, if one is running with two data parallel instances, and if a pipeline stage has 10 layers, this modifies things so that rank 0 will write 5 layers and rank 1 will write the other 5, rather than have rank 0 do all of the work. On my system, this reduces checkpoint cost. It also better balances the total bytes written across ranks.
The main change is to have all procs call _save_checkpoint, and then in module_state_dict, the list of layers is subdivided among the procs that have the same model parallel rank across all data parallel instances. | https://api.github.com/repos/microsoft/DeepSpeed/pulls/1419 | 2021-09-30T19:11:57Z | 2022-10-21T18:31:21Z | 2022-10-21T18:31:21Z | 2022-10-27T20:11:14Z | 1,805 | microsoft/DeepSpeed | 10,093 |
[Apache v2] Adding nodes 2/3 : add_child_directive() | diff --git a/certbot-apache/certbot_apache/augeasparser.py b/certbot-apache/certbot_apache/augeasparser.py
index e340519ce61..5fd5247c15d 100644
--- a/certbot-apache/certbot_apache/augeasparser.py
+++ b/certbot-apache/certbot_apache/augeasparser.py
@@ -221,12 +221,26 @@ def add_child_block(self, name, parameters=None, position=None): # pragma: no c
# pylint: disable=unused-argument
def add_child_directive(self, name, parameters=None, position=None): # pragma: no cover
"""Adds a new DirectiveNode to the sequence of children"""
- new_metadata = {"augeasparser": self.parser, "augeaspath": assertions.PASS}
- new_dir = AugeasDirectiveNode(name=assertions.PASS,
- ancestor=self,
- filepath=assertions.PASS,
+
+ if not parameters:
+ raise errors.PluginError("Directive requires parameters and none were set.")
+
+ insertpath, realpath, before = self._aug_resolve_child_position(
+ "directive",
+ position
+ )
+ new_metadata = {"augeasparser": self.parser, "augeaspath": realpath}
+
+ # Create the new directive
+ self.parser.aug.insert(insertpath, "directive", before)
+ # Set the directive key
+ self.parser.aug.set(realpath, name)
+
+ new_dir = AugeasDirectiveNode(name=name,
+ parameters=parameters,
+ ancestor=assertions.PASS,
+ filepath=apache_util.get_file_path(realpath),
metadata=new_metadata)
- self.children += (new_dir,)
return new_dir
def add_child_comment(self, comment="", position=None): # pylint: disable=unused-argument
diff --git a/certbot-apache/certbot_apache/tests/augeasnode_test.py b/certbot-apache/certbot_apache/tests/augeasnode_test.py
index 8846c3c0798..76f46c14d14 100644
--- a/certbot-apache/certbot_apache/tests/augeasnode_test.py
+++ b/certbot-apache/certbot_apache/tests/augeasnode_test.py
@@ -9,7 +9,7 @@
from certbot_apache.tests import util
-class AugeasParserNodeTest(util.ApacheTest):
+class AugeasParserNodeTest(util.ApacheTest): # pylint: disable=too-many-public-methods
"""Test AugeasParserNode using available test configurations"""
def setUp(self): # pylint: disable=arguments-differ
@@ -216,6 +216,7 @@ def test_node_init_error_bad_augeaspath(self):
AugeasBlockNode,
**parameters
)
+
def test_node_init_error_missing_augeaspath(self):
from certbot_apache.augeasparser import AugeasBlockNode
parameters = {
@@ -231,3 +232,22 @@ def test_node_init_error_missing_augeaspath(self):
AugeasBlockNode,
**parameters
)
+
+ def test_add_child_directive(self):
+ self.config.parser_root.primary.add_child_directive(
+ "ThisWasAdded",
+ ["with", "parameters"],
+ position=0
+ )
+ dirs = self.config.parser_root.find_directives("ThisWasAdded")
+ self.assertEqual(len(dirs), 1)
+ self.assertEqual(dirs[0].parameters, ("with", "parameters"))
+ # The new directive was added to the very first line of the config
+ self.assertTrue(dirs[0].metadata["augeaspath"].endswith("[1]"))
+
+ def test_add_child_directive_exception(self):
+ self.assertRaises(
+ errors.PluginError,
+ self.config.parser_root.primary.add_child_directive,
+ "ThisRaisesErrorBecauseMissingParameters"
+ )
| This PR implements add_child_directive() and is built on top of #7497
Note: this PR uses temporarily #7497 as base to help to hide the unnecessary diffs. This also means that the tests won't run before it's changed back to `apache-parser-v2` | https://api.github.com/repos/certbot/certbot/pulls/7517 | 2019-11-06T09:22:58Z | 2019-11-12T22:19:02Z | 2019-11-12T22:19:02Z | 2019-11-12T22:19:06Z | 887 | certbot/certbot | 1,641 |
add a template of infinity_outpainting | diff --git a/visual_chatgpt.py b/visual_chatgpt.py
index fd3b978d..e9614894 100644
--- a/visual_chatgpt.py
+++ b/visual_chatgpt.py
@@ -5,9 +5,11 @@
import cv2
import re
import uuid
-from PIL import Image
+from PIL import Image, ImageDraw, ImageOps
+import math
import numpy as np
import argparse
+import inspect
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
@@ -87,6 +89,63 @@ def decorator(func):
return decorator
+def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
+ new_size = new_image.size
+ old_size = old_image.size
+ easy_img = np.array(new_image)
+ gt_img_array = np.array(old_image)
+ pos_w = (new_size[0] - old_size[0]) // 2
+ pos_h = (new_size[1] - old_size[1]) // 2
+
+ kernel_h = cv2.getGaussianKernel(old_size[1], old_size[1] * sigma)
+ kernel_w = cv2.getGaussianKernel(old_size[0], old_size[0] * sigma)
+ kernel = np.multiply(kernel_h, np.transpose(kernel_w))
+
+ kernel[steps:-steps, steps:-steps] = 1
+ kernel[:steps, :steps] = kernel[:steps, :steps] / kernel[steps - 1, steps - 1]
+ kernel[:steps, -steps:] = kernel[:steps, -steps:] / kernel[steps - 1, -(steps)]
+ kernel[-steps:, :steps] = kernel[-steps:, :steps] / kernel[-steps, steps - 1]
+ kernel[-steps:, -steps:] = kernel[-steps:, -steps:] / kernel[-steps, -steps]
+ kernel = np.expand_dims(kernel, 2)
+ kernel = np.repeat(kernel, 3, 2)
+
+ weight = np.linspace(0, 1, steps)
+ top = np.expand_dims(weight, 1)
+ top = np.repeat(top, old_size[0] - 2 * steps, 1)
+ top = np.expand_dims(top, 2)
+ top = np.repeat(top, 3, 2)
+
+ weight = np.linspace(1, 0, steps)
+ down = np.expand_dims(weight, 1)
+ down = np.repeat(down, old_size[0] - 2 * steps, 1)
+ down = np.expand_dims(down, 2)
+ down = np.repeat(down, 3, 2)
+
+ weight = np.linspace(0, 1, steps)
+ left = np.expand_dims(weight, 0)
+ left = np.repeat(left, old_size[1] - 2 * steps, 0)
+ left = np.expand_dims(left, 2)
+ left = np.repeat(left, 3, 2)
+
+ weight = np.linspace(1, 0, steps)
+ right = np.expand_dims(weight, 0)
+ right = np.repeat(right, old_size[1] - 2 * steps, 0)
+ right = np.expand_dims(right, 2)
+ right = np.repeat(right, 3, 2)
+
+ kernel[:steps, steps:-steps] = top
+ kernel[-steps:, steps:-steps] = down
+ kernel[steps:-steps, :steps] = left
+ kernel[steps:-steps, -steps:] = right
+
+ pt_gt_img = easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]]
+ gaussian_gt_img = kernel * gt_img_array + (1 - kernel) * pt_gt_img # gt img with blur img
+ gaussian_gt_img = gaussian_gt_img.astype(np.int64)
+ easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]] = gaussian_gt_img
+ gaussian_img = Image.fromarray(easy_img)
+ return gaussian_img
+
+
def cut_dialogue_history(history_memory, keep_last_n_words=500):
if history_memory is None or len(history_memory) == 0:
return history_memory
@@ -119,6 +178,7 @@ def get_new_image_name(org_img_name, func_name="update"):
return os.path.join(head, new_file_name)
+
class MaskFormer:
def __init__(self, device):
print(f"Initializing MaskFormer to {device}")
@@ -617,7 +677,7 @@ def inference(self, inputs):
segmentation = Image.fromarray(color_seg)
updated_image_path = get_new_image_name(inputs, func_name="segmentation")
segmentation.save(updated_image_path)
- print(f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose: {updated_image_path}")
+ print(f"\nProcessed Image2Seg, Input Image: {inputs}, Output Pose: {updated_image_path}")
return updated_image_path
@@ -812,6 +872,104 @@ def inference(self, inputs):
return answer
+class InfinityOutPainting:
+ template_model = True # Add this line to show this is a template model.
+ def __init__(self, ImageCaptioning, ImageEditing, VisualQuestionAnswering):
+ self.llm = OpenAI(temperature=0)
+ self.ImageCaption = ImageCaptioning
+ self.ImageEditing = ImageEditing
+ self.ImageVQA = VisualQuestionAnswering
+ self.a_prompt = 'best quality, extremely detailed'
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
+ 'fewer digits, cropped, worst quality, low quality'
+
+ def get_BLIP_vqa(self, image, question):
+ inputs = self.ImageVQA.processor(image, question, return_tensors="pt").to(self.ImageVQA.device,
+ self.ImageVQA.torch_dtype)
+ out = self.ImageVQA.model.generate(**inputs)
+ answer = self.ImageVQA.processor.decode(out[0], skip_special_tokens=True)
+ print(f"\nProcessed VisualQuestionAnswering, Input Question: {question}, Output Answer: {answer}")
+ return answer
+
+ def get_BLIP_caption(self, image):
+ inputs = self.ImageCaption.processor(image, return_tensors="pt").to(self.ImageCaption.device,
+ self.ImageCaption.torch_dtype)
+ out = self.ImageCaption.model.generate(**inputs)
+ BLIP_caption = self.ImageCaption.processor.decode(out[0], skip_special_tokens=True)
+ return BLIP_caption
+
+ def check_prompt(self, prompt):
+ check = f"Here is a paragraph with adjectives. " \
+ f"{prompt} " \
+ f"Please change all plural forms in the adjectives to singular forms. "
+ return self.llm(check)
+
+ def get_imagine_caption(self, image, imagine):
+ BLIP_caption = self.get_BLIP_caption(image)
+ background_color = self.get_BLIP_vqa(image, 'what is the background color of this image')
+ style = self.get_BLIP_vqa(image, 'what is the style of this image')
+ imagine_prompt = f"let's pretend you are an excellent painter and now " \
+ f"there is an incomplete painting with {BLIP_caption} in the center, " \
+ f"please imagine the complete painting and describe it" \
+ f"you should consider the background color is {background_color}, the style is {style}" \
+ f"You should make the painting as vivid and realistic as possible" \
+ f"You can not use words like painting or picture" \
+ f"and you should use no more than 50 words to describe it"
+ caption = self.llm(imagine_prompt) if imagine else BLIP_caption
+ caption = self.check_prompt(caption)
+ print(f'BLIP observation: {BLIP_caption}, ChatGPT imagine to {caption}') if imagine else print(
+ f'Prompt: {caption}')
+ return caption
+
+ def resize_image(self, image, max_size=1000000, multiple=8):
+ aspect_ratio = image.size[0] / image.size[1]
+ new_width = int(math.sqrt(max_size * aspect_ratio))
+ new_height = int(new_width / aspect_ratio)
+ new_width, new_height = new_width - (new_width % multiple), new_height - (new_height % multiple)
+ return image.resize((new_width, new_height))
+
+ def dowhile(self, original_img, tosize, expand_ratio, imagine, usr_prompt):
+ old_img = original_img
+ while (old_img.size != tosize):
+ prompt = self.check_prompt(usr_prompt) if usr_prompt else self.get_imagine_caption(old_img, imagine)
+ crop_w = 15 if old_img.size[0] != tosize[0] else 0
+ crop_h = 15 if old_img.size[1] != tosize[1] else 0
+ old_img = ImageOps.crop(old_img, (crop_w, crop_h, crop_w, crop_h))
+ temp_canvas_size = (expand_ratio * old_img.width if expand_ratio * old_img.width < tosize[0] else tosize[0],
+ expand_ratio * old_img.height if expand_ratio * old_img.height < tosize[1] else tosize[
+ 1])
+ temp_canvas, temp_mask = Image.new("RGB", temp_canvas_size, color="white"), Image.new("L", temp_canvas_size,
+ color="white")
+ x, y = (temp_canvas.width - old_img.width) // 2, (temp_canvas.height - old_img.height) // 2
+ temp_canvas.paste(old_img, (x, y))
+ temp_mask.paste(0, (x, y, x + old_img.width, y + old_img.height))
+ resized_temp_canvas, resized_temp_mask = self.resize_image(temp_canvas), self.resize_image(temp_mask)
+ image = self.ImageEditing.inpaint(prompt=prompt, image=resized_temp_canvas, mask_image=resized_temp_mask,
+ height=resized_temp_canvas.height, width=resized_temp_canvas.width,
+ num_inference_steps=50).images[0].resize(
+ (temp_canvas.width, temp_canvas.height), Image.ANTIALIAS)
+ image = blend_gt2pt(old_img, image)
+ old_img = image
+ return old_img
+
+ @prompts(name="Extend An Image",
+ description="useful when you need to extend an image into a larger image."
+ "like: extend the image into a resolution of 2048x1024, extend the image into 2048x1024. "
+ "The input to this tool should be a comma separated string of two, representing the image_path and the resolution of widthxheight")
+ def inference(self, inputs):
+ image_path, resolution = inputs.split(',')
+ width, height = resolution.split('x')
+ tosize = (int(width), int(height))
+ image = Image.open(image_path)
+ image = ImageOps.crop(image, (10, 10, 10, 10))
+ out_painted_image = self.dowhile(image, tosize, 4, True, False)
+ updated_image_path = get_new_image_name(image_path, func_name="outpainting")
+ out_painted_image.save(updated_image_path)
+ print(f"\nProcessed InfinityOutPainting, Input Image: {image_path}, Input Resolution: {resolution}, "
+ f"Output Image: {updated_image_path}")
+ return updated_image_path
+
+
class ConversationBot:
def __init__(self, load_dict):
# load_dict = {'VisualQuestionAnswering':'cuda:0', 'ImageCaptioning':'cuda:1',...}
@@ -823,16 +981,24 @@ def __init__(self, load_dict):
self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
self.models = {}
+ # Load Basic Foundation Models
for class_name, device in load_dict.items():
self.models[class_name] = globals()[class_name](device=device)
+ # Load Template Foundation Models
+ for class_name, module in globals().items():
+ if getattr(module, 'template_model', False):
+ template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
+ loaded_names = set([type(e).__name__ for e in self.models.values()])
+ if template_required_names.issubset(loaded_names):
+ self.models[class_name] = globals()[class_name](
+ **{name: self.models[name] for name in template_required_names})
self.tools = []
for instance in self.models.values():
for e in dir(instance):
if e.startswith('inference'):
func = getattr(instance, e)
self.tools.append(Tool(name=func.name, description=func.description, func=func))
-
self.agent = initialize_agent(
self.tools,
self.llm,
@@ -900,4 +1066,4 @@ def run_image(self, image, state, txt):
clear.click(bot.memory.clear)
clear.click(lambda: [], None, chatbot)
clear.click(lambda: [], None, state)
- demo.launch(server_name="0.0.0.0", server_port=7868)
+ demo.launch(server_name="0.0.0.0", server_port=1015)
| welcome to add more template for visual-chatgpt | https://api.github.com/repos/chenfei-wu/TaskMatrix/pulls/269 | 2023-03-22T15:44:59Z | 2023-03-22T16:09:48Z | 2023-03-22T16:09:48Z | 2023-03-22T16:09:54Z | 3,043 | chenfei-wu/TaskMatrix | 40,929 |
0.10 | diff --git a/.travis.yml b/.travis.yml
index d2c7552312..062bb77b50 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,8 +3,7 @@ python:
- "2.7"
# command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
install:
- - "pip install --upgrade git+https://github.com/mitmproxy/netlib.git"
- - "pip install --upgrade git+https://github.com/mitmproxy/pathod.git"
+ - "pip install --upgrade git+https://github.com/mitmproxy/netlib.git@0.10"
- "pip install -r requirements.txt --use-mirrors"
- "pip install -r test/requirements.txt --use-mirrors"
# command to run tests, e.g. python setup.py test
diff --git a/libmproxy/dump.py b/libmproxy/dump.py
index e76ea1ce33..53e07a7a14 100644
--- a/libmproxy/dump.py
+++ b/libmproxy/dump.py
@@ -31,6 +31,9 @@ class Options(object):
"stickyauth",
"verbosity",
"wfile",
+ "replay_ignore_content",
+ "replay_ignore_params",
+ "replay_not_found"
]
def __init__(self, **kwargs):
for k, v in kwargs.items():
@@ -73,6 +76,9 @@ def __init__(self, server, options, filtstr, outfile=sys.stdout):
self.filt = filt.parse(filtstr)
else:
self.filt = None
+
+ if options.replay_not_found:
+ self.not_found_filt = filt.parse(options.replay_not_found)
if options.stickycookie:
self.set_stickycookie(options.stickycookie)
@@ -101,7 +107,9 @@ def __init__(self, server, options, filtstr, outfile=sys.stdout):
self._readflow(options.server_replay),
options.kill, options.rheaders,
not options.keepserving,
- options.nopop
+ options.nopop,
+ options.replay_ignore_params,
+ options.replay_ignore_content
)
if options.client_replay:
diff --git a/libmproxy/flow.py b/libmproxy/flow.py
index 76ca4f4752..8343b69995 100644
--- a/libmproxy/flow.py
+++ b/libmproxy/flow.py
@@ -943,12 +943,12 @@ def tick(self, master, testing=False):
class ServerPlaybackState:
- def __init__(self, headers, flows, exit, nopop):
+ def __init__(self, headers, flows, exit, nopop, ignore_params, ignore_content):
"""
headers: Case-insensitive list of request headers that should be
included in request-response matching.
"""
- self.headers, self.exit, self.nopop = headers, exit, nopop
+ self.headers, self.exit, self.nopop, self.ignore_params, self.ignore_content = headers, exit, nopop, ignore_params, ignore_content
self.fmap = {}
for i in flows:
if i.response:
@@ -963,14 +963,29 @@ def _hash(self, flow):
Calculates a loose hash of the flow request.
"""
r = flow.request
+
+ _, _, path, _, query, _ = urlparse.urlparse(r.get_url())
+ queriesArray = urlparse.parse_qsl(query)
+
+ filtered = []
+ for p in queriesArray:
+ if p[0] not in self.ignore_params:
+ filtered.append(p)
+
key = [
str(r.host),
str(r.port),
str(r.scheme),
str(r.method),
- str(r.path),
- str(r.content),
+ str(path),
]
+ if not self.ignore_content:
+ key.append(str(r.content))
+
+ for p in filtered:
+ key.append(p[0])
+ key.append(p[1])
+
if self.headers:
hdrs = []
for i in self.headers:
@@ -1375,6 +1390,8 @@ def __init__(self, server, state):
self.stream = None
self.apps = AppRegistry()
+ self.not_found_filt = None
+
def start_app(self, host, port, external):
if not external:
self.apps.add(
@@ -1452,12 +1469,15 @@ def start_client_playback(self, flows, exit):
def stop_client_playback(self):
self.client_playback = None
- def start_server_playback(self, flows, kill, headers, exit, nopop):
+ def start_server_playback(self, flows, kill, headers, exit, nopop, ignore_params, ignore_content):
"""
flows: List of flows.
kill: Boolean, should we kill requests not part of the replay?
+ ignore_params: list of parameters to ignore in server replay
+ ignore_content: true if request content should be ignored in server replay
+ not_found: return 404 instead of go for the page for filtered traffic in server replay
"""
- self.server_playback = ServerPlaybackState(headers, flows, exit, nopop)
+ self.server_playback = ServerPlaybackState(headers, flows, exit, nopop, ignore_params, ignore_content)
self.kill_nonreplay = kill
def stop_server_playback(self):
@@ -1485,6 +1505,27 @@ def do_server_playback(self, flow):
return True
return None
+ def return_not_found(self, flow):
+ """
+ This method should be called by child classes in the handle_request
+ handler. Returns a hardcoded 404
+ """
+ if self.server_playback:
+ response = Response(flow.request,
+ [1,1],
+ 404, "Not found",
+ ODictCaseless([["Content-Type","text/html"]]),
+ "Not Found",
+ None)
+ response._set_replay()
+ flow.response = response
+ if self.refresh_server_playback:
+ response.refresh()
+ flow.request.reply(response)
+ return True
+ return None
+
+
def tick(self, q):
if self.client_playback:
e = [
@@ -1533,12 +1574,13 @@ def process_new_request(self, f):
f.request.anticache()
if self.anticomp:
f.request.anticomp()
-
if self.server_playback:
pb = self.do_server_playback(f)
if not pb:
if self.kill_nonreplay:
f.kill(self)
+ elif self.not_found_filt and f.match(self.not_found_filt):
+ self.return_not_found(f)
else:
f.request.reply()
diff --git a/mitmdump b/mitmdump
index 49d129d6a6..9348b825a2 100755
--- a/mitmdump
+++ b/mitmdump
@@ -18,6 +18,24 @@ if __name__ == '__main__':
action="store_true", dest="keepserving", default=False,
help="Continue serving after client playback or file read. We exit by default."
)
+ parser.add_argument(
+ "--replay-ignore-content",
+ action="store_true", dest="replay_ignore_content", default=False,
+ help="Ignore request's content while searching for a saved flow to replay"
+ )
+ parser.add_argument(
+ "--replay-ignore-param",
+ action="append", dest="replay_ignore_params", type=str,
+ help="Request's parameters to be ignored while searching for a saved flow to replay"
+ "Can be passed multiple times."
+ )
+ parser.add_argument(
+ "--replay-not-found",
+ action="append", dest="replay_not_found", type=str,
+ help="Make the proxy return 404 (not found) if a request matching filters is not found in saved flows"
+ "Can be passed multiple times."
+ )
+
parser.add_argument('args', nargs=argparse.REMAINDER)
options = parser.parse_args()
@@ -40,7 +58,14 @@ if __name__ == '__main__':
dumpopts = dump.Options(**cmdline.get_common_options(options))
except cmdline.OptionException, v:
parser.error(v.message)
+
dumpopts.keepserving = options.keepserving
+ dumpopts.replay_ignore_content = options.replay_ignore_content
+ dumpopts.replay_ignore_params = options.replay_ignore_params
+ if options.replay_not_found:
+ dumpopts.replay_not_found = " ".join(options.replay_not_found)
+ else:
+ dumpopts.replay_not_found = None
if options.args:
filt = " ".join(options.args)
diff --git a/test/test_flow.py b/test/test_flow.py
index f9198f0c63..5ed866d1db 100644
--- a/test/test_flow.py
+++ b/test/test_flow.py
@@ -107,7 +107,7 @@ def test_tick(self):
class TestServerPlaybackState:
def test_hash(self):
- s = flow.ServerPlaybackState(None, [], False, False)
+ s = flow.ServerPlaybackState(None, [], False, False, None, False)
r = tutils.tflow()
r2 = tutils.tflow()
@@ -119,7 +119,7 @@ def test_hash(self):
assert s._hash(r) != s._hash(r2)
def test_headers(self):
- s = flow.ServerPlaybackState(["foo"], [], False, False)
+ s = flow.ServerPlaybackState(["foo"], [], False, False, None, False)
r = tutils.tflow_full()
r.request.headers["foo"] = ["bar"]
r2 = tutils.tflow_full()
@@ -133,6 +133,45 @@ def test_headers(self):
r2 = tutils.tflow_full()
assert s._hash(r) == s._hash(r2)
+ def test_ignore_params(self):
+ s = flow.ServerPlaybackState(None, [], False, False, ["param1", "param2"], False)
+ r = tutils.tflow_full()
+ r.request.path="/test?param1=1"
+ r2 = tutils.tflow_full()
+ r2.request.path="/test"
+ assert s._hash(r) == s._hash(r2)
+ r2.request.path="/test?param1=2"
+ assert s._hash(r) == s._hash(r2)
+ r2.request.path="/test?param2=1"
+ assert s._hash(r) == s._hash(r2)
+ r2.request.path="/test?param3=2"
+ assert not s._hash(r) == s._hash(r2)
+
+ def test_ignore_content(self):
+ s = flow.ServerPlaybackState(None, [], False, False, None, False)
+ r = tutils.tflow_full()
+ r2 = tutils.tflow_full()
+
+ r.request.content = "foo"
+ r2.request.content = "foo"
+ assert s._hash(r) == s._hash(r2)
+ r2.request.content = "bar"
+ assert not s._hash(r) == s._hash(r2)
+
+ #now ignoring content
+ s = flow.ServerPlaybackState(None, [], False, False, None, True)
+ r = tutils.tflow_full()
+ r2 = tutils.tflow_full()
+ r.request.content = "foo"
+ r2.request.content = "foo"
+ assert s._hash(r) == s._hash(r2)
+ r2.request.content = "bar"
+ assert s._hash(r) == s._hash(r2)
+ r2.request.content = ""
+ assert s._hash(r) == s._hash(r2)
+ r2.request.content = None
+ assert s._hash(r) == s._hash(r2)
+
def test_load(self):
r = tutils.tflow_full()
r.request.headers["key"] = ["one"]
@@ -140,7 +179,7 @@ def test_load(self):
r2 = tutils.tflow_full()
r2.request.headers["key"] = ["two"]
- s = flow.ServerPlaybackState(None, [r, r2], False, False)
+ s = flow.ServerPlaybackState(None, [r, r2], False, False, None, False)
assert s.count() == 2
assert len(s.fmap.keys()) == 1
@@ -161,7 +200,7 @@ def test_load_with_nopop(self):
r2 = tutils.tflow_full()
r2.request.headers["key"] = ["two"]
- s = flow.ServerPlaybackState(None, [r, r2], False, True)
+ s = flow.ServerPlaybackState(None, [r, r2], False, True, None, False)
assert s.count() == 2
s.next_flow(r)
@@ -671,7 +710,7 @@ def test_client_playback(self):
f = tutils.tflow_full()
pb = [tutils.tflow_full(), f]
fm = flow.FlowMaster(None, s)
- assert not fm.start_server_playback(pb, False, [], False, False)
+ assert not fm.start_server_playback(pb, False, [], False, False, None, False)
assert not fm.start_client_playback(pb, False)
q = Queue.Queue()
@@ -695,16 +734,16 @@ def test_server_playback(self):
fm.refresh_server_playback = True
assert not fm.do_server_playback(tutils.tflow())
- fm.start_server_playback(pb, False, [], False, False)
+ fm.start_server_playback(pb, False, [], False, False, None, False)
assert fm.do_server_playback(tutils.tflow())
- fm.start_server_playback(pb, False, [], True, False)
+ fm.start_server_playback(pb, False, [], True, False, None, False)
r = tutils.tflow()
r.request.content = "gibble"
assert not fm.do_server_playback(r)
assert fm.do_server_playback(tutils.tflow())
- fm.start_server_playback(pb, False, [], True, False)
+ fm.start_server_playback(pb, False, [], True, False, None, False)
q = Queue.Queue()
fm.tick(q)
assert controller.should_exit
@@ -719,13 +758,28 @@ def test_server_playback_kill(self):
pb = [f]
fm = flow.FlowMaster(None, s)
fm.refresh_server_playback = True
- fm.start_server_playback(pb, True, [], False, False)
+ fm.start_server_playback(pb, True, [], False, False, None, False)
f = tutils.tflow()
f.request.host = "nonexistent"
fm.process_new_request(f)
assert "killed" in f.error.msg
+ def test_server_playback_return_not_found(self):
+ s = flow.State()
+ f = tutils.tflow()
+ f.response = tutils.tresp(f.request)
+ pb = [f]
+ fm = flow.FlowMaster(None, s)
+ fm.refresh_server_playback = True
+ fm.start_server_playback(pb, False, [], False, False, None, False)
+ fm.not_found_filt = filt.parse("~d www.example.org")
+
+ f = tutils.tflow()
+ f.request.host = "www.example.org"
+ fm.process_new_request(f)
+ assert 404 == f.response.code
+
def test_stickycookie(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
| Hi, I've added 3 replay features to version 0.10:
--replay_ignore_content: tells mitmdump to ignore request content in flow matching
--replay-ignore-param: (can be passed multiple times) creates a list of query parameters that will be ignored in flow matching (useful for dealing with sites that add random numbers or request counts as part of the request)
--replay-not-found: (can be passed multiple times) takes a filter expression as argument and makes proxy return not found (http code:404) for flows not found in saved ones but matching the selected filters instead of making the request to the real server.
| https://api.github.com/repos/mitmproxy/mitmproxy/pulls/368 | 2014-09-30T10:45:12Z | 2014-10-22T20:02:31Z | 2014-10-22T20:02:31Z | 2014-10-22T20:02:31Z | 3,521 | mitmproxy/mitmproxy | 27,695 |
merge profile output path fix | diff --git a/deepspeed/profiling/flops_profiler/profiler.py b/deepspeed/profiling/flops_profiler/profiler.py
index d599736b8f1e..1a20ae470a2b 100644
--- a/deepspeed/profiling/flops_profiler/profiler.py
+++ b/deepspeed/profiling/flops_profiler/profiler.py
@@ -254,7 +254,7 @@ def print_model_profile(self,
original_stdout = None
f = None
if output_file and output_file != "":
- dir_path = os.path.dirname(output_file)
+ dir_path = os.path.dirname(os.path.abspath(output_file))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
original_stdout = sys.stdout
| https://api.github.com/repos/microsoft/DeepSpeed/pulls/2397 | 2022-10-06T19:35:45Z | 2022-10-06T19:35:57Z | 2022-10-06T19:35:57Z | 2022-10-06T19:35:57Z | 170 | microsoft/DeepSpeed | 10,694 | |
Fixed #16734 -- Set script prefix in django.setup() to allow its usage outside of requests. | diff --git a/django/__init__.py b/django/__init__.py
index 3025f721bd4ec..c9c3ecf17ac00 100644
--- a/django/__init__.py
+++ b/django/__init__.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
from django.utils.version import get_version
VERSION = (1, 10, 0, 'alpha', 0)
@@ -5,14 +7,21 @@
__version__ = get_version(VERSION)
-def setup():
+def setup(set_prefix=True):
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
+ Set the thread-local urlresolvers script prefix if `set_prefix` is True.
"""
from django.apps import apps
from django.conf import settings
+ from django.core.urlresolvers import set_script_prefix
+ from django.utils.encoding import force_text
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
+ if set_prefix:
+ set_script_prefix(
+ '/' if settings.FORCE_SCRIPT_NAME is None else force_text(settings.FORCE_SCRIPT_NAME)
+ )
apps.populate(settings.INSTALLED_APPS)
diff --git a/django/core/wsgi.py b/django/core/wsgi.py
index 62aa43bda5de3..e0ded3db54984 100644
--- a/django/core/wsgi.py
+++ b/django/core/wsgi.py
@@ -10,5 +10,5 @@ def get_wsgi_application():
Allows us to avoid making django.core.handlers.WSGIHandler public API, in
case the internal WSGI implementation changes or moves in the future.
"""
- django.setup()
+ django.setup(set_prefix=False)
return WSGIHandler()
diff --git a/docs/ref/applications.txt b/docs/ref/applications.txt
index 498e10c954548..ba0ddeb532494 100644
--- a/docs/ref/applications.txt
+++ b/docs/ref/applications.txt
@@ -332,14 +332,20 @@ application registry.
.. currentmodule:: django
-.. function:: setup()
+.. function:: setup(set_script=True)
Configures Django by:
* Loading the settings.
* Setting up logging.
+ * If ``set_script`` is True, setting the URL resolver script prefix to
+ :setting:`FORCE_SCRIPT_NAME` if defined, or ``/`` otherwise.
* Initializing the application registry.
+ .. versionchanged:: 1.10
+
+ The ability to set the URL resolver script prefix is new.
+
This function is called automatically:
* When running an HTTP server via Django's WSGI support.
diff --git a/docs/ref/settings.txt b/docs/ref/settings.txt
index dba54e26b4b39..705cecbb57e13 100644
--- a/docs/ref/settings.txt
+++ b/docs/ref/settings.txt
@@ -1408,7 +1408,14 @@ Default: ``None``
If not ``None``, this will be used as the value of the ``SCRIPT_NAME``
environment variable in any HTTP request. This setting can be used to override
the server-provided value of ``SCRIPT_NAME``, which may be a rewritten version
-of the preferred value or not supplied at all.
+of the preferred value or not supplied at all. It is also used by
+:func:`django.setup()` to set the URL resolver script prefix outside of the
+request/response cycle (e.g. in management commands and standalone scripts) to
+generate correct URLs when ``SCRIPT_NAME`` is not ``/``.
+
+.. versionchanged:: 1.10
+
+ The setting's use in :func:`django.setup()` was added.
.. setting:: FORMAT_MODULE_PATH
diff --git a/docs/releases/1.10.txt b/docs/releases/1.10.txt
index b291dcfd41aa5..bea1dc10d1f4e 100644
--- a/docs/releases/1.10.txt
+++ b/docs/releases/1.10.txt
@@ -209,7 +209,10 @@ Tests
URLs
^^^^
-* ...
+* An addition in :func:`django.setup()` allows URL resolving that happens
+ outside of the request/response cycle (e.g. in management commands and
+ standalone scripts) to take :setting:`FORCE_SCRIPT_NAME` into account when it
+ is set.
Validators
^^^^^^^^^^
diff --git a/tests/user_commands/management/commands/reverse_url.py b/tests/user_commands/management/commands/reverse_url.py
new file mode 100644
index 0000000000000..f2064bf05d88d
--- /dev/null
+++ b/tests/user_commands/management/commands/reverse_url.py
@@ -0,0 +1,10 @@
+from django.core.management.base import BaseCommand
+from django.core.urlresolvers import reverse
+
+
+class Command(BaseCommand):
+ """
+ This command returns a URL from a reverse() call.
+ """
+ def handle(self, *args, **options):
+ return reverse('some_url')
diff --git a/tests/user_commands/tests.py b/tests/user_commands/tests.py
index 772da22bed0ae..048ac4d96399c 100644
--- a/tests/user_commands/tests.py
+++ b/tests/user_commands/tests.py
@@ -1,5 +1,7 @@
import os
+from admin_scripts.tests import AdminScriptTestCase
+
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
@@ -159,6 +161,23 @@ def patched_check(self_, **kwargs):
BaseCommand.check = saved_check
+class CommandRunTests(AdminScriptTestCase):
+ """
+ Tests that need to run by simulating the command line, not by call_command.
+ """
+ def tearDown(self):
+ self.remove_settings('settings.py')
+
+ def test_script_prefix_set_in_commands(self):
+ self.write_settings('settings.py', apps=['user_commands'], sdict={
+ 'ROOT_URLCONF': '"user_commands.urls"',
+ 'FORCE_SCRIPT_NAME': '"/PREFIX/"',
+ })
+ out, err = self.run_manage(['reverse_url'])
+ self.assertNoOutput(err)
+ self.assertEqual(out.strip(), '/PREFIX/some/url/')
+
+
class UtilsTests(SimpleTestCase):
def test_no_existent_external_program(self):
diff --git a/tests/user_commands/urls.py b/tests/user_commands/urls.py
new file mode 100644
index 0000000000000..fe20693dce6e5
--- /dev/null
+++ b/tests/user_commands/urls.py
@@ -0,0 +1,5 @@
+from django.conf.urls import url
+
+urlpatterns = [
+ url(r'^some/url/$', lambda req:req, name='some_url'),
+]
| https://api.github.com/repos/django/django/pulls/5470 | 2015-10-23T19:21:03Z | 2015-10-29T19:13:23Z | 2015-10-29T19:13:23Z | 2015-10-29T19:13:51Z | 1,532 | django/django | 51,273 | |
Update MODEL_CARD.md | diff --git a/MODEL_CARD.md b/MODEL_CARD.md
index 0a2718c18..18d9dfea8 100644
--- a/MODEL_CARD.md
+++ b/MODEL_CARD.md
@@ -10,7 +10,7 @@ Meta developed and released the Llama 2 family of large language models (LLMs),
**Output** Models generate text only.
-**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.
+**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
||Training Data|Params|Content Length|GQA|Tokens|LR|
|---|---|---|---|---|---|---|
@@ -69,7 +69,7 @@ For all the evaluations, we use our internal evaluations library.
|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|
-**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.
+**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at the top 1.
|||TruthfulQA|Toxigen|
|---|---|---|---|
| https://api.github.com/repos/meta-llama/llama/pulls/511 | 2023-07-23T09:28:10Z | 2023-08-26T02:17:53Z | 2023-08-26T02:17:53Z | 2023-08-26T02:17:58Z | 724 | meta-llama/llama | 31,997 | |
loggerd: speedup unit tests | diff --git a/system/loggerd/tests/test_loggerd.py b/system/loggerd/tests/test_loggerd.py
index 49d97505e830fc..dde12b646db4d5 100755
--- a/system/loggerd/tests/test_loggerd.py
+++ b/system/loggerd/tests/test_loggerd.py
@@ -5,7 +5,6 @@
import string
import subprocess
import time
-import unittest
from collections import defaultdict
from pathlib import Path
from typing import Dict, List
@@ -31,10 +30,7 @@
and SERVICE_LIST[f].should_log and "encode" not in f.lower()]
-class TestLoggerd(unittest.TestCase):
- def setUp(self):
- os.environ.pop("LOG_ROOT", None)
-
+class TestLoggerd:
def _get_latest_log_dir(self):
log_dirs = sorted(Path(Paths.log_root()).iterdir(), key=lambda f: f.stat().st_mtime)
return log_dirs[-1]
@@ -68,21 +64,21 @@ def _gen_bootlog(self):
def _check_init_data(self, msgs):
msg = msgs[0]
- self.assertEqual(msg.which(), 'initData')
+ assert msg.which() == 'initData'
def _check_sentinel(self, msgs, route):
start_type = SentinelType.startOfRoute if route else SentinelType.startOfSegment
- self.assertTrue(msgs[1].sentinel.type == start_type)
+ assert msgs[1].sentinel.type == start_type
end_type = SentinelType.endOfRoute if route else SentinelType.endOfSegment
- self.assertTrue(msgs[-1].sentinel.type == end_type)
+ assert msgs[-1].sentinel.type == end_type
def _publish_random_messages(self, services: List[str]) -> Dict[str, list]:
pm = messaging.PubMaster(services)
managed_processes["loggerd"].start()
for s in services:
- self.assertTrue(pm.wait_for_readers_to_update(s, timeout=5))
+ assert pm.wait_for_readers_to_update(s, timeout=5)
sent_msgs = defaultdict(list)
for _ in range(random.randint(2, 10) * 100):
@@ -93,10 +89,9 @@ def _publish_random_messages(self, services: List[str]) -> Dict[str, list]:
m = messaging.new_message(s, random.randint(2, 10))
pm.send(s, m)
sent_msgs[s].append(m)
- time.sleep(0.01)
for s in services:
- self.assertTrue(pm.wait_for_readers_to_update(s, timeout=5))
+ assert pm.wait_for_readers_to_update(s, timeout=5)
managed_processes["loggerd"].stop()
return sent_msgs
@@ -121,15 +116,15 @@ def test_init_data_values(self):
lr = list(LogReader(str(self._gen_bootlog())))
initData = lr[0].initData
- self.assertTrue(initData.dirty != bool(os.environ["CLEAN"]))
- self.assertEqual(initData.version, get_version())
+ assert initData.dirty != bool(os.environ["CLEAN"])
+ assert initData.version == get_version()
if os.path.isfile("/proc/cmdline"):
with open("/proc/cmdline") as f:
- self.assertEqual(list(initData.kernelArgs), f.read().strip().split(" "))
+ assert list(initData.kernelArgs) == f.read().strip().split(" ")
with open("/proc/version") as f:
- self.assertEqual(initData.kernelVersion, f.read())
+ assert initData.kernelVersion == f.read()
# check params
logged_params = {entry.key: entry.value for entry in initData.params.entries}
@@ -137,8 +132,8 @@ def test_init_data_values(self):
assert set(logged_params.keys()) == expected_params, set(logged_params.keys()) ^ expected_params
assert logged_params['AccessToken'] == b'', f"DONT_LOG param value was logged: {repr(logged_params['AccessToken'])}"
for param_key, initData_key, v in fake_params:
- self.assertEqual(getattr(initData, initData_key), v)
- self.assertEqual(logged_params[param_key].decode(), v)
+ assert getattr(initData, initData_key) == v
+ assert logged_params[param_key].decode() == v
params.put("AccessToken", "")
@@ -162,11 +157,10 @@ def test_rotation(self):
os.environ["LOGGERD_SEGMENT_LENGTH"] = str(length)
managed_processes["loggerd"].start()
managed_processes["encoderd"].start()
- time.sleep(1)
+ assert pm.wait_for_readers_to_update("roadCameraState", timeout=5)
fps = 20.0
for n in range(1, int(num_segs*length*fps)+1):
- time_start = time.monotonic()
for stream_type, frame_spec, state in streams:
dat = np.empty(frame_spec[2], dtype=np.uint8)
vipc_server.send(stream_type, dat[:].flatten().tobytes(), n, n/fps, n/fps)
@@ -175,7 +169,9 @@ def test_rotation(self):
frame = getattr(camera_state, state)
frame.frameId = n
pm.send(state, camera_state)
- time.sleep(max((1.0/fps) - (time.monotonic() - time_start), 0))
+
+ for _, _, state in streams:
+ assert pm.wait_for_readers_to_update(state, timeout=5, dt=0.001)
managed_processes["loggerd"].stop()
managed_processes["encoderd"].stop()
@@ -185,7 +181,7 @@ def test_rotation(self):
p = Path(f"{route_path}--{n}")
logged = {f.name for f in p.iterdir() if f.is_file()}
diff = logged ^ expected_files
- self.assertEqual(len(diff), 0, f"didn't get all expected files. run={_} seg={n} {route_path=}, {diff=}\n{logged=} {expected_files=}")
+ assert len(diff) == 0, f"didn't get all expected files. run={_} seg={n} {route_path=}, {diff=}\n{logged=} {expected_files=}"
def test_bootlog(self):
# generate bootlog with fake launch log
@@ -216,7 +212,7 @@ def test_bootlog(self):
with open(path, "rb") as f:
expected_val = f.read()
bootlog_val = [e.value for e in boot.pstore.entries if e.key == fn][0]
- self.assertEqual(expected_val, bootlog_val)
+ assert expected_val == bootlog_val
def test_qlog(self):
qlog_services = [s for s in CEREAL_SERVICES if SERVICE_LIST[s].decimation is not None]
@@ -242,11 +238,11 @@ def test_qlog(self):
if s in no_qlog_services:
# check services with no specific decimation aren't in qlog
- self.assertEqual(recv_cnt, 0, f"got {recv_cnt} {s} msgs in qlog")
+ assert recv_cnt == 0, f"got {recv_cnt} {s} msgs in qlog"
else:
# check logged message count matches decimation
expected_cnt = (len(msgs) - 1) // SERVICE_LIST[s].decimation + 1
- self.assertEqual(recv_cnt, expected_cnt, f"expected {expected_cnt} msgs for {s}, got {recv_cnt}")
+ assert recv_cnt == expected_cnt, f"expected {expected_cnt} msgs for {s}, got {recv_cnt}"
def test_rlog(self):
services = random.sample(CEREAL_SERVICES, random.randint(5, 10))
@@ -263,22 +259,19 @@ def test_rlog(self):
for m in lr:
sent = sent_msgs[m.which()].pop(0)
sent.clear_write_flag()
- self.assertEqual(sent.to_bytes(), m.as_builder().to_bytes())
+ assert sent.to_bytes() == m.as_builder().to_bytes()
def test_preserving_flagged_segments(self):
services = set(random.sample(CEREAL_SERVICES, random.randint(5, 10))) | {"userFlag"}
self._publish_random_messages(services)
segment_dir = self._get_latest_log_dir()
- self.assertEqual(getxattr(segment_dir, PRESERVE_ATTR_NAME), PRESERVE_ATTR_VALUE)
+ assert getxattr(segment_dir, PRESERVE_ATTR_NAME) == PRESERVE_ATTR_VALUE
def test_not_preserving_unflagged_segments(self):
services = set(random.sample(CEREAL_SERVICES, random.randint(5, 10))) - {"userFlag"}
self._publish_random_messages(services)
segment_dir = self._get_latest_log_dir()
- self.assertIsNone(getxattr(segment_dir, PRESERVE_ATTR_NAME))
-
+ assert getxattr(segment_dir, PRESERVE_ATTR_NAME) is None
-if __name__ == "__main__":
- unittest.main()
| https://api.github.com/repos/commaai/openpilot/pulls/31115 | 2024-01-23T00:51:22Z | 2024-01-23T01:24:43Z | 2024-01-23T01:24:43Z | 2024-01-23T01:24:44Z | 1,973 | commaai/openpilot | 9,398 | |
Add IG.com | diff --git a/README.md b/README.md
index b6a7cfb6bc..af4238596c 100644
--- a/README.md
+++ b/README.md
@@ -250,6 +250,7 @@ API | Description | Auth | HTTPS | Link |
| Barchart OnDemand | Stock, Futures, and Forex Market Data | `apiKey` | Yes | [Go!](https://www.barchartondemand.com/free) |
| Consumer Financial Protection Bureau | Financial services consumer complains data | `apiKey` | Yes | [Go!](https://data.consumerfinance.gov/resource/jhzv-w97w.json) |
| IEX | Stocks and Market Data | No | Yes | [Go!](https://iextrading.com/developer/) |
+| IG | Spreadbetting and CFD Market Data | `apiKey` | Yes | [Go!](https://labs.ig.com/gettingstarted) |
| Plaid | Connect with users’ bank accounts and access transaction data | apiKey | Yes | [Go!](https://plaid.com/) |
| Razorpay IFSC | Indian Financial Systems Code (Bank Branch Codes) | No | Yes | [Go!](https://ifsc.razorpay.com/) |
| RoutingNumbers.info | ACH/NACHA Bank Routing Numbers | No | Yes | [Go!](https://www.routingnumbers.info/api/index.html) |
| Thank you for taking the time to work on a Pull Request for this project!
To ensure your PR is dealt with swiftly please check the following:
- [x] Your submissions are formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md).
- [x] Your changes are made in the [README](../README.md) file, not the auto-generated JSON.
- [x] Your additions are ordered alphabetically.
- [x] Your submission has a useful description.
- [x] Each table column should be padded with one space on either side.
- [x] You have searched the repository for any relevant issues or PRs.
- [x] Any category you are creating has the minimum requirement of 3 items.
| https://api.github.com/repos/public-apis/public-apis/pulls/520 | 2017-10-21T10:00:54Z | 2017-10-22T02:38:37Z | 2017-10-22T02:38:37Z | 2017-10-22T02:38:39Z | 299 | public-apis/public-apis | 35,239 |
Update avl_tree.py | diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py
index cb043cf188b7..71dede2ccacc 100644
--- a/data_structures/binary_tree/avl_tree.py
+++ b/data_structures/binary_tree/avl_tree.py
@@ -1,6 +1,11 @@
"""
-An auto-balanced binary tree!
+Implementation of an auto-balanced binary tree!
+For doctests run following command:
+python3 -m doctest -v avl_tree.py
+For testing run:
+python avl_tree.py
"""
+
import math
import random
@@ -11,7 +16,7 @@ def __init__(self):
self.head = 0
self.tail = 0
- def isEmpty(self):
+ def is_empty(self):
return self.head == self.tail
def push(self, data):
@@ -39,39 +44,39 @@ def __init__(self, data):
self.right = None
self.height = 1
- def getdata(self):
+ def get_data(self):
return self.data
- def getleft(self):
+ def get_left(self):
return self.left
- def getright(self):
+ def get_right(self):
return self.right
- def getheight(self):
+ def get_height(self):
return self.height
- def setdata(self, data):
+ def set_data(self, data):
self.data = data
return
- def setleft(self, node):
+ def set_left(self, node):
self.left = node
return
- def setright(self, node):
+ def set_right(self, node):
self.right = node
return
- def setheight(self, height):
+ def set_height(self, height):
self.height = height
return
-def getheight(node):
+def get_height(node):
if node is None:
return 0
- return node.getheight()
+ return node.get_height()
def my_max(a, b):
@@ -80,7 +85,7 @@ def my_max(a, b):
return b
-def leftrotation(node):
+def right_rotation(node):
r"""
A B
/ \ / \
@@ -89,138 +94,171 @@ def leftrotation(node):
Bl Br UB Br C
/
UB
-
UB = unbalanced node
"""
- print("left rotation node:", node.getdata())
- ret = node.getleft()
- node.setleft(ret.getright())
- ret.setright(node)
- h1 = my_max(getheight(node.getright()), getheight(node.getleft())) + 1
- node.setheight(h1)
- h2 = my_max(getheight(ret.getright()), getheight(ret.getleft())) + 1
- ret.setheight(h2)
+ print("left rotation node:", node.get_data())
+ ret = node.get_left()
+ node.set_left(ret.get_right())
+ ret.set_right(node)
+ h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
+ node.set_height(h1)
+ h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
+ ret.set_height(h2)
return ret
-def rightrotation(node):
+def left_rotation(node):
"""
- a mirror symmetry rotation of the leftrotation
+ a mirror symmetry rotation of the left_rotation
"""
- print("right rotation node:", node.getdata())
- ret = node.getright()
- node.setright(ret.getleft())
- ret.setleft(node)
- h1 = my_max(getheight(node.getright()), getheight(node.getleft())) + 1
- node.setheight(h1)
- h2 = my_max(getheight(ret.getright()), getheight(ret.getleft())) + 1
- ret.setheight(h2)
+ print("right rotation node:", node.get_data())
+ ret = node.get_right()
+ node.set_right(ret.get_left())
+ ret.set_left(node)
+ h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
+ node.set_height(h1)
+ h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
+ ret.set_height(h2)
return ret
-def rlrotation(node):
+def lr_rotation(node):
r"""
A A Br
/ \ / \ / \
- B C RR Br C LR B A
+ B C LR Br C RR B A
/ \ --> / \ --> / / \
Bl Br B UB Bl UB C
\ /
UB Bl
- RR = rightrotation LR = leftrotation
+ RR = right_rotation LR = left_rotation
"""
- node.setleft(rightrotation(node.getleft()))
- return leftrotation(node)
+ node.set_left(left_rotation(node.get_left()))
+ return right_rotation(node)
-def lrrotation(node):
- node.setright(leftrotation(node.getright()))
- return rightrotation(node)
+def rl_rotation(node):
+ node.set_right(right_rotation(node.get_right()))
+ return left_rotation(node)
def insert_node(node, data):
if node is None:
return my_node(data)
- if data < node.getdata():
- node.setleft(insert_node(node.getleft(), data))
+ if data < node.get_data():
+ node.set_left(insert_node(node.get_left(), data))
if (
- getheight(node.getleft()) - getheight(node.getright()) == 2
+ get_height(node.get_left()) - get_height(node.get_right()) == 2
): # an unbalance detected
if (
- data < node.getleft().getdata()
+ data < node.get_left().get_data()
): # new node is the left child of the left child
- node = leftrotation(node)
+ node = right_rotation(node)
else:
- node = rlrotation(node) # new node is the right child of the left child
+ node = lr_rotation(node)
else:
- node.setright(insert_node(node.getright(), data))
- if getheight(node.getright()) - getheight(node.getleft()) == 2:
- if data < node.getright().getdata():
- node = lrrotation(node)
+ node.set_right(insert_node(node.get_right(), data))
+ if get_height(node.get_right()) - get_height(node.get_left()) == 2:
+ if data < node.get_right().get_data():
+ node = rl_rotation(node)
else:
- node = rightrotation(node)
- h1 = my_max(getheight(node.getright()), getheight(node.getleft())) + 1
- node.setheight(h1)
+ node = left_rotation(node)
+ h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
+ node.set_height(h1)
return node
-def getRightMost(root):
- while root.getright() is not None:
- root = root.getright()
- return root.getdata()
+def get_rightMost(root):
+ while root.get_right() is not None:
+ root = root.get_right()
+ return root.get_data()
-def getLeftMost(root):
- while root.getleft() is not None:
- root = root.getleft()
- return root.getdata()
+def get_leftMost(root):
+ while root.get_left() is not None:
+ root = root.get_left()
+ return root.get_data()
def del_node(root, data):
- if root.getdata() == data:
- if root.getleft() is not None and root.getright() is not None:
- temp_data = getLeftMost(root.getright())
- root.setdata(temp_data)
- root.setright(del_node(root.getright(), temp_data))
- elif root.getleft() is not None:
- root = root.getleft()
+ if root.get_data() == data:
+ if root.get_left() is not None and root.get_right() is not None:
+ temp_data = get_leftMost(root.get_right())
+ root.set_data(temp_data)
+ root.set_right(del_node(root.get_right(), temp_data))
+ elif root.get_left() is not None:
+ root = root.get_left()
else:
- root = root.getright()
- elif root.getdata() > data:
- if root.getleft() is None:
+ root = root.get_right()
+ elif root.get_data() > data:
+ if root.get_left() is None:
print("No such data")
return root
else:
- root.setleft(del_node(root.getleft(), data))
- elif root.getdata() < data:
- if root.getright() is None:
+ root.set_left(del_node(root.get_left(), data))
+ elif root.get_data() < data:
+ if root.get_right() is None:
return root
else:
- root.setright(del_node(root.getright(), data))
+ root.set_right(del_node(root.get_right(), data))
if root is None:
return root
- if getheight(root.getright()) - getheight(root.getleft()) == 2:
- if getheight(root.getright().getright()) > getheight(root.getright().getleft()):
- root = rightrotation(root)
+ if get_height(root.get_right()) - get_height(root.get_left()) == 2:
+ if get_height(root.get_right().get_right()) > \
+ get_height(root.get_right().get_left()):
+ root = left_rotation(root)
else:
- root = lrrotation(root)
- elif getheight(root.getright()) - getheight(root.getleft()) == -2:
- if getheight(root.getleft().getleft()) > getheight(root.getleft().getright()):
- root = leftrotation(root)
+ root = rl_rotation(root)
+ elif get_height(root.get_right()) - get_height(root.get_left()) == -2:
+ if get_height(root.get_left().get_left()) > \
+ get_height(root.get_left().get_right()):
+ root = right_rotation(root)
else:
- root = rlrotation(root)
- height = my_max(getheight(root.getright()), getheight(root.getleft())) + 1
- root.setheight(height)
+ root = lr_rotation(root)
+ height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
+ root.set_height(height)
return root
class AVLtree:
+ """
+ An AVL tree doctest
+ Examples:
+ >>> t = AVLtree()
+ >>> t.insert(4)
+ insert:4
+ >>> print(str(t).replace(" \\n","\\n"))
+ 4
+ *************************************
+ >>> t.insert(2)
+ insert:2
+ >>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
+ 4
+ 2 *
+ *************************************
+ >>> t.insert(3)
+ insert:3
+ right rotation node: 2
+ left rotation node: 4
+ >>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
+ 3
+ 2 4
+ *************************************
+ >>> t.get_height()
+ 2
+ >>> t.del_node(3)
+ delete:3
+ >>> print(str(t).replace(" \\n","\\n").replace(" \\n","\\n"))
+ 4
+ 2 *
+ *************************************
+ """
def __init__(self):
self.root = None
- def getheight(self):
+ def get_height(self):
# print("yyy")
- return getheight(self.root)
+ return get_height(self.root)
def insert(self, data):
print("insert:" + str(data))
@@ -233,56 +271,54 @@ def del_node(self, data):
return
self.root = del_node(self.root, data)
- def traversale(self): # a level traversale, gives a more intuitive look on the tree
+ def __str__(self): # a level traversale, gives a more intuitive look on the tree
+ output = ""
q = my_queue()
q.push(self.root)
- layer = self.getheight()
+ layer = self.get_height()
if layer == 0:
- return
+ return output
cnt = 0
- while not q.isEmpty():
+ while not q.is_empty():
node = q.pop()
space = " " * int(math.pow(2, layer - 1))
- print(space, end="")
+ output += space
if node is None:
- print("*", end="")
+ output += "*"
q.push(None)
q.push(None)
else:
- print(node.getdata(), end="")
- q.push(node.getleft())
- q.push(node.getright())
- print(space, end="")
+ output += str(node.get_data())
+ q.push(node.get_left())
+ q.push(node.get_right())
+ output += space
cnt = cnt + 1
for i in range(100):
if cnt == math.pow(2, i) - 1:
layer = layer - 1
if layer == 0:
- print()
- print("*************************************")
- return
- print()
+ output += "\n*************************************"
+ return output
+ output += "\n"
break
- print()
- print("*************************************")
- return
+ output += "\n*************************************"
+ return output
- def test(self):
- getheight(None)
- print("****")
- self.getheight()
+
+def _test():
+ import doctest
+ doctest.testmod()
if __name__ == "__main__":
+ _test()
t = AVLtree()
- t.traversale()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
- t.traversale()
-
+ print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
- t.traversale()
+ print(str(t))
| it's true definition of AVL tree,change left and right rotation,and add avl_tree doctest
### **Describe your change:**
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### **Checklist:**
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| https://api.github.com/repos/TheAlgorithms/Python/pulls/2145 | 2020-06-22T03:22:25Z | 2020-06-25T07:55:14Z | 2020-06-25T07:55:14Z | 2020-06-25T07:55:14Z | 3,252 | TheAlgorithms/Python | 30,213 |
fix for Files property missing on Computer module | diff --git a/interpreter/core/computer/computer.py b/interpreter/core/computer/computer.py
index 175f71586..7ccdd2d73 100644
--- a/interpreter/core/computer/computer.py
+++ b/interpreter/core/computer/computer.py
@@ -14,7 +14,7 @@
from .skills.skills import Skills
from .sms.sms import SMS
from .terminal.terminal import Terminal
-
+from .files.files import Files
class Computer:
def __init__(self, interpreter):
@@ -39,6 +39,7 @@ def __init__(self, interpreter):
self.skills = Skills(self)
self.docs = Docs(self)
self.ai = Ai(self)
+ self.files = Files(self)
self.emit_images = True
self.api_base = "https://api.openinterpreter.com/v0"
| ### Describe the changes you have made:
Import the missing Files module into the Computer module
### Reference any relevant issues (e.g. "Fixes #000"):
https://github.com/OpenInterpreter/open-interpreter/issues/1106
### Pre-Submission Checklist (optional but appreciated):
- [ n/a ] I have included relevant documentation updates (stored in /docs)
- [ x ] I have read `docs/CONTRIBUTING.md`
- [ x ] I have read `docs/ROADMAP.md`
### OS Tests (optional but appreciated):
- [ ] Tested on Windows
- [ ] Tested on MacOS
- [ x ] Tested on Linux
| https://api.github.com/repos/OpenInterpreter/open-interpreter/pulls/1107 | 2024-03-21T23:21:57Z | 2024-03-24T05:58:34Z | 2024-03-24T05:58:34Z | 2024-03-24T05:58:34Z | 185 | OpenInterpreter/open-interpreter | 40,875 |
lambda: Fix adding environment variables to functions previously not having any | diff --git a/lib/ansible/modules/cloud/amazon/lambda.py b/lib/ansible/modules/cloud/amazon/lambda.py
index 2c5ad38226eec3..0a0a9897b33766 100644
--- a/lib/ansible/modules/cloud/amazon/lambda.py
+++ b/lib/ansible/modules/cloud/amazon/lambda.py
@@ -330,7 +330,7 @@ def main():
func_kwargs.update({'Timeout': timeout})
if memory_size and current_config['MemorySize'] != memory_size:
func_kwargs.update({'MemorySize': memory_size})
- if (environment_variables is not None) and (current_config['Environment']['Variables'] != environment_variables):
+ if (environment_variables is not None) and (current_config.get('Environment', {}).get('Variables', {}) != environment_variables):
func_kwargs.update({'Environment':{'Variables': environment_variables}})
if dead_letter_arn is not None:
if current_config.get('DeadLetterConfig'):
diff --git a/test/units/modules/cloud/amazon/test_lambda.py b/test/units/modules/cloud/amazon/test_lambda.py
index db718df59f8484..049d1394bc2aad 100644
--- a/test/units/modules/cloud/amazon/test_lambda.py
+++ b/test/units/modules/cloud/amazon/test_lambda.py
@@ -65,6 +65,9 @@ def set_module_args(args):
"timeout" : 3,
"handler": 'lambda_python.my_handler'
}
+module_args_with_environment=dict(base_module_args, environment_variables={
+ "variable_name": "variable_value"
+})
def make_mock_no_connection_connection(config):
@@ -206,6 +209,30 @@ def test_update_lambda_if_only_one_config_item_changed():
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
+def test_update_lambda_if_added_environment_variable():
+
+ set_module_args(module_args_with_environment)
+ (boto3_conn_double,lambda_client_double)=make_mock_connection(base_lambda_config)
+
+ with patch.object(lda, 'boto3_conn', boto3_conn_double):
+ try:
+ lda.main()
+ except SystemExit:
+ pass
+
+ # guard against calling other than for a lambda connection (e.g. IAM)
+ assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
+ assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
+ "failed to update lambda function when configuration changed"
+ assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
+ "lambda function update called multiple times when only one time should be needed"
+ assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
+ "updated lambda code when no change should have happened"
+
+ (update_args, update_kwargs)=lambda_client_double.update_function_configuration.call_args
+ assert (len(update_kwargs) > 0), "expected update configuration called with keyword args, none found"
+ assert update_kwargs['Environment']['Variables'] == module_args_with_environment['environment_variables']
+
def test_dont_update_lambda_if_nothing_changed():
set_module_args(base_module_args)
| ##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bugfix Pull Request
##### COMPONENT NAME
<!--- Name of the module/plugin/module/task -->
lambda
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.2.1.0
config file = /home/mmaslowski/ansible/ansible.cfg
configured module search path = ['./modules/']
```
with `lambda` module copied from newest `devel`.
##### SUMMARY
<!--- Describe the change, including rationale and design decisions -->
I had an AWS Lambda function created without setting any environment variables (e.g. from the `lambda` module from Ansible 2.2 where it did not support setting them). I changed my playbook to invoke the `lambda` module passing environment variables to the function, now I get:
```
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: KeyError: 'Environment'
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_MXxNLc/ansible_module_lambda.py\", line 510, in <module>\n main()\n File \"/tmp/ansible_MXxNLc/ansible_module_lambda.py\", line 333, in main\n if (environment_variables is not None) and (current_config['Environment']['Variables'] != environment_variables):\nKeyError: 'Environment'\n", "module_stdout": "", "msg": "MODULE FAILURE"}
```
This results from the `Environment` key missing from the config returned by boto3's `get_function`. https://docs.aws.amazon.com/lambda/latest/dg/API_GetFunction.html and linked data types suggest it not being required in the return value.
My commit makes the module consider them empty if they are not returned by the API.
Tested with these versions of boto packages:
```
boto (2.42.0)
boto3 (1.4.4)
botocore (1.5.19)
```
<!---
If you are fixing an existing issue, please include "Fixes #nnn" in your
commit message and your description; but you should still explain what
the change does.
-->
<!-- Paste verbatim command output below, e.g. before and after your change --> | https://api.github.com/repos/ansible/ansible/pulls/22313 | 2017-03-06T13:56:03Z | 2017-03-06T16:10:40Z | 2017-03-06T16:10:40Z | 2019-04-26T20:52:34Z | 701 | ansible/ansible | 49,053 |
[Update core.py]: comment the support for with-statement | diff --git a/gym/core.py b/gym/core.py
index 11ba5842d1e..8657afdec2d 100644
--- a/gym/core.py
+++ b/gym/core.py
@@ -145,9 +145,11 @@ def __str__(self):
return '<{}<{}>>'.format(type(self).__name__, self.spec.id)
def __enter__(self):
+ """Support with-statement for the environment. """
return self
def __exit__(self, *args):
+ """Support with-statement for the environment. """
self.close()
# propagate exception
return False
| https://api.github.com/repos/openai/gym/pulls/1627 | 2019-08-01T20:26:54Z | 2019-10-25T22:48:21Z | 2019-10-25T22:48:21Z | 2019-10-25T22:48:21Z | 142 | openai/gym | 5,859 | |
Replace 404 url | diff --git a/youtube_dl/extractor/howstuffworks.py b/youtube_dl/extractor/howstuffworks.py
index 68684b99744..fccc2388408 100644
--- a/youtube_dl/extractor/howstuffworks.py
+++ b/youtube_dl/extractor/howstuffworks.py
@@ -28,13 +28,13 @@ class HowStuffWorksIE(InfoExtractor):
}
},
{
- 'url': 'http://adventure.howstuffworks.com/39516-deadliest-catch-jakes-farewell-pots-video.htm',
+ 'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
'info_dict': {
- 'id': '553470',
- 'display_id': 'deadliest-catch-jakes-farewell-pots',
+ 'id': '453464',
+ 'display_id': 'survival-zone-food-and-water-in-the-savanna',
'ext': 'mp4',
- 'title': 'Deadliest Catch: Jake\'s Farewell Pots',
- 'description': 'md5:9632c346d5e43ee238028c9cefd8dbbc',
+ 'title': 'Survival Zone: Food and Water In the Savanna',
+ 'description': 'md5:7e1c89f6411434970c15fa094170c371',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/3923 | 2014-10-11T10:34:20Z | 2014-10-11T18:48:11Z | 2014-10-11T18:48:11Z | 2014-10-11T23:44:29Z | 345 | ytdl-org/youtube-dl | 50,204 | |
Fix ci status badge error | diff --git a/README.md b/README.md
index d2b5df45d9..6a31b02fdc 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@
[](https://httpie.org/docs/cli)
[](https://pypi.python.org/pypi/httpie)
-[](https://github.com/httpie/httpie/actions)
+[](https://github.com/httpie/httpie/actions)
[](https://codecov.io/gh/httpie/httpie)
</div>
| Hi, this PR fixes the CI badge status error due to the recent update of shields.io. You can read the details here: https://github.com/badges/shields/issues/8671.
Also, I changed to show the status of `tests` instead of `Build`. I couldn't find `Build` workflow so I dug into the commit history and found out that the `Build` workflow was actually removed a while ago (https://github.com/httpie/httpie/commit/4c8633c6e51f388523ab4fa649040934402a4fc9#diff-1db27d93186e46d3b441ece35801b244db8ee144ff1405ca27a163bfe878957f). The previous badge might have been showing a false positive status. 😅 After this fix, it will show the correct status again!
**Before**
<img width="924" alt="Screenshot 2023-01-03 at 1 44 42" src="https://user-images.githubusercontent.com/1425259/210260734-cc3599af-d485-4ff8-a1b1-ef7b250053eb.png">
**After**
<img width="947" alt="Screenshot 2023-01-03 at 2 00 34" src="https://user-images.githubusercontent.com/1425259/210260729-4308e112-48e5-4774-a7eb-8f644c59915c.png">
| https://api.github.com/repos/httpie/cli/pulls/1464 | 2023-01-02T17:01:04Z | 2023-01-04T11:17:18Z | 2023-01-04T11:17:18Z | 2023-01-04T12:12:07Z | 312 | httpie/cli | 34,058 |
[extractor/gronkh] add duration and chapters | diff --git a/yt_dlp/extractor/gronkh.py b/yt_dlp/extractor/gronkh.py
index b9370e36c11..1ae0a689369 100644
--- a/yt_dlp/extractor/gronkh.py
+++ b/yt_dlp/extractor/gronkh.py
@@ -3,6 +3,7 @@
from .common import InfoExtractor
from ..utils import (
OnDemandPagedList,
+ float_or_none,
traverse_obj,
unified_strdate,
)
@@ -19,7 +20,9 @@ class GronkhIE(InfoExtractor):
'title': 'H.O.R.D.E. - DAS ZWEiTE ZEiTALTER 🎲 Session 1',
'view_count': int,
'thumbnail': 'https://01.cdn.vod.farm/preview/9e2555d3a23bf4e5c5b7c6b3b70a9d84.jpg',
- 'upload_date': '20221111'
+ 'upload_date': '20221111',
+ 'chapters': 'count:3',
+ 'duration': 31463,
},
'params': {'skip_download': True}
}, {
@@ -30,7 +33,8 @@ class GronkhIE(InfoExtractor):
'title': 'GTV0536, 2021-10-01 - MARTHA IS DEAD #FREiAB1830 !FF7 !horde !archiv',
'view_count': int,
'thumbnail': 'https://01.cdn.vod.farm/preview/6436746cce14e25f751260a692872b9b.jpg',
- 'upload_date': '20211001'
+ 'upload_date': '20211001',
+ 'duration': 32058,
},
'params': {'skip_download': True}
}, {
@@ -56,6 +60,12 @@ def _real_extract(self, url):
'upload_date': unified_strdate(data_json.get('created_at')),
'formats': formats,
'subtitles': subtitles,
+ 'duration': float_or_none(data_json.get('source_length')),
+ 'chapters': traverse_obj(data_json, (
+ 'chapters', lambda _, v: float_or_none(v['offset']) is not None, {
+ 'title': 'title',
+ 'start_time': ('offset', {float_or_none}),
+ })) or None,
}
| **IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
<!--
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-->
Also parse the duration and chapters on gronkh.tv.
Fixes #
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
</details>
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/6817 | 2023-04-15T09:06:12Z | 2023-04-16T17:20:10Z | 2023-04-16T17:20:10Z | 2023-04-16T17:29:48Z | 549 | yt-dlp/yt-dlp | 7,726 |
[clipfish] update test cases | diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py
index bb52e0c6ff7..0920f6219e1 100644
--- a/youtube_dl/extractor/clipfish.py
+++ b/youtube_dl/extractor/clipfish.py
@@ -12,7 +12,7 @@ class ClipfishIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.clipfish.de/special/ugly-americans/video/4343170/s01-e01-ugly-americans-date-in-der-hoelle/',
- 'md5': '720563e467b86374c194bdead08d207d',
+ 'md5': 'b9a5dc46294154c1193e2d10e0c95693',
'info_dict': {
'id': '4343170',
'ext': 'mp4',
| ## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [x] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Explanation of your *pull request* in arbitrary form goes here. Please make sure the description explains the purpose and effect of your *pull request* and is worded well enough to be understood. Provide as much context and examples as possible.
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/12865 | 2017-04-27T16:12:00Z | 2017-04-27T19:51:31Z | 2017-04-27T19:51:31Z | 2017-04-27T19:55:27Z | 249 | ytdl-org/youtube-dl | 50,454 |
`BaseTracer` helper method for `Run` lookup | diff --git a/libs/core/langchain_core/tracers/base.py b/libs/core/langchain_core/tracers/base.py
index c37aec08d211d5..db0301b2a1a5fd 100644
--- a/libs/core/langchain_core/tracers/base.py
+++ b/libs/core/langchain_core/tracers/base.py
@@ -92,6 +92,17 @@ def _get_execution_order(self, parent_run_id: Optional[str] = None) -> int:
return parent_run.child_execution_order + 1
+ def _get_run(self, run_id: UUID, run_type: str | None = None) -> Run:
+ try:
+ run = self.run_map[str(run_id)]
+ except KeyError as exc:
+ raise TracerException(f"No indexed run ID {run_id}.") from exc
+ if run_type is not None and run.run_type != run_type:
+ raise TracerException(
+ f"Found {run.run_type} run at ID {run_id}, but expected {run_type} run."
+ )
+ return run
+
def on_llm_start(
self,
serialized: Dict[str, Any],
@@ -138,13 +149,7 @@ def on_llm_new_token(
**kwargs: Any,
) -> Run:
"""Run on new LLM token. Only available when streaming is enabled."""
- if not run_id:
- raise TracerException("No run_id provided for on_llm_new_token callback.")
-
- run_id_ = str(run_id)
- llm_run = self.run_map.get(run_id_)
- if llm_run is None or llm_run.run_type != "llm":
- raise TracerException(f"No LLM Run found to be traced for {run_id}")
+ llm_run = self._get_run(run_id, run_type="llm")
event_kwargs: Dict[str, Any] = {"token": token}
if chunk:
event_kwargs["chunk"] = chunk
@@ -165,12 +170,7 @@ def on_retry(
run_id: UUID,
**kwargs: Any,
) -> Run:
- if not run_id:
- raise TracerException("No run_id provided for on_retry callback.")
- run_id_ = str(run_id)
- llm_run = self.run_map.get(run_id_)
- if llm_run is None:
- raise TracerException("No Run found to be traced for on_retry")
+ llm_run = self._get_run(run_id)
retry_d: Dict[str, Any] = {
"slept": retry_state.idle_for,
"attempt": retry_state.attempt_number,
@@ -196,13 +196,7 @@ def on_retry(
def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> Run:
"""End a trace for an LLM run."""
- if not run_id:
- raise TracerException("No run_id provided for on_llm_end callback.")
-
- run_id_ = str(run_id)
- llm_run = self.run_map.get(run_id_)
- if llm_run is None or llm_run.run_type != "llm":
- raise TracerException(f"No LLM Run found to be traced for {run_id}")
+ llm_run = self._get_run(run_id, run_type="llm")
llm_run.outputs = response.dict()
for i, generations in enumerate(response.generations):
for j, generation in enumerate(generations):
@@ -225,13 +219,7 @@ def on_llm_error(
**kwargs: Any,
) -> Run:
"""Handle an error for an LLM run."""
- if not run_id:
- raise TracerException("No run_id provided for on_llm_error callback.")
-
- run_id_ = str(run_id)
- llm_run = self.run_map.get(run_id_)
- if llm_run is None or llm_run.run_type != "llm":
- raise TracerException(f"No LLM Run found to be traced for {run_id}")
+ llm_run = self._get_run(run_id, run_type="llm")
llm_run.error = repr(error)
llm_run.end_time = datetime.utcnow()
llm_run.events.append({"name": "error", "time": llm_run.end_time})
@@ -286,12 +274,7 @@ def on_chain_end(
**kwargs: Any,
) -> Run:
"""End a trace for a chain run."""
- if not run_id:
- raise TracerException("No run_id provided for on_chain_end callback.")
- chain_run = self.run_map.get(str(run_id))
- if chain_run is None:
- raise TracerException(f"No chain Run found to be traced for {run_id}")
-
+ chain_run = self._get_run(run_id)
chain_run.outputs = (
outputs if isinstance(outputs, dict) else {"output": outputs}
)
@@ -312,12 +295,7 @@ def on_chain_error(
**kwargs: Any,
) -> Run:
"""Handle an error for a chain run."""
- if not run_id:
- raise TracerException("No run_id provided for on_chain_error callback.")
- chain_run = self.run_map.get(str(run_id))
- if chain_run is None:
- raise TracerException(f"No chain Run found to be traced for {run_id}")
-
+ chain_run = self._get_run(run_id)
chain_run.error = repr(error)
chain_run.end_time = datetime.utcnow()
chain_run.events.append({"name": "error", "time": chain_run.end_time})
@@ -366,12 +344,7 @@ def on_tool_start(
def on_tool_end(self, output: str, *, run_id: UUID, **kwargs: Any) -> Run:
"""End a trace for a tool run."""
- if not run_id:
- raise TracerException("No run_id provided for on_tool_end callback.")
- tool_run = self.run_map.get(str(run_id))
- if tool_run is None or tool_run.run_type != "tool":
- raise TracerException(f"No tool Run found to be traced for {run_id}")
-
+ tool_run = self._get_run(run_id, run_type="tool")
tool_run.outputs = {"output": output}
tool_run.end_time = datetime.utcnow()
tool_run.events.append({"name": "end", "time": tool_run.end_time})
@@ -387,12 +360,7 @@ def on_tool_error(
**kwargs: Any,
) -> Run:
"""Handle an error for a tool run."""
- if not run_id:
- raise TracerException("No run_id provided for on_tool_error callback.")
- tool_run = self.run_map.get(str(run_id))
- if tool_run is None or tool_run.run_type != "tool":
- raise TracerException(f"No tool Run found to be traced for {run_id}")
-
+ tool_run = self._get_run(run_id, run_type="tool")
tool_run.error = repr(error)
tool_run.end_time = datetime.utcnow()
tool_run.events.append({"name": "error", "time": tool_run.end_time})
@@ -445,12 +413,7 @@ def on_retriever_error(
**kwargs: Any,
) -> Run:
"""Run when Retriever errors."""
- if not run_id:
- raise TracerException("No run_id provided for on_retriever_error callback.")
- retrieval_run = self.run_map.get(str(run_id))
- if retrieval_run is None or retrieval_run.run_type != "retriever":
- raise TracerException(f"No retriever Run found to be traced for {run_id}")
-
+ retrieval_run = self._get_run(run_id, run_type="retriever")
retrieval_run.error = repr(error)
retrieval_run.end_time = datetime.utcnow()
retrieval_run.events.append({"name": "error", "time": retrieval_run.end_time})
@@ -462,11 +425,7 @@ def on_retriever_end(
self, documents: Sequence[Document], *, run_id: UUID, **kwargs: Any
) -> Run:
"""Run when Retriever ends running."""
- if not run_id:
- raise TracerException("No run_id provided for on_retriever_end callback.")
- retrieval_run = self.run_map.get(str(run_id))
- if retrieval_run is None or retrieval_run.run_type != "retriever":
- raise TracerException(f"No retriever Run found to be traced for {run_id}")
+ retrieval_run = self._get_run(run_id, run_type="retriever")
retrieval_run.outputs = {"documents": documents}
retrieval_run.end_time = datetime.utcnow()
retrieval_run.events.append({"name": "end", "time": retrieval_run.end_time})
| I observed the same run ID extraction logic is repeated many times in `BaseTracer`.
This PR creates a helper method for DRY code. | https://api.github.com/repos/langchain-ai/langchain/pulls/14139 | 2023-12-01T20:10:08Z | 2023-12-02T22:05:50Z | 2023-12-02T22:05:50Z | 2023-12-02T22:49:34Z | 1,943 | langchain-ai/langchain | 42,794 |
bitz XBT mapping | diff --git a/js/bitz.js b/js/bitz.js
index 079a17433d29..01d8b56be084 100644
--- a/js/bitz.js
+++ b/js/bitz.js
@@ -142,6 +142,7 @@ module.exports = class bitz extends Exchange {
// https://github.com/ccxt/ccxt/issues/3881
// https://support.bit-z.pro/hc/en-us/articles/360007500654-BOX-BOX-Token-
'BOX': 'BOX Token',
+ 'XBT': 'XBT',
'LEO': 'LeoCoin',
'XRB': 'NANO',
'PXC': 'Pixiecoin',
| https://www.bitz.ai/en/exchange/xbt_usdt
conflict with BTC | https://api.github.com/repos/ccxt/ccxt/pulls/9900 | 2021-08-30T16:13:52Z | 2021-08-30T16:37:12Z | 2021-08-30T16:37:12Z | 2021-08-30T16:37:12Z | 153 | ccxt/ccxt | 13,731 |
Use submit and blur for quick settings textbox | diff --git a/modules/ui_settings.py b/modules/ui_settings.py
index 0c560b30f9f..a6076bf3060 100644
--- a/modules/ui_settings.py
+++ b/modules/ui_settings.py
@@ -260,13 +260,20 @@ def add_functionality(self, demo):
component = self.component_dict[k]
info = opts.data_labels[k]
- change_handler = component.release if hasattr(component, 'release') else component.change
- change_handler(
- fn=lambda value, k=k: self.run_settings_single(value, key=k),
- inputs=[component],
- outputs=[component, self.text_settings],
- show_progress=info.refresh is not None,
- )
+ if isinstance(component, gr.Textbox):
+ methods = [component.submit, component.blur]
+ elif hasattr(component, 'release'):
+ methods = [component.release]
+ else:
+ methods = [component.change]
+
+ for method in methods:
+ method(
+ fn=lambda value, k=k: self.run_settings_single(value, key=k),
+ inputs=[component],
+ outputs=[component, self.text_settings],
+ show_progress=info.refresh is not None,
+ )
button_set_checkpoint = gr.Button('Change checkpoint', elem_id='change_checkpoint', visible=False)
button_set_checkpoint.click(
| ## Description
[[Bug]: Can't normal edit Directory name pattern after add it to Quicksettings list ! ](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/11740)
when textbox is in quick settings fit one typing too fast can cause the textbox to glitch
the issue is caused by the settings update speed is unable to keep up with the typing speed
my solution
switch to `blur` `submit` https://www.gradio.app/docs/textbox
only update settings when textbook is released or when Enter is pressed
it was using `change` since `release` is not available
`submit` is a bit redundant, but I suppose someone could press `Enter` then press `Ctrl Enter` to generate something
unfortunately `submit` doesn't trigger on `control enter`
maybe a better solution could be achieved by buffer the input using JavaScript
but I am bad with JavaScript and I don't want to add any complications
## Screenshots/videos:
issue example in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/11740
## Checklist:
- [x] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
- [x] I have performed a self-review of my own code
- [x] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
- [x] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
| https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/11750 | 2023-07-12T14:35:58Z | 2023-07-13T11:49:48Z | 2023-07-13T11:49:48Z | 2023-07-13T17:30:05Z | 294 | AUTOMATIC1111/stable-diffusion-webui | 40,507 |
Improve prompts-from-file script to support negative prompts and sampler-by-name | diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 32fe6bdbaf4..6e118ddb552 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -9,6 +9,7 @@
import modules.scripts as scripts
import gradio as gr
+from modules import sd_samplers
from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
@@ -44,6 +45,7 @@ def process_boolean_tag(tag):
"seed_resize_from_h": process_int_tag,
"seed_resize_from_w": process_int_tag,
"sampler_index": process_int_tag,
+ "sampler_name": process_string_tag,
"batch_size": process_int_tag,
"n_iter": process_int_tag,
"steps": process_int_tag,
@@ -66,14 +68,28 @@ def cmdargs(line):
arg = args[pos]
assert arg.startswith("--"), f'must start with "--": {arg}'
+ assert pos+1 < len(args), f'missing argument for command line option {arg}'
+
tag = arg[2:]
+ if tag == "prompt" or tag == "negative_prompt":
+ pos += 1
+ prompt = args[pos]
+ pos += 1
+ while pos < len(args) and not args[pos].startswith("--"):
+ prompt += " "
+ prompt += args[pos]
+ pos += 1
+ res[tag] = prompt
+ continue
+
+
func = prompt_tags.get(tag, None)
assert func, f'unknown commandline option: {arg}'
- assert pos+1 < len(args), f'missing argument for command line option {arg}'
-
val = args[pos+1]
+ if tag == "sampler_name":
+ val = sd_samplers.samplers_map.get(val.lower(), None)
res[tag] = func(val)
| I updated the prompt-from-file script so that you can add custom prompts and custom negative prompts when also using other flags. I also added support for specifying a sampler by name rather than by index.
This significantly increases the flexibility of the script for power users. To the best of my knowledge, this does not break compatibility with existing text files. All existing text files should continue to work as expected after this change. | https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/5699 | 2022-12-13T17:08:13Z | 2022-12-24T08:16:09Z | 2022-12-24T08:16:09Z | 2023-10-01T18:55:35Z | 454 | AUTOMATIC1111/stable-diffusion-webui | 39,876 |
get_handler_file_from_name when path contains periods | diff --git a/localstack/services/awslambda/lambda_utils.py b/localstack/services/awslambda/lambda_utils.py
index aafd9878746da..0d47456700305 100644
--- a/localstack/services/awslambda/lambda_utils.py
+++ b/localstack/services/awslambda/lambda_utils.py
@@ -128,24 +128,35 @@ def is_provided_runtime(runtime_details: Union[LambdaFunction, str]) -> bool:
return runtime.startswith("provided")
+def format_name_to_path(handler_name: str, delimiter: str, extension: str):
+ file_path = handler_name.rpartition(delimiter)[0]
+ if delimiter == ":":
+ file_path = file_path.split(delimiter)[0]
+
+ if os.path.sep not in file_path:
+ file_path = file_path.replace(".", os.path.sep)
+
+ if file_path.startswith(f".{os.path.sep}"):
+ file_path = file_path[2:]
+
+ return f"{file_path}{extension}"
+
+
def get_handler_file_from_name(handler_name: str, runtime: str = None):
runtime = runtime or LAMBDA_DEFAULT_RUNTIME
+
if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):
return "bootstrap"
- delimiter = "."
if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
- file_ext = ".js"
- elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
- file_ext = ""
- elif runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):
- file_ext = ".dll"
- delimiter = ":"
- elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
- file_ext = ".rb"
- else:
- handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)
- file_ext = ".py"
- return "%s%s" % (handler_name.split(delimiter)[0], file_ext)
+ return format_name_to_path(handler_name, ".", ".js")
+ if runtime.startswith(LAMBDA_RUNTIME_GOLANG):
+ return format_name_to_path(handler_name, ".", "")
+ if runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):
+ return format_name_to_path(handler_name, ":", ".dll")
+ if runtime.startswith(LAMBDA_RUNTIME_RUBY):
+ return format_name_to_path(handler_name, ".", ".rb")
+
+ return format_name_to_path(handler_name, ".", ".py")
def is_java_lambda(lambda_details):
diff --git a/tests/unit/services/awslambda/__init__.py b/tests/unit/services/awslambda/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/unit/services/awslambda/test_lambda_utils.py b/tests/unit/services/awslambda/test_lambda_utils.py
new file mode 100644
index 0000000000000..73c4ecb0fae35
--- /dev/null
+++ b/tests/unit/services/awslambda/test_lambda_utils.py
@@ -0,0 +1,43 @@
+from localstack.services.awslambda.lambda_utils import (
+ LAMBDA_RUNTIME_DOTNETCORE31,
+ LAMBDA_RUNTIME_GOLANG,
+ LAMBDA_RUNTIME_NODEJS,
+ LAMBDA_RUNTIME_PROVIDED,
+ LAMBDA_RUNTIME_RUBY,
+ format_name_to_path,
+ get_handler_file_from_name,
+)
+
+
+class TestLambdaUtils:
+ def test_format_name_to_path(self):
+ assert ".build/handler.js" == format_name_to_path(".build/handler.execute", ".", ".js")
+ assert "handler" == format_name_to_path("handler.execute", ".", "")
+ assert "CSharpHandlers.dll" == format_name_to_path(
+ "./CSharpHandlers::AwsDotnetCsharp.Handler::CreateProfileAsync",
+ ":",
+ ".dll",
+ )
+ assert "test/handler.rb" == format_name_to_path("test.handler.execute", ".", ".rb")
+ assert "test.handler.py" == format_name_to_path("./test.handler.execute", ".", ".py")
+ assert "../handler.js" == format_name_to_path("../handler.execute", ".", ".js")
+
+ def test_get_handler_file_from_name(self):
+ assert ".build/handler.js" == get_handler_file_from_name(
+ ".build/handler.execute", LAMBDA_RUNTIME_NODEJS
+ )
+ assert ".build/handler" == get_handler_file_from_name(
+ "./.build/handler.execute", LAMBDA_RUNTIME_GOLANG
+ )
+ assert "CSharpHandlers.dll" == get_handler_file_from_name(
+ "./CSharpHandlers::AwsDotnetCsharp.Handler::CreateProfileAsync",
+ LAMBDA_RUNTIME_DOTNETCORE31,
+ )
+ assert "test/handler.rb" == get_handler_file_from_name(
+ "test.handler.execute", LAMBDA_RUNTIME_RUBY
+ )
+ assert "test.handler" == get_handler_file_from_name(
+ "./test.handler.execute", LAMBDA_RUNTIME_GOLANG
+ )
+ assert "../handler.py" == get_handler_file_from_name("../handler.execute")
+ assert "bootstrap" == get_handler_file_from_name("", LAMBDA_RUNTIME_PROVIDED)
| Fixes for get_handler_file_from_name, fixing incorrectly returning '.js' from '.build/handler.execute' (Typescript).
Revisiting #1774 / #1775
Also appears related to both #5485 and #3969 with '.webpack/service/first.hello'
This appears to have either regressed or not been fixed, due to `handler_name.split(delimiter)[0]` will return '.js' from '.build/handler.execute' instead of '.build/handler.js'
Have added in a few tests to with my assumptions on how it's currently working.
Please do let me know if any of these assumptions are incorrect, or anything else for that matter and I'll update the PR.
Thanks,
Matt | https://api.github.com/repos/localstack/localstack/pulls/5849 | 2022-04-12T16:26:52Z | 2022-04-19T19:28:42Z | 2022-04-19T19:28:41Z | 2022-04-19T20:29:32Z | 1,151 | localstack/localstack | 28,753 |
add API from asterank.com for minor planets | diff --git a/README.md b/README.md
index b7cdd386aa..59c8bc5a91 100644
--- a/README.md
+++ b/README.md
@@ -128,6 +128,11 @@ A collective list of JSON APIs for use in web development.
| Drupal.org | Drupal.org API | No | [Go!](https://www.drupal.org/drupalorg/api) |
| Libraries.io | Open source software libraries | No | [Go!](https://libraries.io/api) |
+### Planets
+| API | Description | OAuth |Link |
+|---|---|---|---|
+| Minor Planet Center | Asterank.com API | No | [Go!](http://www.asterank.com/mpc) |
+
### Security
| API | Description | OAuth |Link |
| asterank.com offers a couple of neat api's. One of the most interesting is the Minor Planets which I added to your list.
| https://api.github.com/repos/public-apis/public-apis/pulls/175 | 2016-04-24T10:20:02Z | 2016-04-24T12:29:49Z | 2016-04-24T12:29:49Z | 2016-04-24T12:29:49Z | 181 | public-apis/public-apis | 36,172 |
Support MPS backend for MacOS devices | diff --git a/README.md b/README.md
index d0f438ea5e..9c805fa8e1 100644
--- a/README.md
+++ b/README.md
@@ -90,6 +90,11 @@ This runs on the CPU only and does not require GPU. It requires around 60GB of C
python3 -m fastchat.serve.cli --model-name /path/to/vicuna/weights --device cpu
```
+### Metal Backend (Mac computers with Apple silicon or AMD GPUs)
+```
+python3 -m fastchat.serve.cli --model-name /path/to/vicuna/weights --device mps
+```
+
### Others (Quantization, Low-end Devices, and More Platforms)
You can load in 8-bit mode to reduce GPU memory usage with slightly degraded model quality.
diff --git a/fastchat/serve/cli.py b/fastchat/serve/cli.py
index 1b09d2d3df..a317e19a53 100644
--- a/fastchat/serve/cli.py
+++ b/fastchat/serve/cli.py
@@ -9,10 +9,13 @@
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
from fastchat.conversation import conv_templates, SeparatorStyle
+from fastchat.serve.monkey_patch_non_inplace import replace_llama_attn_with_non_inplace_operations
def load_model(model_name, device, num_gpus, load_8bit=False):
- if device == "cuda":
+ if device == "cpu":
+ kwargs = {}
+ elif device == "cuda":
kwargs = {"torch_dtype": torch.float16}
if load_8bit:
if num_gpus != "auto" and int(num_gpus) != 1:
@@ -28,18 +31,22 @@ def load_model(model_name, device, num_gpus, load_8bit=False):
"device_map": "auto",
"max_memory": {i: "13GiB" for i in range(num_gpus)},
})
- elif device == "cpu":
- kwargs = {}
+ elif device == "mps":
+ # Avoid bugs in mps backend by not using in-place operations.
+ kwargs = {"torch_dtype": torch.float16}
+ replace_llama_attn_with_non_inplace_operations()
else:
raise ValueError(f"Invalid device: {device}")
- tokenizer = AutoTokenizer.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_name,
low_cpu_mem_usage=True, **kwargs)
# calling model.cuda() mess up weights if loading 8-bit weights
if device == "cuda" and num_gpus == 1 and not load_8bit:
- model.cuda()
+ model.to("cuda")
+ elif device == "mps":
+ model.to("mps")
return model, tokenizer
@@ -78,6 +85,11 @@ def generate_stream(tokenizer, model, params, device,
past_key_values = out.past_key_values
last_token_logits = logits[0][-1]
+
+ if device == "mps":
+ # Switch to CPU by avoiding some bugs in mps backend.
+ last_token_logits = last_token_logits.float().to("cpu")
+
if temperature < 1e-4:
token = int(torch.argmax(last_token_logits))
else:
@@ -155,7 +167,7 @@ def main(args):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
- parser.add_argument("--device", type=str, choices=["cuda", "cpu"], default="cuda")
+ parser.add_argument("--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda")
parser.add_argument("--num-gpus", type=str, default="1")
parser.add_argument("--load-8bit", action="store_true")
parser.add_argument("--conv-template", type=str, default="v1")
diff --git a/fastchat/serve/monkey_patch_non_inplace.py b/fastchat/serve/monkey_patch_non_inplace.py
new file mode 100644
index 0000000000..23a2c13c17
--- /dev/null
+++ b/fastchat/serve/monkey_patch_non_inplace.py
@@ -0,0 +1,98 @@
+"""
+Monkey patch the llama implementation in the huggingface/transformers library.
+Avoid bugs in mps backend by not using in-place operations.
+"""
+import math
+from typing import List, Optional, Tuple
+
+import torch
+from torch import nn
+import transformers
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2].clone()
+ x2 = x[..., x.shape[-1] // 2 :].clone()
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
+ gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
+ gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
+ cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
+ sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+ # [bsz, nh, t, hd]
+
+ if past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+ past_key_value = (key_states, value_states) if use_cache else None
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights + attention_mask
+ attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+def replace_llama_attn_with_non_inplace_operations():
+ """Avoid bugs in mps backend by not using in-place operations."""
+ transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
diff --git a/pyproject.toml b/pyproject.toml
index fcd13b23bb..32172e4dab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "fschat"
-version = "0.1.4"
+version = "0.1.5"
description = "An open platform for training, serving, and evaluating large language model based chatbots."
readme = "README.md"
requires-python = ">=3.8"
@@ -14,7 +14,7 @@ classifiers = [
]
dependencies = [
"accelerate", "fastapi", "gradio==3.23", "markdown2[all]", "numpy",
- "requests", "sentencepiece", "tokenizers==0.12.1",
+ "requests", "sentencepiece", "tokenizers>=0.12.1",
"torch", "uvicorn", "wandb",
"transformers @ git+https://github.com/huggingface/transformers.git"
]
| https://api.github.com/repos/lm-sys/FastChat/pulls/250 | 2023-04-06T14:29:10Z | 2023-04-06T14:42:42Z | 2023-04-06T14:42:42Z | 2023-06-29T11:48:26Z | 2,384 | lm-sys/FastChat | 41,381 | |
Force nginx tests to run during CI | diff --git a/tests/boulder-integration.sh b/tests/boulder-integration.sh
index 08c4826768a..d86a6fb8c78 100755
--- a/tests/boulder-integration.sh
+++ b/tests/boulder-integration.sh
@@ -203,7 +203,9 @@ common revoke --cert-path "$root/conf/live/le2.wtf/cert.pem" \
common unregister
-if type nginx;
+# Most CI systems set this variable to true.
+# If the tests are running as part of CI, Nginx should be available.
+if ${CI:-false} || type nginx;
then
. ./certbot-nginx/tests/boulder-integration.sh
fi
| If for some reason our Nginx set up gets broken in Travis, our tests will currently silently pass without making us aware of the issue. Let's fix this!
[Travis source](https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables)
[Circle source](https://circleci.com/docs/1.0/environment-variables/) | https://api.github.com/repos/certbot/certbot/pulls/4558 | 2017-04-27T21:18:25Z | 2017-05-16T19:19:08Z | 2017-05-16T19:19:08Z | 2017-05-16T19:19:12Z | 152 | certbot/certbot | 3,633 |
Fix syntax error | diff --git a/requests/auth.py b/requests/auth.py
index 4529ec7aac..edf4c8dcd7 100644
--- a/requests/auth.py
+++ b/requests/auth.py
@@ -51,7 +51,7 @@ def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
- ]
+ ])
def __ne__(self, other):
return not self == other
@@ -235,7 +235,7 @@ def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
- ]
+ ])
def __ne__(self, other):
return not self == other
| https://api.github.com/repos/psf/requests/pulls/2986 | 2016-01-30T18:56:10Z | 2016-01-30T19:14:20Z | 2016-01-30T19:14:20Z | 2021-09-08T05:00:59Z | 187 | psf/requests | 32,437 | |
FIX: localstack is running inside the docker config flag | diff --git a/localstack/config.py b/localstack/config.py
index ba5f00842cfb1..b8ae3a032b786 100644
--- a/localstack/config.py
+++ b/localstack/config.py
@@ -245,11 +245,28 @@ def ping(host):
def in_docker():
- """ Returns True if running in a docker container, else False """
+ """
+ Returns True if running in a docker container, else False
+ Ref. https://docs.docker.com/config/containers/runmetrics/#control-groups
+ """
if not os.path.exists('/proc/1/cgroup'):
return False
+ try:
+ if any([
+ os.path.exists('/sys/fs/cgroup/memory/docker/'),
+ any(['docker-' in file_names for file_names in os.listdir('/sys/fs/cgroup/memory/system.slice')]),
+ os.path.exists('/sys/fs/cgroup/docker/'),
+ any(['docker-' in file_names for file_names in os.listdir('/sys/fs/cgroup/system.slice/')]),
+ ]):
+ return False
+ except Exception:
+ pass
with open('/proc/1/cgroup', 'rt') as ifh:
- return 'docker' in ifh.read()
+ os_hostname = open('/etc/hostname', 'rt').read().strip()
+ content = ifh.read()
+ if os_hostname in content or 'docker' in content:
+ return True
+ return False
is_in_docker = in_docker()
| Added mechanism to check whether localstack instance is running inside the docker or on the host. It also will support cgroup v2.
Fix for #3675 | https://api.github.com/repos/localstack/localstack/pulls/3717 | 2021-03-13T09:37:03Z | 2021-03-14T11:45:22Z | 2021-03-14T11:45:22Z | 2021-03-14T11:45:22Z | 332 | localstack/localstack | 29,364 |
Use region-agnostic bucket create for Transcribe tests | diff --git a/tests/aws/services/transcribe/test_transcribe.py b/tests/aws/services/transcribe/test_transcribe.py
index 2d9d49ace0176..6f25632c9e81d 100644
--- a/tests/aws/services/transcribe/test_transcribe.py
+++ b/tests/aws/services/transcribe/test_transcribe.py
@@ -241,6 +241,7 @@ def test_transcribe_start_job(
output_bucket,
output_key,
s3_bucket,
+ s3_create_bucket,
cleanups,
snapshot,
aws_client,
@@ -263,7 +264,7 @@ def _cleanup():
if output_bucket is not None:
params["OutputBucketName"] = output_bucket
- aws_client.s3.create_bucket(Bucket=output_bucket)
+ s3_create_bucket(Bucket=output_bucket)
cleanups.append(_cleanup)
if output_key is not None:
params["OutputKey"] = output_key
| ## Motivation
This PR fixes test execution error when a non-default `TEST_AWS_REGION_NAME` is used.
This happens because S3 CreateBucket requires `LocationConstraint` information when any region other than `us-east-1` is used. Our `s3_create_bucket` fixture takes care of such situations.
## Implementation
Just use the `s3_create_bucket` fixture to create the S3 bucket instead of the direct S3 CreateBucket. | https://api.github.com/repos/localstack/localstack/pulls/9318 | 2023-10-09T13:14:20Z | 2023-10-10T09:13:51Z | 2023-10-10T09:13:51Z | 2023-10-10T09:13:55Z | 204 | localstack/localstack | 29,323 |
#921: Try printing alias before trying to fix a command | diff --git a/thefuck/entrypoints/main.py b/thefuck/entrypoints/main.py
index 013468702..865b9ca1a 100644
--- a/thefuck/entrypoints/main.py
+++ b/thefuck/entrypoints/main.py
@@ -22,10 +22,13 @@ def main():
elif known_args.version:
logs.version(get_installation_info().version,
sys.version.split()[0], shell.info())
- elif known_args.command or 'TF_HISTORY' in os.environ:
- fix_command(known_args)
+ # It's important to check if an alias is being requested before checking if
+ # `TF_HISTORY` is in `os.environ`, otherwise it might mess with subshells.
+ # Check https://github.com/nvbn/thefuck/issues/921 for reference
elif known_args.alias:
print_alias(known_args)
+ elif known_args.command or 'TF_HISTORY' in os.environ:
+ fix_command(known_args)
elif known_args.shell_logger:
try:
from .shell_logger import shell_logger # noqa: E402
| The shell functions could be changed in a way to unset `TF_HISTORY` before firing the subprocess. But it would be prone to regressions. So, changing the order of `if` statements and adding a comment should suffice. | https://api.github.com/repos/nvbn/thefuck/pulls/923 | 2019-06-04T18:35:46Z | 2019-06-26T18:01:39Z | 2019-06-26T18:01:39Z | 2019-06-26T18:38:50Z | 244 | nvbn/thefuck | 30,625 |
Add entity name translations to AVM Fritz!SmartHome | diff --git a/homeassistant/components/fritzbox/__init__.py b/homeassistant/components/fritzbox/__init__.py
index 38f0e375e874d5..bd246dd914fc53 100644
--- a/homeassistant/components/fritzbox/__init__.py
+++ b/homeassistant/components/fritzbox/__init__.py
@@ -113,8 +113,8 @@ def __init__(
self.ain = ain
if entity_description is not None:
+ self._attr_has_entity_name = True
self.entity_description = entity_description
- self._attr_name = f"{self.data.name} {entity_description.name}"
self._attr_unique_id = f"{ain}_{entity_description.key}"
else:
self._attr_name = self.data.name
diff --git a/homeassistant/components/fritzbox/binary_sensor.py b/homeassistant/components/fritzbox/binary_sensor.py
index 35fd41240dfc76..f87beb34079c9e 100644
--- a/homeassistant/components/fritzbox/binary_sensor.py
+++ b/homeassistant/components/fritzbox/binary_sensor.py
@@ -40,14 +40,14 @@ class FritzBinarySensorEntityDescription(
BINARY_SENSOR_TYPES: Final[tuple[FritzBinarySensorEntityDescription, ...]] = (
FritzBinarySensorEntityDescription(
key="alarm",
- name="Alarm",
+ translation_key="alarm",
device_class=BinarySensorDeviceClass.WINDOW,
suitable=lambda device: device.has_alarm, # type: ignore[no-any-return]
is_on=lambda device: device.alert_state, # type: ignore[no-any-return]
),
FritzBinarySensorEntityDescription(
key="lock",
- name="Button Lock on Device",
+ translation_key="lock",
device_class=BinarySensorDeviceClass.LOCK,
entity_category=EntityCategory.CONFIG,
suitable=lambda device: device.lock is not None,
@@ -55,7 +55,7 @@ class FritzBinarySensorEntityDescription(
),
FritzBinarySensorEntityDescription(
key="device_lock",
- name="Button Lock via UI",
+ translation_key="device_lock",
device_class=BinarySensorDeviceClass.LOCK,
entity_category=EntityCategory.CONFIG,
suitable=lambda device: device.device_lock is not None,
@@ -87,17 +87,6 @@ class FritzboxBinarySensor(FritzBoxDeviceEntity, BinarySensorEntity):
entity_description: FritzBinarySensorEntityDescription
- def __init__(
- self,
- coordinator: FritzboxDataUpdateCoordinator,
- ain: str,
- entity_description: FritzBinarySensorEntityDescription,
- ) -> None:
- """Initialize the FritzBox entity."""
- super().__init__(coordinator, ain, entity_description)
- self._attr_name = f"{self.data.name} {entity_description.name}"
- self._attr_unique_id = f"{ain}_{entity_description.key}"
-
@property
def is_on(self) -> bool | None:
"""Return true if sensor is on."""
diff --git a/homeassistant/components/fritzbox/sensor.py b/homeassistant/components/fritzbox/sensor.py
index a048a7bba540b9..33b4b8d5152713 100644
--- a/homeassistant/components/fritzbox/sensor.py
+++ b/homeassistant/components/fritzbox/sensor.py
@@ -91,7 +91,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
SENSOR_TYPES: Final[tuple[FritzSensorEntityDescription, ...]] = (
FritzSensorEntityDescription(
key="temperature",
- name="Temperature",
+ translation_key="temperature",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
@@ -101,7 +101,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="humidity",
- name="Humidity",
+ translation_key="humidity",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.HUMIDITY,
state_class=SensorStateClass.MEASUREMENT,
@@ -110,7 +110,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="battery",
- name="Battery",
+ translation_key="battery",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
@@ -119,7 +119,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="power_consumption",
- name="Power Consumption",
+ translation_key="power_consumption",
native_unit_of_measurement=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
@@ -128,7 +128,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="voltage",
- name="Voltage",
+ translation_key="voltage",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
device_class=SensorDeviceClass.VOLTAGE,
state_class=SensorStateClass.MEASUREMENT,
@@ -137,7 +137,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="electric_current",
- name="Electric Current",
+ translation_key="electric_current",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
device_class=SensorDeviceClass.CURRENT,
state_class=SensorStateClass.MEASUREMENT,
@@ -146,7 +146,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="total_energy",
- name="Total Energy",
+ translation_key="total_energy",
native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
@@ -156,7 +156,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
# Thermostat Sensors
FritzSensorEntityDescription(
key="comfort_temperature",
- name="Comfort Temperature",
+ translation_key="comfort_temperature",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
suitable=suitable_comfort_temperature,
@@ -164,7 +164,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="eco_temperature",
- name="Eco Temperature",
+ translation_key="eco_temperature",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
suitable=suitable_eco_temperature,
@@ -172,7 +172,7 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="nextchange_temperature",
- name="Next Scheduled Temperature",
+ translation_key="nextchange_temperature",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
suitable=suitable_nextchange_temperature,
@@ -180,20 +180,20 @@ def value_scheduled_preset(device: FritzhomeDevice) -> str:
),
FritzSensorEntityDescription(
key="nextchange_time",
- name="Next Scheduled Change Time",
+ translation_key="nextchange_time",
device_class=SensorDeviceClass.TIMESTAMP,
suitable=suitable_nextchange_time,
native_value=lambda device: utc_from_timestamp(device.nextchange_endperiod),
),
FritzSensorEntityDescription(
key="nextchange_preset",
- name="Next Scheduled Preset",
+ translation_key="nextchange_preset",
suitable=suitable_nextchange_temperature,
native_value=value_nextchange_preset,
),
FritzSensorEntityDescription(
key="scheduled_preset",
- name="Current Scheduled Preset",
+ translation_key="scheduled_preset",
suitable=suitable_nextchange_temperature,
native_value=value_scheduled_preset,
),
diff --git a/homeassistant/components/fritzbox/strings.json b/homeassistant/components/fritzbox/strings.json
index 738c454e237dcb..0b4becd6ff7e49 100644
--- a/homeassistant/components/fritzbox/strings.json
+++ b/homeassistant/components/fritzbox/strings.json
@@ -36,5 +36,41 @@
"error": {
"invalid_auth": "[%key:common::config_flow::error::invalid_auth%]"
}
+ },
+ "entity": {
+ "binary_sensor": {
+ "alarm": { "name": "Alarm" },
+ "device_lock": { "name": "Button lock via UI" },
+ "lock": { "name": "Button lock on device" }
+ },
+ "sensor": {
+ "battery": {
+ "name": "[%key:component::sensor::entity_component::battery::name%]"
+ },
+ "comfort_temperature": { "name": "Comfort temperature" },
+ "eco_temperature": { "name": "Eco temperature" },
+ "electric_current": {
+ "name": "[%key:component::sensor::entity_component::current::name%]"
+ },
+ "humidity": {
+ "name": "[%key:component::sensor::entity_component::humidity::name%]"
+ },
+ "nextchange_preset": { "name": "Next scheduled preset" },
+ "nextchange_temperature": { "name": "Next scheduled temperature" },
+ "nextchange_time": { "name": "Next scheduled change time" },
+ "power_consumption": {
+ "name": "[%key:component::sensor::entity_component::power::name%]"
+ },
+ "scheduled_preset": { "name": "Current scheduled preset" },
+ "temperature": {
+ "name": "[%key:component::sensor::entity_component::temperature::name%]"
+ },
+ "total_energy": {
+ "name": "[%key:component::sensor::entity_component::energy::name%]"
+ },
+ "voltage": {
+ "name": "[%key:component::sensor::entity_component::voltage::name%]"
+ }
+ }
}
}
diff --git a/tests/components/fritzbox/test_binary_sensor.py b/tests/components/fritzbox/test_binary_sensor.py
index b230194356c077..ac6b702147a805 100644
--- a/tests/components/fritzbox/test_binary_sensor.py
+++ b/tests/components/fritzbox/test_binary_sensor.py
@@ -48,7 +48,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
assert state.state == STATE_OFF
assert (
state.attributes[ATTR_FRIENDLY_NAME]
- == f"{CONF_FAKE_NAME} Button Lock on Device"
+ == f"{CONF_FAKE_NAME} Button lock on device"
)
assert state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK
assert ATTR_STATE_CLASS not in state.attributes
@@ -57,7 +57,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
assert state
assert state.state == STATE_OFF
assert (
- state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Button Lock via UI"
+ state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Button lock via UI"
)
assert state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK
assert ATTR_STATE_CLASS not in state.attributes
diff --git a/tests/components/fritzbox/test_climate.py b/tests/components/fritzbox/test_climate.py
index 06c1c1cdaf1522..edfaf73e3b810a 100644
--- a/tests/components/fritzbox/test_climate.py
+++ b/tests/components/fritzbox/test_climate.py
@@ -85,7 +85,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
assert state
assert state.state == "22.0"
assert (
- state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Comfort Temperature"
+ state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Comfort temperature"
)
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == UnitOfTemperature.CELSIUS
assert ATTR_STATE_CLASS not in state.attributes
@@ -93,7 +93,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
state = hass.states.get(f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_eco_temperature")
assert state
assert state.state == "16.0"
- assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Eco Temperature"
+ assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Eco temperature"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == UnitOfTemperature.CELSIUS
assert ATTR_STATE_CLASS not in state.attributes
@@ -104,7 +104,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
assert state.state == "22.0"
assert (
state.attributes[ATTR_FRIENDLY_NAME]
- == f"{CONF_FAKE_NAME} Next Scheduled Temperature"
+ == f"{CONF_FAKE_NAME} Next scheduled temperature"
)
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == UnitOfTemperature.CELSIUS
assert ATTR_STATE_CLASS not in state.attributes
@@ -116,7 +116,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
assert state.state == "1970-01-01T00:00:00+00:00"
assert (
state.attributes[ATTR_FRIENDLY_NAME]
- == f"{CONF_FAKE_NAME} Next Scheduled Change Time"
+ == f"{CONF_FAKE_NAME} Next scheduled change time"
)
assert ATTR_STATE_CLASS not in state.attributes
@@ -125,7 +125,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
assert state.state == PRESET_COMFORT
assert (
state.attributes[ATTR_FRIENDLY_NAME]
- == f"{CONF_FAKE_NAME} Next Scheduled Preset"
+ == f"{CONF_FAKE_NAME} Next scheduled preset"
)
assert ATTR_STATE_CLASS not in state.attributes
@@ -136,7 +136,7 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
assert state.state == PRESET_ECO
assert (
state.attributes[ATTR_FRIENDLY_NAME]
- == f"{CONF_FAKE_NAME} Current Scheduled Preset"
+ == f"{CONF_FAKE_NAME} Current scheduled preset"
)
assert ATTR_STATE_CLASS not in state.attributes
diff --git a/tests/components/fritzbox/test_switch.py b/tests/components/fritzbox/test_switch.py
index fdc3cbff2a55b5..17c51af9d987fc 100644
--- a/tests/components/fritzbox/test_switch.py
+++ b/tests/components/fritzbox/test_switch.py
@@ -62,16 +62,16 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
SensorStateClass.MEASUREMENT,
],
[
- f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_power_consumption",
+ f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_power",
"5.678",
- f"{CONF_FAKE_NAME} Power Consumption",
+ f"{CONF_FAKE_NAME} Power",
UnitOfPower.WATT,
SensorStateClass.MEASUREMENT,
],
[
- f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_total_energy",
+ f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_energy",
"1.234",
- f"{CONF_FAKE_NAME} Total Energy",
+ f"{CONF_FAKE_NAME} Energy",
UnitOfEnergy.KILO_WATT_HOUR,
SensorStateClass.TOTAL_INCREASING,
],
@@ -83,9 +83,9 @@ async def test_setup(hass: HomeAssistant, fritz: Mock) -> None:
SensorStateClass.MEASUREMENT,
],
[
- f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_electric_current",
+ f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_current",
"0.025",
- f"{CONF_FAKE_NAME} Electric Current",
+ f"{CONF_FAKE_NAME} Current",
UnitOfElectricCurrent.AMPERE,
SensorStateClass.MEASUREMENT,
],
| <!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [x] New feature (which adds functionality to an existing integration)
- [ ] Deprecation (breaking change to happen in the future)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [x] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] I have followed the [perfect PR recommendations][perfect-pr]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [x] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
[perfect-pr]: https://developers.home-assistant.io/docs/review-process/#creating-the-perfect-pr
| https://api.github.com/repos/home-assistant/core/pulls/90707 | 2023-04-03T10:17:15Z | 2023-04-03T17:04:09Z | 2023-04-03T17:04:09Z | 2023-04-04T21:01:54Z | 3,731 | home-assistant/core | 39,077 |
docs: replace redirection with tee command with sudo for file creation | diff --git a/docs/installation/methods.yml b/docs/installation/methods.yml
index 1cc09762b8..290b0e2436 100644
--- a/docs/installation/methods.yml
+++ b/docs/installation/methods.yml
@@ -39,7 +39,7 @@ tools:
install:
- curl -SsL https://packages.httpie.io/deb/KEY.gpg | sudo gpg --dearmor -o /usr/share/keyrings/httpie.gpg
# - curl -SsL -o /etc/apt/sources.list.d/httpie.list https://packages.httpie.io/deb/httpie.list
- - sudo echo "deb [arch=amd64 signed-by=/usr/share/keyrings/httpie.gpg] https://packages.httpie.io/deb ./" > /etc/apt/sources.list.d/httpie.list
+ - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/httpie.gpg] https://packages.httpie.io/deb ./" | sudo tee /etc/apt/sources.list.d/httpie.list > /dev/null
- sudo apt update
- sudo apt install httpie
upgrade:
|
This pull request addresses an issue where using `sudo`-prefixed `echo` does not grant elevated permissions for the subsequent redirection operation, resulting in a failure to write files under `/etc/apt`.
By replacing the redirection with a `sudo`-prefixed `tee` command, this change ensures that files are written with root privileges, effectively resolving the permission issue. This modification is essential for operations requiring write access to protected directories. | https://api.github.com/repos/httpie/cli/pulls/1557 | 2024-02-07T11:58:52Z | 2024-03-04T14:34:57Z | 2024-03-04T14:34:57Z | 2024-03-04T14:35:08Z | 255 | httpie/cli | 33,931 |
bybit: set endTime when since is not empty | diff --git a/ts/src/bybit.ts b/ts/src/bybit.ts
index 3759a09fe3e7..50ac008862df 100644
--- a/ts/src/bybit.ts
+++ b/ts/src/bybit.ts
@@ -2235,12 +2235,15 @@ export default class bybit extends Exchange {
*/
this.checkRequiredSymbol ('fetchFundingRateHistory', symbol);
await this.loadMarkets ();
+ if (limit === undefined) {
+ limit = 200;
+ }
const request = {
// 'category': '', // Product type. linear,inverse
// 'symbol': '', // Symbol name
// 'startTime': 0, // The start timestamp (ms)
// 'endTime': 0, // The end timestamp (ms)
- // 'limit': 0, // Limit for data size per page. [1, 200]. Default: 200
+ 'limit': limit, // Limit for data size per page. [1, 200]. Default: 200
};
const market = this.market (symbol);
symbol = market['symbol'];
@@ -2260,9 +2263,12 @@ export default class bybit extends Exchange {
params = this.omit (params, [ 'endTime', 'till', 'until' ]);
if (endTime !== undefined) {
request['endTime'] = endTime;
- }
- if (limit !== undefined) {
- request['limit'] = limit;
+ } else {
+ if (since !== undefined) {
+ // end time is required when since is not empty
+ const fundingInterval = 60 * 60 * 8 * 1000;
+ request['endTime'] = since + limit * fundingInterval;
+ }
}
const response = await this.publicGetV5MarketFundingHistory (this.extend (request, params));
//
| fix https://github.com/ccxt/ccxt/issues/15990
Bybit will return `Time is invalid` if `startTime` is set and `endTime` is empty. In this PR, I fix this issue.
```
$ node examples/js/cli bybit fetchFundingRateHistory ETH/USDT:USDT 'undefined' 10 '{"endTime":1673063081000}' --test
2023-04-05T09:37:02.032Z iteration 0 passed in 171 ms
symbol | fundingRate | timestamp | datetime
----------------------------------------------------------------------
ETH/USDT:USDT | 0.0001 | 1672790400000 | 2023-01-04T00:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1672819200000 | 2023-01-04T08:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1672848000000 | 2023-01-04T16:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1672876800000 | 2023-01-05T00:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1672905600000 | 2023-01-05T08:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1672934400000 | 2023-01-05T16:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1672963200000 | 2023-01-06T00:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1672992000000 | 2023-01-06T08:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673020800000 | 2023-01-06T16:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673049600000 | 2023-01-07T00:00:00.000Z
$ node examples/js/cli bybit fetchFundingRateHistory ETH/USDT:USDT 1673063081000 10 --test
2023-04-05T09:35:44.501Z iteration 0 passed in 269 ms
symbol | fundingRate | timestamp | datetime
----------------------------------------------------------------------
ETH/USDT:USDT | 0.0001 | 1673078400000 | 2023-01-07T08:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673107200000 | 2023-01-07T16:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673136000000 | 2023-01-08T00:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673164800000 | 2023-01-08T08:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673193600000 | 2023-01-08T16:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673222400000 | 2023-01-09T00:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673251200000 | 2023-01-09T08:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673280000000 | 2023-01-09T16:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673308800000 | 2023-01-10T00:00:00.000Z
ETH/USDT:USDT | 0.0001 | 1673337600000 | 2023-01-10T08:00:00.000Z
``` | https://api.github.com/repos/ccxt/ccxt/pulls/17486 | 2023-04-05T09:38:03Z | 2023-04-06T09:56:16Z | 2023-04-06T09:56:16Z | 2023-04-06T09:56:17Z | 413 | ccxt/ccxt | 13,914 |
Add DINOv2 models with register tokens. Convert pos embed to not overlap with cls token. | diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py
index 1ccb96db4e..0eb66176d0 100644
--- a/timm/models/vision_transformer.py
+++ b/timm/models/vision_transformer.py
@@ -567,7 +567,11 @@ def get_classifier(self):
def reset_classifier(self, num_classes: int, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
- assert global_pool in ('', 'avg', 'token')
+ assert global_pool in ('', 'avg', 'token', 'map')
+ if global_pool == 'map' and self.attn_pool is None:
+ assert False, "Cannot currently add attention pooling in reset_classifier()."
+ elif global_pool != 'map ' and self.attn_pool is not None:
+ self.attn_pool = None # remove attention pooling
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
@@ -937,10 +941,14 @@ def _convert_openai_clip(state_dict, model):
def _convert_dinov2(state_dict, model):
import re
out_dict = {}
+ state_dict.pop("mask_token", None)
+ if 'register_tokens' in state_dict:
+ # convert dinov2 w/ registers to no_embed_class timm model (neither cls or reg tokens overlap pos embed)
+ out_dict['reg_token'] = state_dict.pop('register_tokens')
+ out_dict['cls_token'] = state_dict.pop('cls_token') + state_dict['pos_embed'][:, 0]
+ out_dict['pos_embed'] = state_dict.pop('pos_embed')[:, 1:]
for k, v in state_dict.items():
- if k == "mask_token":
- continue
- elif re.match(r"blocks\.(\d+)\.mlp\.w12\.(?:weight|bias)", k):
+ if re.match(r"blocks\.(\d+)\.mlp\.w12\.(?:weight|bias)", k):
out_dict[k.replace("w12", "fc1")] = v
continue
elif re.match(r"blocks\.(\d+)\.mlp\.w3\.(?:weight|bias)", k):
@@ -1229,6 +1237,32 @@ def _cfg(url='', **kwargs):
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
+ # DINOv2 pretrained w/ registers - https://arxiv.org/abs/2309.16588 (no classifier head, for fine-tune/features only)
+ 'vit_small_patch14_reg4_dinov2.lvd142m': _cfg(
+ url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_pretrain.pth',
+ hf_hub_id='timm/',
+ license='apache-2.0',
+ mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
+ input_size=(3, 518, 518), crop_pct=1.0),
+ 'vit_base_patch14_reg4_dinov2.lvd142m': _cfg(
+ url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_pretrain.pth',
+ hf_hub_id='timm/',
+ license='apache-2.0',
+ mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
+ input_size=(3, 518, 518), crop_pct=1.0),
+ 'vit_large_patch14_reg4_dinov2.lvd142m': _cfg(
+ url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth',
+ hf_hub_id='timm/',
+ license='apache-2.0',
+ mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
+ input_size=(3, 518, 518), crop_pct=1.0),
+ 'vit_giant_patch14_reg4_dinov2.lvd142m': _cfg(
+ url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_pretrain.pth',
+ hf_hub_id='timm/',
+ license='apache-2.0',
+ mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
+ input_size=(3, 518, 518), crop_pct=1.0),
+
# ViT ImageNet-21K-P pretraining by MILL
'vit_base_patch16_224_miil.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth',
@@ -2173,9 +2207,7 @@ def vit_huge_patch14_xp_224(pretrained=False, **kwargs) -> VisionTransformer:
def vit_small_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
""" ViT-S/14 for DINOv2
"""
- model_args = dict(
- patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5, img_size=518,
- )
+ model_args = dict(patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5, img_size=518)
model = _create_vision_transformer(
'vit_small_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@@ -2185,9 +2217,7 @@ def vit_small_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
def vit_base_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
""" ViT-B/14 for DINOv2
"""
- model_args = dict(
- patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5, img_size=518,
- )
+ model_args = dict(patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5, img_size=518)
model = _create_vision_transformer(
'vit_base_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@@ -2197,9 +2227,7 @@ def vit_base_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
def vit_large_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
""" ViT-L/14 for DINOv2
"""
- model_args = dict(
- patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5, img_size=518,
- )
+ model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5, img_size=518)
model = _create_vision_transformer(
'vit_large_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@@ -2209,12 +2237,10 @@ def vit_large_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
def vit_giant_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
""" ViT-G/14 for DINOv2
"""
-
# The hidden_features of SwiGLU is calculated by:
# hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
# When embed_dim=1536, hidden_features=4096
# With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192
-
model_args = dict(
patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5,
mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, img_size=518, act_layer=nn.SiLU
@@ -2224,6 +2250,62 @@ def vit_giant_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
return model
+@register_model
+def vit_small_patch14_reg4_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-S/14 for DINOv2 w/ 4 registers
+ """
+ model_args = dict(
+ patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5,
+ reg_tokens=4, no_embed_class=True,
+ )
+ model = _create_vision_transformer(
+ 'vit_small_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_base_patch14_reg4_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-B/14 for DINOv2 w/ 4 registers
+ """
+ model_args = dict(
+ patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5,
+ reg_tokens=4, no_embed_class=True,
+ )
+ model = _create_vision_transformer(
+ 'vit_base_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_large_patch14_reg4_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-L/14 for DINOv2 w/ 4 registers
+ """
+ model_args = dict(
+ patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5,
+ reg_tokens=4, no_embed_class=True,
+ )
+ model = _create_vision_transformer(
+ 'vit_large_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_giant_patch14_reg4_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-G/14 for DINOv2
+ """
+ # The hidden_features of SwiGLU is calculated by:
+ # hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
+ # When embed_dim=1536, hidden_features=4096
+ # With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192
+ model_args = dict(
+ patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, mlp_ratio=2.66667 * 2,
+ mlp_layer=SwiGLUPacked, act_layer=nn.SiLU, reg_tokens=4, no_embed_class=True,
+ )
+ model = _create_vision_transformer(
+ 'vit_giant_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
@register_model
def vit_base_patch16_siglip_224(pretrained=False, **kwargs) -> VisionTransformer:
model_args = dict(
| https://api.github.com/repos/huggingface/pytorch-image-models/pulls/2014 | 2023-10-30T00:07:08Z | 2023-10-30T06:03:48Z | 2023-10-30T06:03:48Z | 2023-10-30T06:08:09Z | 2,748 | huggingface/pytorch-image-models | 16,399 | |
fix(urllib3): :bug: could not find urllib3 DEFAULT_CIPHERS | diff --git a/httpie/ssl_.py b/httpie/ssl_.py
index b9438543eb..ac02ddb272 100644
--- a/httpie/ssl_.py
+++ b/httpie/ssl_.py
@@ -4,12 +4,50 @@
from httpie.adapters import HTTPAdapter
# noinspection PyPackageRequirements
from urllib3.util.ssl_ import (
- DEFAULT_CIPHERS, create_urllib3_context,
+ create_urllib3_context,
resolve_ssl_version,
)
-DEFAULT_SSL_CIPHERS = DEFAULT_CIPHERS
+# Default ciphers imported from urllib3 as a work around for https://github.com/httpie/httpie/issues/1499
+# Removed from urllib3 in this commit: https://github.com/urllib3/urllib3/commit/e5eac0c
+####################
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs, DSS, and other
+# insecure ciphers for security reasons.
+# - NOTE: TLS 1.3 cipher suites are managed through a different interface
+# not exposed by CPython (yet!) and are enabled by default if they're available.
+DEFAULT_SSL_CIPHERS = ":".join(
+ [
+ "ECDHE+AESGCM",
+ "ECDHE+CHACHA20",
+ "DHE+AESGCM",
+ "DHE+CHACHA20",
+ "ECDH+AESGCM",
+ "DH+AESGCM",
+ "ECDH+AES",
+ "DH+AES",
+ "RSA+AESGCM",
+ "RSA+AES",
+ "!aNULL",
+ "!eNULL",
+ "!MD5",
+ "!DSS",
+ "!AESCCM",
+ ]
+)
SSL_VERSION_ARG_MAPPING = {
'ssl2.3': 'PROTOCOL_SSLv23',
'ssl3': 'PROTOCOL_SSLv3',
| Default ciphers imported from urllib3 as a work around for https://github.com/httpie/httpie/issues/1499
Removed from urllib3 in this commit: https://github.com/urllib3/urllib3/commit/e5eac0c | https://api.github.com/repos/httpie/cli/pulls/1505 | 2023-05-18T17:14:28Z | 2023-05-19T19:18:56Z | 2023-05-19T19:18:56Z | 2023-05-31T09:24:15Z | 574 | httpie/cli | 33,993 |
Update README.md | diff --git a/README.md b/README.md
index 9011b728..f458f1c3 100644
--- a/README.md
+++ b/README.md
@@ -231,6 +231,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
* [Clojush](https://github.com/lspector/Clojush) - The Push programming language and the PushGP genetic programming system implemented in Clojure
* [Infer](https://github.com/aria42/infer) - Inference and machine learning in clojure
* [Clj-ML](https://github.com/antoniogarrote/clj-ml) - A machine learning library for Clojure built on top of Weka and friends
+* [DL4CLJ](https://github.com/engagor/dl4clj/) - Clojure wrapper for Deeplearning4j
* [Encog](https://github.com/jimpil/enclog) - Clojure wrapper for Encog (v3) (Machine-Learning framework that specializes in neural-nets)
* [Fungp](https://github.com/vollmerm/fungp) - A genetic programming library for Clojure
* [Statistiker](https://github.com/clojurewerkz/statistiker) - Basic Machine Learning algorithms in Clojure.
| Added DL4CLJ, a Clojure wrapper for Deeplearning4j | https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/351 | 2017-02-25T00:28:09Z | 2017-02-25T05:53:08Z | 2017-02-25T05:53:08Z | 2017-02-25T05:53:13Z | 301 | josephmisiti/awesome-machine-learning | 52,344 |
Use reference strings in Nexia | diff --git a/homeassistant/components/nexia/strings.json b/homeassistant/components/nexia/strings.json
index dcfb40b898ac65..876ea2d656f11b 100644
--- a/homeassistant/components/nexia/strings.json
+++ b/homeassistant/components/nexia/strings.json
@@ -10,12 +10,12 @@
}
},
"error": {
- "cannot_connect": "Failed to connect, please try again",
- "invalid_auth": "Invalid authentication",
- "unknown": "Unexpected error"
+ "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
+ "invalid_auth": "[%key:common::config_flow::error::invalid_auth%]",
+ "unknown": "[%key:common::config_flow::error::unknown%]"
},
"abort": {
- "already_configured": "This nexia home is already configured"
+ "already_configured": "[%key:common::config_flow::abort::already_configured_device%]"
}
}
-}
\ No newline at end of file
+}
| <!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Breaking change
<!--
If your PR contains a breaking change for existing users, it is important
to tell them what breaks, how to make it work again and why we did this.
This piece of text is published with the release notes, so it helps if you
write it towards our users, not us.
Note: Remove this section if this PR is NOT a breaking change.
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Use reference strings in Nexia
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Example entry for `configuration.yaml`:
<!--
Supplying a configuration snippet, makes it easier for a maintainer to test
your PR. Furthermore, for new integrations, it gives an impression of how
the configuration would look like.
Note: Remove this section if this PR does not have an example entry.
-->
```yaml
# Example configuration.yaml
```
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue: #40578
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-asc+-review%3Aapproved
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
| https://api.github.com/repos/home-assistant/core/pulls/41210 | 2020-10-04T13:15:47Z | 2020-10-04T13:38:52Z | 2020-10-04T13:38:52Z | 2020-10-04T13:38:53Z | 244 | home-assistant/core | 38,930 |
Adding a very rudimentary admin page that displays a table of users | diff --git a/website/src/components/UsersCell.tsx b/website/src/components/UsersCell.tsx
new file mode 100644
index 0000000000..b2b04c83dc
--- /dev/null
+++ b/website/src/components/UsersCell.tsx
@@ -0,0 +1,44 @@
+import { Table, TableCaption, TableContainer, Tbody, Td, Th, Thead, Tr } from "@chakra-ui/react";
+import { useState } from "react";
+import fetcher from "src/lib/fetcher";
+import useSWR from "swr";
+
+/**
+ * Fetches users from the users api route and then presents them in a simple Chakra table.
+ */
+const UsersCell = () => {
+ // Fetch and save the users.
+ const [users, setUsers] = useState([]);
+ const { isLoading } = useSWR("/api/admin/users", fetcher, {
+ onSuccess: setUsers,
+ });
+
+ // Present users in a naive table.
+ return (
+ <TableContainer>
+ <Table variant="simple">
+ <TableCaption>Users</TableCaption>
+ <Thead>
+ <Tr>
+ <Th>Id</Th>
+ <Th>Email</Th>
+ <Th>Name</Th>
+ <Th>Role</Th>
+ </Tr>
+ </Thead>
+ <Tbody>
+ {users.map((user, index) => (
+ <Tr key={index}>
+ <Td>{user.id}</Td>
+ <Td>{user.email}</Td>
+ <Td>{user.name}</Td>
+ <Td>{user.role}</Td>
+ </Tr>
+ ))}
+ </Tbody>
+ </Table>
+ </TableContainer>
+ );
+};
+
+export default UsersCell;
diff --git a/website/src/pages/admin/index.tsx b/website/src/pages/admin/index.tsx
new file mode 100644
index 0000000000..60d61903c0
--- /dev/null
+++ b/website/src/pages/admin/index.tsx
@@ -0,0 +1,49 @@
+import Head from "next/head";
+import { useRouter } from "next/router";
+import { useSession } from "next-auth/react";
+import { useEffect } from "react";
+import { getTransparentHeaderLayout } from "src/components/Layout";
+import UsersCell from "src/components/UsersCell";
+
+/**
+ * Provides the admin index page that will display a list of users and give
+ * admins the ability to manage their access rights.
+ */
+const AdminIndex = () => {
+ const router = useRouter();
+ const { data: session, status } = useSession();
+
+ // Check when the user session is loaded and re-route if the user is not an
+ // admin. This follows the suggestion by NextJS for handling private pages:
+ // https://nextjs.org/docs/api-reference/next/router#usage
+ //
+ // All admin pages should use the same check and routing steps.
+ useEffect(() => {
+ if (status === "loading") {
+ return;
+ }
+ if (session?.user?.role === "admin") {
+ return;
+ }
+ router.push("/");
+ }, [session, status]);
+
+ // Show the final page.
+ // TODO(#237): Display a component that fetches actual user data.
+ return (
+ <>
+ <Head>
+ <title>Open Assistant</title>
+ <meta
+ name="description"
+ content="Conversational AI for everyone. An open source project to create a chat enabled GPT LLM run by LAION and contributors around the world."
+ />
+ </Head>
+ <main className="oa-basic-theme">{status === "loading" ? "loading..." : <UsersCell />}</main>
+ </>
+ );
+};
+
+AdminIndex.getLayout = getTransparentHeaderLayout;
+
+export default AdminIndex;
diff --git a/website/src/pages/api/admin/users.ts b/website/src/pages/api/admin/users.ts
new file mode 100644
index 0000000000..186bb2536c
--- /dev/null
+++ b/website/src/pages/api/admin/users.ts
@@ -0,0 +1,31 @@
+import { getToken } from "next-auth/jwt";
+import client from "src/lib/prismadb";
+
+/**
+ * Returns a list of user results from the database when the requesting user is
+ * a logged in admin.
+ */
+const handler = async (req, res) => {
+ const token = await getToken({ req });
+
+ // Return nothing if the user isn't registered or if the user isn't an admin.
+ if (!token || token.role !== "admin") {
+ res.status(403).end();
+ return;
+ }
+
+ // Fetch 20 users.
+ const users = await client.user.findMany({
+ select: {
+ id: true,
+ role: true,
+ name: true,
+ email: true,
+ },
+ take: 20,
+ });
+
+ res.status(200).json(users);
+};
+
+export default handler;
| This starts #237 with a very basic and minimally styled version. This does not include any management functionality, that will be in a follow up PR. | https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/476 | 2023-01-07T10:14:17Z | 2023-01-07T11:17:02Z | 2023-01-07T11:17:02Z | 2023-01-07T11:17:03Z | 1,171 | LAION-AI/Open-Assistant | 37,168 |
Update README.md | diff --git a/README.md b/README.md
index e0f3994a22553..2973ac4704bbe 100644
--- a/README.md
+++ b/README.md
@@ -243,7 +243,7 @@ defined in [`config.py`](https://github.com/localstack/localstack/blob/master/lo
For example, to dynamically set `KINESIS_ERROR_PROBABILITY=1` at runtime, use the following command:
```
-curl -v -d '{"variable":"KINESIS_ERROR_PROBABILITY","value":1}' 'http://localhost:4568/?_config_'
+curl -v -d '{"variable":"KINESIS_ERROR_PROBABILITY","value":1}' 'http://localhost:4566/?_config_'
```
### Service health checks
@@ -284,7 +284,7 @@ The local directory `/ls_tmp` must contains the three files (server.test.pem, se
You can point your `aws` CLI to use the local infrastructure, for example:
```
-aws --endpoint-url=http://localhost:4568 kinesis list-streams
+aws --endpoint-url=http://localhost:4566 kinesis list-streams
{
"StreamNames": []
}
| Port refers to 4568, but commands run successfully on port 4566. Not sure if typo or version change, or if I have fundamentally misunderstood the purpose of the example commands...
**Please refer to the contribution guidelines in the README when submitting PRs.**
┆Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-342) by [Unito](https://www.unito.io/learn-more)
| https://api.github.com/repos/localstack/localstack/pulls/3265 | 2020-11-22T14:11:17Z | 2020-11-22T23:32:18Z | 2020-11-22T23:32:18Z | 2020-11-22T23:32:19Z | 262 | localstack/localstack | 29,374 |
Simplify url_basename | diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index d5069dcca98..4c7ad89c0b3 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -1092,7 +1092,5 @@ def remove_start(s, start):
def url_basename(url):
- m = re.match(r'(?:https?:|)//[^/]+/(?:[^?#]+/)?([^/?#]+)/?(?:[?#]|$)', url)
- if not m:
- return u''
- return m.group(1)
+ path = compat_urlparse.urlparse(url).path
+ return path.strip(u'/').split(u'/')[-1]
| Use urlparse from the standard library.
I think it does the same, the tests pass. I prefer that to a terrorific regex.
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/1997 | 2013-12-17T13:58:14Z | 2013-12-17T15:08:48Z | 2013-12-17T15:08:48Z | 2013-12-17T15:08:53Z | 162 | ytdl-org/youtube-dl | 49,923 |
Update instances of acme-staging url to acme-staging-v02 (#5734) | diff --git a/certbot/constants.py b/certbot/constants.py
index a6878824b58..9dfc00c6b46 100644
--- a/certbot/constants.py
+++ b/certbot/constants.py
@@ -107,7 +107,7 @@
dns_route53=False
)
-STAGING_URI = "https://acme-staging.api.letsencrypt.org/directory"
+STAGING_URI = "https://acme-staging-v02.api.letsencrypt.org/directory"
# The set of reasons for revoking a certificate is defined in RFC 5280 in
# section 5.3.1. The reasons that users are allowed to submit are restricted to
diff --git a/certbot/tests/storage_test.py b/certbot/tests/storage_test.py
index 6c0970e7202..09c752ebe6b 100644
--- a/certbot/tests/storage_test.py
+++ b/certbot/tests/storage_test.py
@@ -726,7 +726,7 @@ def test_is_test_cert(self):
self.test_rc.configuration["renewalparams"] = {}
rp = self.test_rc.configuration["renewalparams"]
self.assertEqual(self.test_rc.is_test_cert, False)
- rp["server"] = "https://acme-staging.api.letsencrypt.org/directory"
+ rp["server"] = "https://acme-staging-v02.api.letsencrypt.org/directory"
self.assertEqual(self.test_rc.is_test_cert, True)
rp["server"] = "https://staging.someotherca.com/directory"
self.assertEqual(self.test_rc.is_test_cert, True)
diff --git a/certbot/tests/testdata/sample-renewal.conf b/certbot/tests/testdata/sample-renewal.conf
index 52b3ec45cc5..04f9ae8ca20 100644
--- a/certbot/tests/testdata/sample-renewal.conf
+++ b/certbot/tests/testdata/sample-renewal.conf
@@ -61,7 +61,7 @@ chain_path = /home/ubuntu/letsencrypt/chain.pem
break_my_certs = False
standalone = True
manual = False
-server = https://acme-staging.api.letsencrypt.org/directory
+server = https://acme-staging-v02.api.letsencrypt.org/directory
standalone_supported_challenges = "tls-sni-01,http-01"
webroot = False
os_packages_only = False
diff --git a/docs/contributing.rst b/docs/contributing.rst
index 654528e3db5..45cd2e9f2d4 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -43,7 +43,7 @@ each shell where you're working:
.. code-block:: shell
source ./venv/bin/activate
- export SERVER=https://acme-staging.api.letsencrypt.org/directory
+ export SERVER=https://acme-staging-v02.api.letsencrypt.org/directory
source tests/integration/_common.sh
After that, your shell will be using the virtual environment, your copy of
@@ -443,10 +443,10 @@ For squeeze you will need to:
FreeBSD
-------
-Packages can be installed on FreeBSD using ``pkg``,
-or any other port-management tool (``portupgrade``, ``portmanager``, etc.)
-from the pre-built package or can be built and installed from ports.
-Either way will ensure proper installation of all the dependencies required
+Packages can be installed on FreeBSD using ``pkg``,
+or any other port-management tool (``portupgrade``, ``portmanager``, etc.)
+from the pre-built package or can be built and installed from ports.
+Either way will ensure proper installation of all the dependencies required
for the package.
FreeBSD by default uses ``tcsh``. In order to activate virtualenv (see
diff --git a/examples/dev-cli.ini b/examples/dev-cli.ini
index c02038ca1f8..a405a0aefda 100644
--- a/examples/dev-cli.ini
+++ b/examples/dev-cli.ini
@@ -1,5 +1,5 @@
# Always use the staging/testing server - avoids rate limiting
-server = https://acme-staging.api.letsencrypt.org/directory
+server = https://acme-staging-v02.api.letsencrypt.org/directory
# This is an example configuration file for developers
config-dir = /tmp/le/conf
| * update instances of acme-staging url to acme-staging-v02
* keep example client as v1
* keep deactivate script as v1
(cherry picked from commit 5ecb68f2ed41474d65f70d309d2bd05c61fd6faf) | https://api.github.com/repos/certbot/certbot/pulls/5746 | 2018-03-16T23:42:00Z | 2018-03-17T00:11:13Z | 2018-03-17T00:11:13Z | 2018-03-17T00:11:16Z | 969 | certbot/certbot | 3,543 |
Added kanye.rest | diff --git a/README.md b/README.md
index 1a015120dc..1bdf6e31ea 100644
--- a/README.md
+++ b/README.md
@@ -601,6 +601,7 @@ API | Description | Auth | HTTPS | CORS |
| [FavQs.com](https://favqs.com/api) | FavQs allows you to collect, discover and share your favorite quotes | `apiKey` | Yes | Unknown |
| [Forismatic](http://forismatic.com/en/api/) | Inspirational Quotes | No | No | Unknown |
| [icanhazdadjoke](https://icanhazdadjoke.com/api) | The largest selection of dad jokes on the internet | No | Yes | Unknown |
+| [kanye.rest](https://kanye.rest) | REST API for random Kanye West quotes | No | Yes | Yes |
| [Medium](https://github.com/Medium/medium-api-docs) | Community of readers and writers offering unique perspectives on ideas | `OAuth` | Yes | Unknown |
| [Quotes on Design](https://quotesondesign.com/api-v4-0/) | Inspirational Quotes | No | Yes | Unknown |
| [Traitify](https://app.traitify.com/developer) | Assess, collect and analyze Personality | No | Yes | Unknown |
| Thank you for taking the time to work on a Pull Request for this project!
To ensure your PR is dealt with swiftly please check the following:
- [x] Your submissions are formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md)
- [x] Your additions are ordered alphabetically
- [x] Your submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column should be padded with one space on either side
- [x] You have searched the repository for any relevant issues or pull requests
- [x] Any category you are creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
| https://api.github.com/repos/public-apis/public-apis/pulls/901 | 2019-02-27T03:43:07Z | 2019-04-21T14:20:09Z | 2019-04-21T14:20:09Z | 2020-01-25T18:30:16Z | 286 | public-apis/public-apis | 35,561 |
Fixed linting errors | diff --git a/lib/tests/streamlit/metrics_util_test.py b/lib/tests/streamlit/metrics_util_test.py
index 17c2e2684a2e..2e6764da8324 100644
--- a/lib/tests/streamlit/metrics_util_test.py
+++ b/lib/tests/streamlit/metrics_util_test.py
@@ -43,7 +43,8 @@ def test_machine_id_from_etc(self):
"streamlit.metrics_util.os.path.isfile"
) as path_isfile:
- path_isfile = lambda path: path == "/etc/machine-id"
+ def path_isfile(path):
+ return path == "/etc/machine-id"
machine_id = metrics_util._get_machine_id()
self.assertEqual(machine_id, file_data)
@@ -60,7 +61,8 @@ def test_machine_id_from_dbus(self):
"streamlit.metrics_util.os.path.isfile"
) as path_isfile:
- path_isfile = lambda path: path == "/var/lib/dbus/machine-id"
+ def path_isfile(path):
+ return path == "/var/lib/dbus/machine-id"
machine_id = metrics_util._get_machine_id()
self.assertEqual(machine_id, file_data)
@@ -87,7 +89,8 @@ def test_machine_id_v3_from_etc(self):
"streamlit.metrics_util.os.path.isfile"
) as path_isfile:
- path_isfile = lambda path: path == "/etc/machine-id"
+ def path_isfile(path):
+ return path == "/etc/machine-id"
machine_id = metrics_util._get_machine_id_v3()
self.assertEqual(machine_id, file_data)
@@ -104,7 +107,8 @@ def test_machine_id_v3_from_dbus(self):
"streamlit.metrics_util.os.path.isfile"
) as path_isfile:
- path_isfile = lambda path: path == "/var/lib/dbus/machine-id"
+ def path_isfile(path):
+ return path == "/var/lib/dbus/machine-id"
machine_id = metrics_util._get_machine_id_v3()
self.assertEqual(machine_id, file_data)
@@ -121,7 +125,7 @@ def test_machine_id_v3_from_node(self):
@patch("streamlit.metrics_util.file_util.get_streamlit_file_path", mock_get_path)
def test_stable_id_not_exists(self):
- """Test creating a stable id """
+ """Test creating a stable id"""
with patch("streamlit.metrics_util.os.path.exists", return_value=False), patch(
"streamlit.metrics_util.uuid.uuid4", return_value=UUID
@@ -135,7 +139,7 @@ def test_stable_id_not_exists(self):
@patch("streamlit.metrics_util.file_util.get_streamlit_file_path", mock_get_path)
def test_stable_id_exists_and_valid(self):
- """Test getting a stable valid id """
+ """Test getting a stable valid id"""
with patch("streamlit.metrics_util.os.path.exists", return_value=True), patch(
"streamlit.file_util.open", mock_open(read_data=UUID)
@@ -147,7 +151,7 @@ def test_stable_id_exists_and_valid(self):
@patch("streamlit.metrics_util.file_util.get_streamlit_file_path", mock_get_path)
def test_stable_id_exists_and_invalid(self):
- """Test getting a stable invalid id """
+ """Test getting a stable invalid id"""
with patch("streamlit.metrics_util.os.path.exists", return_value=True), patch(
"streamlit.metrics_util.uuid.uuid4", return_value=UUID
| Linting errors have been failing nightly builds and PRs. This is due to an upgrade in the black formatting library. This attempts to fix the linting errors. | https://api.github.com/repos/streamlit/streamlit/pulls/3178 | 2021-04-26T16:55:04Z | 2021-04-26T17:09:06Z | 2021-04-26T17:09:06Z | 2021-07-24T00:37:17Z | 782 | streamlit/streamlit | 22,405 |
Moved VERBS back to cli.py | diff --git a/letsencrypt/cli.py b/letsencrypt/cli.py
index 85a0d1d8a78..662e1a94b37 100644
--- a/letsencrypt/cli.py
+++ b/letsencrypt/cli.py
@@ -629,9 +629,13 @@ class HelpfulArgumentParser(object):
"""
def __init__(self, args, plugins, detect_defaults=False):
-
from letsencrypt import main
- self.VERBS = main.VERBS
+ self.VERBS = {"auth": main.obtain_cert, "certonly": main.obtain_cert,
+ "config_changes": main.config_changes, "run": main.run,
+ "install": main.install, "plugins": main.plugins_cmd,
+ "renew": renew, "revoke": main.revoke,
+ "rollback": main.rollback, "everything": main.run}
+
# List of topics for which additional help can be provided
HELP_TOPICS = ["all", "security", "paths", "automation", "testing"] + list(self.VERBS)
diff --git a/letsencrypt/main.py b/letsencrypt/main.py
index 19636b93e3e..264f7625eba 100644
--- a/letsencrypt/main.py
+++ b/letsencrypt/main.py
@@ -701,15 +701,6 @@ def main(cli_args=sys.argv[1:]):
return config.func(config, plugins)
-# Maps verbs/subcommands to the functions that implement them
-# In principle this should live in cli.HelpfulArgumentParser, but
-# due to issues with import cycles and testing, it lives here
-VERBS = {"auth": obtain_cert, "certonly": obtain_cert,
- "config_changes": config_changes, "everything": run,
- "install": install, "plugins": plugins_cmd, "renew": cli.renew,
- "revoke": revoke, "rollback": rollback, "run": run}
-
-
if __name__ == "__main__":
err_string = main()
if err_string:
diff --git a/letsencrypt/tests/cli_test.py b/letsencrypt/tests/cli_test.py
index c5865206d1b..7b901d41034 100644
--- a/letsencrypt/tests/cli_test.py
+++ b/letsencrypt/tests/cli_test.py
@@ -79,7 +79,7 @@ def _call_stdout(self, args):
return ret, None, stderr, client
def test_no_flags(self):
- with MockedVerb("run") as mock_run:
+ with mock.patch('letsencrypt.main.run') as mock_run:
self._call([])
self.assertEqual(1, mock_run.call_count)
@@ -190,7 +190,7 @@ def test_install_abspath(self):
chain = 'chain'
fullchain = 'fullchain'
- with MockedVerb('install') as mock_install:
+ with mock.patch('letsencrypt.main.install') as mock_install:
self._call(['install', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
@@ -248,7 +248,7 @@ def test_configurator_selection(self, mock_exe_exists):
unused_config, auth, unused_installer = mock_init.call_args[0]
self.assertTrue(isinstance(auth, manual.Authenticator))
- with MockedVerb("certonly") as mock_certonly:
+ with mock.patch('letsencrypt.main.obtain_cert') as mock_certonly:
self._call(["auth", "--standalone"])
self.assertEqual(1, mock_certonly.call_count)
@@ -321,7 +321,7 @@ def test_certonly_abspath(self):
chain = 'chain'
fullchain = 'fullchain'
- with MockedVerb('certonly') as mock_obtaincert:
+ with mock.patch('letsencrypt.main.obtain_cert') as mock_obtaincert:
self._call(['certonly', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
@@ -900,7 +900,7 @@ def test_read_file(self):
self.assertEqual(contents, test_contents)
def test_agree_dev_preview_config(self):
- with MockedVerb('run') as mocked_run:
+ with mock.patch('letsencrypt.main.run') as mocked_run:
self._call(['-c', test_util.vector_path('cli.ini')])
self.assertTrue(mocked_run.called)
@@ -1010,34 +1010,5 @@ def test_find_duplicative_names(self, unused_makedir):
self.assertEqual(result, (None, None))
-class MockedVerb(object):
- """Simple class that can be used for mocking out verbs/subcommands.
-
- Storing a dictionary of verbs and the functions that implement them
- in letsencrypt.cli makes mocking much more complicated. This class
- can be used as a simple context manager for mocking out verbs in CLI
- tests. For example:
-
- with MockedVerb("run") as mock_run:
- self._call([])
- self.assertEqual(1, mock_run.call_count)
-
- """
- def __init__(self, verb_name):
- self.verb_dict = main.VERBS
- self.verb_func = None
- self.verb_name = verb_name
-
- def __enter__(self):
- self.verb_func = self.verb_dict[self.verb_name]
- mocked_func = mock.MagicMock()
- self.verb_dict[self.verb_name] = mocked_func
-
- return mocked_func
-
- def __exit__(self, unused_type, unused_value, unused_trace):
- self.verb_dict[self.verb_name] = self.verb_func
-
-
if __name__ == '__main__':
unittest.main() # pragma: no cover
| After looking at this more closely, I agree with @pde that `VERBS` is much nicer in `cli.py` than `main.py`. This PR moves it back to `cli.py` and incorporates the testing fixes to make that work. By moving `VERBS` creation into `__init__`, `MockedVerb` isn't necessary at all.
| https://api.github.com/repos/certbot/certbot/pulls/2666 | 2016-03-15T03:16:19Z | 2016-03-16T08:49:46Z | 2016-03-16T08:49:46Z | 2016-04-02T19:51:49Z | 1,290 | certbot/certbot | 587 |
gh-68395: Avoid naming conflicts by mangling variable names in Argument Clinic | diff --git a/Lib/test/clinic.test b/Lib/test/clinic.test
index 53e5df5ba872ed..564205274edd73 100644
--- a/Lib/test/clinic.test
+++ b/Lib/test/clinic.test
@@ -4102,3 +4102,172 @@ exit:
static PyObject *
test_paramname_module_impl(PyObject *module, PyObject *mod)
/*[clinic end generated code: output=4a2a849ecbcc8b53 input=afefe259667f13ba]*/
+
+/*[clinic input]
+mangle1
+
+ args: object
+ kwnames: object
+ return_value: object
+ _keywords: object
+ _parser: object
+ argsbuf: object
+ fastargs: object
+ nargs: object
+ noptargs: object
+
+[clinic start generated code]*/
+
+PyDoc_STRVAR(mangle1__doc__,
+"mangle1($module, /, args, kwnames, return_value, _keywords, _parser,\n"
+" argsbuf, fastargs, nargs, noptargs)\n"
+"--\n"
+"\n");
+
+#define MANGLE1_METHODDEF \
+ {"mangle1", _PyCFunction_CAST(mangle1), METH_FASTCALL|METH_KEYWORDS, mangle1__doc__},
+
+static PyObject *
+mangle1_impl(PyObject *module, PyObject *args, PyObject *kwnames,
+ PyObject *return_value, PyObject *_keywords, PyObject *_parser,
+ PyObject *argsbuf, PyObject *fastargs, PyObject *nargs,
+ PyObject *noptargs);
+
+static PyObject *
+mangle1(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
+{
+ PyObject *return_value = NULL;
+ #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
+
+ #define NUM_KEYWORDS 9
+ static struct {
+ PyGC_Head _this_is_not_used;
+ PyObject_VAR_HEAD
+ PyObject *ob_item[NUM_KEYWORDS];
+ } _kwtuple = {
+ .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
+ .ob_item = { &_Py_ID(args), &_Py_ID(kwnames), &_Py_ID(return_value), &_Py_ID(_keywords), &_Py_ID(_parser), &_Py_ID(argsbuf), &_Py_ID(fastargs), &_Py_ID(nargs), &_Py_ID(noptargs), },
+ };
+ #undef NUM_KEYWORDS
+ #define KWTUPLE (&_kwtuple.ob_base.ob_base)
+
+ #else // !Py_BUILD_CORE
+ # define KWTUPLE NULL
+ #endif // !Py_BUILD_CORE
+
+ static const char * const _keywords[] = {"args", "kwnames", "return_value", "_keywords", "_parser", "argsbuf", "fastargs", "nargs", "noptargs", NULL};
+ static _PyArg_Parser _parser = {
+ .keywords = _keywords,
+ .fname = "mangle1",
+ .kwtuple = KWTUPLE,
+ };
+ #undef KWTUPLE
+ PyObject *argsbuf[9];
+ PyObject *__clinic_args;
+ PyObject *__clinic_kwnames;
+ PyObject *__clinic_return_value;
+ PyObject *__clinic__keywords;
+ PyObject *__clinic__parser;
+ PyObject *__clinic_argsbuf;
+ PyObject *__clinic_fastargs;
+ PyObject *__clinic_nargs;
+ PyObject *__clinic_noptargs;
+
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 9, 9, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ __clinic_args = args[0];
+ __clinic_kwnames = args[1];
+ __clinic_return_value = args[2];
+ __clinic__keywords = args[3];
+ __clinic__parser = args[4];
+ __clinic_argsbuf = args[5];
+ __clinic_fastargs = args[6];
+ __clinic_nargs = args[7];
+ __clinic_noptargs = args[8];
+ return_value = mangle1_impl(module, __clinic_args, __clinic_kwnames, __clinic_return_value, __clinic__keywords, __clinic__parser, __clinic_argsbuf, __clinic_fastargs, __clinic_nargs, __clinic_noptargs);
+
+exit:
+ return return_value;
+}
+
+static PyObject *
+mangle1_impl(PyObject *module, PyObject *args, PyObject *kwnames,
+ PyObject *return_value, PyObject *_keywords, PyObject *_parser,
+ PyObject *argsbuf, PyObject *fastargs, PyObject *nargs,
+ PyObject *noptargs)
+/*[clinic end generated code: output=083e5076be9987c3 input=a3ed51bdedf8a3c7]*/
+
+/*[clinic input]
+mangle2
+
+ args: object
+ kwargs: object
+ return_value: object
+
+[clinic start generated code]*/
+
+PyDoc_STRVAR(mangle2__doc__,
+"mangle2($module, /, args, kwargs, return_value)\n"
+"--\n"
+"\n");
+
+#define MANGLE2_METHODDEF \
+ {"mangle2", _PyCFunction_CAST(mangle2), METH_FASTCALL|METH_KEYWORDS, mangle2__doc__},
+
+static PyObject *
+mangle2_impl(PyObject *module, PyObject *args, PyObject *kwargs,
+ PyObject *return_value);
+
+static PyObject *
+mangle2(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
+{
+ PyObject *return_value = NULL;
+ #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
+
+ #define NUM_KEYWORDS 3
+ static struct {
+ PyGC_Head _this_is_not_used;
+ PyObject_VAR_HEAD
+ PyObject *ob_item[NUM_KEYWORDS];
+ } _kwtuple = {
+ .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
+ .ob_item = { &_Py_ID(args), &_Py_ID(kwargs), &_Py_ID(return_value), },
+ };
+ #undef NUM_KEYWORDS
+ #define KWTUPLE (&_kwtuple.ob_base.ob_base)
+
+ #else // !Py_BUILD_CORE
+ # define KWTUPLE NULL
+ #endif // !Py_BUILD_CORE
+
+ static const char * const _keywords[] = {"args", "kwargs", "return_value", NULL};
+ static _PyArg_Parser _parser = {
+ .keywords = _keywords,
+ .fname = "mangle2",
+ .kwtuple = KWTUPLE,
+ };
+ #undef KWTUPLE
+ PyObject *argsbuf[3];
+ PyObject *__clinic_args;
+ PyObject *__clinic_kwargs;
+ PyObject *__clinic_return_value;
+
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 3, 3, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ __clinic_args = args[0];
+ __clinic_kwargs = args[1];
+ __clinic_return_value = args[2];
+ return_value = mangle2_impl(module, __clinic_args, __clinic_kwargs, __clinic_return_value);
+
+exit:
+ return return_value;
+}
+
+static PyObject *
+mangle2_impl(PyObject *module, PyObject *args, PyObject *kwargs,
+ PyObject *return_value)
+/*[clinic end generated code: output=2ebb62aaefe7590a input=391766fee51bad7a]*/
diff --git a/Tools/clinic/clinic.py b/Tools/clinic/clinic.py
index b8b2b75c749152..e192821029d3f6 100755
--- a/Tools/clinic/clinic.py
+++ b/Tools/clinic/clinic.py
@@ -43,7 +43,18 @@
NO_VARARG = "PY_SSIZE_T_MAX"
CLINIC_PREFIX = "__clinic_"
-CLINIC_PREFIXED_ARGS = {"args"}
+CLINIC_PREFIXED_ARGS = {
+ "_keywords",
+ "_parser",
+ "args",
+ "argsbuf",
+ "fastargs",
+ "kwargs",
+ "kwnames",
+ "nargs",
+ "noptargs",
+ "return_value",
+}
class Unspecified:
def __repr__(self):
| Add all internally used variable names to CLINIC_PREFIXED_ARGS.
<!-- gh-issue-number: gh-68395 -->
* Issue: gh-68395
<!-- /gh-issue-number -->
| https://api.github.com/repos/python/cpython/pulls/104065 | 2023-05-01T21:45:15Z | 2023-05-05T11:40:19Z | 2023-05-05T11:40:19Z | 2023-05-05T11:40:23Z | 1,952 | python/cpython | 4,637 |
[2.16] fix installing roles with symlinks containing '..' (#82165) | diff --git a/changelogs/fragments/ansible-galaxy-role-install-symlink.yml b/changelogs/fragments/ansible-galaxy-role-install-symlink.yml
new file mode 100644
index 00000000000000..856c501455c07a
--- /dev/null
+++ b/changelogs/fragments/ansible-galaxy-role-install-symlink.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - ansible-galaxy role install - normalize tarfile paths and symlinks using ``ansible.utils.path.unfrackpath`` and consider them valid as long as the realpath is in the tarfile's role directory (https://github.com/ansible/ansible/issues/81965).
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index 2354ef7c3a11e8..e7c5e0122d1ad2 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -42,6 +42,7 @@
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
+from ansible.utils.path import is_subpath, unfrackpath
display = Display()
@@ -393,43 +394,41 @@ def install(self):
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop any containing directory, as mentioned above
- if member.isreg() or member.issym():
- for attr in ('name', 'linkname'):
- attr_value = getattr(member, attr, None)
- if not attr_value:
- continue
- n_attr_value = to_native(attr_value)
- n_archive_parent_dir = to_native(archive_parent_dir)
- n_parts = n_attr_value.replace(n_archive_parent_dir, "", 1).split(os.sep)
- n_final_parts = []
- for n_part in n_parts:
- # TODO if the condition triggers it produces a broken installation.
- # It will create the parent directory as an empty file and will
- # explode if the directory contains valid files.
- # Leaving this as is since the whole module needs a rewrite.
- #
- # Check if we have any files with illegal names,
- # and display a warning if so. This could help users
- # to debug a broken installation.
- if not n_part:
- continue
- if n_part == '..':
- display.warning(f"Illegal filename '{n_part}': '..' is not allowed")
- continue
- if n_part.startswith('~'):
- display.warning(f"Illegal filename '{n_part}': names cannot start with '~'")
- continue
- if '$' in n_part:
- display.warning(f"Illegal filename '{n_part}': names cannot contain '$'")
- continue
- n_final_parts.append(n_part)
- setattr(member, attr, os.path.join(*n_final_parts))
-
- if _check_working_data_filter():
- # deprecated: description='extract fallback without filter' python_version='3.11'
- role_tar_file.extract(member, to_native(self.path), filter='data') # type: ignore[call-arg]
+ if not (member.isreg() or member.issym()):
+ continue
+
+ for attr in ('name', 'linkname'):
+ if not (attr_value := getattr(member, attr, None)):
+ continue
+
+ if attr_value.startswith(os.sep) and not is_subpath(attr_value, archive_parent_dir):
+ err = f"Invalid {attr} for tarfile member: path {attr_value} is not a subpath of the role {archive_parent_dir}"
+ raise AnsibleError(err)
+
+ if attr == 'linkname':
+ # Symlinks are relative to the link
+ relative_to_archive_dir = os.path.dirname(getattr(member, 'name', ''))
+ archive_dir_path = os.path.join(archive_parent_dir, relative_to_archive_dir, attr_value)
else:
- role_tar_file.extract(member, to_native(self.path))
+ # Normalize paths that start with the archive dir
+ attr_value = attr_value.replace(archive_parent_dir, "", 1)
+ attr_value = os.path.join(*attr_value.split(os.sep)) # remove leading os.sep
+ archive_dir_path = os.path.join(archive_parent_dir, attr_value)
+
+ resolved_archive = unfrackpath(archive_parent_dir)
+ resolved_path = unfrackpath(archive_dir_path)
+ if not is_subpath(resolved_path, resolved_archive):
+ err = f"Invalid {attr} for tarfile member: path {resolved_path} is not a subpath of the role {resolved_archive}"
+ raise AnsibleError(err)
+
+ relative_path = os.path.join(*resolved_path.replace(resolved_archive, "", 1).split(os.sep)) or '.'
+ setattr(member, attr, relative_path)
+
+ if _check_working_data_filter():
+ # deprecated: description='extract fallback without filter' python_version='3.11'
+ role_tar_file.extract(member, to_native(self.path), filter='data') # type: ignore[call-arg]
+ else:
+ role_tar_file.extract(member, to_native(self.path))
# write out the install info file for later use
self._write_galaxy_install_info()
diff --git a/test/integration/targets/ansible-galaxy-role/files/create-role-archive.py b/test/integration/targets/ansible-galaxy-role/files/create-role-archive.py
index cfd908c17b2eb5..487666381fe4d0 100755
--- a/test/integration/targets/ansible-galaxy-role/files/create-role-archive.py
+++ b/test/integration/targets/ansible-galaxy-role/files/create-role-archive.py
@@ -2,6 +2,7 @@
"""Create a role archive which overwrites an arbitrary file."""
import argparse
+import os
import pathlib
import tarfile
import tempfile
@@ -18,6 +19,15 @@ def main() -> None:
create_archive(args.archive, args.content, args.target)
+def generate_files_from_path(path):
+ if os.path.isdir(path):
+ for subpath in os.listdir(path):
+ _path = os.path.join(path, subpath)
+ yield from generate_files_from_path(_path)
+ elif os.path.isfile(path):
+ yield pathlib.Path(path)
+
+
def create_archive(archive_path: pathlib.Path, content_path: pathlib.Path, target_path: pathlib.Path) -> None:
with (
tarfile.open(name=archive_path, mode='w') as role_archive,
@@ -35,10 +45,15 @@ def create_archive(archive_path: pathlib.Path, content_path: pathlib.Path, targe
role_archive.add(meta_main_path)
role_archive.add(symlink_path)
- content_tarinfo = role_archive.gettarinfo(content_path, str(symlink_path))
+ for path in generate_files_from_path(content_path):
+ if path == content_path:
+ arcname = str(symlink_path)
+ else:
+ arcname = os.path.join(temp_dir_path, path)
- with content_path.open('rb') as content_file:
- role_archive.addfile(content_tarinfo, content_file)
+ content_tarinfo = role_archive.gettarinfo(path, arcname)
+ with path.open('rb') as file_content:
+ role_archive.addfile(content_tarinfo, file_content)
if __name__ == '__main__':
diff --git a/test/integration/targets/ansible-galaxy-role/tasks/dir-traversal.yml b/test/integration/targets/ansible-galaxy-role/tasks/dir-traversal.yml
index c70e899879f87a..1c17daf7dd4eaf 100644
--- a/test/integration/targets/ansible-galaxy-role/tasks/dir-traversal.yml
+++ b/test/integration/targets/ansible-galaxy-role/tasks/dir-traversal.yml
@@ -23,6 +23,9 @@
command:
cmd: ansible-galaxy role install --roles-path '{{ remote_tmp_dir }}/dir-traversal/roles' dangerous.tar
chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ environment:
+ ANSIBLE_NOCOLOR: True
+ ANSIBLE_FORCE_COLOR: False
ignore_errors: true
register: galaxy_install_dangerous
@@ -42,3 +45,86 @@
- dangerous_overwrite_content.content|default('')|b64decode == ''
- not dangerous_overwrite_stat.stat.exists
- galaxy_install_dangerous is failed
+ - "'is not a subpath of the role' in (galaxy_install_dangerous.stderr | regex_replace('\n', ' '))"
+
+- name: remove tarfile for next test
+ file:
+ path: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ remote_tmp_dir }}/dir-traversal/source/dangerous.tar'
+ - '{{ remote_tmp_dir }}/dir-traversal/roles/dangerous.tar'
+
+- name: build dangerous dir traversal role that includes .. in the symlink path
+ script:
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ cmd: create-role-archive.py dangerous.tar content.txt {{ remote_tmp_dir }}/dir-traversal/source/../target/target-file-to-overwrite.txt
+ executable: '{{ ansible_playbook_python }}'
+
+- name: install dangerous role
+ command:
+ cmd: 'ansible-galaxy role install --roles-path {{ remote_tmp_dir }}/dir-traversal/roles dangerous.tar'
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ environment:
+ ANSIBLE_NOCOLOR: True
+ ANSIBLE_FORCE_COLOR: False
+ ignore_errors: true
+ register: galaxy_install_dangerous
+
+- name: check for overwritten file
+ stat:
+ path: '{{ remote_tmp_dir }}/dir-traversal/target/target-file-to-overwrite.txt'
+ register: dangerous_overwrite_stat
+
+- name: get overwritten content
+ slurp:
+ path: '{{ remote_tmp_dir }}/dir-traversal/target/target-file-to-overwrite.txt'
+ register: dangerous_overwrite_content
+ when: dangerous_overwrite_stat.stat.exists
+
+- assert:
+ that:
+ - dangerous_overwrite_content.content|default('')|b64decode == ''
+ - not dangerous_overwrite_stat.stat.exists
+ - galaxy_install_dangerous is failed
+ - "'is not a subpath of the role' in (galaxy_install_dangerous.stderr | regex_replace('\n', ' '))"
+
+- name: remove tarfile for next test
+ file:
+ path: '{{ remote_tmp_dir }}/dir-traversal/source/dangerous.tar'
+ state: absent
+
+- name: build dangerous dir traversal role that includes .. in the relative symlink path
+ script:
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ cmd: create-role-archive.py dangerous_rel.tar content.txt ../context.txt
+
+- name: install dangerous role with relative symlink
+ command:
+ cmd: 'ansible-galaxy role install --roles-path {{ remote_tmp_dir }}/dir-traversal/roles dangerous_rel.tar'
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ environment:
+ ANSIBLE_NOCOLOR: True
+ ANSIBLE_FORCE_COLOR: False
+ ignore_errors: true
+ register: galaxy_install_dangerous
+
+- name: check for symlink outside role
+ stat:
+ path: "{{ remote_tmp_dir | realpath }}/dir-traversal/roles/symlink"
+ register: symlink_outside_role
+
+- assert:
+ that:
+ - not symlink_outside_role.stat.exists
+ - galaxy_install_dangerous is failed
+ - "'is not a subpath of the role' in (galaxy_install_dangerous.stderr | regex_replace('\n', ' '))"
+
+- name: remove test directories
+ file:
+ path: '{{ remote_tmp_dir }}/dir-traversal/{{ item }}'
+ state: absent
+ loop:
+ - source
+ - target
+ - roles
diff --git a/test/integration/targets/ansible-galaxy-role/tasks/main.yml b/test/integration/targets/ansible-galaxy-role/tasks/main.yml
index e94176d450de8a..5f88a557652e6c 100644
--- a/test/integration/targets/ansible-galaxy-role/tasks/main.yml
+++ b/test/integration/targets/ansible-galaxy-role/tasks/main.yml
@@ -25,14 +25,17 @@
- name: Valid role archive
command: "tar cf {{ remote_tmp_dir }}/valid-role.tar {{ remote_tmp_dir }}/role.d"
-- name: Invalid file
- copy:
- content: ""
+- name: Add invalid symlink
+ file:
+ state: link
+ src: "~/invalid"
dest: "{{ remote_tmp_dir }}/role.d/tasks/~invalid.yml"
+ force: yes
-- name: Invalid file
- copy:
- content: ""
+- name: Add another invalid symlink
+ file:
+ state: link
+ src: "/"
dest: "{{ remote_tmp_dir }}/role.d/tasks/invalid$name.yml"
- name: Valid requirements file
@@ -66,3 +69,4 @@
command: ansible-galaxy role remove invalid-testrole
- import_tasks: dir-traversal.yml
+- import_tasks: valid-role-symlinks.yml
diff --git a/test/integration/targets/ansible-galaxy-role/tasks/valid-role-symlinks.yml b/test/integration/targets/ansible-galaxy-role/tasks/valid-role-symlinks.yml
new file mode 100644
index 00000000000000..8a60b2efcc803d
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-role/tasks/valid-role-symlinks.yml
@@ -0,0 +1,78 @@
+- name: create test directories
+ file:
+ path: '{{ remote_tmp_dir }}/dir-traversal/{{ item }}'
+ state: directory
+ loop:
+ - source
+ - target
+ - roles
+
+- name: create subdir in the role content to test relative symlinks
+ file:
+ dest: '{{ remote_tmp_dir }}/dir-traversal/source/role_subdir'
+ state: directory
+
+- copy:
+ dest: '{{ remote_tmp_dir }}/dir-traversal/source/role_subdir/.keep'
+ content: ''
+
+- set_fact:
+ installed_roles: "{{ remote_tmp_dir | realpath }}/dir-traversal/roles"
+
+- name: build role with symlink to a directory in the role
+ script:
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ cmd: create-role-archive.py safe-link-dir.tar ./ role_subdir/..
+ executable: '{{ ansible_playbook_python }}'
+
+- name: install role successfully
+ command:
+ cmd: 'ansible-galaxy role install --roles-path {{ remote_tmp_dir }}/dir-traversal/roles safe-link-dir.tar'
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ register: galaxy_install_ok
+
+- name: check for the directory symlink in the role
+ stat:
+ path: "{{ installed_roles }}/safe-link-dir.tar/symlink"
+ register: symlink_in_role
+
+- assert:
+ that:
+ - symlink_in_role.stat.exists
+ - symlink_in_role.stat.lnk_source == installed_roles + '/safe-link-dir.tar'
+
+- name: remove tarfile for next test
+ file:
+ path: '{{ remote_tmp_dir }}/dir-traversal/source/safe-link-dir.tar'
+ state: absent
+
+- name: build role with safe relative symlink
+ script:
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ cmd: create-role-archive.py safe.tar ./ role_subdir/../context.txt
+ executable: '{{ ansible_playbook_python }}'
+
+- name: install role successfully
+ command:
+ cmd: 'ansible-galaxy role install --roles-path {{ remote_tmp_dir }}/dir-traversal/roles safe.tar'
+ chdir: '{{ remote_tmp_dir }}/dir-traversal/source'
+ register: galaxy_install_ok
+
+- name: check for symlink in role
+ stat:
+ path: "{{ installed_roles }}/safe.tar/symlink"
+ register: symlink_in_role
+
+- assert:
+ that:
+ - symlink_in_role.stat.exists
+ - symlink_in_role.stat.lnk_source == installed_roles + '/safe.tar/context.txt'
+
+- name: remove test directories
+ file:
+ path: '{{ remote_tmp_dir }}/dir-traversal/{{ item }}'
+ state: absent
+ loop:
+ - source
+ - target
+ - roles
| ##### SUMMARY
Backporting #82165
Set the tarfile attribute to a normalized value from unfrackpath instead of validating path parts and omitting potentially invalid parts
Allow tarfile paths/links containing '..', '$', '~' as long as the normalized realpath is in the tarfile's role directory
(cherry picked from commit 3a42a0036875c8cab6a62ab9ea67a365e1dd4781)
##### ISSUE TYPE
- Bugfix Pull Request
| https://api.github.com/repos/ansible/ansible/pulls/82323 | 2023-11-30T23:14:41Z | 2024-01-18T23:24:24Z | 2024-01-18T23:24:24Z | 2024-02-15T14:00:09Z | 3,744 | ansible/ansible | 49,655 |
Parameterized cache decorator metrics names | diff --git a/lib/streamlit/runtime/caching/__init__.py b/lib/streamlit/runtime/caching/__init__.py
index 1d37ec2aa582..f72f730ef9c8 100644
--- a/lib/streamlit/runtime/caching/__init__.py
+++ b/lib/streamlit/runtime/caching/__init__.py
@@ -106,18 +106,18 @@ def suppress_cached_st_function_warning() -> Iterator[None]:
)
# Create and export public API singletons.
-cache_data = CacheDataAPI()
-cache_resource = CacheResourceAPI()
+cache_data = CacheDataAPI(decorator_metric_name="cache_data")
+cache_resource = CacheResourceAPI(decorator_metric_name="cache_resource")
# Deprecated singletons
experimental_memo = deprecate_obj_name(
- cache_data,
+ CacheDataAPI(decorator_metric_name="experimental_memo"),
old_name="experimental_memo",
new_name="cache_data",
removal_date="2023.04.01",
)
experimental_singleton = deprecate_obj_name(
- cache_resource,
+ CacheResourceAPI(decorator_metric_name="experimental_singleton"),
old_name="experimental_singleton",
new_name="cache_resource",
removal_date="2023.04.01",
diff --git a/lib/streamlit/runtime/caching/cache_data_api.py b/lib/streamlit/runtime/caching/cache_data_api.py
index b1baa0ab614b..a4d12f6a2d69 100644
--- a/lib/streamlit/runtime/caching/cache_data_api.py
+++ b/lib/streamlit/runtime/caching/cache_data_api.py
@@ -211,6 +211,21 @@ class CacheDataAPI:
st.cache_data.clear().
"""
+ def __init__(self, decorator_metric_name: str):
+ """Create a CacheDataAPI instance.
+
+ Parameters
+ ----------
+ decorator_metric_name
+ The metric name to record for decorator usage. `@st.experimental_memo` is
+ deprecated, but we're still supporting it and tracking its usage separately
+ from `@st.cache_data`.
+ """
+
+ # Parameterize the decorator metric name.
+ # (Ignore spurious mypy complaints - https://github.com/python/mypy/issues/2427)
+ self._decorator = gather_metrics(decorator_metric_name, self._decorator) # type: ignore
+
# Type-annotate the decorator function.
# (See https://mypy.readthedocs.io/en/stable/generics.html#decorator-factories)
F = TypeVar("F", bound=Callable[..., Any])
@@ -234,10 +249,6 @@ def __call__(
) -> Callable[[F], F]:
...
- # __call__ should be a static method, but there's a mypy bug that
- # breaks type checking for overloaded static functions:
- # https://github.com/python/mypy/issues/7781
- @gather_metrics("cache_data")
def __call__(
self,
func: F | None = None,
@@ -248,6 +259,27 @@ def __call__(
max_entries: int | None = None,
ttl: float | timedelta | None = None,
experimental_allow_widgets: bool = False,
+ ):
+ return self._decorator(
+ func,
+ persist=persist,
+ show_spinner=show_spinner,
+ suppress_st_warning=suppress_st_warning,
+ max_entries=max_entries,
+ ttl=ttl,
+ experimental_allow_widgets=experimental_allow_widgets,
+ )
+
+ @staticmethod
+ def _decorator(
+ func: F | None = None,
+ *,
+ persist: str | None = None,
+ show_spinner: bool | str = True,
+ suppress_st_warning: bool = False,
+ max_entries: int | None = None,
+ ttl: float | timedelta | None = None,
+ experimental_allow_widgets: bool = False,
):
"""Function decorator to cache function executions.
diff --git a/lib/streamlit/runtime/caching/cache_resource_api.py b/lib/streamlit/runtime/caching/cache_resource_api.py
index f3f5c4baba9b..95f5e5e54ca2 100644
--- a/lib/streamlit/runtime/caching/cache_resource_api.py
+++ b/lib/streamlit/runtime/caching/cache_resource_api.py
@@ -136,6 +136,21 @@ class CacheResourceAPI:
and st.cache_resource.clear().
"""
+ def __init__(self, decorator_metric_name: str):
+ """Create a CacheResourceAPI instance.
+
+ Parameters
+ ----------
+ decorator_metric_name
+ The metric name to record for decorator usage. `@st.experimental_singleton` is
+ deprecated, but we're still supporting it and tracking its usage separately
+ from `@st.cache_resource`.
+ """
+
+ # Parameterize the decorator metric name.
+ # (Ignore spurious mypy complaints - https://github.com/python/mypy/issues/2427)
+ self._decorator = gather_metrics(decorator_metric_name, self._decorator) # type: ignore
+
# Type-annotate the decorator function.
# (See https://mypy.readthedocs.io/en/stable/generics.html#decorator-factories)
@@ -157,10 +172,6 @@ def __call__(
) -> Callable[[F], F]:
...
- # __call__ should be a static method, but there's a mypy bug that
- # breaks type checking for overloaded static functions:
- # https://github.com/python/mypy/issues/7781
- @gather_metrics("cache_resource")
def __call__(
self,
func: F | None = None,
@@ -168,6 +179,21 @@ def __call__(
show_spinner: bool | str = True,
suppress_st_warning=False,
experimental_allow_widgets: bool = False,
+ ):
+ return self._decorator(
+ func,
+ show_spinner=show_spinner,
+ suppress_st_warning=suppress_st_warning,
+ experimental_allow_widgets=experimental_allow_widgets,
+ )
+
+ @staticmethod
+ def _decorator(
+ func: F | None = None,
+ *,
+ show_spinner: bool | str = True,
+ suppress_st_warning=False,
+ experimental_allow_widgets: bool = False,
):
"""Function decorator to store cached resources.
diff --git a/lib/tests/streamlit/runtime/caching/cache_data_api_test.py b/lib/tests/streamlit/runtime/caching/cache_data_api_test.py
index 93d3ec331dbb..494a71372193 100644
--- a/lib/tests/streamlit/runtime/caching/cache_data_api_test.py
+++ b/lib/tests/streamlit/runtime/caching/cache_data_api_test.py
@@ -215,6 +215,28 @@ def bar(x):
self.assertEqual([0, 0, 0], foo_vals)
self.assertEqual([0, 0], bar_vals)
+ def test_multiple_api_names(self):
+ """`st.experimental_memo` is effectively an alias for `st.cache_data`, and we
+ support both APIs while experimental_memo is being deprecated.
+ """
+ num_calls = [0]
+
+ def foo():
+ num_calls[0] += 1
+ return 42
+
+ # Annotate a function with both `cache_data` and `experimental_memo`.
+ cache_data_func = st.cache_data(foo)
+ memo_func = st.experimental_memo(foo)
+
+ # Call both versions of the function and assert the results.
+ self.assertEqual(42, cache_data_func())
+ self.assertEqual(42, memo_func())
+
+ # Because these decorators share the same cache, calling both functions
+ # results in just a single call to the decorated function.
+ self.assertEqual(1, num_calls[0])
+
class CacheDataPersistTest(DeltaGeneratorTestCase):
"""st.cache_data disk persistence tests"""
diff --git a/lib/tests/streamlit/runtime/caching/cache_resource_api_test.py b/lib/tests/streamlit/runtime/caching/cache_resource_api_test.py
index 3fc74580fa59..532e9fe5ff46 100644
--- a/lib/tests/streamlit/runtime/caching/cache_resource_api_test.py
+++ b/lib/tests/streamlit/runtime/caching/cache_resource_api_test.py
@@ -72,6 +72,28 @@ def f():
self.assertEqual(r1, [1, 1])
self.assertEqual(r2, [1, 1])
+ def test_multiple_api_names(self):
+ """`st.experimental_singleton` is effectively an alias for `st.cache_resource`, and we
+ support both APIs while experimental_singleton is being deprecated.
+ """
+ num_calls = [0]
+
+ def foo():
+ num_calls[0] += 1
+ return 42
+
+ # Annotate a function with both `cache_resource` and `experimental_singleton`.
+ cache_resource_func = st.cache_resource(foo)
+ memo_func = st.experimental_singleton(foo)
+
+ # Call both versions of the function and assert the results.
+ self.assertEqual(42, cache_resource_func())
+ self.assertEqual(42, memo_func())
+
+ # Because these decorators share the same cache, calling both functions
+ # results in just a single call to the decorated function.
+ self.assertEqual(1, num_calls[0])
+
class CacheResourceStatsProviderTest(unittest.TestCase):
def setUp(self):
diff --git a/lib/tests/streamlit/runtime/metrics_util_test.py b/lib/tests/streamlit/runtime/metrics_util_test.py
index 049e7d19805b..58766d29fab8 100644
--- a/lib/tests/streamlit/runtime/metrics_util_test.py
+++ b/lib/tests/streamlit/runtime/metrics_util_test.py
@@ -252,8 +252,6 @@ def test_public_api_commands(self):
"empty",
"progress",
"get_option",
- "experimental_singleton",
- "experimental_memo",
}
# Create a list of all public API names in the `st` module (minus
| - The data team wants to track metrics on both the deprecated cache decorator names (`@st.experimental_memo`, `@st.experimental_singleton`) AND the new decorator names (`@st.cache_data`, `@st.cache_resource`)
- This PR parameterizes `CacheDataAPI` and `CacheResourceAPI` with the decorator metric name, to achieve the above.
- This means that we now have _two_ instances of both `CacheDataAPI` and `CacheResourceAPI` (one instance for each supported name). The underlying cache storage remains the same (that is, both `CacheDataAPI` instances share the same on-disk and in-memory cache storage; and the same is true of `CacheResourceAPI`).
Tests:
- `metrics_util_test.test_public_api_commands` no longer ignores st.experimental_memo/singleton, so these are now properly tracked as their own standalone metrics-gathering APIs
- `cache_data_api_test` and `cache_resource_api_test` now include tests asserting that decorator variants share the same storage. | https://api.github.com/repos/streamlit/streamlit/pulls/5818 | 2022-12-05T23:48:46Z | 2022-12-06T17:32:46Z | 2022-12-06T17:32:46Z | 2022-12-06T17:34:32Z | 2,215 | streamlit/streamlit | 21,852 |
Add support for Phind-CodeLlama models (#2415) | diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index fcf882c5c4..496bba7db0 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -930,6 +930,19 @@ def get_conv_template(name: str) -> Conversation:
)
)
+# Phind template
+register_conv_template(
+ Conversation(
+ name="phind",
+ system_message="### System Prompt\nYou are an intelligent programming assistant.",
+ roles=("### User Message", "### Assistant"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+ sep="\n\n",
+ )
+)
+
if __name__ == "__main__":
print("Vicuna template:")
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 296b53c8f3..30d00c40b4 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1593,6 +1593,16 @@ def get_default_conv_template(self, model_path: str) -> Conversation:
return get_conv_template("llama-2")
+class PhindCodeLlamaAdapter(CodeLlamaAdapter):
+ """The model adapter for Phind Code Llama"""
+
+ def match(self, model_path: str):
+ return "phind-codellama-" in model_path.lower()
+
+ def get_default_conv_template(self, model_path: str) -> Conversation:
+ return get_conv_template("phind")
+
+
# Note: the registration order matters.
# The one registered earlier has a higher matching priority.
register_model_adapter(PeftModelAdapter)
@@ -1650,6 +1660,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation:
register_model_adapter(VigogneChatAdapter)
register_model_adapter(OpenLLaMaOpenInstructAdapter)
register_model_adapter(ReaLMAdapter)
+register_model_adapter(PhindCodeLlamaAdapter)
register_model_adapter(CodeLlamaAdapter)
# After all adapters, try the default base adapter.
| <!-- Thank you for your contribution! -->
<!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. -->
## Why are these changes needed?
<!-- Please give a short summary of the change and the problem this solves. -->
Phind-Codellama was trained on a different instruction template than Code Llama.
The rest is the same.
So we add the conversation template and inherit from the model adapter of Code Llama to override `get_default_conv_template`.
## Related issue number (if applicable)
<!-- For example: "Closes #1234" -->
Closes #2415.
## Checks
- [x] I've run `format.sh` to lint the changes in this PR.
- [x] I've included any doc changes needed.
- [x] I've made sure the relevant tests are passing (if applicable).
| https://api.github.com/repos/lm-sys/FastChat/pulls/2416 | 2023-09-13T11:32:09Z | 2023-09-18T01:58:03Z | 2023-09-18T01:58:03Z | 2023-09-18T01:58:04Z | 476 | lm-sys/FastChat | 41,046 |
Adding IP2Location, FraudLabs Pro, etc... | diff --git a/README.md b/README.md
index b77e964245..8911817762 100644
--- a/README.md
+++ b/README.md
@@ -128,6 +128,7 @@ API | Description | Auth | HTTPS | CORS |
| [Freelancer](https://developers.freelancer.com) | Hire freelancers to get work done | `OAuth` | Yes | Unknown |
| [Gmail](https://developers.google.com/gmail/api/) | Flexible, RESTful access to the user's inbox | `OAuth` | Yes | Unknown |
| [Google Analytics](https://developers.google.com/analytics/) | Collect, configure and analyze your data to reach the right audience | `OAuth` | Yes | Unknown |
+| [MailboxValidator](https://www.mailboxvalidator.com/api-single-validation) | Validate email address to improve deliverability | `apiKey` | Yes | Unknown |
| [mailgun](https://www.mailgun.com/) | Email Service | `apiKey` | Yes | Unknown |
| [markerapi](http://www.markerapi.com/) | Trademark Search | No | No | Unknown |
| [Ticksel](https://ticksel.com) | Friendly website analytics made for humans | No | Yes | Unknown |
@@ -348,6 +349,7 @@ API | Description | Auth | HTTPS | CORS |
### Fraud Prevention
API | Description | Auth | HTTPS | CORS |
|---|---|---|---|---|
+| [FraudLabs Pro](https://www.fraudlabspro.com/developer/api/screen-order) | Screen order information using AI to detect frauds | `apiKey` | Yes | Unknown |
| [Whitepages Pro](https://pro.whitepages.com/developer/documentation/identity-check-api/) | Global identity verification with phone, address, email and IP | `apiKey` | Yes | Unknown |
| [Whitepages Pro](https://pro.whitepages.com/developer/documentation/phone-reputation-api/) | Phone reputation to detect spammy phones | `apiKey` | Yes | Unknown |
| [Whitepages Pro](https://pro.whitepages.com/developer/documentation/reverse-phone-api/) | Get an owner’s name, address, demographics based on the phone number | `apiKey` | Yes | Unknown |
@@ -404,6 +406,7 @@ API | Description | Auth | HTTPS | CORS |
| [GeoApi](https://api.gouv.fr/api/geoapi.html) | French geographical data | No | Yes | Unknown |
| [Geocod.io](https://www.geocod.io/) | Address geocoding / reverse geocoding in bulk | `apiKey` | Yes | Unknown |
| [Geocode.xyz](https://geocode.xyz/) | Provides worldwide forward/reverse geocoding, batch geocoding and geoparsing | No | Yes | Unknown |
+| [GeoDataSource](https://www.geodatasource.com/web-service) | Geocoding of city name by using latitude and longitude coordinates | `apiKey` | Yes | Unknown |
| [GeoJS](https://geojs.io/) | IP geolocation with ChatOps integration | No | Yes | Yes |
| [GeoNames](http://www.geonames.org/export/web-services.html) | Place names and other geographical data | No | No | Unknown |
| [geoPlugin](https://www.geoplugin.com) | IP geolocation and currency conversion | No | Yes | Yes |
@@ -417,6 +420,8 @@ API | Description | Auth | HTTPS | CORS |
| [IP Location](https://ipapi.co/) | Find IP address location information | No | Yes | Unknown |
| [IP Sidekick](https://ipsidekick.com) | Geolocation API that returns extra information about an IP address | `apiKey` | Yes | Unknown |
| [IP Vigilante](https://www.ipvigilante.com/) | Free IP Geolocation API | No | Yes | Unknown |
+| [IP2Location](https://www.ip2location.com/web-service/ip2location) | IP geolocation web service to get more than 55 parameters | `apiKey` | Yes | Unknown |
+| [IP2Proxy](https://www.ip2location.com/web-service/ip2proxy) | Detect proxy and VPN using IP address | `apiKey` | Yes | Unknown |
| [IPGeolocationAPI.com](https://ipgeolocationapi.com/) | Locate your visitors by IP with country details | No | Yes | Yes |
| [ipstack](https://ipstack.com/) | Locate and identify website visitors by IP address | `apiKey` | Yes | Unknown |
| [LocationIQ](https://locationiq.org/docs/) | Provides forward/reverse geocoding and batch geocoding | `apiKey` | Yes | Yes |
| Add various APIs that provide free usage.
- [x] Your submissions are formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md)
- [x] Your additions are ordered alphabetically
- [x] Your submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column should be padded with one space on either side
- [x] You have searched the repository for any relevant issues or pull requests
- [x] Any category you are creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit | https://api.github.com/repos/public-apis/public-apis/pulls/922 | 2019-03-27T05:28:19Z | 2019-10-06T10:53:46Z | 2019-10-06T10:53:46Z | 2019-10-06T10:53:50Z | 1,002 | public-apis/public-apis | 36,040 |
Bybit :: fix empty OHLCV | diff --git a/js/bybit.js b/js/bybit.js
index 814b63e883e2..091dbd64fba8 100644
--- a/js/bybit.js
+++ b/js/bybit.js
@@ -2049,7 +2049,7 @@ module.exports = class bybit extends Exchange {
// }
//
const result = this.safeValue (response, 'result');
- const ohlcvs = this.safeValue (result, 'list');
+ const ohlcvs = this.safeValue (result, 'list', []);
return this.parseOHLCVs (ohlcvs, market, timeframe, since, limit);
}
| - relates to https://github.com/ccxt/ccxt/issues/16097 | https://api.github.com/repos/ccxt/ccxt/pulls/16100 | 2022-12-15T15:31:13Z | 2022-12-15T15:33:23Z | 2022-12-15T15:33:23Z | 2022-12-15T15:33:23Z | 146 | ccxt/ccxt | 12,985 |
fix bedrock client initialisation | diff --git a/llama_index/embeddings/bedrock.py b/llama_index/embeddings/bedrock.py
index 0add636f79a6d..a90352aaacf33 100644
--- a/llama_index/embeddings/bedrock.py
+++ b/llama_index/embeddings/bedrock.py
@@ -201,7 +201,7 @@ def from_credentials(
def _get_embedding(self, payload: str, type: Literal["text", "query"]) -> Embedding:
if self._client is None:
- self.set_credentials(self.model_name)
+ self.set_credentials()
if self._client is None:
raise ValueError("Client not set")
| # Description
The first parameter of the `set_credentials` method is the AWS region. The current call leads to an exception `botocore.exceptions.InvalidRegionError: Provided region_name 'amazon.titan-embed-text-v1' doesn't match a supported format.`
Fixes # (issue)
## Type of Change
Please delete options that are not relevant.
- [ ] Bug fix (non-breaking change which fixes an issue)
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ] I stared at the code and made sure it makes sense
# Suggested Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added Google Colab support for the newly added notebooks.
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] I ran `make format; make lint` to appease the lint gods
| https://api.github.com/repos/run-llama/llama_index/pulls/9671 | 2023-12-22T14:17:00Z | 2023-12-22T15:32:01Z | 2023-12-22T15:32:01Z | 2023-12-22T15:32:01Z | 153 | run-llama/llama_index | 6,541 |
Too many c's | diff --git a/lib/streamlit/__init__.py b/lib/streamlit/__init__.py
index b92f169585ae..d137db76d2b6 100644
--- a/lib/streamlit/__init__.py
+++ b/lib/streamlit/__init__.py
@@ -718,7 +718,7 @@ def stop():
>>> if not name:
>>> st.warning('Please input a name.')
>>> st.stop()
- >>> st.succcess('Thank you for inputting a name.')
+ >>> st.success('Thank you for inputting a name.')
"""
raise StopException()
| Quick typo fix!
---
**Contribution License Agreement**
By submitting this pull request you agree that all contributions to this project are made under the Apache 2.0 license.
| https://api.github.com/repos/streamlit/streamlit/pulls/1842 | 2020-08-11T23:26:54Z | 2020-08-12T04:35:04Z | 2020-08-12T04:35:04Z | 2020-10-01T17:14:46Z | 135 | streamlit/streamlit | 22,250 |
🔧 Update sponsors: add Coherence | diff --git a/README.md b/README.md
index 968ccf7a7472f..874abf8c65e29 100644
--- a/README.md
+++ b/README.md
@@ -53,6 +53,7 @@ The key features are:
<a href="https://reflex.dev" target="_blank" title="Reflex"><img src="https://fastapi.tiangolo.com/img/sponsors/reflex.png"></a>
<a href="https://github.com/scalar/scalar/?utm_source=fastapi&utm_medium=website&utm_campaign=main-badge" target="_blank" title="Scalar: Beautiful Open-Source API References from Swagger/OpenAPI files"><img src="https://fastapi.tiangolo.com/img/sponsors/scalar.svg"></a>
<a href="https://www.propelauth.com/?utm_source=fastapi&utm_campaign=1223&utm_medium=mainbadge" target="_blank" title="Auth, user management and more for your B2B product"><img src="https://fastapi.tiangolo.com/img/sponsors/propelauth.png"></a>
+<a href="https://www.withcoherence.com/?utm_medium=advertising&utm_source=fastapi&utm_campaign=banner%20january%2024" target="_blank" title="Coherence"><img src="https://fastapi.tiangolo.com/img/sponsors/coherence.png"></a>
<a href="https://training.talkpython.fm/fastapi-courses" target="_blank" title="FastAPI video courses on demand from people you trust"><img src="https://fastapi.tiangolo.com/img/sponsors/talkpython-v2.jpg"></a>
<a href="https://testdriven.io/courses/tdd-fastapi/" target="_blank" title="Learn to build high-quality web apps with best practices"><img src="https://fastapi.tiangolo.com/img/sponsors/testdriven.svg"></a>
<a href="https://github.com/deepset-ai/haystack/" target="_blank" title="Build powerful search from composable, open source building blocks"><img src="https://fastapi.tiangolo.com/img/sponsors/haystack-fastapi.svg"></a>
diff --git a/docs/en/data/sponsors.yml b/docs/en/data/sponsors.yml
index 0ce434b5e28c7..fd8518ce33b07 100644
--- a/docs/en/data/sponsors.yml
+++ b/docs/en/data/sponsors.yml
@@ -20,6 +20,9 @@ gold:
- url: https://www.propelauth.com/?utm_source=fastapi&utm_campaign=1223&utm_medium=mainbadge
title: Auth, user management and more for your B2B product
img: https://fastapi.tiangolo.com/img/sponsors/propelauth.png
+ - url: https://www.withcoherence.com/?utm_medium=advertising&utm_source=fastapi&utm_campaign=banner%20january%2024
+ title: Coherence
+ img: https://fastapi.tiangolo.com/img/sponsors/coherence.png
silver:
- url: https://training.talkpython.fm/fastapi-courses
title: FastAPI video courses on demand from people you trust
diff --git a/docs/en/data/sponsors_badge.yml b/docs/en/data/sponsors_badge.yml
index 4078454a8ca40..00cbec7d28355 100644
--- a/docs/en/data/sponsors_badge.yml
+++ b/docs/en/data/sponsors_badge.yml
@@ -23,3 +23,8 @@ logins:
- svixhq
- Alek99
- codacy
+ - zanfaruqui
+ - scalar
+ - bump-sh
+ - andrew-propelauth
+ - svix
diff --git a/docs/en/docs/deployment/cloud.md b/docs/en/docs/deployment/cloud.md
index 29f0ad1f6f7f5..d34fbe2f719b5 100644
--- a/docs/en/docs/deployment/cloud.md
+++ b/docs/en/docs/deployment/cloud.md
@@ -14,3 +14,4 @@ You might want to try their services and follow their guides:
* <a href="https://docs.platform.sh/languages/python.html?utm_source=fastapi-signup&utm_medium=banner&utm_campaign=FastAPI-signup-June-2023" class="external-link" target="_blank">Platform.sh</a>
* <a href="https://docs.porter.run/language-specific-guides/fastapi" class="external-link" target="_blank">Porter</a>
+* <a href="https://docs.withcoherence.com/docs/configuration/frameworks?utm_medium=advertising&utm_source=fastapi&utm_campaign=banner%20january%2024#fast-api-example" class="external-link" target="_blank">Coherence</a>
diff --git a/docs/en/docs/img/sponsors/coherence-banner.png b/docs/en/docs/img/sponsors/coherence-banner.png
new file mode 100644
index 0000000000000..1d495965920e7
Binary files /dev/null and b/docs/en/docs/img/sponsors/coherence-banner.png differ
diff --git a/docs/en/docs/img/sponsors/coherence.png b/docs/en/docs/img/sponsors/coherence.png
new file mode 100644
index 0000000000000..d48c4edc4df96
Binary files /dev/null and b/docs/en/docs/img/sponsors/coherence.png differ
diff --git a/docs/en/overrides/main.html b/docs/en/overrides/main.html
index 476b436767448..eaab6b630792b 100644
--- a/docs/en/overrides/main.html
+++ b/docs/en/overrides/main.html
@@ -64,6 +64,12 @@
<img class="sponsor-image" src="/img/sponsors/propelauth-banner.png" />
</a>
</div>
+ <div class="item">
+ <a title="Coherence" style="display: block; position: relative;" href="https://www.withcoherence.com/?utm_medium=advertising&utm_source=fastapi&utm_campaign=banner%20january%2024" target="_blank">
+ <span class="sponsor-badge">sponsor</span>
+ <img class="sponsor-image" src="/img/sponsors/coherence-banner.png" />
+ </a>
+ </div>
</div>
</div>
{% endblock %}
| 🔧 Update sponsors: add Coherence | https://api.github.com/repos/tiangolo/fastapi/pulls/11066 | 2024-01-31T22:08:34Z | 2024-01-31T22:13:52Z | 2024-01-31T22:13:52Z | 2024-01-31T22:13:53Z | 1,408 | tiangolo/fastapi | 23,608 |
Add docstrings for Clickhouse class methods | diff --git a/.gitignore b/.gitignore
index c24d6e3f564984..aed12c91c6a3b3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -116,6 +116,7 @@ celerybeat.pid
.env
.envrc
.venv*
+venv*
env/
ENV/
env.bak/
diff --git a/libs/community/langchain_community/vectorstores/clickhouse.py b/libs/community/langchain_community/vectorstores/clickhouse.py
index 816ccd2aacab59..bc541084547876 100644
--- a/libs/community/langchain_community/vectorstores/clickhouse.py
+++ b/libs/community/langchain_community/vectorstores/clickhouse.py
@@ -211,12 +211,48 @@ def __init__(
@property
def embeddings(self) -> Embeddings:
+ """Provides access to the embedding mechanism used by the Clickhouse instance.
+
+ This property allows direct access to the embedding function or model being
+ used by the Clickhouse instance to convert text documents into embedding vectors
+ for vector similarity search.
+
+ Returns:
+ The `Embeddings` instance associated with this Clickhouse instance.
+ """
return self.embedding_function
def escape_str(self, value: str) -> str:
+ """Escape special characters in a string for Clickhouse SQL queries.
+
+ This method is used internally to prepare strings for safe insertion
+ into SQL queries by escaping special characters that might otherwise
+ interfere with the query syntax.
+
+ Args:
+ value: The string to be escaped.
+
+ Returns:
+ The escaped string, safe for insertion into SQL queries.
+ """
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str:
+ """Construct an SQL query for inserting data into the Clickhouse database.
+
+ This method formats and constructs an SQL `INSERT` query string using the
+ provided transaction data and column names. It is utilized internally during
+ the process of batch insertion of documents and their embeddings into the
+ database.
+
+ Args:
+ transac: iterable of tuples, representing a row of data to be inserted.
+ column_names: iterable of strings representing the names of the columns
+ into which data will be inserted.
+
+ Returns:
+ A string containing the constructed SQL `INSERT` query.
+ """
ks = ",".join(column_names)
_data = []
for n in transac:
@@ -231,6 +267,17 @@ def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> s
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
+ """Execute an SQL query to insert data into the Clickhouse database.
+
+ This method performs the actual insertion of data into the database by
+ executing the SQL query constructed by `_build_insert_sql`. It's a critical
+ step in adding new documents and their associated data into the vector store.
+
+ Args:
+ transac:iterable of tuples, representing a row of data to be inserted.
+ column_names: An iterable of strings representing the names of the columns
+ into which data will be inserted.
+ """
_insert_query = self._build_insert_sql(transac, column_names)
self.client.command(_insert_query)
@@ -345,6 +392,21 @@ def __repr__(self) -> str:
def _build_query_sql(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
+ """Construct an SQL query for performing a similarity search.
+
+ This internal method generates an SQL query for finding the top-k most similar
+ vectors in the database to a given query vector.It allows for optional filtering
+ conditions to be applied via a WHERE clause.
+
+ Args:
+ q_emb: The query vector as a list of floats.
+ topk: The number of top similar items to retrieve.
+ where_str: opt str representing additional WHERE conditions for the query
+ Defaults to None.
+
+ Returns:
+ A string containing the SQL query for the similarity search.
+ """
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
| Thank you for contributing to LangChain!
- [ ] **PR title**: "package: description"
- Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes.
- Example: "community: add foobar LLM"
- [ ] **PR message**: ***Delete this entire checklist*** and replace with
- **Description:** a description of the change
- **Issue:** the issue # it fixes, if applicable
- **Dependencies:** any dependencies required for this change
- **Twitter handle:** if your PR gets announced, and you'd like a mention, we'll gladly shout you out!
- [ ] **Add tests and docs**: If you're adding a new integration, please include
1. a test for the integration, preferably unit tests that do not rely on network access,
2. an example notebook showing its use. It lives in `docs/docs/integrations` directory.
- [x] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/
Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.
If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
| https://api.github.com/repos/langchain-ai/langchain/pulls/19195 | 2024-03-17T07:04:28Z | 2024-03-19T04:03:13Z | 2024-03-19T04:03:13Z | 2024-03-19T04:03:13Z | 996 | langchain-ai/langchain | 43,307 |
updated: only fetch on metered connection when necessary | diff --git a/common/params.cc b/common/params.cc
index b1fc15e4c5dfbc..b416b801a8db95 100644
--- a/common/params.cc
+++ b/common/params.cc
@@ -161,6 +161,7 @@ std::unordered_map<std::string, uint32_t> keys = {
{"NavPastDestinations", PERSISTENT},
{"NavSettingLeftSide", PERSISTENT},
{"NavSettingTime24h", PERSISTENT},
+ {"NetworkMetered", PERSISTENT},
{"ObdMultiplexingChanged", CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION},
{"ObdMultiplexingEnabled", CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION},
{"Offroad_BadNvme", CLEAR_ON_MANAGER_START},
@@ -201,6 +202,7 @@ std::unordered_map<std::string, uint32_t> keys = {
{"UpdaterNewReleaseNotes", CLEAR_ON_MANAGER_START},
{"UpdaterState", CLEAR_ON_MANAGER_START},
{"UpdaterTargetBranch", CLEAR_ON_MANAGER_START},
+ {"UpdaterLastFetchTime", PERSISTENT},
{"Version", PERSISTENT},
{"VisionRadarToggle", PERSISTENT},
{"WheeledBody", PERSISTENT},
diff --git a/selfdrive/thermald/thermald.py b/selfdrive/thermald/thermald.py
index 7011ff0a99d7a6..75e091febbbd36 100755
--- a/selfdrive/thermald/thermald.py
+++ b/selfdrive/thermald/thermald.py
@@ -447,6 +447,8 @@ def thermald_thread(end_event, hw_queue) -> None:
except Exception:
cloudlog.exception("failed to save offroad status")
+ params.put_bool_nonblocking("NetworkMetered", (msg.deviceState.networkType != NetworkType.wifi))
+
count += 1
should_start_prev = should_start
diff --git a/selfdrive/updated.py b/selfdrive/updated.py
index a623aaefc83612..8a46a11a78612d 100755
--- a/selfdrive/updated.py
+++ b/selfdrive/updated.py
@@ -35,26 +35,42 @@
DAYS_NO_CONNECTIVITY_MAX = 14 # do not allow to engage after this many days
DAYS_NO_CONNECTIVITY_PROMPT = 10 # send an offroad prompt after this many days
+class UserRequest:
+ NONE = 0
+ CHECK = 1
+ FETCH = 2
+
class WaitTimeHelper:
def __init__(self):
self.ready_event = threading.Event()
- self.only_check_for_update = False
+ self.user_request = UserRequest.NONE
signal.signal(signal.SIGHUP, self.update_now)
signal.signal(signal.SIGUSR1, self.check_now)
def update_now(self, signum: int, frame) -> None:
cloudlog.info("caught SIGHUP, attempting to downloading update")
- self.only_check_for_update = False
+ self.user_request = UserRequest.FETCH
self.ready_event.set()
def check_now(self, signum: int, frame) -> None:
cloudlog.info("caught SIGUSR1, checking for updates")
- self.only_check_for_update = True
+ self.user_request = UserRequest.CHECK
self.ready_event.set()
def sleep(self, t: float) -> None:
self.ready_event.wait(timeout=t)
+def write_time_to_param(params, param) -> None:
+ t = datetime.datetime.utcnow()
+ params.put(param, t.isoformat().encode('utf8'))
+
+def read_time_from_param(params, param) -> Optional[datetime.datetime]:
+ t = params.get(param, encoding='utf8')
+ try:
+ return datetime.datetime.fromisoformat(t)
+ except (TypeError, ValueError):
+ pass
+ return None
def run(cmd: List[str], cwd: Optional[str] = None) -> str:
return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf8')
@@ -266,14 +282,11 @@ def set_params(self, update_success: bool, failed_count: int, exception: Optiona
last_update = datetime.datetime.utcnow()
if update_success:
- t = last_update.isoformat()
- self.params.put("LastUpdateTime", t.encode('utf8'))
+ write_time_to_param(self.params, "LastUpdateTime")
else:
- try:
- t = self.params.get("LastUpdateTime", encoding='utf8')
- last_update = datetime.datetime.fromisoformat(t)
- except (TypeError, ValueError):
- pass
+ t = read_time_from_param(self.params, "LastUpdateTime")
+ if t is not None:
+ last_update = t
if exception is None:
self.params.remove("LastUpdateException")
@@ -421,10 +434,7 @@ def main() -> None:
updater = Updater()
update_failed_count = 0 # TODO: Load from param?
-
- # no fetch on the first time
wait_helper = WaitTimeHelper()
- wait_helper.only_check_for_update = True
# invalidate old finalized update
set_consistent_flag(False)
@@ -458,10 +468,16 @@ def main() -> None:
updater.check_for_update()
# download update
- if wait_helper.only_check_for_update:
- cloudlog.info("skipping fetch this cycle")
+ last_fetch = read_time_from_param(params, "UpdaterLastFetchTime")
+ timed_out = last_fetch is None or (datetime.datetime.utcnow() - last_fetch > datetime.timedelta(days=3))
+ user_requested_fetch = wait_helper.user_request == UserRequest.FETCH
+ if params.get_bool("NetworkMetered") and not timed_out and not user_requested_fetch:
+ cloudlog.info("skipping fetch, connection metered")
+ elif wait_helper.user_request == UserRequest.CHECK:
+ cloudlog.info("skipping fetch, only checking")
else:
updater.fetch_update()
+ write_time_to_param(params, "UpdaterLastFetchTime")
update_failed_count = 0
except subprocess.CalledProcessError as e:
cloudlog.event(
@@ -485,7 +501,7 @@ def main() -> None:
cloudlog.exception("uncaught updated exception while setting params, shouldn't happen")
# infrequent attempts if we successfully updated recently
- wait_helper.only_check_for_update = False
+ wait_helper.user_request = UserRequest.NONE
wait_helper.sleep(5*60 if update_failed_count > 0 else 1.5*60*60)
| - [x] metered can wait a few days
- [x] same policy for non-metered connections
- [x] download button in UI always fetches | https://api.github.com/repos/commaai/openpilot/pulls/31041 | 2024-01-17T23:03:43Z | 2024-01-18T00:30:08Z | 2024-01-18T00:30:08Z | 2024-01-18T01:04:07Z | 1,469 | commaai/openpilot | 9,215 |
Fix the syntax highlighting in the example | diff --git a/docs/user/advanced.rst b/docs/user/advanced.rst
index 728ffbb684..f0b94b4f80 100644
--- a/docs/user/advanced.rst
+++ b/docs/user/advanced.rst
@@ -395,8 +395,8 @@ To do that, just set files to a list of tuples of ``(form_field_name, file_info)
>>> url = 'https://httpbin.org/post'
>>> multiple_files = [
- ('images', ('foo.png', open('foo.png', 'rb'), 'image/png')),
- ('images', ('bar.png', open('bar.png', 'rb'), 'image/png'))]
+ ... ('images', ('foo.png', open('foo.png', 'rb'), 'image/png')),
+ ... ('images', ('bar.png', open('bar.png', 'rb'), 'image/png'))]
>>> r = requests.post(url, files=multiple_files)
>>> r.text
{
| Previously, dots were not included, which was breaking multiline syntax highlighting in for example in interactive mode. Example:

Here is how it looks after the fix:

| https://api.github.com/repos/psf/requests/pulls/5276 | 2019-11-27T14:09:40Z | 2020-02-18T07:19:39Z | 2020-02-18T07:19:39Z | 2021-08-29T00:07:06Z | 216 | psf/requests | 32,480 |
[extractors/odnoklassniki] Add support for mobile URLs (closes #16081) | diff --git a/youtube_dl/extractor/odnoklassniki.py b/youtube_dl/extractor/odnoklassniki.py
index 5c8b37e18bf..d87d0960fbb 100644
--- a/youtube_dl/extractor/odnoklassniki.py
+++ b/youtube_dl/extractor/odnoklassniki.py
@@ -19,7 +19,7 @@
class OdnoklassnikiIE(InfoExtractor):
- _VALID_URL = r'https?://(?:(?:www|m|mobile)\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer|live)/(?P<id>[\d-]+)'
+ _VALID_URL = r'https?://(?:(?:www|m|mobile)\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?/|web-api/video/moviePlayer/|live/|dk\?.*?st\.mvId=)(?P<id>[\d-]+)'
_TESTS = [{
# metadata in JSON
'url': 'http://ok.ru/video/20079905452',
@@ -101,6 +101,9 @@ class OdnoklassnikiIE(InfoExtractor):
}, {
'url': 'https://www.ok.ru/live/484531969818',
'only_matching': True,
+ }, {
+ 'url': 'https://m.ok.ru/dk?st.cmd=movieLayer&st.discId=863789452017&st.retLoc=friend&st.rtu=%2Fdk%3Fst.cmd%3DfriendMovies%26st.mode%3Down%26st.mrkId%3D%257B%2522uploadedMovieMarker%2522%253A%257B%2522marker%2522%253A%25221519410114503%2522%252C%2522hasMore%2522%253Atrue%257D%252C%2522sharedMovieMarker%2522%253A%257B%2522marker%2522%253Anull%252C%2522hasMore%2522%253Afalse%257D%257D%26st.friendId%3D561722190321%26st.frwd%3Don%26_prevCmd%3DfriendMovies%26tkn%3D7257&st.discType=MOVIE&st.mvId=863789452017&_prevCmd=friendMovies&tkn=3648#lst#',
+ 'only_matching': True,
}]
def _real_extract(self, url):
| ### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [x] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Support for another format of m.ok.ru links as described in https://github.com/rg3/youtube-dl/issues/16081 | https://api.github.com/repos/ytdl-org/youtube-dl/pulls/16129 | 2018-04-08T10:49:08Z | 2018-04-08T15:13:00Z | 2018-04-08T15:13:00Z | 2018-04-08T15:13:01Z | 585 | ytdl-org/youtube-dl | 49,979 |
Litellm/fixes | diff --git a/llama_index/llms/litellm.py b/llama_index/llms/litellm.py
index e0325fd27980d..77c2f317b1435 100644
--- a/llama_index/llms/litellm.py
+++ b/llama_index/llms/litellm.py
@@ -29,7 +29,6 @@
acompletion_with_retry,
completion_with_retry,
from_openai_message_dict,
- is_chat_model,
is_function_calling_model,
openai_modelname_to_contextsize,
to_openai_message_dicts,
@@ -61,6 +60,7 @@ def __init__(
max_retries: int = 10,
api_key: Optional[str] = None,
api_type: Optional[str] = None,
+ api_base: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
@@ -78,6 +78,8 @@ def __init__(
additional_kwargs["api_key"] = api_key
if api_type is not None:
additional_kwargs["api_type"] = api_type
+ if api_base is not None:
+ additional_kwargs["api_base"] = api_base
super().__init__(
model=model,
@@ -107,7 +109,7 @@ def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=openai_modelname_to_contextsize(self._get_model_name()),
num_output=self.max_tokens or -1,
- is_chat_model=self._is_chat_model,
+ is_chat_model=True,
is_function_calling_model=is_function_calling_model(self._get_model_name()),
model_name=self.model,
)
@@ -132,10 +134,12 @@ def stream_chat(
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
+ # litellm assumes all llms are chat llms
if self._is_chat_model:
complete_fn = chat_to_completion_decorator(self._chat)
else:
complete_fn = self._complete
+
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
@@ -148,7 +152,8 @@ def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
@property
def _is_chat_model(self) -> bool:
- return is_chat_model(self._get_model_name())
+ # litellm assumes all llms are chat llms
+ return True
@property
def _model_kwargs(self) -> Dict[str, Any]:
@@ -174,6 +179,11 @@ def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
+ if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None:
+ all_kwargs.pop(
+ "max_tokens"
+ ) # don't send max_tokens == None, this throws errors for Non OpenAI providers
+
response = completion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
@@ -198,6 +208,10 @@ def _stream_chat(
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
+ if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None:
+ all_kwargs.pop(
+ "max_tokens"
+ ) # don't send max_tokens == None, this throws errors for Non OpenAI providers
def gen() -> ChatResponseGen:
content = ""
@@ -243,58 +257,10 @@ def gen() -> ChatResponseGen:
return gen()
def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
- if self._is_chat_model:
- raise ValueError("This model is a chat model.")
-
- all_kwargs = self._get_all_kwargs(**kwargs)
- if self.max_tokens is None:
- # NOTE: non-chat completion endpoint requires max_tokens to be set
- max_tokens = self._get_max_token_for_prompt(prompt)
- all_kwargs["max_tokens"] = max_tokens
-
- response = completion_with_retry(
- is_chat_model=self._is_chat_model,
- max_retries=self.max_retries,
- prompt=prompt,
- stream=False,
- **all_kwargs,
- )
- text = response["choices"][0]["text"]
- return CompletionResponse(
- text=text,
- raw=response,
- additional_kwargs=self._get_response_token_counts(response),
- )
+ raise NotImplementedError("litellm assumes all llms are chat llms.")
def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
- if self._is_chat_model:
- raise ValueError("This model is a chat model.")
-
- all_kwargs = self._get_all_kwargs(**kwargs)
- if self.max_tokens is None:
- # NOTE: non-chat completion endpoint requires max_tokens to be set
- max_tokens = self._get_max_token_for_prompt(prompt)
- all_kwargs["max_tokens"] = max_tokens
-
- def gen() -> CompletionResponseGen:
- text = ""
- for response in completion_with_retry(
- is_chat_model=self._is_chat_model,
- max_retries=self.max_retries,
- prompt=prompt,
- stream=True,
- **all_kwargs,
- ):
- delta = response["choices"][0]["text"]
- text += delta
- yield CompletionResponse(
- delta=delta,
- text=text,
- raw=response,
- additional_kwargs=self._get_response_token_counts(response),
- )
-
- return gen()
+ raise NotImplementedError("litellm assumes all llms are chat llms.")
def _get_max_token_for_prompt(self, prompt: str) -> int:
try:
@@ -304,7 +270,12 @@ def _get_max_token_for_prompt(self, prompt: str) -> int:
"Please install tiktoken to use the max_tokens=None feature."
)
context_window = self.metadata.context_window
- encoding = tiktoken.encoding_for_model(self._get_model_name())
+ try:
+ encoding = tiktoken.encoding_for_model(self._get_model_name())
+ except KeyError:
+ encoding = encoding = tiktoken.get_encoding(
+ "cl100k_base"
+ ) # default to using cl10k_base
tokens = encoding.encode(prompt)
max_token = context_window - len(tokens)
if max_token <= 0:
@@ -452,57 +423,9 @@ async def gen() -> ChatResponseAsyncGen:
return gen()
async def _acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
- if self._is_chat_model:
- raise ValueError("This model is a chat model.")
-
- all_kwargs = self._get_all_kwargs(**kwargs)
- if self.max_tokens is None:
- # NOTE: non-chat completion endpoint requires max_tokens to be set
- max_tokens = self._get_max_token_for_prompt(prompt)
- all_kwargs["max_tokens"] = max_tokens
-
- response = await acompletion_with_retry(
- is_chat_model=self._is_chat_model,
- max_retries=self.max_retries,
- prompt=prompt,
- stream=False,
- **all_kwargs,
- )
- text = response["choices"][0]["text"]
- return CompletionResponse(
- text=text,
- raw=response,
- additional_kwargs=self._get_response_token_counts(response),
- )
+ raise NotImplementedError("litellm assumes all llms are chat llms.")
async def _astream_complete(
self, prompt: str, **kwargs: Any
) -> CompletionResponseAsyncGen:
- if self._is_chat_model:
- raise ValueError("This model is a chat model.")
-
- all_kwargs = self._get_all_kwargs(**kwargs)
- if self.max_tokens is None:
- # NOTE: non-chat completion endpoint requires max_tokens to be set
- max_tokens = self._get_max_token_for_prompt(prompt)
- all_kwargs["max_tokens"] = max_tokens
-
- async def gen() -> CompletionResponseAsyncGen:
- text = ""
- async for response in await acompletion_with_retry(
- is_chat_model=self._is_chat_model,
- max_retries=self.max_retries,
- prompt=prompt,
- stream=True,
- **all_kwargs,
- ):
- delta = response["choices"][0]["text"]
- text += delta
- yield CompletionResponse(
- delta=delta,
- text=text,
- raw=response,
- additional_kwargs=self._get_response_token_counts(response),
- )
-
- return gen()
+ raise NotImplementedError("litellm assumes all llms are chat llms.")
diff --git a/tests/llms/test_litellm.py b/tests/llms/test_litellm.py
index 5e044f0221049..3d6643f18d50b 100644
--- a/tests/llms/test_litellm.py
+++ b/tests/llms/test_litellm.py
@@ -148,3 +148,38 @@ def test_chat_model_basic(monkeypatch: MonkeyPatch) -> None:
def test_metadata() -> None:
llm = LiteLLM(model="gpt-3.5-turbo")
assert isinstance(llm.metadata.context_window, int)
+
+
+@pytest.mark.skipif(litellm is None, reason="litellm not installed")
+def test_deep_infra() -> None:
+ # deep infra call
+ llm = LiteLLM(
+ model="deepinfra/meta-llama/Llama-2-70b-chat-hf", max_tokens=10, api_key=""
+ )
+ message = ChatMessage(role="user", content="why does LiteLLM love LlamaIndex")
+ chat_response = llm.chat([message])
+ print("\ndeepinfra Chat response\n")
+ print(chat_response)
+
+
+@pytest.mark.skipif(litellm is None, reason="litellm not installed")
+def test_openai() -> None:
+ llm = LiteLLM(model="gpt-3.5-turbo", api_key="")
+ message = ChatMessage(role="user", content="why does LiteLLM love LlamaIndex")
+ chat_response = llm.chat([message])
+ print("gpt-3.5-turbo Chat response\n")
+ print(chat_response)
+
+
+@pytest.mark.skipif(litellm is None, reason="litellm not installed")
+def test_tg_ai() -> None:
+ # deep infra call
+ llm = LiteLLM(
+ model="together_ai/togethercomputer/Llama-2-7B-32K-Instruct",
+ max_tokens=10,
+ api_key="",
+ )
+ message = ChatMessage(role="user", content="why does LiteLLM love LlamaIndex")
+ chat_response = llm.chat([message])
+ print("\ntogetherai Chat response\n")
+ print(chat_response)
| # Description
Fixes LiteLLM Huggingface, Deep Infra integration
Fixes # (issue)
## Type of Change
Please delete options that are not relevant.
- [x] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ] Added new unit/integration tests
- [ ] Added new notebook (that tests end-to-end)
- [ ] I stared at the code and made sure it makes sense
# Suggested Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] I ran `make format; make lint` to appease the lint gods
| https://api.github.com/repos/run-llama/llama_index/pulls/7885 | 2023-09-28T21:08:24Z | 2023-10-22T04:30:52Z | 2023-10-22T04:30:52Z | 2023-10-22T04:30:52Z | 2,505 | run-llama/llama_index | 6,907 |
add answers to aws-cloud-practitioner.md | diff --git a/certificates/aws-cloud-practitioner.md b/certificates/aws-cloud-practitioner.md
index 6a77583cd..e7a0b1b21 100644
--- a/certificates/aws-cloud-practitioner.md
+++ b/certificates/aws-cloud-practitioner.md
@@ -400,8 +400,8 @@ Learn more [here](https://aws.amazon.com/snowmobile)
<details>
<summary>What is IAM? What are some of its features?</summary><br><b>
+IAM stands for Identity and Access Management, and is used for managing users, groups, access policies & roles
Full explanation is [here](https://aws.amazon.com/iam)
-In short: it's used for managing users, groups, access policies & roles
</b></details>
<details>
@@ -570,7 +570,7 @@ Read more about it [here](https://aws.amazon.com/sns)
<details>
<summary>What is the shared responsibility model? What AWS is responsible for and what the user is responsible for based on the shared responsibility model?</summary><br><b>
-The shared responsibility model defines what the customer is responsible for and what AWS is responsible for.
+The shared responsibility model defines what the customer is responsible for and what AWS is responsible for. For example, AWS is responsible for security "of" the cloud, while the customer is responsible for security "in" the cloud.
More on the shared responsibility model [here](https://aws.amazon.com/compliance/shared-responsibility-model)
</b></details>
@@ -611,6 +611,8 @@ Learn more [here](https://aws.amazon.com/inspector)
<details>
<summary>What is AWS Guarduty?</summary><br><b>
+
+Guarduty is a threat detection service that monitors your AWS accounts to help detect and mitigate malicious activity
</b></details>
<details>
@@ -621,6 +623,8 @@ AWS definition: "AWS Shield is a managed Distributed Denial of Service (DDoS) pr
<details>
<summary>What is AWS WAF? Give an example of how it can used and describe what resources or services you can use it with</summary><br><b>
+
+An AWS Web Application Firewall (WAF) can filter out unwanted web traffic (bots), and protect against attacks like SQL injection and cross-site scripting. One service you could use it with would be Amazon CloudFront, a CDN service, to block attacks before they reach your origin servers
</b></details>
<details>
@@ -697,6 +701,11 @@ Learn more [here](https://aws.amazon.com/certificate-manager)
<details>
<summary>What is AWS RDS?</summary><br><b>
+
+Amazon Relational Database Service (RDS) is a service for setting up and managing resizable, cost-efficient relational databases
+resource
+
+Learn more [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html)
</b></details>
<details>
@@ -730,7 +739,7 @@ Learn more [here](https://aws.amazon.com/dynamodb/dax)
<details>
<summary>What is AWS Redshift and how is it different than RDS?</summary><br><b>
-cloud data warehouse
+AWS Redshift is a cloud data warehousing service that is geared towards handling massive amounts of data (think petabytes) and being able to execute complex queries. In contrast, Amazon RDS is best suited for things like web applications requiring simple queries with more frequent transactions, and on a smaller scale.
</b></details>
<details>
@@ -815,7 +824,7 @@ CloudFormation
<details>
<summary>Which service would you use for building a website or web application?</summary><br><b>
-Lightsail
+Lightsail or Elastic Beanstalk
</b></details>
<details>
| Added some answers to the aws-cloud-practitioner.md.
I answered a few unanswered questions, and I also made a few modifications to existing answers.
Namely,
a) The answer to the question "What is IAM?" was modified to include what the acronym stands for (Identity and Access Management)
b) The answer to the question "what is AWS Redshift and how is it different than RDS" only had a partial answer, and didn't answer how it was different than RDS
c) The answer to the question "which service would you use for building a website or web application" only listed Lightsail, while Elastic Beanstalk is a common alternative depending on project requirements.
d) The answer to the Shared Responsibility Model was made more specific by including the associated key phrase | https://api.github.com/repos/bregman-arie/devops-exercises/pulls/396 | 2023-06-12T14:38:26Z | 2024-02-02T13:19:39Z | 2024-02-02T13:19:38Z | 2024-02-02T13:19:39Z | 838 | bregman-arie/devops-exercises | 17,669 |
[Windows|Unix] Avoid to re-execute challenges already validated | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 72482035667..07d00e3face 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,7 +6,8 @@ Certbot adheres to [Semantic Versioning](https://semver.org/).
### Added
-*
+* Avoid to process again challenges that are already validated
+ when a certificate is issued.
### Changed
@@ -22,6 +23,7 @@ all Certbot components during releases for the time being, however, the only
package with changes other than its version number was:
* acme
+* certbot
More details about these changes can be found on our GitHub repo.
diff --git a/certbot-nginx/tests/boulder-integration.conf.sh b/certbot-nginx/tests/boulder-integration.conf.sh
index 4374f9094d6..470eab28e26 100755
--- a/certbot-nginx/tests/boulder-integration.conf.sh
+++ b/certbot-nginx/tests/boulder-integration.conf.sh
@@ -1,3 +1,4 @@
+#!/usr/bin/env bash
# Based on
# https://www.exratione.com/2014/03/running-nginx-as-a-non-root-user/
# https://github.com/exratione/non-root-nginx/blob/9a77f62e5d5cb9c9026fd62eece76b9514011019/nginx.conf
@@ -52,7 +53,7 @@ http {
listen 5002 $default_server;
# IPv6.
listen [::]:5002 $default_server;
- server_name nginx.wtf nginx2.wtf;
+ server_name nginx.wtf nginx-tls.wtf nginx2.wtf;
root $root/webroot;
diff --git a/certbot-nginx/tests/boulder-integration.sh b/certbot-nginx/tests/boulder-integration.sh
index 2a24e645fba..03425734d03 100755
--- a/certbot-nginx/tests/boulder-integration.sh
+++ b/certbot-nginx/tests/boulder-integration.sh
@@ -39,8 +39,8 @@ nginx -v
reload_nginx
certbot_test_nginx --domains nginx.wtf run
test_deployment_and_rollback nginx.wtf
-certbot_test_nginx --domains nginx.wtf run --preferred-challenges tls-sni
-test_deployment_and_rollback nginx.wtf
+certbot_test_nginx --domains nginx-tls.wtf run --preferred-challenges tls-sni
+test_deployment_and_rollback nginx-tls.wtf
certbot_test_nginx --domains nginx2.wtf --preferred-challenges http
test_deployment_and_rollback nginx2.wtf
# Overlapping location block and server-block-level return 301
diff --git a/certbot/auth_handler.py b/certbot/auth_handler.py
index efee4914363..3dfaaf26f4e 100644
--- a/certbot/auth_handler.py
+++ b/certbot/auth_handler.py
@@ -31,7 +31,7 @@ class AuthHandler(object):
:class:`~acme.challenges.Challenge` types
:type auth: :class:`certbot.interfaces.IAuthenticator`
- :ivar acme.client.BackwardsCompatibleClientV2 acme: ACME client API.
+ :ivar acme.client.BackwardsCompatibleClientV2 acme_client: ACME client API.
:ivar account: Client's Account
:type account: :class:`certbot.account.Account`
@@ -40,9 +40,9 @@ class AuthHandler(object):
type strings with the most preferred challenge listed first
"""
- def __init__(self, auth, acme, account, pref_challs):
+ def __init__(self, auth, acme_client, account, pref_challs):
self.auth = auth
- self.acme = acme
+ self.acme = acme_client
self.account = account
self.pref_challs = pref_challs
@@ -85,19 +85,26 @@ def handle_authorizations(self, orderr, best_effort=False):
self.verify_authzr_complete(aauthzrs)
# Only return valid authorizations
- retVal = [aauthzr.authzr for aauthzr in aauthzrs
- if aauthzr.authzr.body.status == messages.STATUS_VALID]
+ ret_val = [aauthzr.authzr for aauthzr in aauthzrs
+ if aauthzr.authzr.body.status == messages.STATUS_VALID]
- if not retVal:
+ if not ret_val:
raise errors.AuthorizationError(
"Challenges failed for all domains")
- return retVal
+ return ret_val
def _choose_challenges(self, aauthzrs):
- """Retrieve necessary challenges to satisfy server."""
- logger.info("Performing the following challenges:")
- for aauthzr in aauthzrs:
+ """
+ Retrieve necessary and pending challenges to satisfy server.
+ NB: Necessary and already validated challenges are not retrieved,
+ as they can be reused for a certificate issuance.
+ """
+ pending_authzrs = [aauthzr for aauthzr in aauthzrs
+ if aauthzr.authzr.body.status != messages.STATUS_VALID]
+ if pending_authzrs:
+ logger.info("Performing the following challenges:")
+ for aauthzr in pending_authzrs:
aauthzr_challenges = aauthzr.authzr.body.challenges
if self.acme.acme_version == 1:
combinations = aauthzr.authzr.body.combinations
@@ -125,7 +132,7 @@ def _has_challenges(self, aauthzrs):
def _solve_challenges(self, aauthzrs):
"""Get Responses for challenges from authenticators."""
- resp = [] # type: Collection[acme.challenges.ChallengeResponse]
+ resp = [] # type: Collection[challenges.ChallengeResponse]
all_achalls = self._get_all_achalls(aauthzrs)
try:
if all_achalls:
@@ -531,7 +538,7 @@ def _report_failed_challs(failed_achalls):
"""
problems = collections.defaultdict(list)\
- # type: DefaultDict[str, List[achallenges.KeyAuthorizationAnnotatedChallenge]]
+ # type: DefaultDict[str, List[achallenges.KeyAuthorizationAnnotatedChallenge]]
for achall in failed_achalls:
if achall.error:
problems[achall.error.typ].append(achall)
diff --git a/certbot/tests/auth_handler_test.py b/certbot/tests/auth_handler_test.py
index e1319b6146e..fe0ece12e9f 100644
--- a/certbot/tests/auth_handler_test.py
+++ b/certbot/tests/auth_handler_test.py
@@ -57,7 +57,7 @@ def test_unrecognized(self):
errors.Error, self.handler._challenge_factory, authzr, [0])
-class HandleAuthorizationsTest(unittest.TestCase):
+class HandleAuthorizationsTest(unittest.TestCase): # pylint: disable=too-many-public-methods
"""handle_authorizations test.
This tests everything except for all functions under _poll_challenges.
@@ -316,6 +316,24 @@ def test_incomplete_authzr_error(self, mock_verify, mock_poll):
self.assertEqual(
self.mock_auth.cleanup.call_args[0][0][0].typ, "tls-sni-01")
+ def test_validated_challenge_not_rerun(self):
+ # With pending challenge, we expect the challenge to be tried, and fail.
+ authzr = acme_util.gen_authzr(
+ messages.STATUS_PENDING, "0",
+ [acme_util.HTTP01],
+ [messages.STATUS_PENDING], False)
+ mock_order = mock.MagicMock(authorizations=[authzr])
+ self.assertRaises(
+ errors.AuthorizationError, self.handler.handle_authorizations, mock_order)
+
+ # With validated challenge; we expect the challenge not be tried again, and succeed.
+ authzr = acme_util.gen_authzr(
+ messages.STATUS_VALID, "0",
+ [acme_util.HTTP01],
+ [messages.STATUS_VALID], False)
+ mock_order = mock.MagicMock(authorizations=[authzr])
+ self.handler.handle_authorizations(mock_order)
+
def _validate_all(self, aauthzrs, unused_1, unused_2):
for i, aauthzr in enumerate(aauthzrs):
azr = aauthzr.authzr
| In response to #5342.
Currently, certbot will execute the operations necessary to validate a challenge even if the challenge has already been validated before against the acme ca server. This can occur for instance if a certificate is asked and issue correctly, then deleted locally, then asked again.
It is a corner case, but it will lead to some heavy operations (like updating a DNS zone, or creating an HTTP server) that are not needed.
This PR corrects this behavior by not executing challenges already validated, and use them directly instead to issue the certificate.
Fixes #5342 | https://api.github.com/repos/certbot/certbot/pulls/6551 | 2018-12-03T21:56:59Z | 2019-01-09T20:52:54Z | 2019-01-09T20:52:54Z | 2019-01-09T20:52:55Z | 1,921 | certbot/certbot | 439 |
fix upgrade script (bm25 nits) | diff --git a/docs/examples/retrievers/composable_retrievers.ipynb b/docs/examples/retrievers/composable_retrievers.ipynb
index 065170a5a86f2..78d8515001303 100644
--- a/docs/examples/retrievers/composable_retrievers.ipynb
+++ b/docs/examples/retrievers/composable_retrievers.ipynb
@@ -33,11 +33,12 @@
"outputs": [],
"source": [
"%pip install llama-index-storage-docstore-mongodb\n",
- "%pip install llama-index-readers-file\n",
+ "%pip install llama-index-vector-stores-qdrant\n",
"%pip install llama-index-storage-docstore-firestore\n",
- "%pip install llama-index-storage-docstore-dynamodb\n",
+ "%pip install llama-index-retrievers-bm25\n",
"%pip install llama-index-storage-docstore-redis\n",
- "%pip install llama-index-vector-stores-qdrant"
+ "%pip install llama-index-storage-docstore-dynamodb\n",
+ "%pip install llama-index-readers-file"
]
},
{
@@ -122,7 +123,7 @@
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, StorageContext\n",
- "from llama_index.core.retrievers import BM25Retriever\n",
+ "from llama_index.retrievers.bm25 import BM25Retriever\n",
"from llama_index.vector_stores.qdrant import QdrantVectorStore\n",
"from qdrant_client import QdrantClient\n",
"\n",
@@ -422,7 +423,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": ".venv",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -439,5 +440,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/llama-index-core/llama_index/core/command_line/mappings.json b/llama-index-core/llama_index/core/command_line/mappings.json
index 147feef03a336..16326b162dbf6 100644
--- a/llama-index-core/llama_index/core/command_line/mappings.json
+++ b/llama-index-core/llama_index/core/command_line/mappings.json
@@ -305,7 +305,6 @@
"RecursiveRetriever": "llama_index.core.retrievers",
"AutoMergingRetriever": "llama_index.core.retrievers",
"RouterRetriever": "llama_index.core.retrievers",
- "BM25Retriever": "llama_index.core.retrievers",
"QueryFusionRetriever": "llama_index.core.retrievers",
"# SQLSQLRetriever": "llama_index.core.retrievers",
"NLSQLRetriever": "llama_index.core.retrievers",
@@ -393,6 +392,7 @@
"set_google_config": "llama_index.vector_stores.google",
"GoogleVectorStore": "llama_index.vector_stores.google",
"MetalVectorStore": "llama_index.vector_stores.metal",
+ "BM25Retriever": "llama_index.retrievers.bm25",
"PathwayRetriever": "llama_index.retrievers.pathway",
"YouRetriever": "llama_index.retrievers.you",
"ZillizCloudPipelineIndex": "llama_index.indices.managed.zilliz",
diff --git a/llama-index-core/llama_index/core/command_line/upgrade.py b/llama-index-core/llama_index/core/command_line/upgrade.py
index 20db64956f3aa..291a53f72d62d 100644
--- a/llama-index-core/llama_index/core/command_line/upgrade.py
+++ b/llama-index-core/llama_index/core/command_line/upgrade.py
@@ -48,12 +48,16 @@ def _parse_from_imports(
new_imports[new_import_parent].append(module)
else:
print(f"Module not found: {module}\nSwitching to core")
+ # get back the llama_index module that's being imported.
new_import_parent = (
- imported_modules[0]
- .split(" import ")[0]
- .split("from ")[-1]
- .replace("llama_index", "llama_index.core")
+ imported_modules[0].split(" import ")[0].split("from ")[-1]
)
+ # if the parent contains `llama_index.core` already, then skip
+ if "llama_index.core" not in new_import_parent:
+ new_import_parent = new_import_parent.replace(
+ "llama_index", "llama_index.core"
+ )
+
if new_import_parent not in new_imports:
new_imports[new_import_parent] = [module]
else:
| the upgrade script isn't idempotent, and this change doesn't really fix all the issues, but it does prevent replacing modules that already have "llama_index.core" with another core (llama_index.core.core) | https://api.github.com/repos/run-llama/llama_index/pulls/10624 | 2024-02-13T04:10:13Z | 2024-02-17T06:13:28Z | 2024-02-17T06:13:28Z | 2024-02-17T17:14:35Z | 1,126 | run-llama/llama_index | 6,131 |
[MRG] Support pd.NA in StringDtype columns for SimpleImputer | diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index 95367bb35ce10..e5d9bd49dc63f 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -81,6 +81,9 @@ Changelog
:mod:`sklearn.impute`
.....................
+- |Enhancement| Added support for `pd.NA` in :class:`SimpleImputer`.
+ :pr:`21114` by :user:`Ying Xiong <yxiong>`.
+
- |API| Adds :meth:`get_feature_names_out` to :class:`impute.SimpleImputer`,
:class:`impute.KNNImputer`, :class:`impute.IterativeImputer`, and
:class:`impute.MissingIndicator`. :pr:`21078` by `Thomas Fan`_.
diff --git a/sklearn/impute/_base.py b/sklearn/impute/_base.py
index c97a8d24d4578..a4e02615975f0 100644
--- a/sklearn/impute/_base.py
+++ b/sklearn/impute/_base.py
@@ -17,10 +17,14 @@
from ..utils.validation import FLOAT_DTYPES
from ..utils.validation import _check_feature_names_in
from ..utils._mask import _get_mask
+from ..utils import _is_pandas_na
from ..utils import is_scalar_nan
def _check_inputs_dtype(X, missing_values):
+ if _is_pandas_na(missing_values):
+ # Allow using `pd.NA` as missing values to impute numerical arrays.
+ return
if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
raise ValueError(
"'X' and 'missing_values' types are expected to be"
@@ -136,11 +140,11 @@ class SimpleImputer(_BaseImputer):
Parameters
----------
- missing_values : int, float, str, np.nan or None, default=np.nan
+ missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
- should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
+ can be set to either `np.nan` or `pd.NA`.
strategy : str, default='mean'
The imputation strategy.
@@ -269,10 +273,10 @@ def _validate_input(self, X, in_fit):
else:
dtype = FLOAT_DTYPES
- if not is_scalar_nan(self.missing_values):
- force_all_finite = True
- else:
+ if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = "allow-nan"
+ else:
+ force_all_finite = True
try:
X = self._validate_data(
@@ -604,6 +608,13 @@ def inverse_transform(self, X):
X_original[full_mask] = self.missing_values
return X_original
+ def _more_tags(self):
+ return {
+ "allow_nan": (
+ _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values)
+ )
+ }
+
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
diff --git a/sklearn/impute/tests/test_impute.py b/sklearn/impute/tests/test_impute.py
index 9a4da4a9230a0..1a009db28ee5b 100644
--- a/sklearn/impute/tests/test_impute.py
+++ b/sklearn/impute/tests/test_impute.py
@@ -28,6 +28,16 @@
from sklearn.impute._base import _most_frequent
+def _assert_array_equal_and_same_dtype(x, y):
+ assert_array_equal(x, y)
+ assert x.dtype == y.dtype
+
+
+def _assert_allclose_and_same_dtype(x, y):
+ assert_allclose(x, y)
+ assert x.dtype == y.dtype
+
+
def _check_statistics(X, X_true, strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
@@ -1495,6 +1505,66 @@ def test_most_frequent(expected, array, dtype, extra_value, n_repeat):
)
+def test_simple_impute_pd_na():
+ pd = pytest.importorskip("pandas", minversion="1.0")
+
+ # Impute pandas array of string types.
+ df = pd.DataFrame({"feature": pd.Series(["abc", None, "de"], dtype="string")})
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value="na")
+ _assert_array_equal_and_same_dtype(
+ imputer.fit_transform(df), np.array([["abc"], ["na"], ["de"]], dtype=object)
+ )
+
+ # Impute pandas array of string types without any missing values.
+ df = pd.DataFrame({"feature": pd.Series(["abc", "de", "fgh"], dtype="string")})
+ imputer = SimpleImputer(fill_value="ok", strategy="constant")
+ _assert_array_equal_and_same_dtype(
+ imputer.fit_transform(df), np.array([["abc"], ["de"], ["fgh"]], dtype=object)
+ )
+
+ # Impute pandas array of integer types.
+ df = pd.DataFrame({"feature": pd.Series([1, None, 3], dtype="Int64")})
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-1)
+ _assert_allclose_and_same_dtype(
+ imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
+ )
+
+ # Use `np.nan` also works.
+ imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=-1)
+ _assert_allclose_and_same_dtype(
+ imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
+ )
+
+ # Impute pandas array of integer types with 'median' strategy.
+ df = pd.DataFrame({"feature": pd.Series([1, None, 2, 3], dtype="Int64")})
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
+ _assert_allclose_and_same_dtype(
+ imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype="float64")
+ )
+
+ # Impute pandas array of integer types with 'mean' strategy.
+ df = pd.DataFrame({"feature": pd.Series([1, None, 2], dtype="Int64")})
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="mean")
+ _assert_allclose_and_same_dtype(
+ imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype="float64")
+ )
+
+ # Impute pandas array of float types.
+ df = pd.DataFrame({"feature": pd.Series([1.0, None, 3.0], dtype="float64")})
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-2.0)
+ _assert_allclose_and_same_dtype(
+ imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype="float64")
+ )
+
+ # Impute pandas array of float types with 'median' strategy.
+ df = pd.DataFrame({"feature": pd.Series([1.0, None, 2.0, 3.0], dtype="float64")})
+ imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
+ _assert_allclose_and_same_dtype(
+ imputer.fit_transform(df),
+ np.array([[1.0], [2.0], [2.0], [3.0]], dtype="float64"),
+ )
+
+
def test_missing_indicator_feature_names_out():
"""Check that missing indicator return the feature names with a prefix."""
pd = pytest.importorskip("pandas")
diff --git a/sklearn/utils/__init__.py b/sklearn/utils/__init__.py
index 8290318d35deb..6c5b4e5830691 100644
--- a/sklearn/utils/__init__.py
+++ b/sklearn/utils/__init__.py
@@ -15,6 +15,7 @@
import struct
import timeit
from pathlib import Path
+from contextlib import suppress
import warnings
import numpy as np
@@ -986,6 +987,30 @@ def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
return chunk_n_rows
+def _is_pandas_na(x):
+ """Test if x is pandas.NA.
+
+ We intentionally do not use this function to return `True` for `pd.NA` in
+ `is_scalar_nan`, because estimators that support `pd.NA` are the exception
+ rather than the rule at the moment. When `pd.NA` is more universally
+ supported, we may reconsider this decision.
+
+ Parameters
+ ----------
+ x : any type
+
+ Returns
+ -------
+ boolean
+ """
+ with suppress(ImportError):
+ from pandas import NA
+
+ return x is NA
+
+ return False
+
+
def is_scalar_nan(x):
"""Tests if x is NaN.
diff --git a/sklearn/utils/_mask.py b/sklearn/utils/_mask.py
index 699a2c1cc1725..d57cf839d962f 100644
--- a/sklearn/utils/_mask.py
+++ b/sklearn/utils/_mask.py
@@ -1,11 +1,20 @@
import numpy as np
from scipy import sparse as sp
+from contextlib import suppress
from . import is_scalar_nan
from .fixes import _object_dtype_isnan
def _get_dense_mask(X, value_to_mask):
+ with suppress(ImportError, AttributeError):
+ # We also suppress `AttributeError` because older versions of pandas do
+ # not have `NA`.
+ import pandas
+
+ if value_to_mask is pandas.NA:
+ return pandas.isna(X)
+
if is_scalar_nan(value_to_mask):
if X.dtype.kind == "f":
Xt = np.isnan(X)
| <!--
Thanks for contributing a pull request! Please ensure you have taken a look at
the contribution guidelines: https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md
-->
#### Reference Issues/PRs
<!--
Example: Fixes #1234. See also #3456.
Please use keywords (e.g., Fixes) to create link to the issues or pull requests
you resolved, so that they will automatically be closed when your pull request
is merged. See https://github.com/blog/1506-closing-issues-via-pull-requests
-->
Fixes #21112 .
#### What does this implement/fix? Explain your changes.
This is a starting point for discussing potential fixes for #21112 , containing two parts:
1. Make `sklearn.utils.is_scalar_nan(x)` return true when `x` is `pd.NA`. This is necessary for `imputer._validate_input` to successfully validate `pd.StringDtype` data with `pd.NA`.
2. Support `pd.NA` in `sklearn.utils._mask._get_dense_mask`.
With these changes, the code snippet in #21112 will run successfully and imputes `pd.NA` to empty strings.
#### Any other comments?
I am new in contributing to sklearn and unfamiliar with the custom and norm (e.g. what's the proper way to import pandas). This PR is just a proof-of-concept to initiate some discussion. If the direction looks promising, I can update the code to adhere to package's convention, add documentation and unit tests, etc. Please kindly advice. Thanks!
<!--
Please be aware that we are a loose team of volunteers so patience is
necessary; assistance handling other issues is very welcome. We value
all user contributions, no matter how minor they are. If we are slow to
review, either the pull request needs some benchmarking, tinkering,
convincing, etc. or more likely the reviewers are simply busy. In either
case, we ask for your understanding during the review process.
For more information, see our FAQ on this topic:
http://scikit-learn.org/dev/faq.html#why-is-my-pull-request-not-getting-any-attention.
Thanks for contributing!
-->
| https://api.github.com/repos/scikit-learn/scikit-learn/pulls/21114 | 2021-09-23T05:20:58Z | 2021-11-05T11:13:22Z | 2021-11-05T11:13:22Z | 2021-11-05T11:13:22Z | 2,425 | scikit-learn/scikit-learn | 46,179 |
Fix OVO Energy Sensors | diff --git a/homeassistant/components/ovo_energy/__init__.py b/homeassistant/components/ovo_energy/__init__.py
index 3aff51fa044ee3..e98e81ba1c2f02 100644
--- a/homeassistant/components/ovo_energy/__init__.py
+++ b/homeassistant/components/ovo_energy/__init__.py
@@ -40,7 +40,14 @@ async def async_update_data() -> OVODailyUsage:
"""Fetch data from OVO Energy."""
now = datetime.utcnow()
async with async_timeout.timeout(10):
- return await client.get_daily_usage(now.strftime("%Y-%m"))
+ try:
+ await client.authenticate(
+ entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
+ )
+ return await client.get_daily_usage(now.strftime("%Y-%m"))
+ except aiohttp.ClientError as exception:
+ _LOGGER.warning(exception)
+ return None
coordinator = DataUpdateCoordinator(
hass,
diff --git a/homeassistant/components/ovo_energy/sensor.py b/homeassistant/components/ovo_energy/sensor.py
index 4b9e2e7080682d..a0781836d6c859 100644
--- a/homeassistant/components/ovo_energy/sensor.py
+++ b/homeassistant/components/ovo_energy/sensor.py
@@ -29,15 +29,29 @@ async def async_setup_entry(
entities = []
- if coordinator.data.electricity:
- currency = coordinator.data.electricity[
- len(coordinator.data.electricity) - 1
- ].cost.currency_unit
- entities.append(OVOEnergyLastElectricityReading(coordinator, client))
- entities.append(OVOEnergyLastElectricityCost(coordinator, client, currency))
- if coordinator.data.gas:
- entities.append(OVOEnergyLastGasReading(coordinator, client))
- entities.append(OVOEnergyLastGasCost(coordinator, client, currency))
+ if coordinator.data:
+ if coordinator.data.electricity:
+ entities.append(OVOEnergyLastElectricityReading(coordinator, client))
+ entities.append(
+ OVOEnergyLastElectricityCost(
+ coordinator,
+ client,
+ coordinator.data.electricity[
+ len(coordinator.data.electricity) - 1
+ ].cost.currency_unit,
+ )
+ )
+ if coordinator.data.gas:
+ entities.append(OVOEnergyLastGasReading(coordinator, client))
+ entities.append(
+ OVOEnergyLastGasCost(
+ coordinator,
+ client,
+ coordinator.data.gas[
+ len(coordinator.data.gas) - 1
+ ].cost.currency_unit,
+ )
+ )
async_add_entities(
entities, True,
| <!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
- Fixes no electricity currency value
- Fixes update unknown issue #38824
Tested over a few hours. All seems well and updates/does not set an unknown state
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [x] Bugfix (non-breaking change which fixes an issue)
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #38824
- This PR is related to issue: #38824
- Link to documentation pull request: N/A
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [x] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [x] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [x] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [x] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [x] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [x] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [x] 🥈 Silver
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
| https://api.github.com/repos/home-assistant/core/pulls/38849 | 2020-08-13T16:36:10Z | 2020-08-14T12:06:32Z | 2020-08-14T12:06:32Z | 2020-08-15T05:14:43Z | 599 | home-assistant/core | 38,989 |
Update test.py | diff --git a/test.py b/test.py
index 36d18132c78..1c6ea103489 100644
--- a/test.py
+++ b/test.py
@@ -68,7 +68,7 @@ def test(data,
model.eval()
is_coco = data.endswith('coco.yaml') # is COCO dataset
with open(data) as f:
- data = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ data = yaml.load(f, Loader=yaml.SafeLoader) # model dict
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
| Fix for Arbitary Code Execution in Test file
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Enhancing security by using safe YAML loader in testing script.
### 📊 Key Changes
- Switched from `yaml.FullLoader` to `yaml.SafeLoader` in the test script.
### 🎯 Purpose & Impact
- 🛡️ **Increased Security**: `yaml.SafeLoader` is used to load a YAML file in a way that prevents the execution of arbitrary code, which could be an exploit vector if untrusted YAML files were loaded.
- 🚀 **Stability for Users**: This change prioritizes user safety without affecting the functionality of the test script, ensuring that users can run tests on their datasets securely. | https://api.github.com/repos/ultralytics/yolov5/pulls/1969 | 2021-01-18T07:27:35Z | 2021-01-18T18:46:46Z | 2021-01-18T18:46:46Z | 2024-01-19T19:46:18Z | 191 | ultralytics/yolov5 | 25,546 |
Update openai.py | diff --git a/CHANGELOG.md b/CHANGELOG.md
index c826317ed7c23..57f3151b9b54c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@
### Bug Fixes / Nits
- Fix elasticsearch hybrid scoring (#7852)
- Replace `get_color_mapping` and `print_text` Langchain dependency with internal implementation (#7845)
+- Fix async streaming with azure (#7856)
- Avoid `NotImplementedError()` in sub question generator (#7855)
## [0.8.35] - 2023-09-27
diff --git a/llama_index/llms/openai.py b/llama_index/llms/openai.py
index 35e088fbf264b..1545866378458 100644
--- a/llama_index/llms/openai.py
+++ b/llama_index/llms/openai.py
@@ -457,6 +457,9 @@ async def gen() -> ChatResponseAsyncGen:
stream=True,
**all_kwargs,
):
+ if len(response["choices"]) == 0 and response.get("prompt_annotations"):
+ # open ai sends empty response first while streaming ignore it
+ continue
if len(response["choices"]) > 0:
delta = response["choices"][0]["delta"]
else:
@@ -474,7 +477,10 @@ async def gen() -> ChatResponseAsyncGen:
if function_call.get("function_name", "") is None:
del function_call["function_name"]
else:
- function_call["arguments"] += function_call_delta["arguments"]
+ function_call["arguments"] = (
+ function_call.get("arguments", "")
+ + function_call_delta["arguments"]
+ )
additional_kwargs = {}
if function_call is not None:
| Fixed bug in astream_chat
# Description
Azure openai api when used in astream_chat mode with query engine tools returns error :
`query_engine_tools = [
QueryEngineTool(
query_engine=vector_engine,
metadata=ToolMetadata(
name="ask_vector_engine", description="Tool to query vector db to get questions answered from related files , good to fetch detail if specific"
),
),
QueryEngineTool(
query_engine=graph_query_engine,
metadata=ToolMetadata(
name="ask_knowledge_index", description="Tool to query knowledge graph index , use this tool to fetch relations and then use vector engine to detail the answer ,use recursively until statisfied"
),
),
]
agent= OpenAIAgent.from_tools(query_engine_tools, verbose=True,llm=AZURE_LLM)
response = await agent2.astream_chat(
"hi what is the pdf's about ?"
)
response_gen = response.response_gen
async for token in response.async_response_gen():
print(token, end="")
`
The error was
`
WARNING:llama_index.chat_engine.types:Encountered exception writing response to history: 'arguments'
`
Fixes # (issue)
## Type of Change
Please delete options that are not relevant.
- [ ☑] Bug fix (non-breaking change which fixes an issue)
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ☑] I stared at the code and made sure it makes sense
# Suggested Checklist:
- [ ☑] I have performed a self-review of my own code
- [ ☑] I have commented my code, particularly in hard-to-understand areas
| https://api.github.com/repos/run-llama/llama_index/pulls/7856 | 2023-09-27T16:35:56Z | 2023-09-27T17:18:25Z | 2023-09-27T17:18:25Z | 2023-10-01T16:25:44Z | 408 | run-llama/llama_index | 6,885 |
[nightly-test] add 4-nodes shuffle-data-loader test | diff --git a/release/nightly_tests/nightly_tests.yaml b/release/nightly_tests/nightly_tests.yaml
index d2be830a56845..56cafc554f948 100644
--- a/release/nightly_tests/nightly_tests.yaml
+++ b/release/nightly_tests/nightly_tests.yaml
@@ -283,6 +283,29 @@
--data-dir s3://core-nightly-test/shuffle-data/
--no-stats
+# Stress test shuffle_data_loader.
+- name: shuffle_data_loader_4_nodes
+ cluster:
+ app_config: shuffle_data_loader/shuffle_data_loader_app_config.yaml
+ compute_template: shuffle_data_loader/shuffle_data_loader_compute_4_nodes.yaml
+
+ run:
+ timeout: 7200
+ prepare: python wait_cluster.py 4 600
+ script: >
+ python shuffle_data_loader/benchmark.py
+ --num-rows 400_000_000
+ --num-files 50
+ --num-row-groups-per-file 5
+ --num-reducers 32 --num-trainers 16
+ --batch-size 250000
+ --cluster
+ --num-trials 1
+ --num-epochs 10
+ --max-concurrent-epochs 2
+ --data-dir s3://core-nightly-test/shuffle-data/
+ --no-stats
+
- name: dask_on_ray_1tb_sort
cluster:
app_config: dask_on_ray/dask_on_ray_app_config.yaml
diff --git a/release/nightly_tests/shuffle_data_loader/benchmark.py b/release/nightly_tests/shuffle_data_loader/benchmark.py
index e20452649fa93..0627053272f7d 100644
--- a/release/nightly_tests/shuffle_data_loader/benchmark.py
+++ b/release/nightly_tests/shuffle_data_loader/benchmark.py
@@ -125,6 +125,11 @@ def run_trials(num_epochs,
"""
print("Using from-memory shuffler.")
all_stats = []
+ pg = ray.util.placement_group(
+ [{
+ "CPU": 0.1
+ } for _ in range(num_trainers)], strategy="SPREAD")
+ ray.get(pg.ready())
if collect_stats:
stats_collector = TrialStatsCollector.remote(
num_epochs, len(filenames), num_reducers, num_trainers)
@@ -137,7 +142,7 @@ def run_trials(num_epochs,
except AttributeError:
# Python 3.6 doesn't support nullcontext().
object_store_stats_collector = contextlib.suppress()
- batch_consumer = BatchConsumer(num_trainers, num_epochs, None,
+ batch_consumer = BatchConsumer(num_trainers, num_epochs, pg,
max_concurrent_epochs, stats_collector)
# Wait until batch consumer actors have been created.
batch_consumer.actors_ready()
diff --git a/release/nightly_tests/shuffle_data_loader/shuffle_data_loader_compute_4_nodes.yaml b/release/nightly_tests/shuffle_data_loader/shuffle_data_loader_compute_4_nodes.yaml
new file mode 100644
index 0000000000000..4c99d3f1d7056
--- /dev/null
+++ b/release/nightly_tests/shuffle_data_loader/shuffle_data_loader_compute_4_nodes.yaml
@@ -0,0 +1,25 @@
+cloud_id: cld_17WvYIBBkdgLwEUNcLeRAE
+region: us-west-2
+
+max_workers: 4
+
+aws:
+ IamInstanceProfile: {"Name": "ray-autoscaler-v1"}
+ BlockDeviceMappings:
+ - DeviceName: /dev/sda1
+ Ebs:
+ VolumeSize: 500
+
+head_node_type:
+ name: head_node
+ instance_type: i3.4xlarge
+ resources: {"object_store_memory": 53687091200}
+
+worker_node_types:
+ - name: worker_node
+ instance_type: m5.4xlarge
+ min_workers: 4
+ max_workers: 4
+ use_spot: false
+ resources:
+ cpu: 16
| add shuffle_data_loader_4_nodes that run shuffle across 4 nodes with 400million rows.
Test plan:
-[x] schedule a run https://beta.anyscale.com/o/anyscale-internal/projects/prj_SVFGM5yBqK6DHCfLtRMryXHM/clusters/ses_Fie17wKrUbaLTRFKuDtQa6ee | https://api.github.com/repos/ray-project/ray/pulls/17155 | 2021-07-16T19:19:32Z | 2021-07-20T00:46:23Z | 2021-07-20T00:46:23Z | 2021-07-20T00:46:23Z | 933 | ray-project/ray | 19,002 |
MNT Replace pytest.warns(None) in test_ridge.py | diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py
index fc20e0b576b46..02e42fc4b6254 100644
--- a/sklearn/linear_model/tests/test_ridge.py
+++ b/sklearn/linear_model/tests/test_ridge.py
@@ -4,6 +4,7 @@
from itertools import product
import pytest
+import warnings
from sklearn.utils import _IS_32BIT
from sklearn.utils._testing import assert_almost_equal
@@ -1384,9 +1385,9 @@ def test_ridge_fit_intercept_sparse(solver):
dense_ridge = Ridge(solver="sparse_cg", tol=1e-12)
sparse_ridge = Ridge(solver=solver, tol=1e-12, positive=positive)
dense_ridge.fit(X, y)
- with pytest.warns(None) as record:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", UserWarning)
sparse_ridge.fit(X_csr, y)
- assert not [w.message for w in record]
assert np.allclose(dense_ridge.intercept_, sparse_ridge.intercept_)
assert np.allclose(dense_ridge.coef_, sparse_ridge.coef_)
@@ -1413,9 +1414,9 @@ def test_ridge_fit_intercept_sparse_sag():
dense_ridge = Ridge(**params)
sparse_ridge = Ridge(**params)
dense_ridge.fit(X, y)
- with pytest.warns(None) as record:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", UserWarning)
sparse_ridge.fit(X_csr, y)
- assert not [w.message for w in record]
assert np.allclose(dense_ridge.intercept_, sparse_ridge.intercept_, rtol=1e-4)
assert np.allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=1e-4)
with pytest.warns(UserWarning, match='"sag" solver requires.*'):
| <!--
Thanks for contributing a pull request! Please ensure you have taken a look at
the contribution guidelines: https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md
-->
#### Reference Issues/PRs
<!--
Example: Fixes #1234. See also #3456.
Please use keywords (e.g., Fixes) to create link to the issues or pull requests
you resolved, so that they will automatically be closed when your pull request
is merged. See https://github.com/blog/1506-closing-issues-via-pull-requests
-->
Partially addresses #22572.
#### What does this implement/fix? Explain your changes.
Replaces `pytest.warns(None)` in `test_ridge.py` with `warnings.catch_warnings()` filters that error on `UserWarning` - based on review of commit history, the tests are intended to catch a previous `UserWarning` for solvers that could not fit intercepts for sparse inputs.
<!--
Please be aware that we are a loose team of volunteers so patience is
necessary; assistance handling other issues is very welcome. We value
all user contributions, no matter how minor they are. If we are slow to
review, either the pull request needs some benchmarking, tinkering,
convincing, etc. or more likely the reviewers are simply busy. In either
case, we ask for your understanding during the review process.
For more information, see our FAQ on this topic:
http://scikit-learn.org/dev/faq.html#why-is-my-pull-request-not-getting-any-attention.
Thanks for contributing!
-->
| https://api.github.com/repos/scikit-learn/scikit-learn/pulls/22917 | 2022-03-22T04:11:23Z | 2022-03-22T12:47:38Z | 2022-03-22T12:47:38Z | 2022-03-22T12:47:38Z | 454 | scikit-learn/scikit-learn | 46,080 |
Add a job to test doc building (for realsies this time) | diff --git a/.github/workflows/build_doc_test.yml b/.github/workflows/build_doc_test.yml
new file mode 100644
index 0000000000000..348cc484a93d0
--- /dev/null
+++ b/.github/workflows/build_doc_test.yml
@@ -0,0 +1,49 @@
+name: Documentation test build
+
+on:
+ pull_request:
+ paths:
+ - "src/**"
+ - "docs/**"
+ - ".github/**"
+
+jobs:
+ build_and_package:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash -l {0}
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Loading cache.
+ uses: actions/cache@v2
+ id: cache
+ with:
+ path: ~/.cache/pip
+ key: v1-test_build_doc
+ restore-keys: |
+ v1-test_build_doc-${{ hashFiles('setup.py') }}
+ v1-test_build_doc
+
+ - name: Setup environment
+ run: |
+ sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
+
+ pip install git+https://github.com/huggingface/doc-builder
+ pip install git+https://github.com/huggingface/transformers#egg=transformers[dev]
+
+ export TORCH_VERSION=$(python -c "from torch import version; print(version.__version__.split('+')[0])")
+ pip install torch-scatter -f https://data.pyg.org/whl/torch-${TORCH_VERSION}+cpu.html
+
+ pip install torchvision
+ python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
+
+ sudo apt install tesseract-ocr
+ pip install pytesseract
+ pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com
+
+ - name: Make documentation
+ run: |
+ doc-builder build transformers ./transformers/docs/source
| # What does this PR do?
This PR takes over from #14645 to do it properly this time, meaning:
- we don't rely on secrets anymore which doesn't work for pull-request
- we don't need the secrets as we are not pushing anywhere
- we actually checkout the content after the PR merge, and not just master 🤦
| https://api.github.com/repos/huggingface/transformers/pulls/14662 | 2021-12-07T18:12:45Z | 2021-12-09T12:01:03Z | 2021-12-09T12:01:03Z | 2021-12-09T16:58:31Z | 475 | huggingface/transformers | 12,070 |
rename PaddleOCR to PPOCR | diff --git a/deploy/cpp_infer/include/paddleocr.h b/deploy/cpp_infer/include/paddleocr.h
index 499fbee317..6db9d86cb1 100644
--- a/deploy/cpp_infer/include/paddleocr.h
+++ b/deploy/cpp_infer/include/paddleocr.h
@@ -39,10 +39,10 @@ using namespace paddle_infer;
namespace PaddleOCR {
-class PaddleOCR {
+class PPOCR {
public:
- explicit PaddleOCR();
- ~PaddleOCR();
+ explicit PPOCR();
+ ~PPOCR();
std::vector<std::vector<OCRPredictResult>>
ocr(std::vector<cv::String> cv_all_img_names, bool det = true,
bool rec = true, bool cls = true);
diff --git a/deploy/cpp_infer/include/utility.h b/deploy/cpp_infer/include/utility.h
index 33e995fd02..eb18c06244 100644
--- a/deploy/cpp_infer/include/utility.h
+++ b/deploy/cpp_infer/include/utility.h
@@ -65,6 +65,8 @@ class Utility {
static bool PathExists(const std::string &path);
+ static void CreateDir(const std::string &path);
+
static void print_result(const std::vector<OCRPredictResult> &ocr_result);
};
diff --git a/deploy/cpp_infer/src/main.cpp b/deploy/cpp_infer/src/main.cpp
index 66ac795f59..b6085257e7 100644
--- a/deploy/cpp_infer/src/main.cpp
+++ b/deploy/cpp_infer/src/main.cpp
@@ -69,7 +69,7 @@ int main(int argc, char **argv) {
cv::glob(FLAGS_image_dir, cv_all_img_names);
std::cout << "total images num: " << cv_all_img_names.size() << endl;
- PaddleOCR::PaddleOCR ocr = PaddleOCR::PaddleOCR();
+ PPOCR ocr = PPOCR();
std::vector<std::vector<OCRPredictResult>> ocr_results =
ocr.ocr(cv_all_img_names, FLAGS_det, FLAGS_rec, FLAGS_cls);
diff --git a/deploy/cpp_infer/src/paddleocr.cpp b/deploy/cpp_infer/src/paddleocr.cpp
index 861461a01b..e7b3777e78 100644
--- a/deploy/cpp_infer/src/paddleocr.cpp
+++ b/deploy/cpp_infer/src/paddleocr.cpp
@@ -17,11 +17,9 @@
#include "auto_log/autolog.h"
#include <numeric>
-#include <sys/stat.h>
-
namespace PaddleOCR {
-PaddleOCR::PaddleOCR() {
+PPOCR::PPOCR() {
if (FLAGS_det) {
this->detector_ = new DBDetector(
FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id, FLAGS_gpu_mem,
@@ -45,8 +43,8 @@ PaddleOCR::PaddleOCR() {
}
};
-void PaddleOCR::det(cv::Mat img, std::vector<OCRPredictResult> &ocr_results,
- std::vector<double> ×) {
+void PPOCR::det(cv::Mat img, std::vector<OCRPredictResult> &ocr_results,
+ std::vector<double> ×) {
std::vector<std::vector<std::vector<int>>> boxes;
std::vector<double> det_times;
@@ -63,9 +61,9 @@ void PaddleOCR::det(cv::Mat img, std::vector<OCRPredictResult> &ocr_results,
times[2] += det_times[2];
}
-void PaddleOCR::rec(std::vector<cv::Mat> img_list,
- std::vector<OCRPredictResult> &ocr_results,
- std::vector<double> ×) {
+void PPOCR::rec(std::vector<cv::Mat> img_list,
+ std::vector<OCRPredictResult> &ocr_results,
+ std::vector<double> ×) {
std::vector<std::string> rec_texts(img_list.size(), "");
std::vector<float> rec_text_scores(img_list.size(), 0);
std::vector<double> rec_times;
@@ -80,9 +78,9 @@ void PaddleOCR::rec(std::vector<cv::Mat> img_list,
times[2] += rec_times[2];
}
-void PaddleOCR::cls(std::vector<cv::Mat> img_list,
- std::vector<OCRPredictResult> &ocr_results,
- std::vector<double> ×) {
+void PPOCR::cls(std::vector<cv::Mat> img_list,
+ std::vector<OCRPredictResult> &ocr_results,
+ std::vector<double> ×) {
std::vector<int> cls_labels(img_list.size(), 0);
std::vector<float> cls_scores(img_list.size(), 0);
std::vector<double> cls_times;
@@ -98,8 +96,8 @@ void PaddleOCR::cls(std::vector<cv::Mat> img_list,
}
std::vector<std::vector<OCRPredictResult>>
-PaddleOCR::ocr(std::vector<cv::String> cv_all_img_names, bool det, bool rec,
- bool cls) {
+PPOCR::ocr(std::vector<cv::String> cv_all_img_names, bool det, bool rec,
+ bool cls) {
std::vector<double> time_info_det = {0, 0, 0};
std::vector<double> time_info_rec = {0, 0, 0};
std::vector<double> time_info_cls = {0, 0, 0};
@@ -139,7 +137,7 @@ PaddleOCR::ocr(std::vector<cv::String> cv_all_img_names, bool det, bool rec,
}
} else {
if (!Utility::PathExists(FLAGS_output) && FLAGS_det) {
- mkdir(FLAGS_output.c_str(), 0777);
+ Utility::CreateDir(FLAGS_output);
}
for (int i = 0; i < cv_all_img_names.size(); ++i) {
@@ -188,9 +186,8 @@ PaddleOCR::ocr(std::vector<cv::String> cv_all_img_names, bool det, bool rec,
return ocr_results;
} // namespace PaddleOCR
-void PaddleOCR::log(std::vector<double> &det_times,
- std::vector<double> &rec_times,
- std::vector<double> &cls_times, int img_num) {
+void PPOCR::log(std::vector<double> &det_times, std::vector<double> &rec_times,
+ std::vector<double> &cls_times, int img_num) {
if (det_times[0] + det_times[1] + det_times[2] > 0) {
AutoLogger autolog_det("ocr_det", FLAGS_use_gpu, FLAGS_use_tensorrt,
FLAGS_enable_mkldnn, FLAGS_cpu_threads, 1, "dynamic",
@@ -212,7 +209,7 @@ void PaddleOCR::log(std::vector<double> &det_times,
autolog_cls.report();
}
}
-PaddleOCR::~PaddleOCR() {
+PPOCR::~PPOCR() {
if (this->detector_ != nullptr) {
delete this->detector_;
}
diff --git a/deploy/cpp_infer/src/utility.cpp b/deploy/cpp_infer/src/utility.cpp
index 339e992daa..45b8104626 100644
--- a/deploy/cpp_infer/src/utility.cpp
+++ b/deploy/cpp_infer/src/utility.cpp
@@ -16,10 +16,15 @@
#include <include/utility.h>
#include <iostream>
#include <ostream>
-#include <sys/stat.h>
-#include <sys/types.h>
+
#include <vector>
+#ifdef _WIN32
+#include <direct.h>
+#else
+#include <sys/stat.h>
+#endif
+
namespace PaddleOCR {
std::vector<std::string> Utility::ReadDict(const std::string &path) {
@@ -206,6 +211,14 @@ bool Utility::PathExists(const std::string &path) {
#endif // !_WIN32
}
+void Utility::CreateDir(const std::string &path) {
+#ifdef _WIN32
+ _mkdir(path.c_str());
+#else
+ mkdir(path.c_str(), 0777);
+#endif // !_WIN32
+}
+
void Utility::print_result(const std::vector<OCRPredictResult> &ocr_result) {
for (int i = 0; i < ocr_result.size(); i++) {
std::cout << i << "\t";
| 1. fix mkdir error in win
2. Rename PaddleOCR to PPOCR to avoid name conflict with namespace | https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/6044 | 2022-04-22T13:26:52Z | 2022-04-23T03:06:23Z | 2022-04-23T03:06:23Z | 2022-04-23T03:06:23Z | 1,923 | PaddlePaddle/PaddleOCR | 42,380 |
Update/run ci script | diff --git a/.github/workflows/ci_workflow.yml b/.github/workflows/ci_workflow.yml
index 56fb24fe0..06ffc6e9e 100644
--- a/.github/workflows/ci_workflow.yml
+++ b/.github/workflows/ci_workflow.yml
@@ -14,5 +14,5 @@ jobs:
- name: Give executable permissions to run_ci.sh inside the scripts directory
run: chmod a+x scripts/run_ci.sh
- name: Run the ci script inside the scripts folder
- run: sh scripts/run_ci.sh
+ run: bash scripts/run_ci.sh
shell: bash
\ No newline at end of file
diff --git a/scripts/run_ci.sh b/scripts/run_ci.sh
index 69fddfd0f..88c60ffde 100755
--- a/scripts/run_ci.sh
+++ b/scripts/run_ci.sh
@@ -1,15 +1,15 @@
-#!/bin/bash
-# These are the same steps we are running in Travis CI
-
-
-find . -name "*.md" -not -path "./tests/*" | \
- xargs -I {} \
- python $(dirname "$0")/../tests/syntax_lint.py {} > /dev/null
-mdPassed=$?
-flake8 --max-line-length=100 . && echo "PEP8 Passed"
-pyPassed=$?
-if [ $pyPassed -eq 0 ] && [ $mdPassed -eq 0 ];then
- exit 0
-else
- exit 1
-fi
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+PROJECT_DIR="$(dirname $(readlink -f ${BASH_SOURCE[0]}))/.."
+
+MD_FILES=$(find ${PROJECT_DIR} -name "*.md" -not -path "${PROJECT_DIR}/tests/*")
+
+for file in ${MD_FILES[@]}; do
+ python ${PROJECT_DIR}/tests/syntax_lint.py ${file} > /dev/null
+done
+
+echo "- Syntax lint tests on MD files passed sucessfully"
+
+flake8 --max-line-length=100 . && echo "- PEP8 Passed"
\ No newline at end of file
| I'd like to propose an improvement to the `run_ci` script.
- Change shebang for portability.
- Add shell options in the script. If there's an error it will automatically exit. e.g:
```bash
[2023-02-23 23:03:58] {afuscoar@afuscoar} (~/.Personal/Projects/devops-exercises) (update/run_ci-script)$ -> bash scripts/run_ci.sh
/home/afuscoar/.Personal/Projects/devops-exercises/scripts/../README.md failed
Missing opening detail tag round line 4
[2023-02-23 23:03:59] {afuscoar@afuscoar} (~/.Personal/Projects/devops-exercises) (update/run_ci-script)$ -> echo $?
1
```
- Change the test execution for better readability. | https://api.github.com/repos/bregman-arie/devops-exercises/pulls/352 | 2023-02-23T22:08:23Z | 2023-03-25T11:05:56Z | 2023-03-25T11:05:56Z | 2023-03-25T11:05:57Z | 477 | bregman-arie/devops-exercises | 17,589 |
[MRG+1] Make RedirectMiddleware respect Spider.handle_httpstatus_list | diff --git a/docs/topics/downloader-middleware.rst b/docs/topics/downloader-middleware.rst
index a6a2f7d6241..6d986bbf761 100644
--- a/docs/topics/downloader-middleware.rst
+++ b/docs/topics/downloader-middleware.rst
@@ -715,6 +715,15 @@ settings (see the settings documentation for more info):
If :attr:`Request.meta <scrapy.http.Request.meta>` has ``dont_redirect``
key set to True, the request will be ignored by this middleware.
+If you want to handle some redirect status codes in your spider, you can
+specify these in the ``handle_httpstatus_list`` spider attribute.
+
+For example, if you want the redirect middleware to ignore 301 and 302
+responses (and pass them through to your spider) you can do this::
+
+ class MySpider(CrawlSpider):
+ handle_httpstatus_list = [301, 302]
+
RedirectMiddleware settings
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py
index f439f43ae8c..363e56cb802 100644
--- a/scrapy/downloadermiddlewares/redirect.py
+++ b/scrapy/downloadermiddlewares/redirect.py
@@ -54,7 +54,8 @@ class RedirectMiddleware(BaseRedirectMiddleware):
"""Handle redirection of requests based on response status and meta-refresh html tag"""
def process_response(self, request, response, spider):
- if request.meta.get('dont_redirect', False):
+ if (request.meta.get('dont_redirect', False) or
+ response.status in getattr(spider, 'handle_httpstatus_list', [])):
return response
if request.method == 'HEAD':
diff --git a/tests/test_downloadermiddleware_redirect.py b/tests/test_downloadermiddleware_redirect.py
index 7e88e71af11..9b00caa519b 100644
--- a/tests/test_downloadermiddleware_redirect.py
+++ b/tests/test_downloadermiddleware_redirect.py
@@ -10,9 +10,9 @@
class RedirectMiddlewareTest(unittest.TestCase):
def setUp(self):
- crawler = get_crawler(Spider)
- self.spider = crawler._create_spider('foo')
- self.mw = RedirectMiddleware.from_crawler(crawler)
+ self.crawler = get_crawler(Spider)
+ self.spider = self.crawler._create_spider('foo')
+ self.mw = RedirectMiddleware.from_crawler(self.crawler)
def test_priority_adjust(self):
req = Request('http://a.com')
@@ -129,6 +129,17 @@ def test_redirect_urls(self):
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
+ def test_spider_handling(self):
+ smartspider = self.crawler._create_spider('smarty')
+ smartspider.handle_httpstatus_list = [404, 301, 302]
+ url = 'http://www.example.com/301'
+ url2 = 'http://www.example.com/redirected'
+ req = Request(url)
+ rsp = Response(url, headers={'Location': url2}, status=301)
+ r = self.mw.process_response(req, rsp, smartspider)
+ self.assertIs(r, rsp)
+
+
class MetaRefreshMiddlewareTest(unittest.TestCase):
def setUp(self):
| Inspired by #1334
I have decided not to include the `HTTPERROR_ALLOWED_CODES` setting, as, unlike the `handle_httpstatus_list` Spider attribute, the name of that setting implies that it is directly tied to the HttpErrorMiddleware.
| https://api.github.com/repos/scrapy/scrapy/pulls/1364 | 2015-07-16T10:57:47Z | 2015-08-03T02:00:00Z | 2015-08-03T02:00:00Z | 2015-08-21T14:33:42Z | 773 | scrapy/scrapy | 34,778 |
The expected value changed for the proxies keyword | diff --git a/test_requests.py b/test_requests.py
index 13b9d64ced..dbb38064aa 100755
--- a/test_requests.py
+++ b/test_requests.py
@@ -1258,7 +1258,7 @@ class TestRedirects:
'cert': None,
'timeout': None,
'allow_redirects': False,
- 'proxies': None,
+ 'proxies': {},
}
def test_requests_are_updated_each_time(self):
| It used to be None but a recent PR changed that before my last one was merged
Fixes #1975
| https://api.github.com/repos/psf/requests/pulls/1976 | 2014-03-26T13:13:26Z | 2014-03-26T15:34:36Z | 2014-03-26T15:34:36Z | 2021-09-08T23:06:27Z | 108 | psf/requests | 32,783 |
feat(open-pr-comments): queue task in webhook | diff --git a/src/sentry/integrations/github/webhook.py b/src/sentry/integrations/github/webhook.py
index 66ac0fd4050bd..7753c5d2d5abf 100644
--- a/src/sentry/integrations/github/webhook.py
+++ b/src/sentry/integrations/github/webhook.py
@@ -14,7 +14,7 @@
from django.views.decorators.csrf import csrf_exempt
from rest_framework.request import Request
-from sentry import analytics, options
+from sentry import analytics, features, options
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import Endpoint, all_silo_endpoint
from sentry.constants import EXTENSION_LANGUAGE_MAP, ObjectStatus
@@ -40,6 +40,7 @@
from sentry.services.hybrid_cloud.user.service import user_service
from sentry.shared_integrations.exceptions import ApiError
from sentry.silo import SiloMode
+from sentry.tasks.integrations.github.open_pr_comment import open_pr_comment_workflow
from sentry.utils import json, metrics
from sentry.utils.json import JSONData
@@ -450,6 +451,7 @@ def _handle(
title = pull_request["title"]
body = pull_request["body"]
user = pull_request["user"]
+ action = event["action"]
"""
The value of the merge_commit_sha attribute changes depending on the
@@ -503,7 +505,7 @@ def _handle(
author.preload_users()
try:
- PullRequest.objects.update_or_create(
+ pr, created = PullRequest.objects.update_or_create(
organization_id=organization.id,
repository_id=repo.id,
key=number,
@@ -515,6 +517,22 @@ def _handle(
"merge_commit_sha": merge_commit_sha,
},
)
+
+ if action == "opened" and created:
+ if not features.has("organizations:integrations-open-pr-comment", organization):
+ logger.info(
+ "github.open_pr_comment.flag_missing",
+ extra={"organization_id": organization.id},
+ )
+ return
+
+ metrics.incr("github.open_pr_comment.queue_task")
+ logger.info(
+ "github.open_pr_comment.queue_task",
+ extra={"pr_id": pr.id},
+ )
+ open_pr_comment_workflow.delay(pr_id=pr.id)
+
except IntegrityError:
pass
diff --git a/src/sentry/tasks/integrations/github/open_pr_comment.py b/src/sentry/tasks/integrations/github/open_pr_comment.py
index 500c372b9709d..2ca34c6d4ccc8 100644
--- a/src/sentry/tasks/integrations/github/open_pr_comment.py
+++ b/src/sentry/tasks/integrations/github/open_pr_comment.py
@@ -31,6 +31,7 @@
PullRequestIssue,
create_or_update_comment,
format_comment_url,
+ get_pr_comment,
)
from sentry.templatetags.sentry_helpers import small_count
from sentry.types.referrer_ids import GITHUB_OPEN_PR_BOT_REFERRER
@@ -363,9 +364,11 @@ def open_pr_comment_workflow(pr_id: int) -> None:
issue_list: List[Dict[str, Any]] = list(itertools.chain.from_iterable(top_issues_per_file))
issue_id_list: List[int] = [issue["group_id"] for issue in issue_list]
+ pr_comment = get_pr_comment(pr_id, comment_type=CommentType.OPEN_PR)
+
try:
create_or_update_comment(
- pr_comment=None,
+ pr_comment=pr_comment,
client=client,
repo=repo,
pr_key=pull_request.key,
diff --git a/src/sentry/tasks/integrations/github/pr_comment.py b/src/sentry/tasks/integrations/github/pr_comment.py
index acebcd2e235f6..915c324fede03 100644
--- a/src/sentry/tasks/integrations/github/pr_comment.py
+++ b/src/sentry/tasks/integrations/github/pr_comment.py
@@ -147,6 +147,13 @@ def get_comment_contents(issue_list: List[int]) -> List[PullRequestIssue]:
]
+def get_pr_comment(pr_id: int, comment_type: int) -> PullRequestComment | None:
+ pr_comment_query = PullRequestComment.objects.filter(
+ pull_request__id=pr_id, comment_type=comment_type
+ )
+ return pr_comment_query[0] if pr_comment_query.exists() else None
+
+
def create_or_update_comment(
pr_comment: PullRequestComment | None,
client: GitHubAppsClient,
@@ -213,12 +220,7 @@ def github_comment_workflow(pullrequest_id: int, project_id: int):
logger.info("github.pr_comment.option_missing", extra={"organization_id": org_id})
return
- pr_comment = None
- pr_comment_query = PullRequestComment.objects.filter(
- pull_request__id=pullrequest_id, comment_type=CommentType.MERGED_PR
- )
- if pr_comment_query.exists():
- pr_comment = pr_comment_query[0]
+ pr_comment = get_pr_comment(pr_id=pullrequest_id, comment_type=CommentType.MERGED_PR)
try:
project = Project.objects.get_from_cache(id=project_id)
diff --git a/tests/sentry/integrations/github/test_webhooks.py b/tests/sentry/integrations/github/test_webhooks.py
index ab4299101ae7b..a23651a68704d 100644
--- a/tests/sentry/integrations/github/test_webhooks.py
+++ b/tests/sentry/integrations/github/test_webhooks.py
@@ -26,6 +26,7 @@
from sentry.models.repository import Repository
from sentry.silo import SiloMode
from sentry.testutils.cases import APITestCase
+from sentry.testutils.helpers.features import with_feature
from sentry.testutils.silo import (
all_silo_test,
assume_test_silo_mode,
@@ -524,7 +525,9 @@ def _setup_repo_test(self, project):
assert response.status_code == 204
- def test_opened(self):
+ @with_feature("organizations:integrations-open-pr-comment")
+ @patch("sentry.integrations.github.webhook.metrics")
+ def test_opened(self, mock_metrics):
project = self.project # force creation
group = self.create_group(project=project, short_id=7)
@@ -555,6 +558,24 @@ def test_opened(self):
self.assert_group_link(group, pr)
+ mock_metrics.incr.assert_called_with("github.open_pr_comment.queue_task")
+
+ @patch("sentry.integrations.github.webhook.metrics")
+ def test_opened_missing_feature_flag(self, mock_metrics):
+ project = self.project # force creation
+ self.create_group(project=project, short_id=7)
+
+ Repository.objects.create(
+ organization_id=project.organization.id,
+ external_id="35129377",
+ provider="integrations:github",
+ name="baxterthehacker/public-repo",
+ )
+
+ self._setup_repo_test(project)
+
+ assert mock_metrics.incr.call_count == 0
+
@patch("sentry.integrations.github.webhook.metrics")
def test_creates_missing_repo(self, mock_metrics):
project = self.project # force creation
@@ -715,7 +736,9 @@ def test_edited(self):
self.assert_group_link(group, pr)
- def test_closed(self):
+ @with_feature("organizations:integrations-open-pr-comment")
+ @patch("sentry.integrations.github.webhook.metrics")
+ def test_closed(self, mock_metrics):
project = self.project # force creation
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
@@ -760,6 +783,8 @@ def test_closed(self):
assert pr.author.name == "baxterthehacker"
assert pr.merge_commit_sha == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
+ assert mock_metrics.incr.call_count == 0
+
def assert_group_link(self, group, pr):
link = GroupLink.objects.all().first()
assert link
diff --git a/tests/sentry/tasks/integrations/github/test_open_pr_comment.py b/tests/sentry/tasks/integrations/github/test_open_pr_comment.py
index a3d16bdfe3108..bf6cc9b49e7c9 100644
--- a/tests/sentry/tasks/integrations/github/test_open_pr_comment.py
+++ b/tests/sentry/tasks/integrations/github/test_open_pr_comment.py
@@ -2,6 +2,7 @@
import pytest
import responses
+from django.utils import timezone
from sentry.models.group import Group
from sentry.models.options.organization_option import OrganizationOption
@@ -459,7 +460,7 @@ def test_comment_format(self):
)
-@region_silo_test(stable=True)
+@region_silo_test
class TestOpenPRCommentWorkflow(GithubCommentTestCase):
def setUp(self):
super().setUp()
@@ -520,6 +521,55 @@ def test_comment_workflow(
assert pull_request_comment_query[0].comment_type == CommentType.OPEN_PR
mock_metrics.incr.assert_called_with("github_open_pr_comment.comment_created")
+ @patch("sentry.tasks.integrations.github.open_pr_comment.get_pr_filenames")
+ @patch(
+ "sentry.tasks.integrations.github.open_pr_comment.get_projects_and_filenames_from_source_file"
+ )
+ @patch("sentry.tasks.integrations.github.open_pr_comment.get_top_5_issues_by_count_for_file")
+ @patch("sentry.tasks.integrations.github.open_pr_comment.safe_for_comment", return_value=True)
+ @patch("sentry.tasks.integrations.github.pr_comment.metrics")
+ @responses.activate
+ def test_comment_workflow_comment_exists(
+ self,
+ mock_metrics,
+ mock_safe_for_comment,
+ mock_issues,
+ mock_reverse_codemappings,
+ mock_pr_filenames,
+ ):
+ # two filenames, the second one has a toggle table
+ mock_pr_filenames.return_value = ["foo.py", "bar.py"]
+ mock_reverse_codemappings.return_value = ([self.project], ["foo.py"])
+
+ mock_issues.return_value = self.groups
+
+ now = timezone.now()
+ PullRequestComment.objects.create(
+ external_id=1,
+ pull_request=self.pr,
+ created_at=now,
+ updated_at=now,
+ group_ids=[0, 1],
+ comment_type=CommentType.OPEN_PR,
+ )
+
+ responses.add(
+ responses.PATCH,
+ self.base_url + "/repos/getsentry/sentry/issues/comments/1",
+ json={"id": 1},
+ headers={"X-Ratelimit-Limit": "60", "X-Ratelimit-Remaining": "59"},
+ )
+
+ open_pr_comment_workflow(self.pr.id)
+
+ pull_request_comment_query = PullRequestComment.objects.all()
+ pr_comment = pull_request_comment_query[0]
+ assert len(pull_request_comment_query) == 1
+ assert pr_comment.external_id == 1
+ assert pr_comment.comment_type == CommentType.OPEN_PR
+ assert pr_comment.created_at != pr_comment.updated_at
+ mock_metrics.incr.assert_called_with("github_open_pr_comment.comment_updated")
+
@patch("sentry.tasks.integrations.github.open_pr_comment.get_pr_filenames")
@patch(
"sentry.tasks.integrations.github.open_pr_comment.get_projects_and_filenames_from_source_file"
| Queue the open PR comment workflow task in the Github webhook if a PR has been opened and a `PullRequest` object was newly created.
This PR also includes a small fix to check for an existing comment in the open PR comment workflow.
For ER-1812 | https://api.github.com/repos/getsentry/sentry/pulls/60656 | 2023-11-27T22:43:22Z | 2023-11-28T21:39:19Z | 2023-11-28T21:39:19Z | 2023-12-14T00:02:19Z | 2,572 | getsentry/sentry | 43,789 |
Some changes in teams | diff --git a/website/public/locales/en/common.json b/website/public/locales/en/common.json
index fe8157842f..38068ceae1 100644
--- a/website/public/locales/en/common.json
+++ b/website/public/locales/en/common.json
@@ -47,6 +47,7 @@
"submit": "Submit",
"submit_your_answer": "Submit your answer",
"success": "Success",
+ "team_message": "Open Assistant is only possible through the efforts of these amazing people",
"terms_of_service": "Terms of Service",
"title": "Open Assistant",
"trollboard": "Trollboard",
diff --git a/website/public/locales/ru/common.json b/website/public/locales/ru/common.json
index ce97168b11..321984cdbd 100644
--- a/website/public/locales/ru/common.json
+++ b/website/public/locales/ru/common.json
@@ -47,11 +47,13 @@
"submit": "Отправить",
"submit_your_answer": "Отправить ваш ответ",
"success": "Успешно",
+ "team_message": "Open Assistant возможен благодаря усилиям этих потрясающих людей",
"terms_of_service": "Пользовательское соглашение",
"title": "Open Assistant",
"trollboard": "Доска позора",
"user_leaderboard": "Таблица лидеров",
"users": "Пользователи",
"users_dashboard": "Панель управления пользователями",
- "yes": "Да"
+ "yes": "Да",
+ "who_are_we": "Кто мы?"
}
diff --git a/website/public/locales/ru/labelling.json b/website/public/locales/ru/labelling.json
index 2ac03379dc..9a5d9057fb 100644
--- a/website/public/locales/ru/labelling.json
+++ b/website/public/locales/ru/labelling.json
@@ -75,6 +75,6 @@
"threatening": "Угрожающее",
"threatening.one_desc": "Контент, содержащий в себе угрозу к человеку/группе людей",
"unhelpful": "Бесполезное",
- "violent": "Жестокое",
+ "violent": "Опасное",
"violent.one_desc": "Поощряет или не способствует осуждению следующих категорий: насилие, жестокое обращение, терроризм, самоповреждение"
}
diff --git a/website/src/data/team.json b/website/src/data/team.json
index f6e7405457..8fb3bf4b7b 100644
--- a/website/src/data/team.json
+++ b/website/src/data/team.json
@@ -99,6 +99,12 @@
"title": "Data Scientist",
"githubURL": "https://github.com/Jmete",
"imageURL": "https://avatars.githubusercontent.com/u/22094024?v=4"
+ },
+ "almostEvil___": {
+ "name": "almostEvil___",
+ "title": "Community Moderator & Web Moderator & Translator & Others",
+ "githubURL": "https://github.com/0x22almostEvil",
+ "imageURL": "https://avatars.githubusercontent.com/u/122345469?v=4"
}
},
"groups": [
@@ -121,6 +127,10 @@
{
"name": "Safety Team",
"members": ["SummerSigh", "shahules786"]
+ },
+ {
+ "name": "Moderation Team",
+ "members": ["almostEvil___"]
}
]
}
diff --git a/website/src/pages/team.tsx b/website/src/pages/team.tsx
index e461e11a72..18a9ec7bc5 100644
--- a/website/src/pages/team.tsx
+++ b/website/src/pages/team.tsx
@@ -2,6 +2,7 @@ export { getDefaultStaticProps as getStaticProps } from "src/lib/default_static_
import { Avatar, Badge, Box, Card, CardBody, Flex, Grid, Heading, Text } from "@chakra-ui/react";
import { Github } from "lucide-react";
import Head from "next/head";
+import { useTranslation } from "next-i18next";
import Link from "next/link";
import React from "react";
import { getTransparentHeaderLayout } from "src/components/Layout";
@@ -10,10 +11,11 @@ import data from "../data/team.json";
const Team = () => {
const { groups, people } = data;
+ const { t } = useTranslation();
return (
<>
<Head>
- <title>Who are we - Open Assistant</title>
+ <title>{t("who_are_we")} - Open Assistant</title>
<meta name="description" content="The team begind Open Assistant" />
</Head>
<Box fontFamily="Inter" p="6" className="oa-basic-theme">
@@ -21,9 +23,9 @@ const Team = () => {
<Card>
<CardBody display="flex" flexDirection={"column"} gap={6}>
<Heading as="h1" size="xl">
- Who are we?
+ {t("who_are_we")}
</Heading>
- <Text>Open Assistant is only possible through the efforts of these amazing people</Text>
+ <Text>{t("team_message")}</Text>
{groups.map((group) => (
<React.Fragment key={group.name}>
<Text as="h2" fontWeight="bold" size="lg">
| Added Moderation Team,
Added myself,
Extracted locales + Translated to RU,
Rethought the name of one of the labels and thought I found a more befitting word to translate it | https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2120 | 2023-03-19T11:48:16Z | 2023-03-19T13:05:20Z | 2023-03-19T13:05:20Z | 2023-03-19T14:14:41Z | 1,272 | LAION-AI/Open-Assistant | 37,606 |
[9gag] Adjusted for current website | diff --git a/youtube_dl/extractor/ninegag.py b/youtube_dl/extractor/ninegag.py
index dc6a27d3643..3753bc0a27b 100644
--- a/youtube_dl/extractor/ninegag.py
+++ b/youtube_dl/extractor/ninegag.py
@@ -3,102 +3,146 @@
import re
from .common import InfoExtractor
-from ..utils import str_to_int
+from ..utils import (
+ determine_ext,
+ url_or_none,
+ int_or_none,
+ float_or_none,
+ ExtractorError
+)
class NineGagIE(InfoExtractor):
IE_NAME = '9gag'
- _VALID_URL = r'https?://(?:www\.)?9gag(?:\.com/tv|\.tv)/(?:p|embed)/(?P<id>[a-zA-Z0-9]+)(?:/(?P<display_id>[^?#/]+))?'
+ _VALID_URL = r'https?://(?:www\.)?9gag\.com/gag/(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
- 'url': 'http://9gag.com/tv/p/Kk2X5/people-are-awesome-2013-is-absolutely-awesome',
+ 'url': 'https://9gag.com/gag/an5Qz5b',
'info_dict': {
- 'id': 'kXzwOKyGlSA',
- 'ext': 'mp4',
- 'description': 'This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)',
- 'title': '\"People Are Awesome 2013\" Is Absolutely Awesome',
- 'uploader_id': 'UCdEH6EjDKwtTe-sO2f0_1XA',
- 'uploader': 'CompilationChannel',
- 'upload_date': '20131110',
- 'view_count': int,
- },
- 'add_ie': ['Youtube'],
+ 'id': 'an5Qz5b',
+ 'ext': 'webm',
+ 'title': 'Dogs playing tetherball',
+ 'upload_date': '20191108',
+ 'timestamp': 1573243994,
+ 'age_limit': 0,
+ 'categories': [
+ 'Wholesome'
+ ],
+ 'tags': [
+ 'Dog'
+ ]
+ }
}, {
- 'url': 'http://9gag.com/tv/p/aKolP3',
+ 'url': 'https://9gag.com/gag/ae5Ag7B',
'info_dict': {
- 'id': 'aKolP3',
- 'ext': 'mp4',
- 'title': 'This Guy Travelled 11 countries In 44 days Just To Make This Amazing Video',
- 'description': "I just saw more in 1 minute than I've seen in 1 year. This guy's video is epic!!",
- 'uploader_id': 'rickmereki',
- 'uploader': 'Rick Mereki',
- 'upload_date': '20110803',
- 'view_count': int,
- },
- 'add_ie': ['Vimeo'],
- }, {
- 'url': 'http://9gag.com/tv/p/KklwM',
- 'only_matching': True,
- }, {
- 'url': 'http://9gag.tv/p/Kk2X5',
- 'only_matching': True,
- }, {
- 'url': 'http://9gag.com/tv/embed/a5Dmvl',
- 'only_matching': True,
+ 'id': 'ae5Ag7B',
+ 'ext': 'webm',
+ 'title': 'Capybara Agility Training',
+ 'upload_date': '20191108',
+ 'timestamp': 1573237208,
+ 'age_limit': 0,
+ 'categories': [
+ 'Awesome'
+ ],
+ 'tags': [
+ 'Weimaraner',
+ 'American Pit Bull Terrier'
+ ]
+ }
}]
- _EXTERNAL_VIDEO_PROVIDER = {
- '1': {
- 'url': '%s',
- 'ie_key': 'Youtube',
- },
- '2': {
- 'url': 'http://player.vimeo.com/video/%s',
- 'ie_key': 'Vimeo',
- },
- '3': {
- 'url': 'http://instagram.com/p/%s',
- 'ie_key': 'Instagram',
- },
- '4': {
- 'url': 'http://vine.co/v/%s',
- 'ie_key': 'Vine',
- },
+ _EXTERNAL_VIDEO_PROVIDERS = {
+ 'Youtube': 'https://youtube.com/watch?v=%s'
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- display_id = mobj.group('display_id') or video_id
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ rawJsonData = self._search_regex(
+ r'window._config\s*=\s*JSON.parse\(["\']({.+?})["\']\);',
+ webpage,
+ 'data')
+ rawJsonData = rawJsonData.replace('\\"', '"').replace('\\\\/', '/')
+ data = self._parse_json(rawJsonData, video_id)['data']['post']
+
+ if data['type'] == 'Video':
+ vid = data['video']['id']
+ ie_key = data['video']['source'].capitalize()
+ return {
+ '_type': 'url_transparent',
+ 'url': self._EXTERNAL_VIDEO_PROVIDERS[ie_key] % vid,
+ 'ie_key': ie_key,
+ 'id': vid,
+ 'duration': data['video'].get('duration'),
+ 'start_time': data['video'].get('startTs')
+ }
- webpage = self._download_webpage(url, display_id)
+ if data['type'] == 'EmbedVideo':
+ vid = data['video']['id']
+ ie_key = data['video']['source'].capitalize()
+ return {
+ '_type': 'url_transparent',
+ 'url': data['video']['embedUrl'],
+ #'ie_key': vid,
+ 'start_time': data['video'].get('startTs')
+ }
- post_view = self._parse_json(
- self._search_regex(
- r'var\s+postView\s*=\s*new\s+app\.PostView\({\s*post:\s*({.+?})\s*,\s*posts:\s*prefetchedCurrentPost',
- webpage, 'post view'),
- display_id)
+ if data['type'] != 'Animated':
+ raise ExtractorError(
+ 'The given url does not contain a video',
+ expected=True)
- ie_key = None
- source_url = post_view.get('sourceUrl')
- if not source_url:
- external_video_id = post_view['videoExternalId']
- external_video_provider = post_view['videoExternalProvider']
- source_url = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['url'] % external_video_id
- ie_key = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['ie_key']
- title = post_view['title']
- description = post_view.get('description')
- view_count = str_to_int(post_view.get('externalView'))
- thumbnail = post_view.get('thumbnail_700w') or post_view.get('ogImageUrl') or post_view.get('thumbnail_300w')
+ duration = None
+ formats = []
+ thumbnails = []
+ for key in data['images']:
+ image = data['images'][key]
+ if 'duration' in image and duration is None:
+ duration = int_or_none(image['duration'])
+ url = url_or_none(image.get('url'))
+ if url == None:
+ continue
+ ext = determine_ext(url)
+ if ext == 'jpg' or ext == 'png':
+ thumbnail = {
+ 'url': url,
+ 'width': float_or_none(image.get('width')),
+ 'height': float_or_none(image.get('height'))
+ }
+ thumbnails.append(thumbnail)
+ elif ext == 'webm' or ext == 'mp4':
+ formats.append({
+ 'format_id': re.sub(r'.*_([^\.]+).(.*)', r'\1_\2', url),
+ 'ext': ext,
+ 'url': url,
+ 'width': float_or_none(image.get('width')),
+ 'height': float_or_none(image.get('height'))
+ })
+ section = None
+ postSection = data.get('postSection')
+ if postSection != None and 'name' in postSection:
+ section = re.sub(r'\\[^\\]{5}', '', postSection['name'])
+ age_limit = int_or_none(data.get('nsfw'))
+ if age_limit != None:
+ age_limit = age_limit * 18
+ tags = None
+ if 'tags' in data:
+ tags = []
+ for tag in data.get('tags') or []:
+ tags.append(tag.get('key'))
return {
- '_type': 'url_transparent',
- 'url': source_url,
- 'ie_key': ie_key,
'id': video_id,
- 'display_id': display_id,
- 'title': title,
- 'description': description,
- 'view_count': view_count,
- 'thumbnail': thumbnail,
+ 'title': data['title'],
+ 'timestamp': int_or_none(data.get('creationTs')),
+ 'duration': duration,
+ 'formats': formats,
+ 'thumbnails': thumbnails,
+ 'like_count': int_or_none(data.get('upVoteCount')),
+ 'dislike_count': int_or_none(data.get('downVoteCount')),
+ 'comment_count': int_or_none(data.get('commentsCount')),
+ 'age_limit': age_limit,
+ 'categories': [section],
+ 'tags': tags,
+ 'is_live': False
}
| ## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections
- [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Bug fix
- [ ] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Explanation of your *pull request* in arbitrary form goes here. Please make sure the description explains the purpose and effect of your *pull request* and is worded well enough to be understood. Provide as much context and examples as possible.
The website has changed in the past 4 years and the original extractor stopped working. I rewrote it to be able to handle this. | https://api.github.com/repos/ytdl-org/youtube-dl/pulls/23022 | 2019-11-09T02:30:56Z | 2021-01-19T09:21:37Z | 2021-01-19T09:21:37Z | 2021-01-27T02:38:04Z | 2,337 | ytdl-org/youtube-dl | 50,550 |
Updated ReadMe link to requirements | diff --git a/README.md b/README.md
index 5593ee4..dd054b6 100644
--- a/README.md
+++ b/README.md
@@ -35,7 +35,7 @@
## Requirements
-Please follow the requirements of [https://github.com/NVlabs/stylegan3](https://github.com/NVlabs/stylegan3).
+Please follow the requirements of [NVlabs/stylegan3](https://github.com/NVlabs/stylegan3#requirements).
## Download pre-trained StyleGAN2 weights
| Made requirements link go directly to instructions section of README. | https://api.github.com/repos/XingangPan/DragGAN/pulls/73 | 2023-06-26T19:49:03Z | 2023-06-27T02:16:35Z | 2023-06-27T02:16:35Z | 2023-06-27T02:16:35Z | 119 | XingangPan/DragGAN | 26,830 |
#534: Improve open rule | diff --git a/README.md b/README.md
index 395253f4a..29d73ff13 100644
--- a/README.md
+++ b/README.md
@@ -202,7 +202,7 @@ using the matched rule and runs it. Rules enabled by default are as follows:
* `npm_wrong_command` – fixes wrong npm commands like `npm urgrade`;
* `no_command` – fixes wrong console commands, for example `vom/vim`;
* `no_such_file` – creates missing directories with `mv` and `cp` commands;
-* `open` – prepends `http` to address passed to `open`;
+* `open` – either prepends `http://` to address passed to `open` or create a new file or directory and passes it to `open`;
* `pip_unknown_command` – fixes wrong `pip` commands, for example `pip instatl/pip install`;
* `python_command` – prepends `python` when you trying to run not executable/without `./` python script;
* `python_execute` – appends missing `.py` when executing Python files;
diff --git a/tests/rules/test_open.py b/tests/rules/test_open.py
index c80eeca8b..9186adf45 100644
--- a/tests/rules/test_open.py
+++ b/tests/rules/test_open.py
@@ -1,31 +1,49 @@
import pytest
-from thefuck.rules.open import match, get_new_command
+from thefuck.rules.open import is_arg_url, match, get_new_command
from tests.utils import Command
-@pytest.mark.parametrize('command', [
- Command(script='open foo.com'),
- Command(script='open foo.ly'),
- Command(script='open foo.org'),
- Command(script='open foo.net'),
- Command(script='open foo.se'),
- Command(script='open foo.io'),
- Command(script='xdg-open foo.com'),
- Command(script='gnome-open foo.com'),
- Command(script='kde-open foo.com')])
-def test_match(command):
- assert match(command)
-
-
-@pytest.mark.parametrize('command, new_command', [
- (Command('open foo.com'), 'open http://foo.com'),
- (Command('open foo.ly'), 'open http://foo.ly'),
- (Command('open foo.org'), 'open http://foo.org'),
- (Command('open foo.net'), 'open http://foo.net'),
- (Command('open foo.se'), 'open http://foo.se'),
- (Command('open foo.io'), 'open http://foo.io'),
- (Command('xdg-open foo.io'), 'xdg-open http://foo.io'),
- (Command('gnome-open foo.io'), 'gnome-open http://foo.io'),
- (Command('kde-open foo.io'), 'kde-open http://foo.io')])
-def test_get_new_command(command, new_command):
- assert get_new_command(command) == new_command
+@pytest.fixture
+def stderr(script):
+ return 'The file {} does not exist.\n'.format(script.split(' ', 1)[1])
+
+
+@pytest.mark.parametrize('script', [
+ 'open foo.com',
+ 'open foo.edu',
+ 'open foo.info',
+ 'open foo.io',
+ 'open foo.ly',
+ 'open foo.me',
+ 'open foo.net',
+ 'open foo.org',
+ 'open foo.se',
+ 'open www.foo.ru'])
+def test_is_arg_url(script):
+ assert is_arg_url(Command(script))
+
+
+@pytest.mark.parametrize('script', ['open foo', 'open bar.txt', 'open egg.doc'])
+def test_not_is_arg_url(script):
+ assert not is_arg_url(Command(script))
+
+
+@pytest.mark.parametrize('script', [
+ 'open foo.com',
+ 'xdg-open foo.com',
+ 'gnome-open foo.com',
+ 'kde-open foo.com',
+ 'open nonest'])
+def test_match(script, stderr):
+ assert match(Command(script, stderr=stderr))
+
+
+@pytest.mark.parametrize('script, new_command', [
+ ('open foo.io', ['open http://foo.io']),
+ ('xdg-open foo.io', ['xdg-open http://foo.io']),
+ ('gnome-open foo.io', ['gnome-open http://foo.io']),
+ ('kde-open foo.io', ['kde-open http://foo.io']),
+ ('open nonest', ['touch nonest && open nonest',
+ 'mkdir nonest && open nonest'])])
+def test_get_new_command(script, new_command, stderr):
+ assert get_new_command(Command(script, stderr=stderr)) == new_command
diff --git a/thefuck/rules/open.py b/thefuck/rules/open.py
index b46f497d7..86f200879 100644
--- a/thefuck/rules/open.py
+++ b/thefuck/rules/open.py
@@ -5,22 +5,36 @@
# The file ~/github.com does not exist.
# Perhaps you meant 'http://github.com'?
#
-from thefuck.utils import for_app
+from thefuck.shells import shell
+from thefuck.utils import eager, for_app
+
+
+def is_arg_url(command):
+ return ('.com' in command.script or
+ '.edu' in command.script or
+ '.info' in command.script or
+ '.io' in command.script or
+ '.ly' in command.script or
+ '.me' in command.script or
+ '.net' in command.script or
+ '.org' in command.script or
+ '.se' in command.script or
+ 'www.' in command.script)
@for_app('open', 'xdg-open', 'gnome-open', 'kde-open')
def match(command):
- return ('.com' in command.script
- or '.net' in command.script
- or '.org' in command.script
- or '.ly' in command.script
- or '.io' in command.script
- or '.se' in command.script
- or '.edu' in command.script
- or '.info' in command.script
- or '.me' in command.script
- or 'www.' in command.script)
+ return (is_arg_url(command) or
+ command.stderr.strip().startswith('The file ') and
+ command.stderr.strip().endswith(' does not exist.'))
+@eager
def get_new_command(command):
- return command.script.replace('open ', 'open http://')
+ stderr = command.stderr.strip()
+ if is_arg_url(command):
+ yield command.script.replace('open ', 'open http://')
+ elif stderr.startswith('The file ') and stderr.endswith(' does not exist.'):
+ arg = command.script.split(' ', 1)[1]
+ for option in ['touch', 'mkdir']:
+ yield shell.and_(u'{} {}'.format(option, arg), command.script)
| This change improves the `open` rule and its tests and implement one of the features suggested by @mlk on #534.
| https://api.github.com/repos/nvbn/thefuck/pulls/535 | 2016-08-12T04:12:01Z | 2016-08-13T12:20:12Z | 2016-08-13T12:20:12Z | 2016-08-13T22:24:22Z | 1,553 | nvbn/thefuck | 30,877 |
Depend on a stable version of curl_cffi | diff --git a/requirements.txt b/requirements.txt
index 7f5cb01167..6ec1953dae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
requests
pycryptodome
-curl_cffi>=0.5.10b4
+curl_cffi>=0.5.10
aiohttp
certifi
browser_cookie3
@@ -26,4 +26,4 @@ async-property
undetected-chromedriver
asyncstdlib
async_property
-bs4
\ No newline at end of file
+bs4
| https://api.github.com/repos/xtekky/gpt4free/pulls/1359 | 2023-12-17T06:57:13Z | 2023-12-19T20:46:07Z | 2023-12-19T20:46:07Z | 2023-12-19T20:46:20Z | 136 | xtekky/gpt4free | 38,049 | |
added Graph-Powered Machine Learning | diff --git a/books.md b/books.md
index 5ef2bf31..4f6387b5 100644
--- a/books.md
+++ b/books.md
@@ -37,6 +37,7 @@ The following is a list of free, open source books on machine learning, statisti
* [Hands‑On Machine Learning with Scikit‑Learn and TensorFlow](http://index-of.es/Varios-2/Hands%20on%20Machine%20Learning%20with%20Scikit%20Learn%20and%20Tensorflow.pdf) - Aurélien Géron
* [R for Data Science: Import, Tidy, Transform, Visualize, and Model Data](http://r4ds.had.co.nz/) - Wickham and Grolemund. Great as introduction on how to use R.
* [Advanced R](http://adv-r.had.co.nz/) - Hadley Wickham. More advanced usage of R for programming.
+* [Graph-Powered Machine Learning](https://www.manning.com/books/graph-powered-machine-learning) - Alessandro Negro. Combining graph theory and models to improve machine learning projects
## Deep-Learning
| Hi, I thought this title might be a great addition to your list of books. Thank you for your consideration | https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/549 | 2018-10-21T11:03:38Z | 2018-10-21T15:54:44Z | 2018-10-21T15:54:44Z | 2018-10-21T15:54:44Z | 250 | josephmisiti/awesome-machine-learning | 52,030 |
Fix zero-1 bug related to stitching reduced grads across comm partitions | diff --git a/deepspeed/runtime/zero/stage1.py b/deepspeed/runtime/zero/stage1.py
index 80b51ffe4d12..c3941a907fbf 100755
--- a/deepspeed/runtime/zero/stage1.py
+++ b/deepspeed/runtime/zero/stage1.py
@@ -249,8 +249,7 @@ def __init__(self,
# RS: divide up the sub-partitions and keep track of offsets for each param
# partition_size = len(self.fp16_groups_flat[i]) / dist.get_world_size(group=self.dp_process_group)
- params_in_rank_sub_partition, params_in_rank_sub_partitions_offsets, \
- params_not_local = self.get_all_sub_partition_info(
+ params_in_rank_sub_partition, params_in_rank_sub_partitions_offsets, params_not_local = self.get_all_sub_partition_info(
tensor_list=self.fp16_groups[i],
all_element_intervals=element_intervals,
local_rank=local_rank,
@@ -591,28 +590,20 @@ def reduce_scatter_gradients(self,
all_comm_partitions.append(single_comm_all_partitions)
- for p in my_params:
- partitions = param_partition_map[p]
- parts = []
- for part in partitions:
- params, offsets = partition_param_map[part]
- found = False
- for p_idx, _p in enumerate(params):
- if p.__hash__() == _p.__hash__():
- found = True
- if offsets[p_idx][0] is not None:
- my_part = part.narrow(0,
- offsets[p_idx][0],
- offsets[p_idx][1])
- parts.append(my_part)
- assert found
- if p is not None:
- updated_grad = _unflatten_dense_tensors(torch.cat(parts), [p])
- p.grad.copy_(updated_grad[0])
+ # stitch together all rank sub partitions for each comm idx
+ flat_comm_grads = []
+ for comm_idx, rank_partitions in enumerate(all_comm_partitions):
+ flat_comm_grads.append(torch.cat(rank_partitions))
+
+ flat_all_grads = torch.cat(flat_comm_grads)
+
+ # copy back reduced gradients but only those needed for this local rank
+ for param, updated_grad in zip(self.fp16_groups[i], _unflatten_dense_tensors(flat_all_grads, self.fp16_groups[i])):
+ if param in my_params:
+ param.grad.copy_(updated_grad)
def step(self, closure=None):
# First compute norm for all group so we know if there is overflow
-
self.overflow = self.overflow_checker.check()
prev_scale = self.loss_scale
@@ -649,6 +640,7 @@ def step(self, closure=None):
#)
#TODO RS: can we safely use dtype of the first sub-partition? i think so
+ # create flat gradient partitions for parameters updated by this process
local_grad_sub_partitions = self.get_flat_sub_partitions(
comm_tensor_list=self.params_in_rank_sub_partitions[i][partition_id],
comm_param_offsets=self.params_in_rank_sub_partitions_offsets[i]
| Has been confirmed from 1p user to have fixed their issue. Also our original repro of the diverged loss shows the issue has fixed as well.
I believe the issue was related to the old copy gradients code was not stitching the sub-partitions together correctly. After simplifying this part of the code substantially this issue appears to be fixed. | https://api.github.com/repos/microsoft/DeepSpeed/pulls/318 | 2020-08-14T06:25:30Z | 2020-09-15T19:59:57Z | 2020-09-15T19:59:57Z | 2020-11-13T07:37:04Z | 679 | microsoft/DeepSpeed | 10,915 |
Add SwinIR and new features to readme | diff --git a/README.md b/README.md
index d355db34458..c1538100982 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,8 @@ A browser interface based on Gradio library for Stable Diffusion.
- CodeFormer, face restoration tool as an alternative to GFPGAN
- RealESRGAN, neural network upscaler
- ESRGAN, neural network with a lot of third party models
+ - SwinIR, neural network upscaler
+ - LDSR, Latent diffusion super resolution upscaling
- Resizing aspect ratio options
- Sampling method selection
- Interrupt processing at any time
@@ -41,6 +43,10 @@ A browser interface based on Gradio library for Stable Diffusion.
- Seed resizing
- CLIP interrogator
- Prompt Editing
+- Batch Processing
+- Img2img Alternative
+- Highres Fix
+- LDSR Upscaling
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
@@ -79,6 +85,8 @@ The documentation was moved from this README over to the project's [wiki](https:
- GFPGAN - https://github.com/TencentARC/GFPGAN.git
- CodeFormer - https://github.com/sczhou/CodeFormer
- ESRGAN - https://github.com/xinntao/ESRGAN
+- SwinIR - https://github.com/JingyunLiang/SwinIR
+- LDSR - https://github.com/Hafiidz/latent-diffusion
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
- Doggettx - Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
| https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/816 | 2022-09-21T21:10:42Z | 2022-09-22T04:18:00Z | 2022-09-22T04:18:00Z | 2022-09-22T04:18:00Z | 510 | AUTOMATIC1111/stable-diffusion-webui | 39,714 | |
skip bcast when enable pp but pp_group_size=1 | diff --git a/deepspeed/runtime/pipe/engine.py b/deepspeed/runtime/pipe/engine.py
index 6286aa6cf0c4..19b14f69b003 100644
--- a/deepspeed/runtime/pipe/engine.py
+++ b/deepspeed/runtime/pipe/engine.py
@@ -524,8 +524,8 @@ def _aggregate_total_loss(self):
assert self.global_rank in self.grid.pp_group
losses = torch.Tensor([self.dp_group_loss, agg_loss]).to(self.device)
- dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group())
-
+ if self.is_pipe_parallel:
+ dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group())
else:
# Get loss from last stage
src_rank = self.grid.stage_to_global(self.num_stages - 1)
| @guoyejun | https://api.github.com/repos/microsoft/DeepSpeed/pulls/3915 | 2023-07-10T05:50:33Z | 2023-07-14T20:48:22Z | 2023-07-14T20:48:22Z | 2023-07-14T20:48:23Z | 197 | microsoft/DeepSpeed | 10,298 |
BUG: GH2808 Apply with invalid returned indices raise correct Exception | diff --git a/RELEASE.rst b/RELEASE.rst
index 3e935879c197e..17a2fa0ba2d0d 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -258,6 +258,7 @@ pandas 0.11.0
- Upcast/split blocks when needed in a mixed DataFrame when setitem
with an indexer (GH3216_)
- Invoking df.applymap on a dataframe with dupe cols now raises a ValueError (GH2786_)
+ - Apply with invalid returned indices raise correct Exception (GH2808_)
.. _GH622: https://github.com/pydata/pandas/issues/622
.. _GH797: https://github.com/pydata/pandas/issues/797
@@ -304,6 +305,7 @@ pandas 0.11.0
.. _GH2867: https://github.com/pydata/pandas/issues/2867
.. _GH2803: https://github.com/pydata/pandas/issues/2803
.. _GH2807: https://github.com/pydata/pandas/issues/2807
+.. _GH2808: https://github.com/pydata/pandas/issues/2808
.. _GH2849: https://github.com/pydata/pandas/issues/2849
.. _GH2850: https://github.com/pydata/pandas/issues/2850
.. _GH2898: https://github.com/pydata/pandas/issues/2898
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 9551ae12deb07..ac352eef4acfe 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1404,8 +1404,8 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None):
subarr.names = [None] * subarr.nlevels
else:
if len(names) != subarr.nlevels:
- raise AssertionError(('Length of names must be same as level '
- '(%d), got %d') % (subarr.nlevels))
+ raise AssertionError(('Length of names (%d) must be same as level '
+ '(%d)') % (len(names),subarr.nlevels))
subarr.names = list(names)
@@ -2765,13 +2765,13 @@ def _handle_legacy_indexes(indexes):
def _get_consensus_names(indexes):
- consensus_name = indexes[0].names
- for index in indexes[1:]:
- if index.names != consensus_name:
- consensus_name = [None] * index.nlevels
- break
- return consensus_name
+ # find the non-none names, need to tupleify to make
+ # the set hashable, then reverse on return
+ consensus_names = set([ tuple(i.names) for i in indexes if all(n is not None for n in i.names) ])
+ if len(consensus_names) == 1:
+ return list(list(consensus_names)[0])
+ return [None] * indexes[0].nlevels
def _maybe_box(idx):
from pandas.tseries.api import DatetimeIndex, PeriodIndex
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 9e623de5483ab..7aad2e0b734b1 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -568,6 +568,71 @@ def f(x):
assert_series_equal(agged, expected, check_dtype=False)
self.assert_(issubclass(agged.dtype.type, np.dtype(dtype).type))
+ def test_indices_concatenation_order(self):
+
+ # GH 2808
+
+ def f1(x):
+ y = x[(x.b % 2) == 1]**2
+ if y.empty:
+ multiindex = MultiIndex(
+ levels = [[]]*2,
+ labels = [[]]*2,
+ names = ['b', 'c']
+ )
+ res = DataFrame(None,
+ columns=['a'],
+ index=multiindex)
+ return res
+ else:
+ y = y.set_index(['b','c'])
+ return y
+
+ def f2(x):
+ y = x[(x.b % 2) == 1]**2
+ if y.empty:
+ return DataFrame()
+ else:
+ y = y.set_index(['b','c'])
+ return y
+
+ def f3(x):
+ y = x[(x.b % 2) == 1]**2
+ if y.empty:
+ multiindex = MultiIndex(
+ levels = [[]]*2,
+ labels = [[]]*2,
+ names = ['foo', 'bar']
+ )
+ res = DataFrame(None,
+ columns=['a','b'],
+ index=multiindex)
+ return res
+ else:
+ return y
+
+ df = DataFrame({'a':[1,2,2,2],
+ 'b':range(4),
+ 'c':range(5,9)})
+
+ df2 = DataFrame({'a':[3,2,2,2],
+ 'b':range(4),
+ 'c':range(5,9)})
+
+
+ # correct result
+ result1 = df.groupby('a').apply(f1)
+ result2 = df2.groupby('a').apply(f1)
+ assert_frame_equal(result1, result2)
+
+ # should fail (not the same number of levels)
+ self.assertRaises(AssertionError, df.groupby('a').apply, f2)
+ self.assertRaises(AssertionError, df2.groupby('a').apply, f2)
+
+ # should fail (incorrect shape)
+ self.assertRaises(AssertionError, df.groupby('a').apply, f3)
+ self.assertRaises(AssertionError, df2.groupby('a').apply, f3)
+
def test_attr_wrapper(self):
grouped = self.ts.groupby(lambda x: x.weekday())
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 87e3ab7f2409b..1643a6bfb2655 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1221,6 +1221,11 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if len(names) == len(levels):
names = list(names)
else:
+ # make sure that all of the passed indices have the same nlevels
+ if not len(set([ i.nlevels for i in indexes ])) == 1:
+ raise AssertionError("Cannot concat indices that do"
+ " not have the same number of levels")
+
# also copies
names = names + _get_consensus_names(indexes)
| closes #2808
| https://api.github.com/repos/pandas-dev/pandas/pulls/3228 | 2013-04-01T12:25:15Z | 2013-04-02T12:24:50Z | 2013-04-02T12:24:50Z | 2014-07-04T06:31:36Z | 1,553 | pandas-dev/pandas | 45,742 |
Add Webdam | diff --git a/README.md b/README.md
index 1eca18533a..30a707d22c 100644
--- a/README.md
+++ b/README.md
@@ -953,6 +953,7 @@ API | Description | Auth | HTTPS | CORS |
| [ReSmush.it](https://resmush.it/api) | Photo optimization | No | No | Unknown |
| [Unsplash](https://unsplash.com/developers) | Photography | `OAuth` | Yes | Unknown |
| [Wallhaven](https://wallhaven.cc/help/api) | Wallpapers | `apiKey` | Yes | Unknown |
+| [Webdam](https://www.damsuccess.com/hc/en-us/articles/202134055-REST-API) | Images | `OAuth` | Yes | Unknown |
**[⬆ Back to Index](#index)**
### Science & Math
| <!-- Thank you for taking the time to work on a Pull Request for this project! -->
<!-- To ensure your PR is dealt with swiftly please check the following: -->
- [X] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [X] My submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [x] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
This is new PR of PR #1583. Referencing issue #2002 | https://api.github.com/repos/public-apis/public-apis/pulls/2101 | 2021-10-02T12:58:47Z | 2021-10-03T07:00:04Z | 2021-10-03T07:00:04Z | 2021-10-03T07:00:04Z | 193 | public-apis/public-apis | 35,373 |
Updated to Python 3 this files | diff --git a/pscheck.py b/pscheck.py
index 2153b19c88..399e955def 100644
--- a/pscheck.py
+++ b/pscheck.py
@@ -31,7 +31,6 @@ def ps():
except:
print("There was a problem with the program.")
-
def main():
if os.name == "posix": # Unix/Linux/MacOS/BSD/etc
ps() # Call the function
diff --git a/psunotify.py b/psunotify.py
index d3a39ea54d..cca7613935 100644
--- a/psunotify.py
+++ b/psunotify.py
@@ -20,7 +20,9 @@
urls[int]=urls[int].replace("<b>","")
urls[int]=urls[int].replace("</b>","")
int=int+1
+
print(urls)
+
for url in urls:
try:
temp=url.split("/")
@@ -32,7 +34,7 @@
file=open('psu2'+q+'.pdf','wb')
file.write(r.read())
file.close()
+
print("Done")
except urllib2.URLError as e:
print("Sorry there exists a problem with this URL Please Download this Manually "+str(url))
-
diff --git a/python_sms.py b/python_sms.py
index 7f2928c3ec..a8e96d6880 100644
--- a/python_sms.py
+++ b/python_sms.py
@@ -71,6 +71,7 @@
response = urllib2.urlopen(req)
response_url = response.geturl()
if response_url == url:
+
print('SMS sent!')
except urllib2.URLError as e:
print('Send failed!')
diff --git a/recyclebin.py b/recyclebin.py
index a9e3c568ec..f48f7c7288 100644
--- a/recyclebin.py
+++ b/recyclebin.py
@@ -1,48 +1,49 @@
from __future__ import print_function
-# Script Name : recyclebin.py
-# Author : Craig Richards
-# Created : 07th June 2013
-# Last Modified :
-# Version : 1.0
-
-# Modifications :
-
-# Description : Scans the recyclebin and displays the files in there, originally got this script from the Violent Python book
-
-import os # Load the Module
-import optparse # Load the Module
-from _winreg import * # Load the Module
-
-def sid2user(sid): # Start of the function to gather the user
- try:
- key = OpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList" + '\\' + sid)
- (value, type) = QueryValueEx(key, 'ProfileImagePath')
- user = value.split('\\')[-1]
- return user
- except:
- return sid
-
-
-def returnDir(): # Start of the function to search through the recyclebin
- dirs=['c:\\Recycler\\','C:\\Recycled\\','C:\\$RECYCLE.BIN\\']
- #dirs=['c:\\$RECYCLE.BIN\\']
- for recycleDir in dirs:
- if os.path.isdir(recycleDir):
- return recycleDir
- return None
-
-def findRecycled(recycleDir): # Start of the function, list the contents of the recyclebin
- dirList = os.listdir(recycleDir)
- for sid in dirList:
- files = os.listdir(recycleDir + sid)
- user = sid2user(sid)
- print('\n[*] Listing Files for User: ' + str(user))
- for file in files:
- print('[+] Found File: ' + str(file))
-
-def main():
- recycleDir = returnDir()
- findRecycled(recycleDir)
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
+# Script Name : recyclebin.py
+# Author : Craig Richards
+# Created : 07th June 2013
+# Last Modified :
+# Version : 1.0
+
+# Modifications :
+
+# Description : Scans the recyclebin and displays the files in there, originally got this script from the Violent Python book
+
+import os # Load the Module
+import optparse # Load the Module
+from _winreg import * # Load the Module
+
+def sid2user(sid): # Start of the function to gather the user
+ try:
+ key = OpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList" + '\\' + sid)
+ (value, type) = QueryValueEx(key, 'ProfileImagePath')
+ user = value.split('\\')[-1]
+ return user
+ except:
+ return sid
+
+
+def returnDir(): # Start of the function to search through the recyclebin
+ dirs=['c:\\Recycler\\','C:\\Recycled\\','C:\\$RECYCLE.BIN\\']
+ #dirs=['c:\\$RECYCLE.BIN\\']
+ for recycleDir in dirs:
+ if os.path.isdir(recycleDir):
+ return recycleDir
+ return None
+
+def findRecycled(recycleDir): # Start of the function, list the contents of the recyclebin
+ dirList = os.listdir(recycleDir)
+ for sid in dirList:
+ files = os.listdir(recycleDir + sid)
+ user = sid2user(sid)
+
+ print('\n[*] Listing Files for User: ' + str(user))
+ for file in files:
+ print('[+] Found File: ' + str(file))
+
+def main():
+ recycleDir = returnDir()
+ findRecycled(recycleDir)
+
+if __name__ == '__main__':
+ main()
diff --git a/script_count.py b/script_count.py
index 0b49b566e9..aea3c52d5d 100644
--- a/script_count.py
+++ b/script_count.py
@@ -34,6 +34,7 @@ def github(): # Start of the function just to count the fil
github_dir = os.path.join(dropbox, 'github') # Joins the paths to get the github directory - 1.1
github_count = sum((len(f) for _, _, f in os.walk(github_dir))) # Get a count for all the files in the directory
if github_count > 5: # If the number of files is greater then 5, then print the following messages
+
print('\nYou have too many in here, start uploading !!!!!')
print('You have: ' + str(github_count) + ' waiting to be uploaded to github!!')
elif github_count == 0: # Unless the count is 0, then print the following messages
@@ -45,6 +46,7 @@ def development(): # Start of the function just to count the
dev_dir = os.path.join(path, 'development') # Joins the paths to get the development directory - 1.1
dev_count = sum((len(f) for _, _, f in os.walk(dev_dir))) # Get a count for all the files in the directory
if dev_count > 10: # If the number of files is greater then 10, then print the following messages
+
print('\nYou have too many in here, finish them or delete them !!!!!')
print('You have: ' + str(dev_count) + ' waiting to be finished!!')
elif dev_count ==0: # Unless the count is 0, then print the following messages
diff --git a/sendemail.py b/sendemail.py
index 82509e1cfc..f878c0539c 100644
--- a/sendemail.py
+++ b/sendemail.py
@@ -29,7 +29,9 @@ def get_credentials():
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
credentials = tools.run_flow(flow, store)
+
print('Storing credentials to ' + credential_path)
+
return credentials
def SendMessage(sender, to, subject, msgHtml, msgPlain, attachmentFile=None):
@@ -46,7 +48,9 @@ def SendMessage(sender, to, subject, msgHtml, msgPlain, attachmentFile=None):
def SendMessageInternal(service, user_id, message):
try:
message = (service.users().messages().send(userId=user_id, body=message).execute())
+
print('Message Id: %s' % message['id'])
+
return message
except errors.HttpError as error:
print('An error occurred: %s' % error)
diff --git a/spotlight.py b/spotlight.py
index 0ccdd398ed..1ee4f0e7e4 100644
--- a/spotlight.py
+++ b/spotlight.py
@@ -1,66 +1,68 @@
-""" Script To Copy Spotlight(Lockscreen) Images from Windows """
+""" Script To Copy Spotlight(Lockscreen) Images from Windows """
from __future__ import print_function
-import os
-import shutil
-import errno
-import hashlib
-from PIL import Image
-
-def md5(fname):
- """ Function to return the MD5 Digest of a file """
-
- hash_md5 = hashlib.md5()
- with open(fname, "rb") as file_var:
- for chunk in iter(lambda: file_var.read(4096), b""):
- hash_md5.update(chunk)
- return hash_md5.hexdigest()
-
-def make_folder(folder_name):
- """Function to make the required folers"""
- try:
- os.makedirs(folder_name)
- except OSError as exc:
- if exc.errno == errno.EEXIST and os.path.isdir(folder_name):
- pass
- else:
- print("Error! Could not create a folder")
- raise
-
-def get_spotlight_wallpapers(target_folder):
- """Fetches wallpapers from source folder inside AppData to the
- newly created folders in C:\\Users\\['user.name']\\Pictures"""
- #PATHS REQUIRED TO FETCH AND STORE WALLPAPERS
- #Creating necessary folders
-
- source_folder = os.environ['HOME']+"\\AppData\\Local\\Packages\\"
- source_folder += "Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy"
- source_folder += "\\LocalState\\Assets"
- spotlight_path_mobile = target_folder+"\\Mobile"
- spotlight_path_desktop = target_folder+"\\Desktop"
- make_folder(spotlight_path_mobile)
- make_folder(spotlight_path_desktop)
-
-
- #Fetching files from the source dir
- for filename in os.listdir(source_folder):
- filename = source_folder+"\\"+filename
- #if size of file is less than 100 KB, ignore the file
- if os.stat(filename).st_size > 100000:
- #Check resolution and classify based upon the resolution of the images
-
- #name the file equal to the MD5 of the file, so that no duplicate files are to be copied
- img_file = Image.open(filename)
- if img_file.size[0] >= 1080:
- if img_file.size[0] > img_file.size[1]:
- temp_path = spotlight_path_desktop+"\\"+md5(filename)
- else:
- temp_path = spotlight_path_mobile+"\\"+md5(filename)
- #If file doesn't exist, copy the file to the new folders
- if not os.path.exists(temp_path+".png"):
- shutil.copy(filename, temp_path+".png")
-
-if __name__ == '__main__':
- PATH = raw_input("Enter directory path:")
- get_spotlight_wallpapers(PATH)
- print("Lockscreen images have been copied to \""+PATH+"\"")
-
\ No newline at end of file
+import os
+import shutil
+import errno
+import hashlib
+from PIL import Image
+
+def md5(fname):
+ """ Function to return the MD5 Digest of a file """
+
+ hash_md5 = hashlib.md5()
+ with open(fname, "rb") as file_var:
+ for chunk in iter(lambda: file_var.read(4096), b""):
+ hash_md5.update(chunk)
+ return hash_md5.hexdigest()
+
+def make_folder(folder_name):
+ """Function to make the required folers"""
+ try:
+ os.makedirs(folder_name)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(folder_name):
+ pass
+ else:
+ print("Error! Could not create a folder")
+ raise
+
+def get_spotlight_wallpapers(target_folder):
+ """Fetches wallpapers from source folder inside AppData to the
+ newly created folders in C:\\Users\\['user.name']\\Pictures"""
+ #PATHS REQUIRED TO FETCH AND STORE WALLPAPERS
+ #Creating necessary folders
+
+ source_folder = os.environ['HOME']+"\\AppData\\Local\\Packages\\"
+ source_folder += "Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy"
+ source_folder += "\\LocalState\\Assets"
+ spotlight_path_mobile = target_folder+"\\Mobile"
+ spotlight_path_desktop = target_folder+"\\Desktop"
+ make_folder(spotlight_path_mobile)
+ make_folder(spotlight_path_desktop)
+
+
+ #Fetching files from the source dir
+ for filename in os.listdir(source_folder):
+ filename = source_folder+"\\"+filename
+ #if size of file is less than 100 KB, ignore the file
+ if os.stat(filename).st_size > 100000:
+ #Check resolution and classify based upon the resolution of the images
+
+ #name the file equal to the MD5 of the file, so that no duplicate files are to be copied
+ img_file = Image.open(filename)
+ if img_file.size[0] >= 1080:
+ if img_file.size[0] > img_file.size[1]:
+ temp_path = spotlight_path_desktop+"\\"+md5(filename)
+ else:
+ temp_path = spotlight_path_mobile+"\\"+md5(filename)
+ #If file doesn't exist, copy the file to the new folders
+ if not os.path.exists(temp_path+".png"):
+ shutil.copy(filename, temp_path+".png")
+
+if __name__ == '__main__':
+ PATH = raw_input("Enter directory path:")
+ get_spotlight_wallpapers(PATH)
+ print("Lockscreen images have been copied to \""+PATH+"\"")
+
+
+
| Updated print statements to Python 3
--------------------------------------------------------------------------------------
While testing, I had a Python version issue and changed some files to python3.
Is it okay if I try to change the rest of the files? | https://api.github.com/repos/geekcomputers/Python/pulls/440 | 2018-12-04T08:13:45Z | 2018-12-14T11:26:28Z | 2018-12-14T11:26:28Z | 2018-12-14T11:26:29Z | 3,319 | geekcomputers/Python | 31,140 |
Add funshion support, fix #215, replace #601, #604 | diff --git a/README.md b/README.md
index bcec3fc7f0..cb3550d183 100644
--- a/README.md
+++ b/README.md
@@ -43,6 +43,7 @@ Fork me on GitHub: <https://github.com/soimort/you-get>
* DouyuTV (斗鱼) <http://www.douyutv.com>
* eHow <http://www.ehow.com>
* Facebook <http://facebook.com>
+* Fun.tv (风行, Funshion) <http://www.fun.tv/>
* Google Drive <http://docs.google.com>
* ifeng (凤凰视频) <http://v.ifeng.com>
* iQIYI (爱奇艺) <http://www.iqiyi.com>
diff --git a/src/you_get/common.py b/src/you_get/common.py
index dde25b38ec..0a79ab985e 100755
--- a/src/you_get/common.py
+++ b/src/you_get/common.py
@@ -922,7 +922,7 @@ def script_main(script_name, download, download_playlist = None):
sys.exit(1)
def url_to_module(url):
- from .extractors import netease, w56, acfun, baidu, baomihua, bilibili, blip, catfun, cntv, cbs, coursera, dailymotion, dongting, douban, douyutv, ehow, facebook, freesound, google, sina, ifeng, alive, instagram, iqiyi, joy, jpopsuki, khan, ku6, kugou, kuwo, letv, lizhi, magisto, miaopai, miomio, mixcloud, mtv81, nicovideo, pptv, qianmo, qq, sohu, songtaste, soundcloud, ted, theplatform, tudou, tucao, tumblr, twitter, vid48, videobam, vidto, vimeo, vine, vk, xiami, yinyuetai, youku, youtube, zhanqi
+ from .extractors import netease, w56, acfun, baidu, baomihua, bilibili, blip, catfun, cntv, cbs, coursera, dailymotion, dongting, douban, douyutv, ehow, facebook, freesound, funshion, google, sina, ifeng, alive, instagram, iqiyi, joy, jpopsuki, khan, ku6, kugou, kuwo, letv, lizhi, magisto, miaopai, miomio, mixcloud, mtv81, nicovideo, pptv, qianmo, qq, sohu, songtaste, soundcloud, ted, theplatform, tudou, tucao, tumblr, twitter, vid48, videobam, vidto, vimeo, vine, vk, xiami, yinyuetai, youku, youtube, zhanqi
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
@@ -953,6 +953,7 @@ def url_to_module(url):
'ehow': ehow,
'facebook': facebook,
'freesound': freesound,
+ 'fun': funshion,
'google': google,
'iask': sina,
'ifeng': ifeng,
diff --git a/src/you_get/extractors/__init__.py b/src/you_get/extractors/__init__.py
index 39256d1190..198bc55b03 100755
--- a/src/you_get/extractors/__init__.py
+++ b/src/you_get/extractors/__init__.py
@@ -15,6 +15,7 @@
from .ehow import *
from .facebook import *
from .freesound import *
+from .funshion import *
from .google import *
from .ifeng import *
from .instagram import *
diff --git a/src/you_get/extractors/funshion.py b/src/you_get/extractors/funshion.py
new file mode 100755
index 0000000000..29339699fa
--- /dev/null
+++ b/src/you_get/extractors/funshion.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+
+__all__ = ['funshion_download']
+
+from ..common import *
+import urllib.error
+import json
+
+#----------------------------------------------------------------------
+def funshion_download(url, output_dir = '.', merge = False, info_only = False):
+ """"""
+ if re.match(r'http://www.fun.tv/vplay/v-(\w+)', url): #single video
+ funshion_download_by_url(url, output_dir = '.', merge = False, info_only = False)
+ elif re.match(r'http://www.fun.tv/vplay/g-(\w+)', url): #whole drama
+ funshion_download_by_drama_url(url, output_dir = '.', merge = False, info_only = False)
+ else:
+ return
+
+# Logics for single video until drama
+#----------------------------------------------------------------------
+def funshion_download_by_url(url, output_dir = '.', merge = False, info_only = False):
+ """lots of stuff->None
+ Main wrapper for single video download.
+ """
+ if re.match(r'http://www.fun.tv/vplay/v-(\w+)', url):
+ match = re.search(r'http://www.fun.tv/vplay/v-(\d+)(.?)', url)
+ vid = match.group(1)
+ funshion_download_by_vid(vid, output_dir = '.', merge = False, info_only = False)
+
+#----------------------------------------------------------------------
+def funshion_download_by_vid(vid, output_dir = '.', merge = False, info_only = False):
+ """vid->None
+ Secondary wrapper for single video download.
+ """
+ title = funshion_get_title_by_vid(vid)
+ url_list = funshion_vid_to_urls(vid)
+
+ for url in url_list:
+ type, ext, size = url_info(url)
+ print_info(site_info, title, type, size)
+
+ if not info_only:
+ download_urls(url_list, title, ext, total_size=None, output_dir=output_dir, merge=merge)
+
+#----------------------------------------------------------------------
+def funshion_get_title_by_vid(vid):
+ """vid->str
+ Single video vid to title."""
+ html = get_content('http://pv.funshion.com/v5/video/profile?id={vid}&cl=aphone&uc=5'.format(vid = vid))
+ c = json.loads(html)
+ return c['name']
+
+#----------------------------------------------------------------------
+def funshion_vid_to_urls(vid):
+ """str->str
+ Select one resolution for single video download."""
+ html = get_content('http://pv.funshion.com/v5/video/play/?id={vid}&cl=aphone&uc=5'.format(vid = vid))
+ return select_url_from_video_api(html)
+
+#Logics for drama until helper functions
+#----------------------------------------------------------------------
+def funshion_download_by_drama_url(url, output_dir = '.', merge = False, info_only = False):
+ """str->None
+ url = 'http://www.fun.tv/vplay/g-95785/'
+ """
+ if re.match(r'http://www.fun.tv/vplay/g-(\w+)', url):
+ match = re.search(r'http://www.fun.tv/vplay/g-(\d+)(.?)', url)
+ id = match.group(1)
+
+ video_list = funshion_drama_id_to_vid(id)
+
+ for video in video_list:
+ funshion_download_by_id((video[0], id), output_dir = '.', merge = False, info_only = False)
+ # id is for drama, vid not the same as the ones used in single video
+
+#----------------------------------------------------------------------
+def funshion_download_by_id(vid_id_tuple, output_dir = '.', merge = False, info_only = False):
+ """single_episode_id, drama_id->None
+ Secondary wrapper for single drama video download.
+ """
+ (vid, id) = vid_id_tuple
+ title = funshion_get_title_by_id(vid, id)
+ url_list = funshion_id_to_urls(vid)
+
+ for url in url_list:
+ type, ext, size = url_info(url)
+ print_info(site_info, title, type, size)
+
+ if not info_only:
+ download_urls(url_list, title, ext, total_size=None, output_dir=output_dir, merge=merge)
+
+#----------------------------------------------------------------------
+def funshion_drama_id_to_vid(episode_id):
+ """int->[(int,int),...]
+ id: 95785
+ ->[('626464', '1'), ('626466', '2'), ('626468', '3'),...
+ Drama ID to vids used in drama.
+
+ **THIS VID IS NOT THE SAME WITH THE ONES USED IN SINGLE VIDEO!!**
+ """
+ html = get_content('http://pm.funshion.com/v5/media/episode?id={episode_id}&cl=aphone&uc=5'.format(episode_id = episode_id))
+ c = json.loads(html)
+ #{'definition': [{'name': '流畅', 'code': 'tv'}, {'name': '标清', 'code': 'dvd'}, {'name': '高清', 'code': 'hd'}], 'retmsg': 'ok', 'total': '32', 'sort': '1', 'prevues': [], 'retcode': '200', 'cid': '2', 'template': 'grid', 'episodes': [{'num': '1', 'id': '624728', 'still': None, 'name': '第1集', 'duration': '45:55'}, ], 'name': '太行山上', 'share': 'http://pm.funshion.com/v5/media/share?id=201554&num=', 'media': '201554'}
+ return [(i['id'], i['num']) for i in c['episodes']]
+
+#----------------------------------------------------------------------
+def funshion_id_to_urls(id):
+ """int->list of URL
+ Select video URL for single drama video.
+ """
+ html = get_content('http://pm.funshion.com/v5/media/play/?id={id}&cl=aphone&uc=5'.format(id = id))
+ return select_url_from_video_api(html)
+
+#----------------------------------------------------------------------
+def funshion_get_title_by_id(single_episode_id, drama_id):
+ """single_episode_id, drama_id->str
+ This is for full drama.
+ Get title for single drama video."""
+ html = get_content('http://pm.funshion.com/v5/media/episode?id={id}&cl=aphone&uc=5'.format(id = drama_id))
+ c = json.loads(html)
+
+ for i in c['episodes']:
+ if i['id'] == str(single_episode_id):
+ return c['name'] + ' - ' + i['name']
+
+# Helper functions.
+#----------------------------------------------------------------------
+def select_url_from_video_api(html):
+ """str(html)->str(url)
+
+ Choose the best one.
+
+ Used in both single and drama download.
+
+ code definition:
+ {'tv': 'liuchang',
+ 'dvd': 'biaoqing',
+ 'hd': 'gaoqing',
+ 'sdvd': 'chaoqing'}"""
+ c = json.loads(html)
+ #{'retmsg': 'ok', 'retcode': '200', 'selected': 'tv', 'mp4': [{'filename': '', 'http': 'http://jobsfe.funshion.com/query/v1/mp4/7FCD71C58EBD4336DF99787A63045A8F3016EC51.json', 'filesize': '96748671', 'code': 'tv', 'name': '流畅', 'infohash': '7FCD71C58EBD4336DF99787A63045A8F3016EC51'}...], 'episode': '626464'}
+ video_dic = {}
+ for i in c['mp4']:
+ video_dic[i['code']] = i['http']
+ quality_preference_list = ['sdvd', 'hd', 'dvd', 'sd']
+ url = [video_dic[quality] for quality in quality_preference_list if quality in video_dic][0]
+ html = get_html(url)
+ c = json.loads(html)
+ #'{"return":"succ","client":{"ip":"107.191.**.**","sp":"0","loc":"0"},"playlist":[{"bits":"1638400","tname":"dvd","size":"555811243","urls":["http:\\/\\/61.155.217.4:80\\/play\\/1E070CE31DAA1373B667FD23AA5397C192CA6F7F.mp4",...]}]}'
+ return [i['urls'][0] for i in c['playlist']]
+
+site_info = "funshion"
+download = funshion_download
+download_playlist = playlist_not_supported('funshion')
| This should do.
<!-- Reviewable:start -->
[<img src="https://reviewable.io/review_button.png" height=40 alt="Review on Reviewable"/>](https://reviewable.io/reviews/soimort/you-get/619)
<!-- Reviewable:end -->
| https://api.github.com/repos/soimort/you-get/pulls/619 | 2015-09-02T18:54:41Z | 2015-09-02T18:58:09Z | 2015-09-02T18:58:09Z | 2015-09-02T18:58:12Z | 2,960 | soimort/you-get | 21,463 |
fixbug: WriteCode adds the content of the related code files. | diff --git a/metagpt/actions/write_code.py b/metagpt/actions/write_code.py
index 4c138a124..b20539e78 100644
--- a/metagpt/actions/write_code.py
+++ b/metagpt/actions/write_code.py
@@ -14,12 +14,13 @@
3. Encapsulate the input of RunCode into RunCodeContext and encapsulate the output of RunCode into
RunCodeResult to standardize and unify parameter passing between WriteCode, RunCode, and DebugError.
"""
+import json
from tenacity import retry, stop_after_attempt, wait_random_exponential
from metagpt.actions.action import Action
from metagpt.config import CONFIG
-from metagpt.const import CODE_SUMMARIES_FILE_REPO, TEST_OUTPUTS_FILE_REPO
+from metagpt.const import CODE_SUMMARIES_FILE_REPO, TEST_OUTPUTS_FILE_REPO, TASK_FILE_REPO
from metagpt.logs import logger
from metagpt.schema import CodingContext, Document, RunCodeResult
from metagpt.utils.common import CodeParser
@@ -101,10 +102,11 @@ async def run(self, *args, **kwargs) -> CodingContext:
if test_doc:
test_detail = RunCodeResult.loads(test_doc.content)
logs = test_detail.stderr
+ code_context = await self._get_codes(coding_context.task_doc)
prompt = PROMPT_TEMPLATE.format(
design=coding_context.design_doc.content,
tasks=coding_context.task_doc.content if coding_context.task_doc else "",
- code=coding_context.code_doc.content if coding_context.code_doc else "",
+ code=code_context,
logs=logs,
filename=self.context.filename,
summary_log=summary_doc.content if summary_doc else "",
@@ -115,3 +117,21 @@ async def run(self, *args, **kwargs) -> CodingContext:
coding_context.code_doc = Document(filename=coding_context.filename, root_path=CONFIG.src_workspace)
coding_context.code_doc.content = code
return coding_context
+
+ @staticmethod
+ async def _get_codes(task_doc) -> str:
+ if not task_doc:
+ return ""
+ if not task_doc.content:
+ task_doc.content = FileRepository.get_file(filename=task_doc.filename, relative_path=TASK_FILE_REPO)
+ m = json.loads(task_doc.content)
+ code_filenames = m.get("Task list", [])
+ codes = []
+ src_file_repo = CONFIG.git_repo.new_file_repository(relative_path=CONFIG.src_workspace)
+ for filename in code_filenames:
+ doc = await src_file_repo.get(filename=filename)
+ if not doc:
+ continue
+ codes.append(doc.content)
+ return "\n----------\n".join(codes)
+
| fixbug: WriteCode adds the content of the related code files. | https://api.github.com/repos/geekan/MetaGPT/pulls/553 | 2023-12-12T08:43:50Z | 2023-12-12T08:48:04Z | 2023-12-12T08:48:04Z | 2024-01-02T11:16:56Z | 590 | geekan/MetaGPT | 16,581 |
fix(integrations): Fix GHE repo url | diff --git a/src/sentry/integrations/github_enterprise/repository.py b/src/sentry/integrations/github_enterprise/repository.py
index ecf0f5e99d1a4..8bf48aff238b1 100644
--- a/src/sentry/integrations/github_enterprise/repository.py
+++ b/src/sentry/integrations/github_enterprise/repository.py
@@ -18,7 +18,7 @@ def create_repository(self, organization, data):
integration = Integration.objects.get(
id=data['integration_id'], provider=self.repo_provider)
- base_url = integration.metadata.get('domain_name')
+ base_url = integration.metadata['domain_name'].split('/')[0]
return {
'name': data['identifier'],
'external_id': data['external_id'],
| https://api.github.com/repos/getsentry/sentry/pulls/9260 | 2018-07-31T20:54:25Z | 2018-07-31T22:30:03Z | 2018-07-31T22:30:03Z | 2020-12-21T15:54:16Z | 168 | getsentry/sentry | 44,173 | |
Added Czech and Slovak Nameday API | diff --git a/README.md b/README.md
index 1a08213bf9..553b15ff07 100644
--- a/README.md
+++ b/README.md
@@ -115,6 +115,7 @@ API | Description | Auth | HTTPS | Link |
API | Description | Auth | HTTPS | Link |
|---|---|---|---|---|
| Church Calendar | Catholic liturgical calendar | No | No | [Go!](http://calapi.inadiutorium.cz/) |
+| Czech and Slovak Namedays Calendar | Lookup for a date and returns nameday | No | Yes | [Go!](https://api.abalin.net/) |
| Czech Namedays Calendar | Lookup for a name and returns nameday date | No | No | [Go!](http://svatky.adresa.info/) |
| Google Calendar | Display, create and modify Google calendar events | `OAuth` | Yes | [Go!](https://developers.google.com/google-apps/calendar/) |
| Holidays | Historical data regarding holidays | `apiKey` | Yes | [Go!](https://holidayapi.com/) |
| Thank you for taking the time to work on a Pull Request for this project!
To ensure your PR is dealt with swiftly please check the following:
- [x] Your submissions are formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md).
- [x] Your changes are made in the [README](../README.md) file, not the auto-generated JSON.
- [x] Your additions are ordered alphabetically.
- [x] Your submission has a useful description.
- [x] Each table column should be padded with one space on either side.
- [x] You have searched the repository for any relevant issues or PRs.
- [x] Any category you are creating has the minimum requirement of 3 items.
| https://api.github.com/repos/public-apis/public-apis/pulls/570 | 2017-12-23T11:15:06Z | 2017-12-23T18:23:31Z | 2017-12-23T18:23:31Z | 2017-12-23T18:23:33Z | 239 | public-apis/public-apis | 35,405 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.