title
stringlengths
2
169
diff
stringlengths
235
19.5k
body
stringlengths
0
30.5k
url
stringlengths
48
84
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
diff_len
float64
101
3.99k
repo_name
stringclasses
83 values
__index_level_0__
int64
15
52.7k
Add max capabilities when creating nested stacks
diff --git a/localstack/services/cloudformation/models/cloudformation.py b/localstack/services/cloudformation/models/cloudformation.py index a3bd60abc3b02..e8dd56d8525d1 100644 --- a/localstack/services/cloudformation/models/cloudformation.py +++ b/localstack/services/cloudformation/models/cloudformation.py @@ -48,6 +48,12 @@ def get_nested_stack_params( "StackName": nested_stack_name, "TemplateURL": properties.get("TemplateURL"), "Parameters": stack_parameters, + # TODO: when migrating to resource provider check parity here + "Capabilities": [ + "CAPABILITY_AUTO_EXPAND", + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + ], } return result
## Motivation Nested stacks can also contain transformations and are thus subject to capability checks. This caused users to report issues regarding failed stack deployments to pop up in support which this PR aims to prevent. ## Changes - When creating a nested stack (`AWS::CloudFormation::Stack`) it will now always created with all possible capabilities. This should be fixed when migrating to a resource provider.
https://api.github.com/repos/localstack/localstack/pulls/8920
2023-08-16T11:00:34Z
2023-08-16T12:24:06Z
2023-08-16T12:24:06Z
2023-08-16T12:24:09Z
171
localstack/localstack
29,293
Fix enable_mod for "run" command
diff --git a/letsencrypt-apache/letsencrypt_apache/configurator.py b/letsencrypt-apache/letsencrypt_apache/configurator.py index 31b5f0bc57a..01c9d4f3045 100644 --- a/letsencrypt-apache/letsencrypt_apache/configurator.py +++ b/letsencrypt-apache/letsencrypt_apache/configurator.py @@ -493,7 +493,7 @@ def prepare_server_https(self, port, temp=False): """ if "ssl_module" not in self.parser.modules: logger.info("Loading mod_ssl into Apache Server") - self.enable_mod("ssl", temp) + self.enable_mod("ssl", temp=temp) # Check for Listen <port> # Note: This could be made to also look for ip:443 combo @@ -952,6 +952,9 @@ def is_site_enabled(self, avail_fp): def enable_site(self, vhost): """Enables an available site, Apache restart required. + .. note:: Does not make sure that the site correctly works or that all + modules are enabled appropriately. + .. todo:: This function should number subdomains before the domain vhost .. todo:: Make sure link is not broken... @@ -965,12 +968,6 @@ def enable_site(self, vhost): if self.is_site_enabled(vhost.filep): return - if vhost.ssl: - # TODO: Make this based on addresses - self.prepare_server_https("443") - if self.save_notes: - self.save() - if "/sites-available/" in vhost.filep: enabled_path = ("%s/sites-enabled/%s" % (self.parser.root, os.path.basename(vhost.filep))) @@ -1138,6 +1135,7 @@ def cleanup(self, achalls): if not self._chall_out: self.revert_challenge_config() self.restart() + self.parser.init_modules() def apache_restart(apache_init_script): diff --git a/letsencrypt-apache/letsencrypt_apache/dvsni.py b/letsencrypt-apache/letsencrypt_apache/dvsni.py index 11e69db61d4..c6c41dc51d8 100644 --- a/letsencrypt-apache/letsencrypt_apache/dvsni.py +++ b/letsencrypt-apache/letsencrypt_apache/dvsni.py @@ -53,7 +53,7 @@ def __init__(self, *args, **kwargs): "le_dvsni_cert_challenge.conf") def perform(self): - """Peform a DVSNI challenge.""" + """Perform a DVSNI challenge.""" if not self.achalls: return [] # Save any changes to the 
configuration as a precaution diff --git a/letsencrypt-apache/letsencrypt_apache/parser.py b/letsencrypt-apache/letsencrypt_apache/parser.py index e14569abcea..da3fc97e7b4 100644 --- a/letsencrypt-apache/letsencrypt_apache/parser.py +++ b/letsencrypt-apache/letsencrypt_apache/parser.py @@ -51,7 +51,7 @@ def __init__(self, aug, root, ctl): # https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule # This needs to come before locations are set. self.modules = set() - self._init_modules() + self.init_modules() # Set up rest of locations self.loc.update(self._set_locations()) @@ -60,13 +60,15 @@ def __init__(self, aug, root, ctl): # Sites-available is not included naturally in configuration self._parse_file(os.path.join(self.root, "sites-available") + "/*") - def _init_modules(self): + def init_modules(self): """Iterates on the configuration until no new modules are loaded. ..todo:: This should be attempted to be done with a binary to avoid the iteration issue. Else... parse and enable mods at same time. """ + # Since modules are being initiated... clear existing set. 
+ self.modules = set() matches = self.find_dir("LoadModule") iterator = iter(matches) diff --git a/letsencrypt-apache/letsencrypt_apache/tests/complex_parsing_test.py b/letsencrypt-apache/letsencrypt_apache/tests/complex_parsing_test.py index d6112a48653..406b6c39e26 100644 --- a/letsencrypt-apache/letsencrypt_apache/tests/complex_parsing_test.py +++ b/letsencrypt-apache/letsencrypt_apache/tests/complex_parsing_test.py @@ -18,7 +18,7 @@ def setUp(self): # pylint: disable=arguments-differ self.setup_variables() # This needs to happen after due to setup_variables not being run # until after - self.parser._init_modules() # pylint: disable=protected-access + self.parser.init_modules() # pylint: disable=protected-access def tearDown(self): shutil.rmtree(self.temp_dir) diff --git a/letsencrypt-apache/letsencrypt_apache/tests/configurator_test.py b/letsencrypt-apache/letsencrypt_apache/tests/configurator_test.py index 6d16474b3a1..71599bd1d37 100644 --- a/letsencrypt-apache/letsencrypt_apache/tests/configurator_test.py +++ b/letsencrypt-apache/letsencrypt_apache/tests/configurator_test.py @@ -207,20 +207,11 @@ def test_enable_mod_no_disable(self, mock_exe_exists): self.assertRaises( errors.MisconfigurationError, self.config.enable_mod, "ssl") - @mock.patch("letsencrypt.le_util.run_script") - @mock.patch("letsencrypt.le_util.exe_exists") - @mock.patch("letsencrypt_apache.parser.subprocess.Popen") - def test_enable_site(self, mock_popen, mock_exe_exists, mock_run_script): - mock_popen().returncode = 0 - mock_popen().communicate.return_value = ("Define: DUMP_RUN_CFG", "") - mock_exe_exists.return_value = True - + def test_enable_site(self): # Default 443 vhost self.assertFalse(self.vh_truth[1].enabled) self.config.enable_site(self.vh_truth[1]) self.assertTrue(self.vh_truth[1].enabled) - # Mod enabled - self.assertTrue(mock_run_script.called) # Go again to make sure nothing fails self.config.enable_site(self.vh_truth[1]) @@ -302,7 +293,9 @@ def test_add_name_vhost(self): 
"NameVirtualHost", "*:80")) def test_prepare_server_https(self): - self.config.parser.modules.add("ssl_module") + mock_enable = mock.Mock() + self.config.enable_mod = mock_enable + mock_find = mock.Mock() mock_add_dir = mock.Mock() mock_find.return_value = [] @@ -312,7 +305,12 @@ def test_prepare_server_https(self): self.config.parser.add_dir_to_ifmodssl = mock_add_dir self.config.prepare_server_https("443") - self.config.prepare_server_https("8080") + self.assertEqual(mock_enable.call_args[1], {"temp": False}) + + self.config.prepare_server_https("8080", temp=True) + # Enable mod is temporary + self.assertEqual(mock_enable.call_args[1], {"temp": True}) + self.assertEqual(mock_add_dir.call_count, 2) def test_make_vhost_ssl(self): diff --git a/letsencrypt/plugins/disco.py b/letsencrypt/plugins/disco.py index 51d25112652..b6cdb1f99d0 100644 --- a/letsencrypt/plugins/disco.py +++ b/letsencrypt/plugins/disco.py @@ -215,7 +215,7 @@ def find_init(self, plugin): Returns ``None`` if ``plugin`` is not found in the registry. 
""" - # use list instead of set beacse PluginEntryPoint is not hashable + # use list instead of set because PluginEntryPoint is not hashable candidates = [plugin_ep for plugin_ep in self._plugins.itervalues() if plugin_ep.initialized and plugin_ep.init() is plugin] assert len(candidates) <= 1 diff --git a/letsencrypt/reverter.py b/letsencrypt/reverter.py index 363de65f4a3..8eed59156ac 100644 --- a/letsencrypt/reverter.py +++ b/letsencrypt/reverter.py @@ -75,7 +75,11 @@ def rollback_checkpoints(self, rollback=1): backups = os.listdir(self.config.backup_dir) backups.sort() - if len(backups) < rollback: + if not backups: + logger.warning( + "Let's Encrypt hasn't modified your configuration, so rollback " + "isn't available.") + elif len(backups) < rollback: logger.warning("Unable to rollback %d checkpoints, only %d exist", rollback, len(backups)) diff --git a/letsencrypt/tests/reverter_test.py b/letsencrypt/tests/reverter_test.py index 59a7e4d9a07..62c47f8d692 100644 --- a/letsencrypt/tests/reverter_test.py +++ b/letsencrypt/tests/reverter_test.py @@ -343,9 +343,16 @@ def test_finalize_checkpoint_no_rename_directory(self, mock_rename): @mock.patch("letsencrypt.reverter.logger") def test_rollback_too_many(self, mock_logger): + # Test no exist warning... self.reverter.rollback_checkpoints(1) self.assertEqual(mock_logger.warning.call_count, 1) + # Test Generic warning + mock_logger.warning.call_count = 0 + self._setup_three_checkpoints() + self.reverter.rollback_checkpoints(4) + self.assertEqual(mock_logger.warning.call_count, 1) + def test_multi_rollback(self): config3 = self._setup_three_checkpoints() self.reverter.rollback_checkpoints(3)
Fixes #690 and #691
https://api.github.com/repos/certbot/certbot/pulls/693
2015-08-17T18:37:25Z
2015-08-17T20:41:07Z
2015-08-17T20:41:06Z
2016-05-06T19:22:13Z
2,243
certbot/certbot
944
[pre-commit.ci] pre-commit autoupdate
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31e141049441..97603510b426 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.11 + rev: v0.1.13 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.5.3" + rev: "1.6.0" hooks: - id: pyproject-fmt @@ -61,7 +61,7 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/pre-commit/mirrors-prettier - rev: "v3.1.0" + rev: "v4.0.0-alpha.8" hooks: - id: prettier types_or: [toml, yaml]
<!--pre-commit.ci start--> updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.11 → v0.1.13](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.11...v0.1.13) - [github.com/tox-dev/pyproject-fmt: 1.5.3 → 1.6.0](https://github.com/tox-dev/pyproject-fmt/compare/1.5.3...1.6.0) - [github.com/pre-commit/mirrors-prettier: v3.1.0 → v4.0.0-alpha.8](https://github.com/pre-commit/mirrors-prettier/compare/v3.1.0...v4.0.0-alpha.8) <!--pre-commit.ci end-->
https://api.github.com/repos/TheAlgorithms/Python/pulls/11246
2024-01-15T17:59:26Z
2024-01-15T18:19:37Z
2024-01-15T18:19:37Z
2024-01-15T18:19:44Z
272
TheAlgorithms/Python
29,437
Quick fix for sms method
diff --git a/interpreter/core/computer/calendar/calendar.py b/interpreter/core/computer/calendar/calendar.py index 86ad36912..4c39797b4 100644 --- a/interpreter/core/computer/calendar/calendar.py +++ b/interpreter/core/computer/calendar/calendar.py @@ -30,7 +30,16 @@ def get_events(self, start_date=datetime.date.today(), end_date=None): script = f''' set theDate to date "{applescript_start_date}" set endDate to date "{applescript_end_date}" - + tell application "System Events" + set calendarIsRunning to (name of processes) contains "{self.calendar_app}" + if calendarIsRunning then + tell application "{self.calendar_app}" to activate + else + tell application "{self.calendar_app}" to launch + delay 1 -- Wait for the application to open + tell application "{self.calendar_app}" to activate + end if + end tell set outputText to "" @@ -159,6 +168,17 @@ def create_event(self, title: str, start_date: datetime.datetime, end_date: date return "Can't find a default calendar. Please try again and specify a calendar name." 
script = f''' + -- Open and activate calendar first + tell application "System Events" + set calendarIsRunning to (name of processes) contains "{self.calendar_app}" + if calendarIsRunning then + tell application "{self.calendar_app}" to activate + else + tell application "{self.calendar_app}" to launch + delay 1 -- Wait for the application to open + tell application "{self.calendar_app}" to activate + end if + end tell tell application "{self.calendar_app}" tell calendar "{calendar}" set startDate to date "{applescript_start_date}" @@ -196,6 +216,17 @@ def delete_event(self, event_title: str, start_date: datetime.datetime, calenda # Format datetime for AppleScript applescript_start_date = start_date.strftime('%B %d, %Y %I:%M:%S %p') script = f''' + -- Open and activate calendar first + tell application "System Events" + set calendarIsRunning to (name of processes) contains "{self.calendar_app}" + if calendarIsRunning then + tell application "{self.calendar_app}" to activate + else + tell application "{self.calendar_app}" to launch + delay 1 -- Wait for the application to open + tell application "{self.calendar_app}" to activate + end if + end tell tell application "{self.calendar_app}" -- Specify the name of the calendar where the event is located set myCalendar to calendar "{calendar}" @@ -237,6 +268,14 @@ def get_first_calendar(self) -> str: # Literally just gets the first calendar name of all the calendars on the system. 
AppleScript does not provide a way to get the "default" calendar script = f""" + -- Open calendar first + tell application "System Events" + set calendarIsRunning to (name of processes) contains "{self.calendar_app}" + if calendarIsRunning is false then + tell application "{self.calendar_app}" to launch + delay 1 -- Wait for the application to open + end if + end tell tell application "{self.calendar_app}" -- Get the name of the first calendar set firstCalendarName to name of first calendar diff --git a/interpreter/core/computer/contacts/contacts.py b/interpreter/core/computer/contacts/contacts.py index 61612d174..890c0dc95 100644 --- a/interpreter/core/computer/contacts/contacts.py +++ b/interpreter/core/computer/contacts/contacts.py @@ -30,7 +30,8 @@ def get_phone_number(self, contact_name): # Language model friendly error message return f"A contact for '{contact_name}' was not found, perhaps one of these similar contacts might be what you are looking for? {names} \n Please try again and provide a more specific contact name." else: - return stout + return stout.replace('\n', '') + def get_email_address(self, contact_name): """ @@ -56,7 +57,7 @@ def get_email_address(self, contact_name): # Language model friendly error message return f"A contact for '{contact_name}' was not found, perhaps one of these similar contacts might be what you are looking for? {names} \n Please try again and provide a more specific contact name." 
else: - return stout + return stout.replace('\n', '') def get_full_names_from_first_name(self, first_name): diff --git a/interpreter/core/computer/mail/mail.py b/interpreter/core/computer/mail/mail.py index 051de95ce..b5c2e97e7 100644 --- a/interpreter/core/computer/mail/mail.py +++ b/interpreter/core/computer/mail/mail.py @@ -1,3 +1,4 @@ +import os import re import subprocess import platform @@ -17,6 +18,10 @@ def get(self, number=5, unread: bool = True): if platform.system() != 'Darwin': return "This method is only supported on MacOS" + too_many_emails_msg = "" + if number > 50: + number = min(number, 50) + too_many_emails_msg = "This method is limited to 10 emails, returning the first 10: " # This is set up to retry if the number of emails is less than the number requested, but only a max of three times retries = 0 # Initialize the retry counter while retries < 3: @@ -45,10 +50,10 @@ def get(self, number=5, unread: bool = True): continue break elif stdout: - print(stdout) - return stdout - break - return None # Return None if the operation fails after max_retries + if too_many_emails_msg: + return f"{too_many_emails_msg}\n\n{stdout}" + else: + return stdout def send(self, to, subject, body, attachments=None): """ @@ -57,11 +62,24 @@ def send(self, to, subject, body, attachments=None): if platform.system() != 'Darwin': return "This method is only supported on MacOS" + # Strip newlines from the to field + to = to.replace("\n", "") + attachment_clause = '' + delay_seconds = 5 # Default delay in seconds + if attachments: + formatted_attachments = [self.format_path_for_applescript(path) for path in attachments] + # Generate AppleScript to attach each file - attachment_clause = '\n'.join(f'make new attachment with properties {{file name:"{path}"}} at after the last paragraph of the content of new_message' for path in attachments) + attachment_clause = '\n'.join(f'make new attachment with properties {{file name:{path}}} at after the last paragraph of the content of 
new_message' for path in formatted_attachments) + + + # Calculate the delay based on the size of the attachments + delay_seconds = self.calculate_upload_delay(attachments) + + print(f"Delaying for {delay_seconds} seconds") # In the future, we might consider allowing the llm to specify an email to send from script = f''' tell application "{self.mail_app}" @@ -71,6 +89,7 @@ def send(self, to, subject, body, attachments=None): make new to recipient at end of to recipients with properties {{address:"{to}"}} {attachment_clause} end tell + {f'delay {delay_seconds}' if attachments else ''} send new_message end tell ''' @@ -82,19 +101,46 @@ def send(self, to, subject, body, attachments=None): def unread_count(self): """ - Retrieves the count of unread emails in the inbox. + Retrieves the count of unread emails in the inbox, limited to 50. """ if platform.system() != 'Darwin': return "This method is only supported on MacOS" script = f''' tell application "{self.mail_app}" - return count of (messages of inbox whose read status is false) + set unreadMessages to (messages of inbox whose read status is false) + if (count of unreadMessages) > 50 then + return 50 + else + return count of unreadMessages + end if end tell ''' try: - return int(run_applescript(script)) + unreads = int(run_applescript(script)) + if unreads >= 50: + return "50 or more" + return unreads except subprocess.CalledProcessError as e: print(e) return 0 + # Estimate how long something will take to upload + def calculate_upload_delay(self, attachments): + try: + total_size_mb = sum(os.path.getsize(att) for att in attachments) / (1024 * 1024) + # Assume 1 MBps upload speed, which is conservative on purpose + upload_speed_mbps = 1 + estimated_time_seconds = total_size_mb / upload_speed_mbps + return round(max(0.2, estimated_time_seconds + 1), 1) # Add 1 second buffer, ensure a minimum delay of 1.2 seconds, rounded to one decimal place + except: + # Return a default delay of 5 seconds if an error occurs + return 
5 + + + def format_path_for_applescript(self, file_path): + # Escape backslashes, quotes, and curly braces for AppleScript + file_path = file_path.replace('\\', '\\\\').replace('"', '\\"').replace('{', '\\{').replace('}', '\\}') + # Convert to a POSIX path and quote for AppleScript + posix_path = f'POSIX file "{file_path}"' + return posix_path \ No newline at end of file diff --git a/interpreter/core/computer/sms/sms.py b/interpreter/core/computer/sms/sms.py index ebe823d3d..4a811c3e0 100644 --- a/interpreter/core/computer/sms/sms.py +++ b/interpreter/core/computer/sms/sms.py @@ -18,6 +18,8 @@ def send(self, to, message): if platform.system() != 'Darwin': return "This method is only supported on MacOS" + # Remove any newline characters from the recipient number. + to = to.replace("\n", "") # Escape double quotes in the message and recipient variables to prevent script errors. escaped_message = message.replace('"', '\\"') escaped_to = to.replace('"', '\\"')
### Describe the changes you have made: Prevents newline characters from breaking computer.sms.send ### Reference any relevant issues (e.g. "Fixes #000"): ### Pre-Submission Checklist (optional but appreciated): - [ ] I have included relevant documentation updates (stored in /docs) - [x] I have read `docs/CONTRIBUTING.md` - [x] I have read `docs/ROADMAP.md` ### OS Tests (optional but appreciated): - [ ] Tested on Windows - [x] Tested on MacOS - [ ] Tested on Linux
https://api.github.com/repos/OpenInterpreter/open-interpreter/pulls/1067
2024-03-12T00:00:17Z
2024-03-12T05:55:31Z
2024-03-12T05:55:31Z
2024-03-12T05:55:55Z
2,436
OpenInterpreter/open-interpreter
40,767
Korean: add missing translations
diff --git a/selfdrive/ui/translations/main_ko.ts b/selfdrive/ui/translations/main_ko.ts index 1d7ddbe6e842e5..73a5da5ad78b2e 100644 --- a/selfdrive/ui/translations/main_ko.ts +++ b/selfdrive/ui/translations/main_ko.ts @@ -1195,12 +1195,12 @@ location set</source> <message> <location filename="../qt/offroad/settings.cc" line="64"/> <source>🌮 End-to-end longitudinal (extremely alpha) 🌮</source> - <translation type="unfinished"></translation> + <translation>🌮 e2e long 사용 (매우 실험적) 🌮 </translation> </message> <message> <location filename="../qt/offroad/settings.cc" line="65"/> <source>Let the driving model control the gas and brakes, openpilot will drive as it thinks a human would. Super experimental.</source> - <translation type="unfinished"></translation> + <translation>주행모델이 가속과 감속을 제어하도록 하면 openpilot은 운전자가 생각하는것처럼 운전합니다. (매우 실험적)</translation> </message> <message> <location filename="../qt/offroad/settings.cc" line="58"/>
translation unfinished fix
https://api.github.com/repos/commaai/openpilot/pulls/25647
2022-09-02T13:57:45Z
2022-09-05T23:11:23Z
2022-09-05T23:11:23Z
2022-09-07T10:14:11Z
304
commaai/openpilot
8,958
Duplicate word in comments
diff --git a/mvc.py b/mvc.py index 7df613fc..42137ef7 100644 --- a/mvc.py +++ b/mvc.py @@ -6,7 +6,7 @@ def __iter__(self): raise NotImplementedError def get(self, item): - """Returns an an object with a .items() call method + """Returns an object with a .items() call method that iterates over key,value pairs of its information.""" raise NotImplementedError
https://api.github.com/repos/faif/python-patterns/pulls/130
2016-03-07T15:38:08Z
2016-03-08T19:51:59Z
2016-03-08T19:51:59Z
2016-03-08T19:51:59Z
112
faif/python-patterns
33,455
Add period when finished sentence.
diff --git a/README.md b/README.md index 8f0432ffc..9ed3b87ca 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,7 @@ A curated list of awesome Python frameworks, libraries and software. Inspired by * [pyenv](https://github.com/yyuu/pyenv) - Simple Python version management. * [virtualenv](https://pypi.python.org/pypi/virtualenv) - A tool to create isolated Python environments. -* [virtualenvwrapper](https://pypi.python.org/pypi/virtualenvwrapper) - A set of extensions to virtualenv +* [virtualenvwrapper](https://pypi.python.org/pypi/virtualenvwrapper) - A set of extensions to virtualenv. * [virtualenv-api](https://github.com/sjkingo/virtualenv-api) - An API for virtualenv and pip. * [pew](https://pypi.python.org/pypi/pew/) - A set of tools to manage multiple virtual environments. * [Vex](https://github.com/sashahart/vex) - Run a command in the named virtualenv.
https://api.github.com/repos/vinta/awesome-python/pulls/256
2014-11-14T06:46:21Z
2014-11-14T07:54:32Z
2014-11-14T07:54:32Z
2014-11-14T07:54:32Z
253
vinta/awesome-python
26,905
Add Carbon Interface API
diff --git a/README.md b/README.md index a29b46e8f4..8309e6481b 100644 --- a/README.md +++ b/README.md @@ -322,6 +322,7 @@ API | Description | Auth | HTTPS | CORS | API | Description | Auth | HTTPS | CORS | |---|---|---|---|---| | [AirVisual](https://airvisual.com/api) | Air quality and weather data | `apiKey` | Yes | Unknown | +| [Carbon Interface](https://docs.carboninterface.com/) | API to calculate carbon (C02) emissions estimates for common C02 emitting activities | `apiKey` | Yes | Yes | | [GrünstromIndex](https://www.corrently.de/hintergrund/gruenstromindex/index.html) | Green Power Index for Germany (Grünstromindex/GSI) | No | No | Yes | | [OpenAQ](https://docs.openaq.org/) | Open air quality data | `apiKey` | Yes | Unknown | | [PM25.in](http://www.pm25.in/api_doc) | Air quality of China | `apiKey` | No | Unknown |
<!-- Thank you for taking the time to work on a Pull Request for this project! --> <!-- To ensure your PR is dealt with swiftly please check the following: --> - [x] My submission is formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md) - [x] My addition is ordered alphabetically - [x] My submission has a useful description - [x] The description does not end with punctuation - [x] Each table column is padded with one space on either side - [x] I have searched the repository for any relevant issues or pull requests - [x] Any category I am creating has the minimum requirement of 3 items - [x] All changes have been [squashed][squash-link] into a single commit [squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
https://api.github.com/repos/public-apis/public-apis/pulls/1503
2020-12-27T21:45:52Z
2021-04-14T20:56:25Z
2021-04-14T20:56:25Z
2021-04-14T20:56:26Z
253
public-apis/public-apis
35,427
COMPAT: mpl 3.0
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 481c31d2410a9..3e1711edb0f27 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -193,6 +193,8 @@ Other Enhancements - :meth:`Series.resample` and :meth:`DataFrame.resample` have gained the :meth:`Resampler.quantile` (:issue:`15023`). - :meth:`Index.to_frame` now supports overriding column name(s) (:issue:`22580`). - New attribute :attr:`__git_version__` will return git commit sha of current build (:issue:`21295`). +- Compatibility with Matplotlib 3.0 (:issue:`22790`). + .. _whatsnew_0240.api_breaking: Backwards incompatible API changes diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py index 46ebd4217862d..5032b259e9831 100644 --- a/pandas/plotting/_compat.py +++ b/pandas/plotting/_compat.py @@ -29,3 +29,4 @@ def inner(): _mpl_ge_2_0_1 = _mpl_version('2.0.1', operator.ge) _mpl_ge_2_1_0 = _mpl_version('2.1.0', operator.ge) _mpl_ge_2_2_0 = _mpl_version('2.2.0', operator.ge) +_mpl_ge_3_0_0 = _mpl_version('3.0.0', operator.ge) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 4fa3b51c60ee4..77c97412bd3d7 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -32,7 +32,8 @@ from pandas.plotting._compat import (_mpl_ge_1_3_1, _mpl_ge_1_5_0, - _mpl_ge_2_0_0) + _mpl_ge_2_0_0, + _mpl_ge_3_0_0) from pandas.plotting._style import (plot_params, _get_standard_colors) from pandas.plotting._tools import (_subplots, _flatten, table, @@ -843,11 +844,16 @@ def _plot_colorbar(self, ax, **kwds): # For a more detailed description of the issue # see the following link: # https://github.com/ipython/ipython/issues/11215 - img = ax.collections[0] cbar = self.fig.colorbar(img, ax=ax, **kwds) + + if _mpl_ge_3_0_0(): + # The workaround below is no longer necessary. 
+ return + points = ax.get_position().get_points() cbar_points = cbar.ax.get_position().get_points() + cbar.ax.set_position([cbar_points[0, 0], points[0, 1], cbar_points[1, 0] - cbar_points[0, 0], diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 09687dd97bd43..5c88926828fa6 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -57,6 +57,7 @@ def setup_method(self, method): self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0() self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1() self.mpl_ge_2_2_0 = plotting._compat._mpl_ge_2_2_0() + self.mpl_ge_3_0_0 = plotting._compat._mpl_ge_3_0_0() if self.mpl_ge_1_4_0: self.bp_n_objects = 7 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 0abe82d138e5e..de6f6b931987c 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -151,7 +151,7 @@ def test_high_freq(self): freaks = ['ms', 'us'] for freq in freaks: _, ax = self.plt.subplots() - rng = date_range('1/1/2012', periods=100000, freq=freq) + rng = date_range('1/1/2012', periods=100, freq=freq) ser = Series(np.random.randn(len(rng)), rng) _check_plot_works(ser.plot, ax=ax) @@ -1492,7 +1492,11 @@ def test_matplotlib_scatter_datetime64(self): ax.scatter(x="time", y="y", data=df) fig.canvas.draw() label = ax.get_xticklabels()[0] - assert label.get_text() == '2017-12-12' + if self.mpl_ge_3_0_0: + expected = "2017-12-08" + else: + expected = "2017-12-12" + assert label.get_text() == expected def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
Closes #22790
https://api.github.com/repos/pandas-dev/pandas/pulls/22870
2018-09-28T13:06:29Z
2018-09-28T15:06:16Z
2018-09-28T15:06:16Z
2018-09-28T17:59:41Z
1,290
pandas-dev/pandas
45,236
attempt to fix the repetition/hallucination issue identified in #1046
diff --git a/whisper/audio.py b/whisper/audio.py index a19b7ab0..513ab7c9 100644 --- a/whisper/audio.py +++ b/whisper/audio.py @@ -1,6 +1,6 @@ import os from functools import lru_cache -from typing import Union +from typing import Optional, Union import ffmpeg import numpy as np @@ -15,10 +15,8 @@ N_MELS = 80 HOP_LENGTH = 160 CHUNK_LENGTH = 30 -N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk -N_FRAMES = exact_div( - N_SAMPLES, HOP_LENGTH -) # 3000: number of frames in a mel spectrogram input +N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk +N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000 frames in a mel spectrogram input N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2 # the initial convolutions has stride 2 FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 10ms per audio frame @@ -100,7 +98,10 @@ def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor: def log_mel_spectrogram( - audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS + audio: Union[str, np.ndarray, torch.Tensor], + n_mels: int = N_MELS, + padding: int = 0, + device: Optional[Union[str, torch.device]] = None, ): """ Compute the log-Mel spectrogram of @@ -113,6 +114,12 @@ def log_mel_spectrogram( n_mels: int The number of Mel-frequency filters, only 80 is supported + padding: int + Number of zero samples to pad to the right + + device: Optional[Union[str, torch.device]] + If given, the audio tensor is moved to this device before STFT + Returns ------- torch.Tensor, shape = (80, n_frames) @@ -123,6 +130,10 @@ def log_mel_spectrogram( audio = load_audio(audio) audio = torch.from_numpy(audio) + if device is not None: + audio = audio.to(device) + if padding > 0: + audio = F.pad(audio, (0, padding)) window = torch.hann_window(N_FFT).to(audio.device) stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True) magnitudes = stft[..., :-1].abs() ** 2 diff --git a/whisper/transcribe.py b/whisper/transcribe.py index 
20f01477..773e6365 100644 --- a/whisper/transcribe.py +++ b/whisper/transcribe.py @@ -11,6 +11,7 @@ FRAMES_PER_SECOND, HOP_LENGTH, N_FRAMES, + N_SAMPLES, SAMPLE_RATE, log_mel_spectrogram, pad_or_trim, @@ -116,7 +117,9 @@ def transcribe( if dtype == torch.float32: decode_options["fp16"] = False - mel = log_mel_spectrogram(audio) + # Pad 30-seconds of silence to the input audio, for slicing + mel = log_mel_spectrogram(audio, padding=N_SAMPLES) + content_frames = mel.shape[-1] - N_FRAMES if decode_options.get("language", None) is None: if not model.is_multilingual: @@ -212,14 +215,13 @@ def new_segment( } # show the progress bar when verbose is False (if True, transcribed text will be printed) - num_frames = mel.shape[-1] with tqdm.tqdm( - total=num_frames, unit="frames", disable=verbose is not False + total=content_frames, unit="frames", disable=verbose is not False ) as pbar: - while seek < num_frames: + while seek < content_frames: time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE) - mel_segment = mel[:, seek:] - segment_size = min(mel_segment.shape[-1], N_FRAMES) + mel_segment = mel[:, seek : seek + N_FRAMES] + segment_size = min(N_FRAMES, content_frames - seek) segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE mel_segment = pad_or_trim(mel_segment, N_FRAMES).to(model.device).to(dtype) @@ -246,20 +248,18 @@ def new_segment( current_tokens = [] timestamp_tokens: torch.Tensor = tokens.ge(tokenizer.timestamp_begin) - consecutive = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[ - 0 - ].add_(1) - if ( - len(consecutive) > 0 - ): # if the output contains two consecutive timestamp tokens - if ended_with_single_timestamp := timestamp_tokens[-2:].tolist() == [ - False, - True, - ]: - consecutive = consecutive.tolist() + [len(tokens)] + single_timestamp_ending = timestamp_tokens[-2:].tolist() == [False, True] + + consecutive = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + consecutive.add_(1) + if len(consecutive) > 0: + # if the 
output contains two consecutive timestamp tokens + slices = consecutive.tolist() + if single_timestamp_ending: + slices.append(len(tokens)) last_slice = 0 - for current_slice in consecutive: + for current_slice in slices: sliced_tokens = tokens[last_slice:current_slice] start_timestamp_pos = ( sliced_tokens[0].item() - tokenizer.timestamp_begin @@ -278,7 +278,7 @@ def new_segment( current_tokens.append(sliced_tokens.tolist()) last_slice = current_slice - if ended_with_single_timestamp: + if single_timestamp_ending: # single timestamp at the end means no speech after the last timestamp. seek += segment_size else: @@ -329,7 +329,7 @@ def new_segment( word_end_timestamps = [ w["end"] for s in current_segments for w in s["words"] ] - if len(consecutive) > 0 and len(word_end_timestamps) > 0: + if not single_timestamp_ending and len(word_end_timestamps) > 0: seek_shift = round( (word_end_timestamps[-1] - time_offset) * FRAMES_PER_SECOND ) @@ -356,7 +356,7 @@ def new_segment( ) # update progress bar - pbar.update(min(num_frames, seek) - previous_seek) + pbar.update(min(content_frames, seek) - previous_seek) return dict( text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]),
https://api.github.com/repos/openai/whisper/pulls/1052
2023-03-07T21:48:40Z
2023-03-08T04:08:46Z
2023-03-08T04:08:46Z
2023-03-14T19:35:58Z
1,585
openai/whisper
45,785
add oa_leet10k instruction dataset
diff --git a/data/datasets/__init__.py b/data/datasets/__init__.py index a9ac0b21c1..fb573c7d6c 100644 --- a/data/datasets/__init__.py +++ b/data/datasets/__init__.py @@ -19,6 +19,7 @@ "zhihu-kol": "wangrui6/zhihu-kol", "tell_a_joke": "mikegarts/oa_tell_a_joke_20000", "oa_wiki_qa_bart_10000row": "michaelthwan/oa_wiki_qa_bart_10000row", + "oa_leet10k": "ehartford/oa_leet10k", } SAFETY_DATASETS = {
add oa_leet10k instruction dataset
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2407
2023-04-08T17:44:36Z
2023-04-09T13:09:06Z
2023-04-09T13:09:06Z
2023-04-09T13:09:06Z
175
LAION-AI/Open-Assistant
37,029
DeepSpeed webinar announcement
diff --git a/README.md b/README.md index edfcb2a98e6c..5b441da16b11 100755 --- a/README.md +++ b/README.md @@ -26,6 +26,8 @@ information [here](https://innovation.microsoft.com/en-us/exploring-ai-at-scale) # News +* [2020/07/24] [DeepSpeed webinar](https://note.microsoft.com/MSR-Webinar-DeepSpeed-Registration-Live.html) on August 6th, 2020 + [![DeepSpeed webinar](docs/assets/images/webinar-aug2020.png)](https://note.microsoft.com/MSR-Webinar-DeepSpeed-Registration-Live.html) * [2020/05/19] [ZeRO-2 & DeepSpeed: Shattering Barriers of Deep Learning Speed & Scale](https://www.microsoft.com/en-us/research/blog/zero-2-deepspeed-shattering-barriers-of-deep-learning-speed-scale/) <span style="color:dodgerblue">**[_NEW_]**</span> * [2020/05/19] [An Order-of-Magnitude Larger and Faster Training with ZeRO-2](https://www.deepspeed.ai/news/2020/05/18/zero-stage2.html) diff --git a/docs/_layouts/news-home.html b/docs/_layouts/news-home.html index 960576b27cd7..8248eed5b551 100644 --- a/docs/_layouts/news-home.html +++ b/docs/_layouts/news-home.html @@ -16,6 +16,9 @@ <h2>{{ site.data.ui-text[site.locale].recent_posts | default: "Recent Posts" }}< {% assign news = posts | where: "sneak_preview", "false" %} {% for post in news %} {% include archive-single.html %} + {% if post.image %} + <a href="{{ post.link }}"><img src="{{ post.image }}"></a> + {% endif %} {% endfor %} {% include paginator.html %} diff --git a/docs/_posts/2020-07-24-deepspeed-webinar.md b/docs/_posts/2020-07-24-deepspeed-webinar.md new file mode 100644 index 000000000000..6c6bd6bbe581 --- /dev/null +++ b/docs/_posts/2020-07-24-deepspeed-webinar.md @@ -0,0 +1,10 @@ +--- +layout: single +title: "DeepSpeed webinar on August 6th, 2020" +excerpt: "" +categories: news +link: https://note.microsoft.com/MSR-Webinar-DeepSpeed-Registration-Live.html +image: /assets/images/webinar-aug2020.png +new_post: true +date: 2020-07-24 00:00:00 +--- diff --git a/docs/assets/images/webinar-aug2020.png 
b/docs/assets/images/webinar-aug2020.png new file mode 100644 index 000000000000..cb8577a4aea4 Binary files /dev/null and b/docs/assets/images/webinar-aug2020.png differ diff --git a/docs/index.md b/docs/index.md index 7b99cb550db0..6dea83db268f 100755 --- a/docs/index.md +++ b/docs/index.md @@ -27,9 +27,13 @@ information [here](https://innovation.microsoft.com/en-us/exploring-ai-at-scale) {% assign news = site.posts | where: "sneak_preview", "false" %} {% for post in news limit:5 %} {% if post.link %} - * [{{ post.date | date: "%Y/%m/%d" }}] [{{ post.title }}]({{ post.link }}) {% if post.new_post %} <span style="color:dodgerblue">**NEW!**</span> {% endif %} + {% if post.image %} +* [{{ post.date | date: "%Y/%m/%d" }}] [ {{ post.title }} {% if post.new_post %} <span style="color:dodgerblue">**NEW!**</span> {% endif %} ![]({{ post.image }}) ]({{ post.link }}) + {% else %} +* [{{ post.date | date: "%Y/%m/%d" }}] [{{ post.title }}]({{ post.link }}) {% if post.new_post %} <span style="color:dodgerblue">**NEW!**</span> {% endif %} + {% endif %} {% else %} - * [{{ post.date | date: "%Y/%m/%d"}}] [{{ post.title }}]({{ post.url }}) {% if post.new_post %} <span style="color:dodgerblue">**NEW!**</span> {% endif %} +* [{{ post.date | date: "%Y/%m/%d"}}] [{{ post.title }}]({{ post.url }}) {% if post.new_post %} <span style="color:dodgerblue">**NEW!**</span> {% endif %} {% endif %} {% endfor %}
![image](https://user-images.githubusercontent.com/620322/88426477-1b081080-cda6-11ea-976d-f180daf9a653.png)
https://api.github.com/repos/microsoft/DeepSpeed/pulls/301
2020-07-24T19:06:52Z
2020-07-25T05:13:12Z
2020-07-25T05:13:12Z
2020-07-25T05:13:16Z
1,136
microsoft/DeepSpeed
10,124
AutoAnchor improved initialization robustness
diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 51d4de306ef..a631c21a3b2 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -125,15 +125,17 @@ def print_results(k, verbose=True): wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - # Kmeans calculation - LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') - s = wh.std(0) # sigmas for whitening - k = kmeans(wh / s, n, iter=30)[0] * s # points - if len(k) != n: # kmeans may return fewer points than requested if wh is insufficient or too similar - LOGGER.warning(f'{PREFIX}WARNING: scipy.cluster.vq.kmeans returned only {len(k)} of {n} requested points') + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) # Plot
Resolves https://github.com/ultralytics/yolov5/issues/6809#issuecomment-1054338046 ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Refinement of anchor box initialization in YOLOv5's autoanchor utility to handle edge cases. ### 📊 Key Changes - 🔄 Switch from unconditional k-means initialization to conditional with fallback strategy. - 🚸 Added assertions to ensure k-means is applied only when the algorithm can determine the requested number of anchors. - 🔒 In case of an exception (e.g., if k-means fails to return the desired number of anchors), the initialization switches to a random strategy. - ✂️ Changed how `wh` (width-height tensor) and `wh0` (unfiltered width-height tensor) are created, now using a generator expression for brevity and readability. ### 🎯 Purpose & Impact - 🛡️ **Purpose:** To improve the reliability of anchor box initialization by adding constraints and fallback mechanisms. - 🏃‍♂️ **Impact**: Users should experience more consistent performance, especially in cases where k-means clustering previously fell short, resulting in a better object detection model training experience.
https://api.github.com/repos/ultralytics/yolov5/pulls/6854
2022-03-04T09:25:02Z
2022-03-04T09:32:18Z
2022-03-04T09:32:18Z
2024-01-19T12:27:53Z
514
ultralytics/yolov5
25,022
allow moderators to ban user
diff --git a/website/package-lock.json b/website/package-lock.json index 9b369be40f..18b1fc19ab 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -85,6 +85,7 @@ "msw-storybook-addon": "^1.7.0", "prettier": "2.8.1", "prisma": "^4.7.1", + "ts-essentials": "^9.3.0", "ts-node": "^10.9.1", "typescript": "^4.9.4" } @@ -36509,6 +36510,15 @@ "node": ">=6.10" } }, + "node_modules/ts-essentials": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/ts-essentials/-/ts-essentials-9.3.0.tgz", + "integrity": "sha512-XeiCboEyBG8UqXZtXl59bWEi4ZgOqRsogFDI6WDGIF1LmzbYiAkIwjkXN6zZWWl4re/lsOqMlYfe8KA0XiiEPw==", + "dev": true, + "peerDependencies": { + "typescript": ">=4.1.0" + } + }, "node_modules/ts-node": { "version": "10.9.1", "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", @@ -66035,6 +66045,13 @@ "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==" }, + "ts-essentials": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/ts-essentials/-/ts-essentials-9.3.0.tgz", + "integrity": "sha512-XeiCboEyBG8UqXZtXl59bWEi4ZgOqRsogFDI6WDGIF1LmzbYiAkIwjkXN6zZWWl4re/lsOqMlYfe8KA0XiiEPw==", + "dev": true, + "requires": {} + }, "ts-node": { "version": "10.9.1", "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", diff --git a/website/package.json b/website/package.json index bb09c986b7..5e90432a48 100644 --- a/website/package.json +++ b/website/package.json @@ -103,6 +103,7 @@ "msw-storybook-addon": "^1.7.0", "prettier": "2.8.1", "prisma": "^4.7.1", + "ts-essentials": "^9.3.0", "ts-node": "^10.9.1", "typescript": "^4.9.4" }, diff --git a/website/src/components/Icons/Discord.tsx b/website/src/components/Icons/Discord.tsx index ea1118fb7c..68a0d32896 100644 --- a/website/src/components/Icons/Discord.tsx +++ b/website/src/components/Icons/Discord.tsx @@ -1,8 +1,11 @@ -import { 
LucideIcon } from "lucide-react"; +import { LucideIcon, LucideProps } from "lucide-react"; +import { forwardRef } from "react"; -export const Discord: LucideIcon = ({ size = 24, ...rest }) => { +// eslint-disable-next-line react/display-name +export const Discord: LucideIcon = forwardRef<SVGSVGElement, LucideProps>(function ({ size = 24, ...rest }, ref) { return ( <svg + ref={ref} xmlns="http://www.w3.org/2000/svg" viewBox="0 0 127.14 96.36" fill="currentColor" @@ -13,4 +16,4 @@ export const Discord: LucideIcon = ({ size = 24, ...rest }) => { <path d="M107.7 8.07A105.15 105.15 0 0 0 81.47 0a72.06 72.06 0 0 0-3.36 6.83 97.68 97.68 0 0 0-29.11 0A72.37 72.37 0 0 0 45.64 0a105.89 105.89 0 0 0-26.25 8.09C2.79 32.65-1.71 56.6.54 80.21a105.73 105.73 0 0 0 32.17 16.15 77.7 77.7 0 0 0 6.89-11.11 68.42 68.42 0 0 1-10.85-5.18c.91-.66 1.8-1.34 2.66-2a75.57 75.57 0 0 0 64.32 0c.87.71 1.76 1.39 2.66 2a68.68 68.68 0 0 1-10.87 5.19 77 77 0 0 0 6.89 11.1 105.25 105.25 0 0 0 32.19-16.14c2.64-27.38-4.51-51.11-18.9-72.15ZM42.45 65.69C36.18 65.69 31 60 31 53s5-12.74 11.43-12.74S54 46 53.89 53s-5.05 12.69-11.44 12.69Zm42.24 0C78.41 65.69 73.25 60 73.25 53s5-12.74 11.44-12.74S96.23 46 96.12 53s-5.04 12.69-11.43 12.69Z" /> </svg> ); -}; +}); diff --git a/website/src/components/Messages/AdminMessageTable.tsx b/website/src/components/Messages/AdminMessageTable.tsx index 90866459ce..7339d68602 100644 --- a/website/src/components/Messages/AdminMessageTable.tsx +++ b/website/src/components/Messages/AdminMessageTable.tsx @@ -8,7 +8,7 @@ import NextLink from "next/link"; import { ROUTES } from "src/lib/routes"; import { Message } from "src/types/Conversation"; import { isKnownEmoji } from "src/types/Emoji"; -import { StrictOmit } from "src/types/utils"; +import { StrictOmit } from "ts-essentials"; import { DataTable, DataTableProps } from "../DataTable/DataTable"; import { DataTableAction } from "../DataTable/DataTableAction"; diff --git a/website/src/components/RoleSelect.tsx 
b/website/src/components/RoleSelect.tsx index 970ffdce52..e5f8e59ae4 100644 --- a/website/src/components/RoleSelect.tsx +++ b/website/src/components/RoleSelect.tsx @@ -1,9 +1,15 @@ import { Select, SelectProps } from "@chakra-ui/react"; import { forwardRef } from "react"; -import { ElementOf } from "src/types/utils"; +import { ValueOf } from "ts-essentials"; -export const roles = ["general", "admin", "banned", "moderator"] as const; -export type Role = ElementOf<typeof roles>; +export const ROLES = { + GERNERAL: "general", + BANNED: "banned", + ADMIN: "admin", + MODERATOR: "moderator", +} as const; + +export type Role = ValueOf<typeof ROLES>; type RoleSelectProps = Omit<SelectProps, "defaultValue"> & { defaultValue?: Role; @@ -13,7 +19,7 @@ type RoleSelectProps = Omit<SelectProps, "defaultValue"> & { export const RoleSelect = forwardRef<HTMLSelectElement, RoleSelectProps>((props, ref) => { return ( <Select {...props} ref={ref}> - {roles.map((role) => ( + {Object.values(ROLES).map((role) => ( <option value={role} key={role}> {role} </option> diff --git a/website/src/pages/api/admin/update_user.ts b/website/src/pages/api/admin/update_user.ts index 13ee97f51a..af914b431f 100644 --- a/website/src/pages/api/admin/update_user.ts +++ b/website/src/pages/api/admin/update_user.ts @@ -1,21 +1,23 @@ -import { withRole } from "src/lib/auth"; +import { ROLES } from "src/components/RoleSelect"; +import { withAnyRole } from "src/lib/auth"; import { createApiClient } from "src/lib/oasst_client_factory"; import prisma from "src/lib/prismadb"; /** * Update's the user's data in the database. Accessible only to admins. */ -const handler = withRole("admin", async (req, res, token) => { - const { id, auth_method, user_id, notes, role, show_on_leaderboard } = req.body; - - const oasstApiClient = await createApiClient(token); - // If the user is authorized by the web, update their role. 
- if (auth_method === "local") { - await prisma.user.update({ - where: { id }, - data: { role }, - }); +const handler = withAnyRole(["admin", "moderator"], async (req, res, token) => { + const { id, user_id, notes, role, show_on_leaderboard } = req.body; + // mod can't update user role to mod or admin + if (token.role === ROLES.MODERATOR && (role === ROLES.MODERATOR || role === ROLES.ADMIN)) { + return res.status(403).json({}); } + const oasstApiClient = await createApiClient(token); + await prisma.user.update({ + where: { id }, + data: { role }, + }); + // Tell the backend the user's enabled or not enabled status. await oasstApiClient.set_user_status(user_id, role !== "banned", notes, show_on_leaderboard); diff --git a/website/src/types/utils.ts b/website/src/types/utils.ts deleted file mode 100644 index f3ac0af3e9..0000000000 --- a/website/src/types/utils.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* eslint-disable @typescript-eslint/no-explicit-any */ -// https://github.com/ts-essentials/ts-essentials/blob/25cae45c162f8784e3cdae8f43783d0c66370a57/lib/types.ts#L437 -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export type ElementOf<T extends readonly any[]> = T extends readonly (infer ET)[] ? ET : never; -type AnyRecord<T = any> = Record<KeyofBase, T>; -type KeyofBase = keyof any; - -export type AnyArray<T = any> = Array<T> | ReadonlyArray<T>; - -export type StrictOmit<T extends AnyRecord, K extends keyof T> = T extends AnyArray ? never : Omit<T, K>;
- Allow mod to ban users (mods can only ban and unban user, they can't promote user to mod or admin, they can't ban other mods and admins too). - Fix a bug where the admin can not ban user login in discord. - Forward ref on discord icon component to support tooltip
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/1458
2023-02-11T06:35:42Z
2023-02-11T08:10:10Z
2023-02-11T08:10:10Z
2023-02-11T08:10:11Z
2,783
LAION-AI/Open-Assistant
37,603
Add files via upload
diff --git a/Armstrong_number.py b/Armstrong_number.py new file mode 100644 index 0000000000..be6ca4bd68 --- /dev/null +++ b/Armstrong_number.py @@ -0,0 +1,12 @@ +#checking for armstrong number +a=input('Enter a number') +n=int(a) +S=0 +while n>0: + d=n%10 + S=S+d*d*d + n=n/10 +if int(a)==S: + print("Armstrong Number") +else: + print("Not an Armstrong Number")
Program to find armstrong number
https://api.github.com/repos/geekcomputers/Python/pulls/1451
2022-01-03T08:45:11Z
2022-01-03T11:01:22Z
2022-01-03T11:01:22Z
2022-01-03T11:01:22Z
138
geekcomputers/Python
31,374
HKG: Car Port for Kia Sorento 2022
diff --git a/RELEASES.md b/RELEASES.md index eea69d295f5b1e..1b5ef8f7a7b97c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -6,6 +6,7 @@ Version 0.9.1 (2022-12-XX) * Chevrolet Bolt EV 2022-23 support thanks to JasonJShuler! * Genesis GV60 2023 support thanks to sunnyhaibin! * Hyundai Tucson 2022-23 support +* Kia Sorento 2022-23 support thanks to sunnyhaibin! * Kia Sorento Plug-in Hybrid 2022 support thanks to sunnyhaibin! Version 0.9.0 (2022-11-21) diff --git a/docs/CARS.md b/docs/CARS.md index 930b52c8a262f8..ac374501dc0c3f 100644 --- a/docs/CARS.md +++ b/docs/CARS.md @@ -4,7 +4,7 @@ A supported vehicle is one that just works when you install a comma three. All supported cars provide a better experience than any stock system. -# 222 Supported Cars +# 223 Supported Cars |Make|Model|Supported Package|ACC|No ACC accel below|No ALC below|Steering Torque|Resume from stop|Harness| |---|---|---|:---:|:---:|:---:|:---:|:---:|:---:| @@ -110,6 +110,7 @@ A supported vehicle is one that just works when you install a comma three. 
All s |Kia|Seltos 2021|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Hyundai A| |Kia|Sorento 2018|Advanced Smart Cruise Control|Stock|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Hyundai C| |Kia|Sorento 2019|Smart Cruise Control (SCC)|Stock|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Hyundai E| +|Kia|Sorento 2022-23[<sup>5</sup>](#footnotes)|Smart Cruise Control (SCC)|Stock|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Hyundai K| |Kia|Sorento Plug-in Hybrid 2022-23[<sup>5</sup>](#footnotes)|Smart Cruise Control (SCC)|Stock|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Hyundai A| |Kia|Sportage 2023[<sup>5</sup>](#footnotes)|Smart Cruise Control (SCC)|Stock|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Hyundai N| |Kia|Sportage Hybrid 2023[<sup>5</sup>](#footnotes)|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Hyundai N| diff --git a/selfdrive/car/hyundai/interface.py b/selfdrive/car/hyundai/interface.py index 6881be5a33acd6..6d6d9833df2623 100644 --- a/selfdrive/car/hyundai/interface.py +++ b/selfdrive/car/hyundai/interface.py @@ -185,10 +185,13 @@ def _get_params(ret, candidate, fingerprint, car_fw, experimental_long): ret.mass = 1767. 
+ STD_CARGO_KG # SX Prestige trim support only ret.wheelbase = 2.756 ret.steerRatio = 13.6 - elif candidate == CAR.KIA_SORENTO_PHEV_4TH_GEN: - ret.mass = 4095.8 * CV.LB_TO_KG + STD_CARGO_KG # weight from EX and above trims, average of FWD and AWD versions (EX, X-Line EX AWD, SX, SX Pestige, X-Line SX Prestige AWD) + elif candidate in (CAR.KIA_SORENTO_4TH_GEN, CAR.KIA_SORENTO_PHEV_4TH_GEN): ret.wheelbase = 2.81 - ret.steerRatio = 13.27 # steering ratio according to Kia News https://www.kiamedia.com/us/en/models/sorento-phev/2022/specifications + ret.steerRatio = 13.5 # average of the platforms + if candidate == CAR.KIA_SORENTO_4TH_GEN: + ret.mass = 3957 * CV.LB_TO_KG + STD_CARGO_KG + else: + ret.mass = 4537 * CV.LB_TO_KG + STD_CARGO_KG # Genesis elif candidate == CAR.GENESIS_GV60_EV_1ST_GEN: diff --git a/selfdrive/car/hyundai/values.py b/selfdrive/car/hyundai/values.py index 1a81e1b1679927..229ca992d55cec 100644 --- a/selfdrive/car/hyundai/values.py +++ b/selfdrive/car/hyundai/values.py @@ -105,6 +105,7 @@ class CAR: KIA_SELTOS = "KIA SELTOS 2021" KIA_SPORTAGE_5TH_GEN = "KIA SPORTAGE 5TH GEN" KIA_SORENTO = "KIA SORENTO GT LINE 2018" + KIA_SORENTO_4TH_GEN = "KIA SORENTO 4TH GEN" KIA_SORENTO_PHEV_4TH_GEN = "KIA SORENTO PLUG-IN HYBRID 4TH GEN" KIA_SPORTAGE_HYBRID_5TH_GEN = "KIA SPORTAGE HYBRID 5TH GEN" KIA_STINGER = "KIA STINGER GT2 2018" @@ -211,7 +212,8 @@ def init_make(self, CP: car.CarParams): HyundaiCarInfo("Kia Sorento 2018", "Advanced Smart Cruise Control", "https://www.youtube.com/watch?v=Fkh3s6WHJz8", harness=Harness.hyundai_c), HyundaiCarInfo("Kia Sorento 2019", video_link="https://www.youtube.com/watch?v=Fkh3s6WHJz8", harness=Harness.hyundai_e), ], - CAR.KIA_SORENTO_PHEV_4TH_GEN: HyundaiCarInfo("Kia Sorento Plug-in Hybrid 2022-23", "Smart Cruise Control (SCC)", harness=Harness.hyundai_a), + CAR.KIA_SORENTO_4TH_GEN: HyundaiCarInfo("Kia Sorento 2022-23", harness=Harness.hyundai_k), + CAR.KIA_SORENTO_PHEV_4TH_GEN: HyundaiCarInfo("Kia Sorento Plug-in Hybrid 
2022-23", harness=Harness.hyundai_a), CAR.KIA_SPORTAGE_HYBRID_5TH_GEN: HyundaiCarInfo("Kia Sportage Hybrid 2023", harness=Harness.hyundai_n), CAR.KIA_STINGER: HyundaiCarInfo("Kia Stinger 2018-20", video_link="https://www.youtube.com/watch?v=MJ94qoofYw0", harness=Harness.hyundai_c), CAR.KIA_STINGER_2022: HyundaiCarInfo("Kia Stinger 2022", "All", harness=Harness.hyundai_k), @@ -1543,6 +1545,14 @@ class Buttons: b'\xf1\x00JW1_ RDR ----- 1.00 1.00 99110-CU000 ', ], }, + CAR.KIA_SORENTO_4TH_GEN: { + (Ecu.fwdCamera, 0x7c4, None): [ + b'\xf1\x00MQ4 MFC AT USA LHD 1.00 1.05 99210-R5000 210623', + ], + (Ecu.fwdRadar, 0x7d0, None): [ + b'\xf1\x00MQ4_ SCC FHCUP 1.00 1.06 99110-P2000 ', + ], + }, } CHECKSUM = { @@ -1560,10 +1570,10 @@ class Buttons: "use_fca": {CAR.SONATA, CAR.SONATA_HYBRID, CAR.ELANTRA, CAR.ELANTRA_2021, CAR.ELANTRA_HEV_2021, CAR.KIA_STINGER, CAR.IONIQ_EV_2020, CAR.IONIQ_PHEV, CAR.KONA_EV, CAR.KIA_FORTE, CAR.KIA_NIRO_EV, CAR.PALISADE, CAR.GENESIS_G70, CAR.GENESIS_G70_2020, CAR.KONA, CAR.SANTA_FE, CAR.KIA_SELTOS, CAR.KONA_HEV, CAR.SANTA_FE_2022, CAR.KIA_K5_2021, CAR.IONIQ_HEV_2022, CAR.SANTA_FE_HEV_2022, CAR.SANTA_FE_PHEV_2022, CAR.TUCSON, CAR.KONA_EV_2022, CAR.KIA_STINGER_2022}, } -CANFD_CAR = {CAR.KIA_EV6, CAR.IONIQ_5, CAR.TUCSON_4TH_GEN, CAR.TUCSON_HYBRID_4TH_GEN, CAR.KIA_SPORTAGE_HYBRID_5TH_GEN, CAR.SANTA_CRUZ_1ST_GEN, CAR.KIA_SPORTAGE_5TH_GEN, CAR.GENESIS_GV70_1ST_GEN, CAR.KIA_SORENTO_PHEV_4TH_GEN, CAR.GENESIS_GV60_EV_1ST_GEN} +CANFD_CAR = {CAR.KIA_EV6, CAR.IONIQ_5, CAR.TUCSON_4TH_GEN, CAR.TUCSON_HYBRID_4TH_GEN, CAR.KIA_SPORTAGE_HYBRID_5TH_GEN, CAR.SANTA_CRUZ_1ST_GEN, CAR.KIA_SPORTAGE_5TH_GEN, CAR.GENESIS_GV70_1ST_GEN, CAR.KIA_SORENTO_PHEV_4TH_GEN, CAR.GENESIS_GV60_EV_1ST_GEN, CAR.KIA_SORENTO_4TH_GEN} # The radar does SCC on these cars when HDA I, rather than the camera -CANFD_RADAR_SCC_CAR = {CAR.GENESIS_GV70_1ST_GEN, CAR.KIA_SORENTO_PHEV_4TH_GEN} +CANFD_RADAR_SCC_CAR = {CAR.GENESIS_GV70_1ST_GEN, CAR.KIA_SORENTO_PHEV_4TH_GEN, CAR.KIA_SORENTO_4TH_GEN} # 
The camera does SCC on these cars, rather than the radar CAMERA_SCC_CAR = {CAR.KONA_EV_2022, } @@ -1628,4 +1638,5 @@ class Buttons: CAR.GENESIS_GV70_1ST_GEN: dbc_dict('hyundai_canfd', None), CAR.KIA_SORENTO_PHEV_4TH_GEN: dbc_dict('hyundai_canfd', None), CAR.GENESIS_GV60_EV_1ST_GEN: dbc_dict('hyundai_canfd', None), + CAR.KIA_SORENTO_4TH_GEN: dbc_dict('hyundai_canfd', None), } diff --git a/selfdrive/car/tests/routes.py b/selfdrive/car/tests/routes.py index 3edc406e19ff21..ac0521cc68bdcc 100644 --- a/selfdrive/car/tests/routes.py +++ b/selfdrive/car/tests/routes.py @@ -100,6 +100,7 @@ CarTestRoute("db68bbe12250812c|2022-12-05--00-54-12", HYUNDAI.TUCSON_4TH_GEN), # 2023 CarTestRoute("36e10531feea61a4|2022-07-25--13-37-42", HYUNDAI.TUCSON_HYBRID_4TH_GEN), CarTestRoute("5875672fc1d4bf57|2020-07-23--21-33-28", HYUNDAI.KIA_SORENTO), + CarTestRoute("1d0d000db3370fd0|2023-01-04--22-28-42", HYUNDAI.KIA_SORENTO_4TH_GEN, segment=5), CarTestRoute("628935d7d3e5f4f7|2022-11-30--01-12-46", HYUNDAI.KIA_SORENTO_PHEV_4TH_GEN), CarTestRoute("9c917ba0d42ffe78|2020-04-17--12-43-19", HYUNDAI.PALISADE), CarTestRoute("05a8f0197fdac372|2022-10-19--14-14-09", HYUNDAI.IONIQ_5), # HDA2 diff --git a/selfdrive/car/torque_data/override.yaml b/selfdrive/car/torque_data/override.yaml index d7b2ec407901cc..3a9f92c0464a1b 100644 --- a/selfdrive/car/torque_data/override.yaml +++ b/selfdrive/car/torque_data/override.yaml @@ -36,6 +36,7 @@ KIA SPORTAGE HYBRID 5TH GEN: [2.5, 2.5, 0.1] GENESIS GV70 1ST GEN: [2.42, 2.42, 0.1] KIA SORENTO PLUG-IN HYBRID 4TH GEN: [2.5, 2.5, 0.1] GENESIS GV60 ELECTRIC 1ST GEN: [2.5, 2.5, 0.1] +KIA SORENTO 4TH GEN: [2.5, 2.5, 0.1] # Dashcam or fallback configured as ideal car mock: [10.0, 10, 0.0]
**Checklist** - [x] added entry to CarInfo in selfdrive/car/*/values.py and ran `selfdrive/car/docs.py` to generate new docs - [x] test route added to [routes.py](https://github.com/commaai/openpilot/blob/master/selfdrive/car/tests/routes.py) - [x] route with openpilot: `1d0d000db3370fd0|2023-01-04--22-28-42` - [x] harness type: [Hyundai K](https://github.com/commaai/openpilot/wiki/Hyundai-Kia-Genesis#harness-guide) - [x] architecture: CAN-FD **Physical setup** - Top port behind comma three - comma Power <-> First Harness Box <-> comma three USB C/Right-Angle C <-> comma three - Bottom port behind comma three - Camera Harness <-> Second Harness Box <-> USB C/C 3.1 <-> Red Panda <-> USB A/A <-> USB A to C **OTG Adapter** <-> comma three - Link to all physical parts during install: - https://discord.com/channels/469524606043160576/524611978208215070/1010961558630838452 Thanks to community Kia Sorento 2022 owner `camojackson#7300` (Discord).
https://api.github.com/repos/commaai/openpilot/pulls/26874
2023-01-04T16:29:44Z
2023-01-12T05:17:58Z
2023-01-12T05:17:58Z
2023-08-20T15:09:56Z
3,356
commaai/openpilot
9,731
Adding negative prompts to Loras in extra networks
diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py index c7011909055..3160aecfa38 100644 --- a/extensions-builtin/Lora/ui_edit_user_metadata.py +++ b/extensions-builtin/Lora/ui_edit_user_metadata.py @@ -54,12 +54,13 @@ def __init__(self, ui, tabname, page): self.slider_preferred_weight = None self.edit_notes = None - def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes): + def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, negative_text, notes): user_metadata = self.get_user_metadata(name) user_metadata["description"] = desc user_metadata["sd version"] = sd_version user_metadata["activation text"] = activation_text user_metadata["preferred weight"] = preferred_weight + user_metadata["negative text"] = negative_text user_metadata["notes"] = notes self.write_user_metadata(name, user_metadata) @@ -127,6 +128,7 @@ def put_values_into_components(self, name): gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False), user_metadata.get('activation text', ''), float(user_metadata.get('preferred weight', 0.0)), + user_metadata.get('negative text', ''), gr.update(visible=True if tags else False), gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False), ] @@ -162,7 +164,7 @@ def create_editor(self): self.taginfo = gr.HighlightedText(label="Training dataset tags") self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora") self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01) - + self.edit_negative_text = gr.Text(label='Negative prompt', info="Will be added to negative prompts") with gr.Row() as row_random_prompt: with gr.Column(scale=8): random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False) @@ -198,6 +200,7 @@ 
def select_tag(activation_text, evt: gr.SelectData): self.taginfo, self.edit_activation_text, self.slider_preferred_weight, + self.edit_negative_text, row_random_prompt, random_prompt, ] @@ -211,7 +214,9 @@ def select_tag(activation_text, evt: gr.SelectData): self.select_sd_version, self.edit_activation_text, self.slider_preferred_weight, + self.edit_negative_text, self.edit_notes, ] + self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index df02c663b12..e714fac4692 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -45,6 +45,11 @@ def create_item(self, name, index=None, enable_filter=True): if activation_text: item["prompt"] += " + " + quote_js(" " + activation_text) + negative_prompt = item["user_metadata"].get("negative text") + item["negative_prompt"] = quote_js("") + if negative_prompt: + item["negative_prompt"] = quote_js('(' + negative_prompt + ':1)') + sd_version = item["user_metadata"].get("sd version") if sd_version in network.SdVersion.__members__: item["sd_version"] = sd_version diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 98a7abb745c..f1ad19a66b7 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -185,8 +185,10 @@ onUiLoaded(setupExtraNetworks); var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/; var re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g; -function tryToRemoveExtraNetworkFromPrompt(textarea, text) { - var m = text.match(re_extranet); +var re_extranet_neg = /\(([^:^>]+:[\d.]+)\)/; +var re_extranet_g_neg = /\(([^:^>]+:[\d.]+)\)/g; +function tryToRemoveExtraNetworkFromPrompt(textarea, text, isNeg) { + var m = text.match(isNeg ? 
re_extranet_neg : re_extranet); var replaced = false; var newTextareaText; if (m) { @@ -194,8 +196,8 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) { var extraTextAfterNet = m[2]; var partToSearch = m[1]; var foundAtPosition = -1; - newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, net, pos) { - m = found.match(re_extranet); + newTextareaText = textarea.value.replaceAll(isNeg ? re_extranet_g_neg : re_extranet_g, function(found, net, pos) { + m = found.match(isNeg ? re_extranet_neg : re_extranet); if (m[1] == partToSearch) { replaced = true; foundAtPosition = pos; @@ -205,7 +207,7 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) { }); if (foundAtPosition >= 0) { - if (newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) { + if (extraTextAfterNet && newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) { newTextareaText = newTextareaText.substr(0, foundAtPosition) + newTextareaText.substr(foundAtPosition + extraTextAfterNet.length); } if (newTextareaText.substr(foundAtPosition - extraTextBeforeNet.length, extraTextBeforeNet.length) == extraTextBeforeNet) { @@ -230,14 +232,23 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) { return false; } -function cardClicked(tabname, textToAdd, allowNegativePrompt) { - var textarea = allowNegativePrompt ? 
activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea"); +function updatePromptArea(text, textArea, isNeg) { - if (!tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)) { - textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd; + if (!tryToRemoveExtraNetworkFromPrompt(textArea, text, isNeg)) { + textArea.value = textArea.value + opts.extra_networks_add_text_separator + text; } - updateInput(textarea); + updateInput(textArea); +} + +function cardClicked(tabname, textToAdd, textToAddNegative, allowNegativePrompt) { + if (textToAddNegative.length > 0) { + updatePromptArea(textToAdd, gradioApp().querySelector("#" + tabname + "_prompt > label > textarea")); + updatePromptArea(textToAddNegative, gradioApp().querySelector("#" + tabname + "_neg_prompt > label > textarea"), true); + } else { + var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea"); + updatePromptArea(textToAdd, textarea); + } } function saveCardPreview(event, tabname, filename) { diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index fe5d3ba3338..b8c02241365 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -223,7 +223,10 @@ def create_html_for_item(self, item, tabname): onclick = item.get("onclick", None) if onclick is None: - onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"' + if "negative_prompt" in item: + onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {item["negative_prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"' + else: + onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {'""'}, {"true" if self.allow_negative_prompt else "false"})""") + '"' height = f"height: 
{shared.opts.extra_networks_card_height}px;" if shared.opts.extra_networks_card_height else '' width = f"width: {shared.opts.extra_networks_card_width}px;" if shared.opts.extra_networks_card_width else ''
## Description This pull request adds the ability to specify a negative prompt with weight that will be added alongside the prompt to their respective texbox, when clicking a lora card in the extra networks tab. If nothing is specified no additional text will be added to the negative prompts. To achieve this I modified `ui_edit_user_metadata.py`, `ui_extra_networks_lora.py` and `ui_extra_networks.py` to incorporate these two new fields. I also had to modify `extraNetworks.js` to allow the prompt and negative prompt to be added at the same time. Additionally I had to add additional regex expressions to match the negative prompt that gets added by clicking on the card. The two new fields are saved into the models json file. ## Screenshots/videos: ![grafik](https://github.com/AUTOMATIC1111/stable-diffusion-webui/assets/6223515/fe350615-eee3-49af-9f63-6791e1e0946d) ![negative_prompt_example](https://github.com/AUTOMATIC1111/stable-diffusion-webui/assets/6223515/f9647c78-fc87-4dd6-9d89-7d6f8a275182) ## Checklist: - [x] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) - [x] I have performed a self-review of my own code - [x] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style) - [x] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/14475
2023-12-30T21:37:55Z
2023-12-31T19:32:29Z
2023-12-31T19:32:28Z
2023-12-31T19:32:29Z
2,081
AUTOMATIC1111/stable-diffusion-webui
39,964
Improve contributing.md
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 884bae3ddf6..f3d6d687b9c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,12 @@ +Keras 3 is a high-velocity open-source project. We welcome contributions! + +Contributions can be made in a variety of ways, including coding, enriching documentation, refining docstrings, and providing code examples. + + +## Current items open for contributions +At[this link](https://github.com/keras-team/keras/issues/18442), you'll find a list of items where you help is needed! + + ## How to contribute code Follow these steps to submit your code contribution. @@ -9,7 +18,7 @@ exist) and discussing your proposed changes. This way, we can give you feedback and validate the proposed changes. If the changes are minor (simple bug fix or documentation fix), then feel free -to open a PR without discussion. +to open a Pull Request (PR) without discussion. ### Step 2. Make code changes @@ -31,6 +40,7 @@ the check will pass. ![CLA signed](https://github.com/keras-team/keras/assets/1091026/71c26353-e3b5-4135-8bae-64693c717775) + ### Step 5. Code review If the tests fail, look into the error messages and try to fix them. @@ -56,7 +66,7 @@ for your reference. ## Setup environment We provide two ways of setting up a development environment. One is to use a -dev container, and the other one is to setup a local environment by installing +dev container, and the other one is to set up a local environment by installing the dev tools needed. ### Option 1: GitHub Codespace or dev container @@ -65,9 +75,9 @@ We support GitHub Codespaces, Visual Studio Code dev containers and JetBrain dev containers. Please see the [Dev container documentation](https://github.com/keras-team/keras/tree/master/.devcontainer). -### Option 2: Setup a local environment +### Option 2: Set up a local environment -To setup your local dev environment, you will need the following tools. +To set up your local dev environment, you will need the following tools. 1. 
[git](https://github.com/) for code repository management. 2. [python](https://www.python.org/) to build and code in Keras. @@ -99,7 +109,7 @@ section of the README. ## Code style -The Keras uses [Black](https://black.readthedocs.io/en/stable/) and +Keras uses [Black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/) to format the code. Please refer to [requirements-common.txt](https://github.com/keras-team/keras/blob/master/requirements-common.txt) for the required versions. Run the following command **at the root directory of
I've made small updates to `CONTRIBUTING.md` to improve clarity and ease for new contributors.
https://api.github.com/repos/keras-team/keras/pulls/19115
2024-01-29T19:18:57Z
2024-01-29T21:03:20Z
2024-01-29T21:03:20Z
2024-04-02T18:40:03Z
698
keras-team/keras
47,744
ngrok logging does not use the shared logger module
diff --git a/extensions/ngrok/script.py b/extensions/ngrok/script.py index 46f39bd327..7bfb9f6e1f 100644 --- a/extensions/ngrok/script.py +++ b/extensions/ngrok/script.py @@ -8,7 +8,6 @@ # See this example for full list of options: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py # or the README.md in this directory. -import logging from modules import shared # Pick up host/port command line arguments @@ -31,6 +30,6 @@ def ui(): try: import ngrok tunnel = ngrok.connect(**options) - logging.info(f"Ingress established at: {tunnel.url()}") + shared.logger.info(f"Ingress established at: {tunnel.url()}") except ModuleNotFoundError: - logging.error("===> ngrok library not found, please run `pip install -r extensions/ngrok/requirements.txt`") + shared.logger.error("===> ngrok library not found, please run `pip install -r extensions/ngrok/requirements.txt`")
- when running `start_linux.sh`, ngrok will not display the created URL unless the logs are run through the shared logger module ## Checklist: - [x] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).
https://api.github.com/repos/oobabooga/text-generation-webui/pulls/5570
2024-02-23T20:38:21Z
2024-02-25T05:35:59Z
2024-02-25T05:35:59Z
2024-02-25T05:35:59Z
245
oobabooga/text-generation-webui
25,956
zh-Hans: Fix typo
diff --git a/README-zh-Hans.md b/README-zh-Hans.md index bce8bfb131..f1a2f9e3cd 100644 --- a/README-zh-Hans.md +++ b/README-zh-Hans.md @@ -1621,7 +1621,7 @@ Notes | 设计类似于 Google 的搜索引擎 | [queue.acm.org](http://queue.acm.org/detail.cfm?id=988407)<br/>[stackexchange.com](http://programmers.stackexchange.com/questions/38324/interview-question-how-would-you-implement-google-search)<br/>[ardendertat.com](http://www.ardendertat.com/2012/01/11/implementing-search-engines/)<br>[stanford.edu](http://infolab.stanford.edu/~backrub/google.html) | | 设计类似于 Google 的可扩展网络爬虫 | [quora.com](https://www.quora.com/How-can-I-build-a-web-crawler-from-scratch) | | 设计 Google 文档 | [code.google.com](https://code.google.com/p/google-mobwrite/)<br/>[neil.fraser.name](https://neil.fraser.name/writing/sync/) | -| 设计类似 Redis 的建值存储 | [slideshare.net](http://www.slideshare.net/dvirsky/introduction-to-redis) | +| 设计类似 Redis 的键值存储 | [slideshare.net](http://www.slideshare.net/dvirsky/introduction-to-redis) | | 设计类似 Memcached 的缓存系统 | [slideshare.net](http://www.slideshare.net/oemebamo/introduction-to-memcached) | | 设计类似亚马逊的推荐系统 | [hulu.com](http://tech.hulu.com/blog/2011/09/19/recommendation-system.html)<br/>[ijcai13.org](http://ijcai13.org/files/tutorial_slides/td3.pdf) | | 设计类似 Bitly 的短链接系统 | [n00tc0d3r.blogspot.com](http://n00tc0d3r.blogspot.com/) |
https://api.github.com/repos/donnemartin/system-design-primer/pulls/246
2019-01-15T02:37:22Z
2019-01-20T19:59:43Z
2019-01-20T19:59:43Z
2019-01-20T20:00:20Z
488
donnemartin/system-design-primer
36,806
Parallelize SNS message delivery for improved performance
diff --git a/localstack/services/sns/sns_listener.py b/localstack/services/sns/sns_listener.py index a99ee2ced3a77..366dfd44b425d 100644 --- a/localstack/services/sns/sns_listener.py +++ b/localstack/services/sns/sns_listener.py @@ -7,6 +7,7 @@ import six import requests import xmltodict +import asyncio from flask import Response as FlaskResponse from requests.models import Response, Request from six.moves.urllib import parse as urlparse @@ -295,133 +296,150 @@ def unsubscribe_sqs_queue(queue_url): subscriptions.remove(subscriber) -def message_to_subscribers(message_id, message, topic_arn, req_data, headers, subscription_arn=None, skip_checks=False): +def message_to_subscribers(message_id, message, topic_arn, req_data, headers, subscription_arn=None, + skip_checks=False): sns_backend = SNSBackend.get() subscriptions = sns_backend.sns_subscriptions.get(topic_arn, []) - for subscriber in list(subscriptions): - if subscription_arn not in [None, subscriber['SubscriptionArn']]: - continue - - filter_policy = json.loads(subscriber.get('FilterPolicy') or '{}') - message_attributes = get_message_attributes(req_data) - if not skip_checks and not check_filter_policy(filter_policy, message_attributes): - LOG.info('SNS filter policy %s does not match attributes %s' % (filter_policy, message_attributes)) - continue - - if subscriber['Protocol'] == 'sms': - event = { - 'topic_arn': topic_arn, - 'endpoint': subscriber['Endpoint'], - 'message_content': req_data['Message'][0] - } - sns_backend.sms_messages.append(event) - LOG.info('Delivering SMS message to %s: %s', subscriber['Endpoint'], req_data['Message'][0]) - - elif subscriber['Protocol'] == 'sqs': - queue_url = None - - try: - endpoint = subscriber['Endpoint'] - - if 'sqs_queue_url' in subscriber: - queue_url = subscriber.get('sqs_queue_url') - elif '://' in endpoint: - queue_url = endpoint - else: - queue_name = endpoint.split(':')[5] - queue_url = aws_stack.get_sqs_queue_url(queue_name) - subscriber['sqs_queue_url'] = 
queue_url - - message_group_id = req_data.get('MessageGroupId')[0] if req_data.get('MessageGroupId') else '' - - sqs_client = aws_stack.connect_to_service('sqs') - - # TODO remove this kwargs if we stop using ElasticMQ entirely - kwargs = {'MessageGroupId': message_group_id} if SQS_BACKEND_IMPL == 'moto' else {} - sqs_client.send_message( - QueueUrl=queue_url, - MessageBody=create_sns_message_body(subscriber, req_data, message_id), - MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes), - MessageSystemAttributes=create_sqs_system_attributes(headers), - **kwargs - ) - except Exception as exc: - LOG.warning('Unable to forward SNS message to SQS: %s %s' % (exc, traceback.format_exc())) - sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) - if 'NonExistentQueue' in str(exc): - LOG.info('Removing non-existent queue "%s" subscribed to topic "%s"' % (queue_url, topic_arn)) - subscriptions.remove(subscriber) - - elif subscriber['Protocol'] == 'lambda': - try: - external_url = external_service_url('sns') - unsubscribe_url = '%s/?Action=Unsubscribe&SubscriptionArn=%s' % (external_url, - subscriber['SubscriptionArn']) - response = lambda_api.process_sns_notification( - subscriber['Endpoint'], - topic_arn, - subscriber['SubscriptionArn'], - message, - message_id, - message_attributes, - unsubscribe_url, - subject=req_data.get('Subject', [None])[0] - ) - if isinstance(response, Response): - response.raise_for_status() - elif isinstance(response, FlaskResponse): - if response.status_code >= 400: - raise Exception('Error response (code %s): %s' % (response.status_code, response.data)) - except Exception as exc: - LOG.warning('Unable to run Lambda function on SNS message: %s %s' % (exc, traceback.format_exc())) - sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) - - elif subscriber['Protocol'] in ['http', 'https']: - msg_type = (req_data.get('Type') or ['Notification'])[0] - try: - 
message_body = create_sns_message_body(subscriber, req_data, message_id) - except Exception: - continue - try: - response = requests.post( - subscriber['Endpoint'], - headers={ - 'Content-Type': 'text/plain', - # AWS headers according to - # https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html#http-header - 'x-amz-sns-message-type': msg_type, - 'x-amz-sns-topic-arn': subscriber['TopicArn'], - 'x-amz-sns-subscription-arn': subscriber['SubscriptionArn'], - 'User-Agent': 'Amazon Simple Notification Service Agent' - }, - data=message_body, - verify=False - ) + + async def wait_for_messages_sent(): + await asyncio.wait([ + message_to_subscriber(message_id, message, topic_arn, req_data, headers, subscription_arn, skip_checks, + sns_backend, subscriber, subscriptions) for subscriber in list(subscriptions) + ]) + + asyncio.run(wait_for_messages_sent()) + + +async def message_to_subscriber(message_id, message, topic_arn, req_data, + headers, subscription_arn, skip_checks, sns_backend, subscriber, subscriptions): + + if subscription_arn not in [None, subscriber['SubscriptionArn']]: + return + + filter_policy = json.loads(subscriber.get('FilterPolicy') or '{}') + message_attributes = get_message_attributes(req_data) + if not skip_checks and not check_filter_policy(filter_policy, message_attributes): + LOG.info('SNS filter policy %s does not match attributes %s' % (filter_policy, message_attributes)) + return + if subscriber['Protocol'] == 'sms': + event = { + 'topic_arn': topic_arn, + 'endpoint': subscriber['Endpoint'], + 'message_content': req_data['Message'][0] + } + sns_backend.sms_messages.append(event) + LOG.info('Delivering SMS message to %s: %s', subscriber['Endpoint'], req_data['Message'][0]) + return + + elif subscriber['Protocol'] == 'sqs': + queue_url = None + + try: + endpoint = subscriber['Endpoint'] + + if 'sqs_queue_url' in subscriber: + queue_url = subscriber.get('sqs_queue_url') + elif '://' in endpoint: + queue_url = endpoint + else: + 
queue_name = endpoint.split(':')[5] + queue_url = aws_stack.get_sqs_queue_url(queue_name) + subscriber['sqs_queue_url'] = queue_url + + message_group_id = req_data.get('MessageGroupId')[0] if req_data.get('MessageGroupId') else '' + + sqs_client = aws_stack.connect_to_service('sqs') + + # TODO remove this kwargs if we stop using ElasticMQ entirely + kwargs = {'MessageGroupId': message_group_id} if SQS_BACKEND_IMPL == 'moto' else {} + sqs_client.send_message( + QueueUrl=queue_url, + MessageBody=create_sns_message_body(subscriber, req_data, message_id), + MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes), + MessageSystemAttributes=create_sqs_system_attributes(headers), + **kwargs + ) + except Exception as exc: + LOG.warning('Unable to forward SNS message to SQS: %s %s' % (exc, traceback.format_exc())) + sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) + if 'NonExistentQueue' in str(exc): + LOG.info('Removing non-existent queue "%s" subscribed to topic "%s"' % (queue_url, topic_arn)) + subscriptions.remove(subscriber) + return + + elif subscriber['Protocol'] == 'lambda': + try: + external_url = external_service_url('sns') + unsubscribe_url = '%s/?Action=Unsubscribe&SubscriptionArn=%s' % (external_url, + subscriber['SubscriptionArn']) + response = lambda_api.process_sns_notification( + subscriber['Endpoint'], + topic_arn, + subscriber['SubscriptionArn'], + message, + message_id, + message_attributes, + unsubscribe_url, + subject=req_data.get('Subject', [None])[0] + ) + if isinstance(response, Response): response.raise_for_status() - except Exception as exc: - LOG.info('Received error on sending SNS message, putting to DLQ (if configured): %s' % exc) - sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) - - elif subscriber['Protocol'] == 'application': - try: - sns_client = aws_stack.connect_to_service('sns') - sns_client.publish(TargetArn=subscriber['Endpoint'], 
Message=message) - except Exception as exc: - LOG.warning('Unable to forward SNS message to SNS platform app: %s %s' % (exc, traceback.format_exc())) - sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) - - elif subscriber['Protocol'] == 'email': - ses_client = aws_stack.connect_to_service('ses') - if subscriber.get('Endpoint'): - ses_client.verify_email_address(EmailAddress=subscriber.get('Endpoint')) - ses_client.verify_email_address(EmailAddress='admin@localstack.com') - - ses_client.send_email(Source='admin@localstack.com', - Message={'Body': {'Text': {'Data': message}}, - 'Subject': {'Data': 'SNS-Subscriber-Endpoint'}}, - Destination={'ToAddresses': [subscriber.get('Endpoint')]}) - else: - LOG.warning('Unexpected protocol "%s" for SNS subscription' % subscriber['Protocol']) + elif isinstance(response, FlaskResponse): + if response.status_code >= 400: + raise Exception('Error response (code %s): %s' % (response.status_code, response.data)) + except Exception as exc: + LOG.warning('Unable to run Lambda function on SNS message: %s %s' % (exc, traceback.format_exc())) + sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) + return + + elif subscriber['Protocol'] in ['http', 'https']: + msg_type = (req_data.get('Type') or ['Notification'])[0] + try: + message_body = create_sns_message_body(subscriber, req_data, message_id) + except Exception: + return + try: + response = requests.post( + subscriber['Endpoint'], + headers={ + 'Content-Type': 'text/plain', + # AWS headers according to + # https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html#http-header + 'x-amz-sns-message-type': msg_type, + 'x-amz-sns-topic-arn': subscriber['TopicArn'], + 'x-amz-sns-subscription-arn': subscriber['SubscriptionArn'], + 'User-Agent': 'Amazon Simple Notification Service Agent' + }, + data=message_body, + verify=False + ) + response.raise_for_status() + except Exception as exc: + LOG.info('Received error 
on sending SNS message, putting to DLQ (if configured): %s' % exc) + sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) + return + + elif subscriber['Protocol'] == 'application': + try: + sns_client = aws_stack.connect_to_service('sns') + sns_client.publish(TargetArn=subscriber['Endpoint'], Message=message) + except Exception as exc: + LOG.warning('Unable to forward SNS message to SNS platform app: %s %s' % (exc, traceback.format_exc())) + sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'], req_data, str(exc)) + return + + elif subscriber['Protocol'] == 'email': + ses_client = aws_stack.connect_to_service('ses') + if subscriber.get('Endpoint'): + ses_client.verify_email_address(EmailAddress=subscriber.get('Endpoint')) + ses_client.verify_email_address(EmailAddress='admin@localstack.com') + + ses_client.send_email(Source='admin@localstack.com', + Message={'Body': {'Text': {'Data': message}}, + 'Subject': {'Data': 'SNS-Subscriber-Endpoint'}}, + Destination={'ToAddresses': [subscriber.get('Endpoint')]}) + else: + LOG.warning('Unexpected protocol "%s" for SNS subscription' % subscriber['Protocol']) def publish_message(topic_arn, req_data, headers, subscription_arn=None, skip_checks=False):
(Re-creating PR #4101 to trigger PR build.) > With this PR the SNS service delivers the messages to the subscribers of a topic in an async way. > Addresses #3670
https://api.github.com/repos/localstack/localstack/pulls/4103
2021-06-06T07:42:02Z
2021-06-06T18:07:55Z
2021-06-06T18:07:55Z
2021-06-06T18:07:55Z
3,002
localstack/localstack
28,992
Modified the shaders slightly
diff --git a/manimlib/shaders/inserts/finalize_color.glsl b/manimlib/shaders/inserts/finalize_color.glsl index 0664deb0b5..e7b64eee3a 100644 --- a/manimlib/shaders/inserts/finalize_color.glsl +++ b/manimlib/shaders/inserts/finalize_color.glsl @@ -17,16 +17,16 @@ vec4 add_light(vec4 color, float shadow){ if(gloss == 0.0 && shadow == 0.0) return color; + float camera_distance = focal_distance; + // Assume everything has already been rotated such that camera is in the z-direction + vec3 to_camera = vec3(0, 0, camera_distance) - point; + vec3 to_light = light_coords - point; + // TODO, do we actually want this? It effectively treats surfaces as two-sided - if(unit_normal.z < 0){ + if(dot(to_camera,unit_normal) < 0){ unit_normal *= -1; } - // TODO, read this in as a uniform? - float camera_distance = 6; - // Assume everything has already been rotated such that camera is in the z-direction - vec3 to_camera = vec3(0, 0, camera_distance) - point; - vec3 to_light = light_coords - point; vec3 light_reflection = -to_light + 2 * unit_normal * dot(to_light, unit_normal); float dot_prod = dot(normalize(light_reflection), normalize(to_camera)); float shine = gloss * exp(-3 * pow(1 - dot_prod, 2)); diff --git a/manimlib/shaders/surface/frag.glsl b/manimlib/shaders/surface/frag.glsl index db90527559..707621a14e 100644 --- a/manimlib/shaders/surface/frag.glsl +++ b/manimlib/shaders/surface/frag.glsl @@ -3,6 +3,7 @@ uniform vec3 light_source_position; uniform float gloss; uniform float shadow; +uniform float focal_distance; in vec3 xyz_coords; in vec3 v_normal; diff --git a/manimlib/shaders/textured_surface/frag.glsl b/manimlib/shaders/textured_surface/frag.glsl index ab45dad646..616b06e249 100644 --- a/manimlib/shaders/textured_surface/frag.glsl +++ b/manimlib/shaders/textured_surface/frag.glsl @@ -6,6 +6,7 @@ uniform float num_textures; uniform vec3 light_source_position; uniform float gloss; uniform float shadow; +uniform float focal_distance; in vec3 xyz_coords; in vec3 v_normal; 
diff --git a/manimlib/shaders/true_dot/frag.glsl b/manimlib/shaders/true_dot/frag.glsl index a89653595d..0be9a2abe1 100644 --- a/manimlib/shaders/true_dot/frag.glsl +++ b/manimlib/shaders/true_dot/frag.glsl @@ -4,6 +4,7 @@ uniform vec3 light_source_position; uniform float gloss; uniform float shadow; uniform float anti_alias_width; +uniform float focal_distance; in vec4 color; in float radius;
## Motivation - `camera_distance` is set to be 6 and doesn't change with the `focal_distance`. - The z coordinate is used to check whether the surface is facing the camera. It generates the wrong result which is particularly obvious when `focal_distance` is small. ## Proposed changes - Replace `float camera_distance = 6;` with `float camera_distance = focal_distance;`, and add `uniform float focal_distance;` in some shaders. - Use the dot product to check whether the surface is facing the camera. ## Test **Code**: ```python from manimlib import * class Test(Scene): CONFIG = { "camera_class": ThreeDCamera, "camera_config": { "focal_distance": 0.5, "light_source_position": [1, 1, -2], "frame_config":{ "frame_shape": (4, 3), } } } def construct(self): sphere = Sphere(radius=2) sphere.shift(DOWN*3.3) sphere.set_color('#cccccc') self.add(sphere) self.wait(1) ``` **Result**: Before: ![image](https://user-images.githubusercontent.com/36598223/120894198-d72e9a80-c649-11eb-807f-3a6e2e97c324.png) After: ![image](https://user-images.githubusercontent.com/36598223/120894205-e01f6c00-c649-11eb-97fd-dd95980d4649.png)
https://api.github.com/repos/3b1b/manim/pulls/1530
2021-06-05T14:07:41Z
2021-06-14T16:44:02Z
2021-06-14T16:44:02Z
2021-06-14T16:44:02Z
744
3b1b/manim
18,057
bpo-32228: Reset raw_pos after unwinding the raw stream
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py index 9bfe4b0bc6e4be..dc1c7c8e72a776 100644 --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -1723,6 +1723,23 @@ def test_truncate(self): with self.open(support.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.read(), b"abc") + def test_truncate_after_write(self): + # Ensure that truncate preserves the file position after + # writes longer than the buffer size. + # Issue: https://bugs.python.org/issue32228 + with self.open(support.TESTFN, "wb") as f: + # Fill with some buffer + f.write(b'\x00' * 10000) + buffer_sizes = [8192, 4096, 200] + for buffer_size in buffer_sizes: + with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f: + f.write(b'\x00' * (buffer_size + 1)) + # After write write_pos and write_end are set to 0 + f.read(1) + # read operation makes sure that pos != raw_pos + f.truncate() + self.assertEqual(f.tell(), buffer_size + 2) + @support.requires_resource('cpu') def test_threads(self): try: diff --git a/Misc/NEWS.d/next/Library/2017-12-22-16-47-41.bpo-32228.waPx3q.rst b/Misc/NEWS.d/next/Library/2017-12-22-16-47-41.bpo-32228.waPx3q.rst new file mode 100644 index 00000000000000..3bbe7c495f82d3 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2017-12-22-16-47-41.bpo-32228.waPx3q.rst @@ -0,0 +1 @@ +Ensure that ``truncate()`` preserves the file position (as reported by ``tell()``) after writes longer than the buffer size. diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c index d7e82b9dba1ac2..fa6ece8e947451 100644 --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -1292,7 +1292,6 @@ _io__Buffered_seek_impl(buffered *self, PyObject *targetobj, int whence) if (res == NULL) goto end; Py_CLEAR(res); - _bufferedwriter_reset_buf(self); } /* TODO: align on block boundary and read buffer if needed? 
*/ @@ -1857,8 +1856,6 @@ _bufferedwriter_raw_write(buffered *self, char *start, Py_ssize_t len) return n; } -/* `restore_pos` is 1 if we need to restore the raw stream position at - the end, 0 otherwise. */ static PyObject * _bufferedwriter_flush_unlocked(buffered *self) { @@ -1899,9 +1896,18 @@ _bufferedwriter_flush_unlocked(buffered *self) goto error; } - _bufferedwriter_reset_buf(self); end: + /* This ensures that after return from this function, + VALID_WRITE_BUFFER(self) returns false. + + This is a required condition because when a tell() is called + after flushing and if VALID_READ_BUFFER(self) is false, we need + VALID_WRITE_BUFFER(self) to be false to have + RAW_OFFSET(self) == 0. + + Issue: https://bugs.python.org/issue32228 */ + _bufferedwriter_reset_buf(self); Py_RETURN_NONE; error:
This issue was happening because, after a write in the given case, `write_pos` and `write_end` end up being 0. Nothing wrong here. But when doing truncate, we flush writer changes. During that, even when `write_pos == write_end`, we have to reset `write_end` to -1. That was not happening. <!-- issue-number: bpo-32228 --> https://bugs.python.org/issue32228 <!-- /issue-number -->
https://api.github.com/repos/python/cpython/pulls/4858
2017-12-14T05:46:17Z
2018-01-28T16:00:09Z
2018-01-28T16:00:09Z
2018-01-28T16:42:59Z
892
python/cpython
4,694
Fix super tiny type error
diff --git a/timm/scheduler/cosine_lr.py b/timm/scheduler/cosine_lr.py index e2c975fb79..4eaaa86a81 100644 --- a/timm/scheduler/cosine_lr.py +++ b/timm/scheduler/cosine_lr.py @@ -8,6 +8,7 @@ import math import numpy as np import torch +from typing import List from .scheduler import Scheduler @@ -77,7 +78,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/multistep_lr.py b/timm/scheduler/multistep_lr.py index 10f2fb5044..e5db556d43 100644 --- a/timm/scheduler/multistep_lr.py +++ b/timm/scheduler/multistep_lr.py @@ -53,7 +53,7 @@ def get_curr_decay_steps(self, t): # assumes self.decay_t is sorted return bisect.bisect_right(self.decay_t, t + 1) - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/plateau_lr.py b/timm/scheduler/plateau_lr.py index 9f8271579b..e868bd5e58 100644 --- a/timm/scheduler/plateau_lr.py +++ b/timm/scheduler/plateau_lr.py @@ -5,6 +5,7 @@ Hacked together by / Copyright 2020 Ross Wightman """ import torch +from typing import List from .scheduler import Scheduler @@ -106,5 +107,5 @@ def _apply_noise(self, epoch): param_group['lr'] = new_lr self.restore_lr = restore_lr - def _get_lr(self, t: int) -> float: + def _get_lr(self, t: int) -> List[float]: assert False, 'should not be called as step is overridden' diff --git a/timm/scheduler/poly_lr.py b/timm/scheduler/poly_lr.py index 906f6acf82..8875e15bfe 100644 --- a/timm/scheduler/poly_lr.py +++ b/timm/scheduler/poly_lr.py @@ -6,6 +6,7 @@ """ import math import logging +from typing import List import torch @@ -73,7 +74,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if 
t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/scheduler.py b/timm/scheduler/scheduler.py index 4ae2e2aeb6..583357f7c5 100644 --- a/timm/scheduler/scheduler.py +++ b/timm/scheduler/scheduler.py @@ -1,6 +1,6 @@ import abc from abc import ABC -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional import torch @@ -65,10 +65,10 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.__dict__.update(state_dict) @abc.abstractmethod - def _get_lr(self, t: int) -> float: + def _get_lr(self, t: int) -> List[float]: pass - def _get_values(self, t: int, on_epoch: bool = True) -> Optional[float]: + def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]: proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs) if not proceed: return None diff --git a/timm/scheduler/step_lr.py b/timm/scheduler/step_lr.py index 70a45a70d4..c205d43715 100644 --- a/timm/scheduler/step_lr.py +++ b/timm/scheduler/step_lr.py @@ -6,6 +6,8 @@ """ import math import torch +from typing import List + from .scheduler import Scheduler @@ -51,7 +53,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/tanh_lr.py b/timm/scheduler/tanh_lr.py index 48acc61b03..94455302c6 100644 --- a/timm/scheduler/tanh_lr.py +++ b/timm/scheduler/tanh_lr.py @@ -8,6 +8,7 @@ import math import numpy as np import torch +from typing import List from .scheduler import Scheduler @@ -75,7 +76,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else:
IMHO, for example, CosineLRScheduler returns list of floats, instead of a single float. Therefore, the type signature may need to be updated. Please correct me if I am wrong!
https://api.github.com/repos/huggingface/pytorch-image-models/pulls/2124
2024-03-23T03:27:44Z
2024-04-02T21:31:38Z
2024-04-02T21:31:38Z
2024-04-03T00:39:19Z
1,394
huggingface/pytorch-image-models
16,166
Format Document
diff --git a/ARKA.py b/ARKA.py index 0611cba1c1..0132c4ce54 100644 --- a/ARKA.py +++ b/ARKA.py @@ -1,9 +1,8 @@ -def sumOfSeries(n): - x = (n * (n + 1) / 2) - return (int)(x * x) +def sumOfSeries(n): + x = n * (n + 1) / 2 + return (int)(x * x) - -# Driver Function +# Driver Function n = 5 -print(sumOfSeries(n)) +print(sumOfSeries(n))
https://api.github.com/repos/geekcomputers/Python/pulls/1774
2022-10-14T17:15:16Z
2022-10-14T20:12:07Z
2022-10-14T20:12:07Z
2022-10-14T20:18:08Z
155
geekcomputers/Python
31,738
Fix Trainer for Datasets that don't have dict items
diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index afc2e0d1561cd..46fd0cdd05b6f 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -676,6 +676,8 @@ def __init__( self.message_logged = False def _remove_columns(self, feature: dict) -> dict: + if not isinstance(feature, dict): + return feature if not self.message_logged and self.logger and self.model_name: ignored_columns = list(set(feature.keys()) - set(self.signature_columns)) if len(ignored_columns) > 0:
# What does this PR do? This PR fixes a break in `Trainer` when the dataset items are not dictionaries.
https://api.github.com/repos/huggingface/transformers/pulls/17239
2022-05-13T15:35:28Z
2022-05-13T15:49:23Z
2022-05-13T15:49:23Z
2022-05-15T11:51:29Z
158
huggingface/transformers
12,002
Add: PowerBI
diff --git a/config.py b/config.py index 57c98307a..1d8aafc07 100644 --- a/config.py +++ b/config.py @@ -90,6 +90,9 @@ # TODO: check if the classname exists ALIASES = { "onprem": { + "analytics": { + "Powerbi": "PowerBI" + }, "ci": { "Circleci": "CircleCI", "Concourseci": "ConcourseCI", diff --git a/diagrams/onprem/analytics.py b/diagrams/onprem/analytics.py index a62e40175..182c28cd9 100644 --- a/diagrams/onprem/analytics.py +++ b/diagrams/onprem/analytics.py @@ -44,6 +44,10 @@ class Presto(_Analytics): _icon = "presto.png" +class Powerbi(_Analytics): + _icon = "powerbi.png" + + class Singer(_Analytics): _icon = "singer.png" @@ -65,3 +69,5 @@ class Tableau(_Analytics): # Aliases + +PowerBI = Powerbi diff --git a/docs/nodes/onprem.md b/docs/nodes/onprem.md index ef64a7a9d..2d8137276 100644 --- a/docs/nodes/onprem.md +++ b/docs/nodes/onprem.md @@ -21,6 +21,7 @@ Node classes list of onprem provider. - **diagrams.onprem.analytics.Metabase** - **diagrams.onprem.analytics.Norikra** - **diagrams.onprem.analytics.Presto** +- **diagrams.onprem.analytics.Powerbi**, **PowerBI** (alias) - **diagrams.onprem.analytics.Singer** - **diagrams.onprem.analytics.Spark** - **diagrams.onprem.analytics.Storm** diff --git a/resources/onprem/analytics/powerbi.png b/resources/onprem/analytics/powerbi.png new file mode 100644 index 000000000..9783fc946 Binary files /dev/null and b/resources/onprem/analytics/powerbi.png differ
Adding PowerBI as an on-premise analytics resource. N.B. Apparently someone forgot to generate the documentation for "OPNSense" alias. I double checked and it is the right alias in this PR.
https://api.github.com/repos/mingrammer/diagrams/pulls/337
2020-10-05T12:50:28Z
2021-01-27T12:28:56Z
2021-01-27T12:28:56Z
2021-01-27T12:28:56Z
494
mingrammer/diagrams
52,720
tik_tak game
diff --git a/tik_tak.py b/tik_tak.py new file mode 100644 index 0000000000..e4b4d28942 --- /dev/null +++ b/tik_tak.py @@ -0,0 +1,108 @@ + +l=["anything",1,2,3,4,5,6,7,8,9] +i=0 +j=9 +print("\n\t\t\tTIK-TAC-TOE") +def board(): + #import os + #os.system('cls') + print("\n\n") + print(" | |" ) + print("",l[1]," | ",l[2]," | ",l[3] ) + print("____|_____|____") + print(" | |" ) + print("",l[4]," | ",l[5]," | ",l[6] ) + print("____|_____|____") + print(" | |" ) + print("",l[7]," | ",l[8]," | ",l[9] ) + print(" | |" ) +def enter_number(p1,p2): + global i + global j + k=9 + while(j): + if k==0: + break + + if i==0: + x=int(input("\nplayer 1 :- ")) + if x<=0: + print("chose number from given board") + else: + for e in range(1,10): + if l[e]==x: + l[e]=p1 + board() + c=checkwin() + if c==1: + print("\n\n Congratulation ! player 1 win ") + return + + + i=1 + j-=1 + k-=1 + if k==0: + print("\n\nGame is over") + break + + if k==0: + + break + + if i==1: + y=int(input("\nplayer 2 :- ")) + if y<=0: + print("chose number from given board") + #return + else: + for e in range(1,10): + if l[e]==y: + l[e]=p2 + board() + w=checkwin() + if w==1: + print("\n\n Congratulation ! player 2 win") + return + + i=0 + j-=1 + k-=1 + + +def checkwin(): + if l[1]==l[2]==l[3]: + + return 1 + elif l[4]==l[5]==l[6]: + + return 1 + elif l[7]==l[8]==l[9]: + + return 1 + elif l[1]==l[4]==l[7]: + + return 1 + + elif l[2]==l[5]==l[8]: + + return 1 + elif l[3]==l[6]==l[9]: + + return 1 + elif l[1]==l[5]==l[9]: + + return 1 + elif l[3]==l[5]==l[7]: + + return 1 + else: + print("\n\nGame continue") + +def main(): + board() + p1=input("\n\nplayer 1 chose your sign [0/x] = ") + p2=input("player 2 chose your sign [0/x] = ") + enter_number(p1,p2) + print("\n\n\t\t\tDeveloped By :- UTKARSH MATHUR") +main()
https://api.github.com/repos/geekcomputers/Python/pulls/364
2018-09-14T05:53:43Z
2018-09-24T11:02:57Z
2018-09-24T11:02:57Z
2018-09-24T11:02:57Z
882
geekcomputers/Python
31,597
introduce debug utils
diff --git a/deepspeed/runtime/engine.py b/deepspeed/runtime/engine.py index 4692bb709079..3c695d921a5a 100755 --- a/deepspeed/runtime/engine.py +++ b/deepspeed/runtime/engine.py @@ -38,6 +38,7 @@ import deepspeed.runtime.lr_schedules as lr_schedules from deepspeed.utils import logger, log_dist, init_distributed from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer +from deepspeed.utils.debug import debug_extract_module_and_param_names from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop from deepspeed.runtime.eigenvalue import Eigenvalue @@ -122,6 +123,9 @@ def __init__(self, self.gas_boundary_ctr = 0 self.dist_backend = "nccl" + # for debug purposes - can then debug print: debug_get_module_name(module) + debug_extract_module_and_param_names(model) + # Set config using config_params for backwards compat if self.config is None and config_params is not None: self.config = config_params diff --git a/deepspeed/runtime/zero/partition_parameters.py b/deepspeed/runtime/zero/partition_parameters.py index 04a82d177611..5904b01cc45b 100755 --- a/deepspeed/runtime/zero/partition_parameters.py +++ b/deepspeed/runtime/zero/partition_parameters.py @@ -18,6 +18,7 @@ from ..utils import see_memory_usage from deepspeed.utils import log_dist, init_distributed +from deepspeed.utils.debug import debug_param2name_id_shape, debug_module2name, debug_param2name, debug_param2name_id_shape_status, printflock, log_rank_file from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus from ..config import DeepSpeedConfig @@ -27,8 +28,14 @@ def print_rank_0(message, debug=False, force=False): - if torch.distributed.get_rank() == 0 and (debug or force): + rank = torch.distributed.get_rank() + if rank == 0 and (debug or force): print(message) + # other variations + # - print for all ranks w/o interleaving + # printflock(f"[{rank}] {message}") + # - print to log file per rank + # log_rank_file(rank, 
message) def is_zero_param(parameter): @@ -481,12 +488,12 @@ def _post_init_method(self, module): force=False) global param_count - for name, param in module.named_parameters(recurse=False): + for param in module.parameters(recurse=False): param_count += param.numel() if not is_zero_param(param): self._convert_to_deepspeed_param(param) print_rank_0( - f"Partitioning param with ds id {param.ds_id} and shape {param.data.shape}" + f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}" ) param.partition() see_memory_usage( @@ -797,23 +804,23 @@ def _allgather_param(self, param, async_op=False, hierarchy=0): assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}' print_rank_0( - f"{'--'* hierarchy}---- Before allocating Allgather param with id {param.ds_id} and status {param.ds_status} Partition Size {partition_size} and data shape {param.ds_shape}" + f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}" ) see_memory_usage( - f'Before allocate allgather param {param.ds_id} {param.ds_status} {aligned_param_size} {partition_size} {param.ds_shape}', + f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ', force=False) flat_tensor = torch.zeros(aligned_param_size, dtype=param.dtype, device=param.device).view(-1) see_memory_usage( - f'After allocate allgather param {param.ds_id} {param.ds_status} {aligned_param_size} {partition_size} {param.ds_shape}', + f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ', force=False) torch.cuda.synchronize() print_rank_0( - f"{'--'* hierarchy}----Allgather param with id {param.ds_id} and status {param.ds_status} Partition Size {partition_size} and data shape {param.ds_shape}" + f"{'--'* hierarchy}----allgather param with 
{debug_param2name_id_shape_status(param)} partition size={partition_size}" ) # if not flat_tensor.numel() > 100000: # replicated_tensor = flat_tensor.narrow(0, diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py index d492df42067d..5d47edaa44ac 100755 --- a/deepspeed/runtime/zero/stage3.py +++ b/deepspeed/runtime/zero/stage3.py @@ -32,11 +32,18 @@ pg_correctness_test = False FWD_MODULE_STACK = list() +from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id_numel, debug_param2name_id_shape_device, debug_module2name_class, printflock, log_rank_file -def print_rank_0(message, debug=False, force=False): - if torch.distributed.get_rank() == 0 and (debug or force): - logger.info(message) +def print_rank_0(message, debug=False, force=True): + rank = torch.distributed.get_rank() + if rank == 0 and (debug or force): + print(message) + # other variations + # - print for all ranks w/o interleaving + # printflock(f"[{rank}] {message}") + # - print to log file per rank + # log_rank_file(rank, message) def input(msg): @@ -211,7 +218,7 @@ def get_params_to_prefetch(self, sub_module, numel=2000000): # tracing failed. The sub_module passed at the step_id must match with the sub_module during tracing if sub_module.id != self.sub_module_trace[self.step_id]: print_rank_0( - f"Tracing failed. Prefetching is disabled at sub-module: {sub_module.id}" + f"Tracing failed. 
Prefetching is disabled at sub-module: {debug_module2name_id(sub_module)}" ) return [] @@ -390,11 +397,13 @@ def reset_step(self): def fetch_sub_module(self, sub_module): partitioned_params = [] params_in_flight = False - #print_rank_0(f"{'--' * self.hierarchy}Fetching params in module {sub_module.__class__.__name__}") + # print_rank_0(f"{'--' * self.hierarchy}Fetching params in module {sub_module.__class__.__name__}") params_to_fetch = [ param for _, param in sub_module.named_parameters(recurse=False) ] + # print([n for n,p in sub_module.named_parameters(recurse=False)]) + if hasattr(sub_module, 'ds_external_parameters'): print_rank_0( f"{'--' * self.hierarchy}--Fetching external parameters {sub_module.ds_external_parameters()}" @@ -407,7 +416,7 @@ def fetch_sub_module(self, sub_module): for param in params_to_fetch: param.ds_active_sub_modules += 1 print_rank_0( - f"{'--' * self.hierarchy}--Fetching parameters {param.ds_id} with active sub modules {param.ds_active_sub_modules}" + f"{'--' * self.hierarchy}--Fetching parameters {param.ds_id} {param.ds_shape} with active sub modules {param.ds_active_sub_modules}" ) if param.ds_status == ZeroParamStatus.AVAILABLE: @@ -441,14 +450,14 @@ def fetch_sub_module(self, sub_module): for _, param in sub_module.named_parameters(recurse=False): param.ds_status = ZeroParamStatus.AVAILABLE print_rank_0( - f"Param id {param.ds_id}, Shape {param.shape}, device {param.device} norm {param.norm()}", + f"Param {debug_param2name_id_shape_device(param)} norm={param.norm()}", force=False) #print_rank_0(f"After fetching (id, shape, device): {[(param.ds_id, param.shape, param.device) for param in sub_module.named_parameters(recurse=False)]}") def release_sub_module(self, sub_module): self.hierarchy -= 1 print_rank_0( - f"{'--' * self.hierarchy}Releasing params in module {sub_module.__class__.__name__}" + f"{'--' * self.hierarchy}Releasing params in module {debug_module2name_class(sub_module)}" ) params_to_release = [ param for _, @@ -468,31 
+477,31 @@ def release_sub_module(self, sub_module): if not param.ds_active_sub_modules and not self._keep_for_later( sub_module) and not param.ds_persist: print_rank_0( - f"{'--' * self.hierarchy}--Releasing parameters {param.ds_id} with numel {param.numel()} active sub modules {param.ds_active_sub_modules} and keep for later {self._keep_for_later(sub_module)}", + f"{'--' * self.hierarchy}--Releasing parameter {debug_param2name_id_numel(param)} active sub modules {param.ds_active_sub_modules} and keep for later {self._keep_for_later(sub_module)}", force=False) # Keeping track of number of elements that are consumed by available parameters self._decrement_available_parameter_numel(param.ds_numel) see_memory_usage( - f"Before releasing param {param.ds_id} with numel {param.numel()}", + f"Before releasing param {debug_param2name_id_numel(param)}", force=False) param.partition(hierarchy=self.hierarchy) see_memory_usage( - f"After releasing param {param.ds_id} has numel {param.numel()} ", + f"After releasing param {debug_param2name_id_numel(param)}", force=False) param.ds_status = ZeroParamStatus.NOT_AVAILABLE else: print_rank_0( - f"{'--' * self.hierarchy}--Did not release parameters {param.ds_id} with numel {param.numel()} with active sub modules {param.ds_active_sub_modules}, keep for later {self._keep_for_later(sub_module)} and persistence {param.ds_persist}", + f"{'--' * self.hierarchy}--Did not release param {debug_param2name_id_numel(param)} with active sub modules {param.ds_active_sub_modules}, keep for later={self._keep_for_later(sub_module)} and persistence={param.ds_persist}", force=False) def release_and_reset_parameter(self, param): param.ds_active_sub_modules = 0 if param.ds_status == ZeroParamStatus.AVAILABLE: print_rank_0( - f"Releasing unpartitioned {param.ds_id} active sub-modules {param.ds_active_sub_modules} size {param.ds_numel} and persisitence {param.ds_persist}" + f"Releasing unpartitioned param {debug_param2name_id_numel(param)} active 
sub-modules {param.ds_active_sub_modules} and persisitence {param.ds_persist}" ) self._decrement_available_parameter_numel(param.ds_numel) param.partition() diff --git a/deepspeed/utils/debug.py b/deepspeed/utils/debug.py new file mode 100644 index 000000000000..06f966dfa7a5 --- /dev/null +++ b/deepspeed/utils/debug.py @@ -0,0 +1,122 @@ +""" debug utils """ + +import fcntl + +# for debug purposes map module and param objects to their fully qualified names +module_names = {} +param_names = {} + + +def debug_extract_module_and_param_names(model): + # extract the fully qualified names as soon as the model is acquired + global module_names + global param_names + # XXX: can probably make a map of param2module and vice-versa + module_names = {module: name for name, module in model.named_modules()} + param_names = {param: name for name, param in model.named_parameters()} + + +def debug_module2name(module): + if module in module_names: + return module_names[module] + else: + return "unknown" + + +def debug_module2name_id(module): + return f"name={debug_module2name(module)} id={module.id}" + + +def debug_module2name_class(module): + return f"name={debug_module2name(module)} {module.__class__.__name__}" + + +def debug_param2name(param): + if param in param_names: + return param_names[param] + else: + return "unknown" + + +def debug_param2name_id(param): + return f"name={debug_param2name(param)} id={param.ds_id}" + + +def debug_param2name_id_shape(param): + return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}" + + +def debug_param2name_id_shape_device(param): + return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}" + + +def debug_param2name_id_numel(param): + return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}" + + +def debug_param2name_id_shape_status(param): + return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}" + + 
+def printflock(*msgs): + """ + + For printing messages for all concurrent gpus w/o getting interleaved text. + + This is useful when debugging issues where multi-gpus don't sync. + + 1. Enable the force debug in say partitioning and zero3 files + 2. Override the usual versions with :: + + def print_rank_0(message, debug=False, force=True): + rank = torch.distributed.get_rank() + printflock(f"[{rank}] {message}") + 3. run the program and you get both logs non-interleaved + + But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper + function might be more useful, as it's easier to send each log stream into a separate file and + then compare those. + + """ + + with open(__file__, "r") as fh: + fcntl.flock(fh, fcntl.LOCK_EX) + try: + print(*msgs) + finally: + fcntl.flock(fh, fcntl.LOCK_UN) + + +fh = None + + +def log_rank_file(rank, *msgs): + """ + Print to a log file of the given rank + + This is useful for debugging hanging in sync processes. Here is a possible workflow: + + 1. Enable the force debug in say partitioning and zero3 files + 2. Override the usual versions of print_rank_0 in those files with :: + + def print_rank_0(message, debug=False, force=True): + rank = torch.distributed.get_rank() + log_rank_file(rank, message) + + 3. run the program + 4. fix up the expected differences, e.g. different cuda numbers :: + + perl -pi -e 's|cuda:1|cuda:0|' log_rank_* + + 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same + work (e.g. when some layers get conditionally skipped on one gpu but not all) + + diff -u log_rank_0.txt log_rank_1.txt | less + + """ + global fh + if fh is None: + fh = open(f"log_rank_{rank}.txt", "w") + for m in msgs: + fh.write(f"{m}\n") + fh.flush()
While working on integration of wav2vec2 https://github.com/huggingface/transformers/pull/11638 which is dramatically different from transformers models, I run into multiple problems in the model and deepspeed and the current debug tools were very difficult to use. So I developed a whole set of tools which I placed into a new file: `deepspeed/utils/debug.py`. The key change is that now you don't need to compare param ids, which is hard especially when params aren't in sync across different gpus. Instead now you get fully qualified param names in the logs, plus ids, classes, etc. I already integrated a lot of those in zero3, engine and partitioning modules as this is where I was debugging, but it should be easy to start using those in other modules as need arises. One other addition is instrumenting `print_rank_0` to be able to 1. print for all ranks w/o interleaving prints and 2. to log into a file per rank to enable debugging synchronization bugs which requires comparing the logs on each gpu. e.g. wav2vec2 skips layers w/o a sync and that was a hard one to track. It probably should also be moved to `debug.py` as now there are multiple copies of its variations. Hopefully the new debug utils are self-explanatory, but please feel free to further rename them to fit your standards/needs.
https://api.github.com/repos/microsoft/DeepSpeed/pulls/1136
2021-06-05T06:11:33Z
2021-06-23T20:08:30Z
2021-06-23T20:08:29Z
2021-06-23T20:31:37Z
3,714
microsoft/DeepSpeed
10,143
Minor fixes to bigip_cli_alias
diff --git a/lib/ansible/modules/network/f5/bigip_cli_alias.py b/lib/ansible/modules/network/f5/bigip_cli_alias.py index ae39da6df34bfb..e66819dab3d6e1 100644 --- a/lib/ansible/modules/network/f5/bigip_cli_alias.py +++ b/lib/ansible/modules/network/f5/bigip_cli_alias.py @@ -405,8 +405,9 @@ def main(): supports_check_mode=spec.supports_check_mode, ) + client = F5RestClient(**module.params) + try: - client = F5RestClient(**module.params) mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) diff --git a/test/units/modules/network/f5/test_bigip_cli_alias.py b/test/units/modules/network/f5/test_bigip_cli_alias.py index 8b6effd43586c2..6f525d32f98422 100644 --- a/test/units/modules/network/f5/test_bigip_cli_alias.py +++ b/test/units/modules/network/f5/test_bigip_cli_alias.py @@ -8,16 +8,12 @@ import os import json -import pytest import sys from nose.plugins.skip import SkipTest if sys.version_info < (2, 7): raise SkipTest("F5 Ansible modules require Python >= 2.7") -from units.compat import unittest -from units.compat.mock import Mock -from units.compat.mock import patch from ansible.module_utils.basic import AnsibleModule try: @@ -25,17 +21,25 @@ from library.modules.bigip_cli_alias import ModuleParameters from library.modules.bigip_cli_alias import ModuleManager from library.modules.bigip_cli_alias import ArgumentSpec - from library.module_utils.network.f5.common import F5ModuleError - from library.module_utils.network.f5.common import iControlUnexpectedHTTPError - from test.unit.modules.utils import set_module_args + + # In Ansible 2.8, Ansible changed import paths. 
+ from test.units.compat import unittest + from test.units.compat.mock import Mock + from test.units.compat.mock import patch + + from test.units.modules.utils import set_module_args except ImportError: try: from ansible.modules.network.f5.bigip_cli_alias import ApiParameters from ansible.modules.network.f5.bigip_cli_alias import ModuleParameters from ansible.modules.network.f5.bigip_cli_alias import ModuleManager from ansible.modules.network.f5.bigip_cli_alias import ArgumentSpec - from ansible.module_utils.network.f5.common import F5ModuleError - from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError + + # Ansible 2.8 imports + from units.compat import unittest + from units.compat.mock import Mock + from units.compat.mock import patch + from units.modules.utils import set_module_args except ImportError: raise SkipTest("F5 Ansible modules require the f5-sdk Python library") @@ -83,8 +87,6 @@ def test_api_parameters(self): assert p.description == 'Run the bash shell' -@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root', - return_value=True) class TestManager(unittest.TestCase): def setUp(self):
##### SUMMARY <!--- Describe the change below, including rationale and design decisions --> <!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue --> Minor fixes to bigip_cli_alias ##### ISSUE TYPE <!--- Pick one below and delete the rest --> - Bugfix Pull Request ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below --> bigip_cli_alias ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.8.0.dev0 config file = None configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.6/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.6.7 (default, Oct 24 2018, 22:47:56) [GCC 6.3.0 20170516] ``` ##### ADDITIONAL INFORMATION <!--- Include additional information to help people understand the change here --> <!--- A step-by-step reproduction of the problem is helpful if there is no related issue --> <!--- Paste verbatim command output below, e.g. before and after your change --> ```paste below ```
https://api.github.com/repos/ansible/ansible/pulls/48452
2018-11-10T04:13:50Z
2018-11-10T04:39:51Z
2018-11-10T04:39:51Z
2019-07-22T17:14:59Z
734
ansible/ansible
48,864
blib2to3: support unparenthesized wulruses in more places
diff --git a/CHANGES.md b/CHANGES.md index 47e64cfa274..d28d766f4c0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -11,6 +11,8 @@ hardened to handle more edge cases during quote normalization (#2437) - Avoid changing a function return type annotation's type to a tuple by adding a trailing comma (#2384) +- Parsing support has been added for unparenthesized walruses in set literals, set + comprehensions, and indices (#2447). ### _Blackd_ diff --git a/src/blib2to3/Grammar.txt b/src/blib2to3/Grammar.txt index 69b9af96608..ac8a067378d 100644 --- a/src/blib2to3/Grammar.txt +++ b/src/blib2to3/Grammar.txt @@ -157,14 +157,14 @@ testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test| lambdef: 'lambda' [varargslist] ':' test trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] -subscript: test | [test] ':' [test] [sliceop] +subscript: test [':=' test] | [test] ':' [test] [sliceop] sliceop: ':' [test] exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] testlist: test (',' test)* [','] dictsetmaker: ( ((test ':' test | '**' expr) (comp_for | (',' (test ':' test | '**' expr))* [','])) | - ((test | star_expr) - (comp_for | (',' (test | star_expr))* [','])) ) + ((test [':=' test] | star_expr) + (comp_for | (',' (test [':=' test] | star_expr))* [','])) ) classdef: 'class' NAME ['(' [arglist] ')'] ':' suite diff --git a/src/blib2to3/README b/src/blib2to3/README index a43f15cb37d..ccad28337b6 100644 --- a/src/blib2to3/README +++ b/src/blib2to3/README @@ -13,4 +13,9 @@ Reasons for forking: - ability to Cythonize Change Log: -- Changes default logger used by Driver \ No newline at end of file +- Changes default logger used by Driver +- Backported the following upstream parser changes: + - "bpo-42381: Allow walrus in set literals and set comprehensions (GH-23332)" + https://github.com/python/cpython/commit/cae60187cf7a7b26281d012e1952fafe4e2e97e9 + - "bpo-42316: Allow unparenthesized walrus operator in 
indexes (GH-23317)" + https://github.com/python/cpython/commit/b0aba1fcdc3da952698d99aec2334faa79a8b68c diff --git a/tests/data/pep_572_py310.py b/tests/data/pep_572_py310.py new file mode 100644 index 00000000000..2aef589ce8d --- /dev/null +++ b/tests/data/pep_572_py310.py @@ -0,0 +1,4 @@ +# Unparenthesized walruses are now allowed in indices since Python 3.10. +x[a:=0] +x[a:=0, b:=1] +x[5, b:=0] diff --git a/tests/data/pep_572_py39.py b/tests/data/pep_572_py39.py new file mode 100644 index 00000000000..7bbd5091197 --- /dev/null +++ b/tests/data/pep_572_py39.py @@ -0,0 +1,7 @@ +# Unparenthesized walruses are now allowed in set literals & set comprehensions +# since Python 3.9 +{x := 1, 2, 3} +{x4 := x ** 5 for x in range(7)} +# We better not remove the parentheses here (since it's a 3.10 feature) +x[(a := 1)] +x[(a := 1), (b := 3)] diff --git a/tests/test_black.py b/tests/test_black.py index 5c720507216..8a37f7c65b4 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -26,6 +26,7 @@ import pytest import unittest from unittest.mock import patch, MagicMock +from parameterized import parameterized import click from click import unstyle @@ -299,6 +300,14 @@ def test_pep_572_version_detection(self) -> None: versions = black.detect_target_versions(root) self.assertIn(black.TargetVersion.PY38, versions) + @parameterized.expand([(3, 9), (3, 10)]) + def test_pep_572_newer_syntax(self, major: int, minor: int) -> None: + source, expected = read_data(f"pep_572_py{major}{minor}") + actual = fs(source, mode=DEFAULT_MODE) + self.assertFormatEqual(expected, actual) + if sys.version_info >= (major, minor): + black.assert_equivalent(source, actual) + def test_expression_ff(self) -> None: source, expected = read_data("expression") tmp_file = Path(black.dump_to_file(source))
### Description Implementation stolen from PR davidhalter/parso#162. Thanks parso! I could add support for these newer syntactical constructs in the target version detection logic, but until I get diff-shades up and running I don't feel very comfortable adding the code. ### Checklist - did you ... - [x] Add a CHANGELOG entry if necessary? - [x] Add / update tests if necessary? - [x] Add new / update outdated documentation? -- not applicable
https://api.github.com/repos/psf/black/pulls/2447
2021-08-26T20:29:58Z
2021-08-26T20:59:01Z
2021-08-26T20:59:01Z
2021-08-26T20:59:04Z
1,280
psf/black
24,388
Issue35925 Remove trailing commas
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 724558bd49ea2..274860b3fdb5c 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1846,7 +1846,7 @@ def test_multilevel_index_loc_order(self, dim, keys, expected): # GH 22797 # Try to respect order of keys given for MultiIndex.loc kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]} - df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs,) + df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs) exp_index = MultiIndex.from_arrays(expected) if dim == "index": res = df.loc[keys, :] diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 0d60e6e8a978f..c45e4508c6153 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -285,7 +285,7 @@ def test_nansum(self, skipna): def test_nanmean(self, skipna): self.check_funs( - nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False, + nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False ) def test_nanmean_overflow(self): diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py index a3de8aa69f840..158b994cf03ae 100644 --- a/pandas/tests/window/moments/test_moments_consistency_rolling.py +++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py @@ -95,7 +95,7 @@ def test_rolling_apply_consistency( with warnings.catch_warnings(): warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning, + "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning ) # test consistency between rolling_xyz() and either (a) # rolling_apply of Series.xyz(), or (b) rolling_apply of @@ -107,7 +107,7 @@ def test_rolling_apply_consistency( functions = no_nan_functions + base_functions for (f, require_min_periods, name) in functions: rolling_f = getattr( - x.rolling(window=window, center=center, 
min_periods=min_periods), name, + x.rolling(window=window, center=center, min_periods=min_periods), name ) if ( @@ -492,7 +492,7 @@ def test_moment_functions_zero_length_pairwise(): df2["a"] = df2["a"].astype("float64") df1_expected = DataFrame( - index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([]), + index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([]) ) df2_expected = DataFrame( index=pd.MultiIndex.from_product( @@ -635,7 +635,7 @@ def test_rolling_consistency(consistency_data, window, min_periods, center): # with empty/0-length Series/DataFrames with warnings.catch_warnings(): warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning, + "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning ) # test consistency between different rolling_* moments diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py index 89d46a8bb6cb5..a83bfabc4a048 100644 --- a/pandas/tests/window/moments/test_moments_ewm.py +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -73,7 +73,7 @@ def simple_wma(s, w): (s1, True, True, [(1.0 - alpha), np.nan, 1.0]), (s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]), (s1, False, True, [(1.0 - alpha), np.nan, alpha]), - (s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan],), + (s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan]), (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]), ( s2, @@ -95,7 +95,7 @@ def simple_wma(s, w): alpha * ((1.0 - alpha) ** 2 + alpha), ], ), - (s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha],), + (s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha]), ]: expected = simple_wma(s, Series(w)) result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean() diff --git a/pandas/tests/window/moments/test_moments_rolling.py 
b/pandas/tests/window/moments/test_moments_rolling.py index 81f020fe7de23..da256e80dff7e 100644 --- a/pandas/tests/window/moments/test_moments_rolling.py +++ b/pandas/tests/window/moments/test_moments_rolling.py @@ -150,14 +150,14 @@ def get_result(obj, window, min_periods=None, center=False): series_xp = ( get_result( - series.reindex(list(series.index) + s), window=25, min_periods=minp, + series.reindex(list(series.index) + s), window=25, min_periods=minp ) .shift(-12) .reindex(series.index) ) frame_xp = ( get_result( - frame.reindex(list(frame.index) + s), window=25, min_periods=minp, + frame.reindex(list(frame.index) + s), window=25, min_periods=minp ) .shift(-12) .reindex(frame.index) @@ -169,14 +169,14 @@ def get_result(obj, window, min_periods=None, center=False): else: series_xp = ( get_result( - series.reindex(list(series.index) + s), window=25, min_periods=0, + series.reindex(list(series.index) + s), window=25, min_periods=0 ) .shift(-12) .reindex(series.index) ) frame_xp = ( get_result( - frame.reindex(list(frame.index) + s), window=25, min_periods=0, + frame.reindex(list(frame.index) + s), window=25, min_periods=0 ) .shift(-12) .reindex(frame.index) diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py index 2300d8dd5529b..ab73e075eed04 100644 --- a/pandas/tests/window/test_base_indexer.py +++ b/pandas/tests/window/test_base_indexer.py @@ -88,8 +88,8 @@ def get_window_bounds(self, num_values, min_periods, center, closed): @pytest.mark.parametrize( "func,np_func,expected,np_kwargs", [ - ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {},), - ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {},), + ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {}), + ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}), ( "max", np.max, @@ -204,7 +204,7 @@ def test_rolling_forward_skewness(constructor): @pytest.mark.parametrize( 
"func,expected", [ - ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan],), + ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]), ( "corr", [ diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py index e82d4b8cbf770..7425cc5df4c2f 100644 --- a/pandas/tests/window/test_pairwise.py +++ b/pandas/tests/window/test_pairwise.py @@ -195,7 +195,7 @@ def test_cov_mulittindex(self): columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")]) index = range(3) - df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns,) + df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns) result = df.ewm(alpha=0.1).cov() diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 8d72e2cb92ca9..67b20fd2d6daa 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -73,7 +73,7 @@ def test_constructor_with_timedelta_window(window): # GH 15440 n = 10 df = DataFrame( - {"value": np.arange(n)}, index=pd.date_range("2015-12-24", periods=n, freq="D"), + {"value": np.arange(n)}, index=pd.date_range("2015-12-24", periods=n, freq="D") ) expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3)) @@ -92,7 +92,7 @@ def test_constructor_timedelta_window_and_minperiods(window, raw): # GH 15305 n = 10 df = DataFrame( - {"value": np.arange(n)}, index=pd.date_range("2017-08-08", periods=n, freq="D"), + {"value": np.arange(n)}, index=pd.date_range("2017-08-08", periods=n, freq="D") ) expected = DataFrame( {"value": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))}, @@ -153,7 +153,7 @@ def test_closed_one_entry(func): def test_closed_one_entry_groupby(func): # GH24718 ser = pd.DataFrame( - data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3), + data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3) ) result = getattr( ser.groupby("A", sort=False)["B"].rolling("10D", 
closed="left"), func @@ -182,7 +182,7 @@ def test_closed_one_entry_groupby(func): def test_closed_min_max_datetime(input_dtype, func, closed, expected): # see gh-21704 ser = pd.Series( - data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10), + data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10) ) result = getattr(ser.rolling("3D", closed=closed), func)() diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index f80ff1a53cd69..8ef6dac2862db 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -548,7 +548,7 @@ def is_superperiod(source, target) -> bool: def _maybe_coerce_freq(code) -> str: - """ we might need to coerce a code to a rule_code + """we might need to coerce a code to a rule_code and uppercase it Parameters diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 0dad8c7397e37..ca7b99492bbf7 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -186,10 +186,10 @@ def skip_if_no(package: str, min_version: Optional[str] = None): is_platform_windows(), reason="not used on win32" ) skip_if_has_locale = pytest.mark.skipif( - _skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}", + _skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}" ) skip_if_not_us_locale = pytest.mark.skipif( - _skip_if_not_us_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}", + _skip_if_not_us_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}" ) skip_if_no_scipy = pytest.mark.skipif( _skip_if_no_scipy(), reason="Missing SciPy requirement"
#35925 Files edited: - pandas/tests/test_multilevel.py - pandas/tests/test_nanops.py - pandas/tests/window/moments/test_moments_consistency_rolling.py - pandas/tests/window/moments/test_moments_ewm.py - pandas/tests/window/moments/test_moments_rolling.py - pandas/tests/window/test_base_indexer.py - pandas/tests/window/test_pairwise.py - pandas/tests/window/test_rolling.py - pandas/tseries/frequencies.py - pandas/util/_test_decorators.py
https://api.github.com/repos/pandas-dev/pandas/pulls/35996
2020-08-30T17:56:06Z
2020-08-31T09:59:18Z
2020-08-31T09:59:18Z
2020-08-31T09:59:26Z
3,387
pandas-dev/pandas
45,140
Fix code sample for "65539 local variables" example
diff --git a/README.md b/README.md index 68b6c09..44ba6e4 100644 --- a/README.md +++ b/README.md @@ -2264,7 +2264,8 @@ nan ```py import dis exec(""" - def f():* """ + """ + def f(): + """ + """ """.join(["X"+str(x)+"=" + str(x) for x in range(65539)])) f()
https://api.github.com/repos/satwikkansal/wtfpython/pulls/60
2018-01-27T16:35:10Z
2018-01-29T10:10:03Z
2018-01-29T10:10:03Z
2018-01-29T10:39:49Z
114
satwikkansal/wtfpython
25,779
[MRG] Makes roc_auc_score and average_precision_score docstrings more explicit
diff --git a/sklearn/metrics/ranking.py b/sklearn/metrics/ranking.py index 2003ed8b314c8..fde1f1c441125 100644 --- a/sklearn/metrics/ranking.py +++ b/sklearn/metrics/ranking.py @@ -116,7 +116,7 @@ def average_precision_score(y_true, y_score, average="macro", Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] - True binary labels in binary label indicators. + True binary labels (either {0, 1} or {-1, 1}). y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive @@ -200,7 +200,7 @@ def roc_auc_score(y_true, y_score, average="macro", sample_weight=None): Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] - True binary labels in binary label indicators. + True binary labels (either {0, 1} or {-1, 1}). y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive @@ -438,8 +438,8 @@ def roc_curve(y_true, y_score, pos_label=None, sample_weight=None, ---------- y_true : array, shape = [n_samples] - True binary labels in range {0, 1} or {-1, 1}. If labels are not - binary, pos_label should be explicitly given. + True binary labels. If labels are not either {-1, 1} or {0, 1}, then + pos_label should be explicitly given. y_score : array, shape = [n_samples] Target scores, can either be probability estimates of the positive
<!-- Thanks for contributing a pull request! Please ensure you have taken a look at the contribution guidelines: https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md#Contributing-Pull-Requests --> #### Reference Issue <!-- Example: Fixes #1234 --> Fixes issue #9554 #### What does this implement/fix? Explain your changes. Changes the description of the `y_true` parameter for `metrics.roc_auc_score` and `metrics.average_precision_score` to be slightly more explicit about the allowed values. This is more in line with the `metrics.roc_curve` and `metrics.precision_recall_curve` description of `y_true`. Specifically, this PR makes the following modification. Current description: `True binary labels in binary label indicators.` Modified description: `True binary labels (either {0, 1} or {-1, 1}) or binary label indicators.` This could help with avoid confusion in situations like the following ```python from sklearn.metrics import roc_auc_score import numpy as np y_true = np.array([-1, -1, 0, 0]) y_scores = np.array([0.1, 0.4, 0.35, 0.8]) roc_auc_score(y_true, y_scores) ``` which yields ```python ValueError: Data is not binary and pos_label is not specified ``` Even though there are only two unique classes in `y_true`. <!-- Please be aware that we are a loose team of volunteers so patience is necessary; assistance handling other issues is very welcome. We value all user contributions, no matter how minor they are. If we are slow to review, either the pull request needs some benchmarking, tinkering, convincing, etc. or more likely the reviewers are simply busy. In either case, we ask for your understanding during the review process. For more information, see our FAQ on this topic: http://scikit-learn.org/dev/faq.html#why-is-my-pull-request-not-getting-any-attention. Thanks for contributing! -->
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/9557
2017-08-15T19:20:30Z
2017-08-22T00:02:03Z
2017-08-22T00:02:03Z
2017-08-22T00:02:03Z
440
scikit-learn/scikit-learn
46,411
proper initialization
diff --git a/homeassistant/components/sensor/tellduslive.py b/homeassistant/components/sensor/tellduslive.py index ae05ce47e195ab..364b790ce6f03e 100644 --- a/homeassistant/components/sensor/tellduslive.py +++ b/homeassistant/components/sensor/tellduslive.py @@ -18,7 +18,6 @@ ATTR_LAST_UPDATED = "time_last_updated" _LOGGER = logging.getLogger(__name__) -DEPENDENCIES = ['tellduslive'] SENSOR_TYPE_TEMP = "temp" SENSOR_TYPE_HUMIDITY = "humidity" @@ -43,6 +42,8 @@ def setup_platform(hass, config, add_devices, discovery_info=None): """ Sets up Tellstick sensors. """ + if discovery_info is None: + return sensors = tellduslive.NETWORK.get_sensors() devices = [] diff --git a/homeassistant/components/switch/tellduslive.py b/homeassistant/components/switch/tellduslive.py index d515dcb50a29cc..b6c7af3ce12aba 100644 --- a/homeassistant/components/switch/tellduslive.py +++ b/homeassistant/components/switch/tellduslive.py @@ -15,11 +15,12 @@ from homeassistant.helpers.entity import ToggleEntity _LOGGER = logging.getLogger(__name__) -DEPENDENCIES = ['tellduslive'] def setup_platform(hass, config, add_devices, discovery_info=None): """ Find and return Tellstick switches. """ + if discovery_info is None: + return switches = tellduslive.NETWORK.get_switches() add_devices([TelldusLiveSwitch(switch["name"], switch["id"])
fix issue where sensors and switches were duplicated because of component getting initialized twice. closes #913
https://api.github.com/repos/home-assistant/core/pulls/931
2016-01-18T18:47:41Z
2016-01-19T02:01:12Z
2016-01-19T02:01:12Z
2017-03-17T20:23:25Z
385
home-assistant/core
39,230
Run LocalStack tests against LocalStack Pro in Github Action
diff --git a/.github/workflows/pro-integration.yml b/.github/workflows/pro-integration.yml new file mode 100644 index 0000000000000..f6cd54fd34646 --- /dev/null +++ b/.github/workflows/pro-integration.yml @@ -0,0 +1,130 @@ +name: integration-tests-against-pro +on: + workflow_dispatch: + inputs: + targetRef: + description: 'LocalStack Pro Ref to test with' + required: true + default: 'master' + pull_request: + branches: + - master + push: + branches: + - master + +jobs: + run-integration-tests: + runs-on: ubuntu-latest + defaults: + run: + working-directory: localstack-ext + environment: localstack-ext-tests + if: github.event.pull_request.head.repo.full_name == github.repository # skip job if fork PR + steps: + - name: Checkout Pro + uses: actions/checkout@v2 + with: + repository: localstack/localstack-ext + ref: ${{ github.event.inputs.targetRef }} + token: ${{ secrets.PRO_ACCESS_TOKEN }} + path: localstack-ext + - name: Checkout Open Source + uses: actions/checkout@v2 + with: + path: localstack + - name: Set up Python 3.8 + id: setup-python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Set up Node 14.x + uses: actions/setup-node@v2 + with: + node-version: 14.x + - name: Set up JDK 11 + uses: actions/setup-java@v2 + with: + java-version: '11' + distribution: 'temurin' + - uses: hashicorp/setup-terraform@v1 + with: + terraform_version: 0.13.7 + - name: Set up system wide dependencies + run: | + sudo apt-get install libsasl2-dev jq + - name: Cache LocalStack-ext dependencies (venv) + uses: actions/cache@v2 + id: ext-cache + with: + path: localstack-ext/.venv + key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('localstack-ext/setup.cfg', 'localstack-ext/pyproject.toml') }} + - name: Install Python Dependencies for LocalStack Pro + run: make install + - name: Cache LocalStack community dependencies (venv, infra) + uses: actions/cache@v2 + id: os-cache + with: + path: | + localstack/.venv + 
localstack/localstack/infra + localstack/localstack/node_modules + key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('localstack/localstack/services/install.py', 'localstack/requirements.txt', 'localstack/localstack/constants.py') }} + - name: Install Dependencies for LocalStack Community # lambda tests look for libraries in this virtualenv + working-directory: localstack + run: | + make install + - name: Link community LocalStack into Pro venv + run: | + source .venv/bin/activate + pip install -e ../localstack + - name: Test LocalStack Pro startup + env: + DEBUG: 1 + DNS_ADDRESS: 0 + LOCALSTACK_API_KEY: "test" + run: | + source .venv/bin/activate + bin/test_localstack_pro.sh + - name: Run community integration tests + env: + DEBUG: 0 + PROXY_MAX_RETRIES: 0 + DNS_ADDRESS: 0 + LAMBDA_EXECUTOR: "local" + LOCALSTACK_API_KEY: "test" + AWS_SECRET_ACCESS_KEY: "test" + AWS_ACCESS_KEY_ID: "test" + AWS_DEFAULT_REGION: "us-east-1" + HOST_TMP_FOLDER: /tmp/localstack + run: | + source .venv/bin/activate + python -m pytest --durations=10 --junitxml=target/reports/pytest.xml --show-capture=no ../localstack/tests/integration/ + - name: Run Lambda Tests for lambda executor docker + env: + DEBUG: 0 + PROXY_MAX_RETRIES: 0 + DNS_ADDRESS: 0 + LAMBDA_EXECUTOR: "docker" + LOCALSTACK_API_KEY: "test" + HOST_TMP_FOLDER: /tmp/localstack + run: | + source .venv/bin/activate + python -m pytest --durations=10 --show-capture=no --junitxml=target/reports/lambda-docker.xml -o junit_suite_name='lambda-docker' ../localstack/tests/integration/test_lambda.py ../localstack/tests/integration/test_integration.py + - name: Run Lambda Tests for lambda executor docker-reuse + env: + DEBUG: 0 + PROXY_MAX_RETRIES: 0 + DNS_ADDRESS: 0 + LAMBDA_EXECUTOR: "docker-reuse" + LOCALSTACK_API_KEY: "test" + HOST_TMP_FOLDER: /tmp/localstack + run: | + source .venv/bin/activate + python -m pytest --durations=10 --show-capture=no 
--junitxml=target/reports/lambda-docker-reuse.xml -o junit_suite_name='lambda-docker-reuse' ../localstack/tests/integration/test_lambda.py ../localstack/tests/integration/test_integration.py + - name: Publish LocalStack Community Integration Test Results + uses: EnricoMi/publish-unit-test-result-action@v1 + if: always() + with: + files: localstack-ext/target/reports/*.xml + check_name: LocalStack integration with Pro
This Action runs the LocalStack integration tests against a Pro Instance, to check if any problems arise and reports them. This should provide more confidence that LocalStack changes do not brick any features in Pro or vice versa. To be merged after #4485
https://api.github.com/repos/localstack/localstack/pulls/4486
2021-08-25T11:33:51Z
2021-09-08T11:19:36Z
2021-09-08T11:19:36Z
2021-09-08T14:16:31Z
1,394
localstack/localstack
28,909
Update README.md
diff --git a/README.md b/README.md index 9cc4603d544..c2f7885b220 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,7 @@ Translations * [Georgian](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-ka-GE.md) * [German](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-de-GER.md) * [Greek](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-gr-GR.md) +* [Hindi](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-in-HI.md) * [Indonesian](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-id-ID.md) * [Italian](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-it-IT.md) * [Japanese](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-ja-JP.md) @@ -73,4 +74,4 @@ Translations * [Spanish](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-es-MX.md) * [Turkish](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-tr-TR.md) * [Ukrainian](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-uk-UA.md) -* [Vietnamese](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-vi-VN.md) \ No newline at end of file +* [Vietnamese](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-vi-VN.md)
https://api.github.com/repos/sqlmapproject/sqlmap/pulls/5552
2023-10-22T06:19:26Z
2023-10-22T09:11:51Z
2023-10-22T09:11:50Z
2023-10-22T09:11:51Z
407
sqlmapproject/sqlmap
14,933
Bugfix for renaming of faces
diff --git a/tools/alignments.py b/tools/alignments.py index 6a1cbe498b..4ae27ab8be 100644 --- a/tools/alignments.py +++ b/tools/alignments.py @@ -38,8 +38,9 @@ def process(self): job = Draw(self.alignments, self.args) elif self.args.job == "extract": job = Extract(self.alignments, self.args) - elif self.args.job in("missing-alignments", "missing-frames", "leftover-faces", - "multi-faces", "no-faces"): + elif self.args.job in("missing-alignments", "missing-frames", + "multi-faces", "leftover-faces", + "no-faces"): job = Check(self.alignments, self.args) elif self.args.job == "remove": job = RemoveAlignments(self.alignments, self.args) @@ -172,8 +173,7 @@ def __init__(self, folder, verbose): self.verbose = verbose self.folder = folder self.check_folder_exists() - self.file_list_sorted = sorted([item - for item in self.process_folder()]) + self.file_list_sorted = self.sorted_items() self.items = self.load_items() self.count = len(self.file_list_sorted) if self.verbose: @@ -194,6 +194,11 @@ def valid_extension(filename): extension = os.path.splitext(filename)[1] return bool(extension in _image_extensions) + @staticmethod + def sorted_items(): + """ Override for specific folder processing """ + return list() + @staticmethod def process_folder(): """ Override for specific folder processing """ @@ -243,6 +248,11 @@ def load_items(self): faces[original_file].append(index) return faces + def sorted_items(self): + """ Return the items sorted by filename then index """ + return sorted([item for item in self.process_folder()], + key=lambda x: (x[2], x[3])) + class Frames(MediaLoader): """ Object to hold the frames that are to be checked against """ @@ -264,6 +274,10 @@ def load_items(self): frame[frame.rfind("."):]) return frames + def sorted_items(self): + """ Return the items sorted by filename """ + return sorted([item for item in self.process_folder()]) + class Draw(object): """ Draw Alignments on passed in images """ @@ -444,7 +458,7 @@ def process(self): def 
faces_count_matches(self, item): """ Check the selected face exits """ image_name, number_alignments = item[0], item[2] - number_faces = len(self.faces.items.get(image_name, [])) + number_faces = len(self.faces.items.get(image_name, None)) return bool(number_alignments == 0 or number_alignments == number_faces) @@ -526,10 +540,10 @@ def __init__(self, alignments, arguments): def get_source_dir(self, arguments): """ Set the correct source dir """ - if hasattr(arguments, "faces_dir"): + if hasattr(arguments, "faces_dir") and arguments.faces_dir: self.type = "faces" source_dir = arguments.faces_dir - elif hasattr(arguments, "frames_dir"): + elif hasattr(arguments, "frames_dir") and arguments.frames_dir: self.type = "frames" source_dir = arguments.frames_dir else: @@ -559,9 +573,11 @@ def validate(self): "there will be nothing to move. " "Defaulting to output: console") self.output = "console" - elif self.type == "faces" and self.job != "multi-faces" and self.job != "leftover-faces": - print("WARNING: The selected folder is not valid. Only folder set " - "with '-fc' is supported for 'multi-faces' and 'leftover-faces'") + elif self.type == "faces" and self.job not in ("multi-faces", + "leftover-faces"): + print("WARNING: The selected folder is not valid. 
" + "Only folder set with '-fc' is supported for " + "'multi-faces' and 'leftover-faces'") exit(0) def compile_output(self): @@ -617,10 +633,9 @@ def get_leftover_faces(self): for item in self.items: frame_id = item[2] + item[1] - if frame_id not in self.alignments_data or self.alignments_data[frame_id] <= item[3]: + if (frame_id not in self.alignments_data + or self.alignments_data[frame_id] <= item[3]): yield item[0] + item[1] - - print("Done") def output_results(self): """ Output the results in the requested format """ diff --git a/tools/cli.py b/tools/cli.py index 9c4de18451..478521bcd4 100644 --- a/tools/cli.py +++ b/tools/cli.py @@ -20,8 +20,9 @@ def get_argument_list(self): "type": str, "choices": ("draw", "extract", "missing-alignments", - "missing-frames", "leftover-faces", "multi-faces", - "no-faces", "reformat", "remove"), + "missing-frames", "leftover-faces", + "multi-faces", "no-faces", + "reformat", "remove"), "required": True, "help": "R|Choose which action you want to " "perform.\nNB: All actions require an "
An issue occurred when more than 10 faces were found within a frame. On removing frames and renaming, indexes were not processed in the correct order, meaning that files could potentially be renamed to existing file names. This fixes that bug
https://api.github.com/repos/deepfakes/faceswap/pulls/485
2018-08-28T21:21:21Z
2018-08-28T21:21:37Z
2018-08-28T21:21:37Z
2018-08-28T21:21:37Z
1,251
deepfakes/faceswap
18,596
Update lstm_seq2seq.py(from 22% to 87% acc)
diff --git a/examples/lstm_seq2seq.py b/examples/lstm_seq2seq.py index 88fa5768858..e320fd1bf85 100644 --- a/examples/lstm_seq2seq.py +++ b/examples/lstm_seq2seq.py @@ -114,6 +114,7 @@ for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)): for t, char in enumerate(input_text): encoder_input_data[i, t, input_token_index[char]] = 1. + encoder_input_data[i, t + 1:, input_token_index[' ']] = 1. for t, char in enumerate(target_text): # decoder_target_data is ahead of decoder_input_data by one timestep decoder_input_data[i, t, target_token_index[char]] = 1. @@ -121,7 +122,8 @@ # decoder_target_data will be ahead by one timestep # and will not include the start character. decoder_target_data[i, t - 1, target_token_index[char]] = 1. - + decoder_input_data[i, t + 1:, target_token_index[' ']] = 1. + decoder_target_data[i, t:, target_token_index[' ']] = 1. # Define an input sequence and process it. encoder_inputs = Input(shape=(None, num_encoder_tokens)) encoder = LSTM(latent_dim, return_state=True) @@ -145,7 +147,8 @@ model = Model([encoder_inputs, decoder_inputs], decoder_outputs) # Run training -model.compile(optimizer='rmsprop', loss='categorical_crossentropy') +model.compile(optimizer='rmsprop', loss='categorical_crossentropy', + metrics=['accuracy']) model.fit([encoder_input_data, decoder_input_data], decoder_target_data, batch_size=batch_size, epochs=epochs,
I added the codes for applying one-hot encoding on the end of sentences about encoder_input_data, decoder_input_data, and decoder_target_data. I added an accuracy metric for model training. The original code has 22% accuracy, but the proposed code had 87% validation accuracy. <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md --> ### Summary Applying one-hot encoding on the end of sentences ### Related Issues #13266 ### PR Overview - [n] This PR requires new unit tests [y/n] (make sure tests are included) - [n] This PR requires to update the documentation [y/n] (make sure the docs are up-to-date) - [y] This PR is backwards compatible [y/n] - [n] This PR changes the current API [y/n] (all API changes need to be approved by fchollet)
https://api.github.com/repos/keras-team/keras/pulls/13269
2019-08-30T06:27:05Z
2019-09-11T20:19:52Z
2019-09-11T20:19:52Z
2019-09-11T20:19:52Z
392
keras-team/keras
47,769
bitfinex2: unify market
diff --git a/js/bitfinex2.js b/js/bitfinex2.js index fc75093e7138..9ca7e06d17f0 100644 --- a/js/bitfinex2.js +++ b/js/bitfinex2.js @@ -990,8 +990,9 @@ module.exports = class bitfinex2 extends Exchange { */ await this.loadMarkets (); const precision = this.safeValue (this.options, 'precision', 'R0'); + const market = this.market (symbol); const request = { - 'symbol': this.marketId (symbol), + 'symbol': market['id'], 'precision': precision, }; if (limit !== undefined) { @@ -1001,7 +1002,7 @@ module.exports = class bitfinex2 extends Exchange { const orderbook = await this.publicGetBookSymbolPrecision (fullRequest); const timestamp = this.milliseconds (); const result = { - 'symbol': symbol, + 'symbol': market['symbol'], 'bids': [], 'asks': [], 'timestamp': timestamp,
https://api.github.com/repos/ccxt/ccxt/pulls/14140
2022-07-01T03:20:23Z
2022-07-01T05:04:54Z
2022-07-01T05:04:54Z
2022-07-01T05:04:54Z
242
ccxt/ccxt
13,095
Added new sandbox configs to sweep.yaml
diff --git a/sweep.yaml b/sweep.yaml index dd2e801d3e..a4aac75937 100644 --- a/sweep.yaml +++ b/sweep.yaml @@ -9,7 +9,10 @@ description: 'Python project for AI code generation with next token prediction L # Default Values: https://github.com/sweepai/sweep/blob/main/sweep.yaml sandbox: - enabled: true - image: "Python3" - install: "python -m venv venv; . venv/bin/activate; pip install black" - formatter: ". venv/bin/activate; black {files}" + install: + - pre-commit install + check: + - pre-commit run --files {file_path} + +rules: + - Ensure all new functions and classes have very clear, concise and up-to-date docstrings. Take gpt_engineer/ai.py as a good example.
This PR adds the new sandbox configs to `sweep.yaml` which runs pre-commit before every Sweep commit. I also added rules, which makes Sweep ensure that all newly edited functions and classes will have up-to-date docstrings.
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/748
2023-09-29T22:18:09Z
2023-10-02T17:55:00Z
2023-10-02T17:55:00Z
2023-10-02T17:55:01Z
207
gpt-engineer-org/gpt-engineer
33,362
Fix index variable in for loop in paddlestructure.cpp
diff --git a/deploy/cpp_infer/src/paddlestructure.cpp b/deploy/cpp_infer/src/paddlestructure.cpp index b2e35f8c77..994df0ca0b 100644 --- a/deploy/cpp_infer/src/paddlestructure.cpp +++ b/deploy/cpp_infer/src/paddlestructure.cpp @@ -144,7 +144,7 @@ PaddleStructure::rebuild_table(std::vector<std::string> structure_html_tags, std::vector<std::vector<float>> dis_list(structure_boxes.size(), std::vector<float>(3, 100000.0)); for (int j = 0; j < structure_boxes.size(); j++) { - if (structure_boxes[i].size() == 8) { + if (structure_boxes[j].size() == 8) { structure_box = Utility::xyxyxyxy2xyxy(structure_boxes[j]); } else { structure_box = structure_boxes[j];
PaddleStructure::rebuild_table函数中`structure_boxes`的索引用错了,可能导致dis和iou无法正确计算。 原代码段: ``` for (int j = 0; j < structure_boxes.size(); j++) { if (structure_boxes[i].size() == 8) { structure_box = Utility::xyxyxyxy2xyxy(structure_boxes[j]); } else { structure_box = structure_boxes[j]; } dis_list[j][0] = this->dis(ocr_box, structure_box); dis_list[j][1] = 1 - Utility::iou(ocr_box, structure_box); dis_list[j][2] = j; } ``` 应改为: ``` for (int j = 0; j < structure_boxes.size(); j++) { if (structure_boxes[j].size() == 8) { structure_box = Utility::xyxyxyxy2xyxy(structure_boxes[j]); } else { structure_box = structure_boxes[j]; } dis_list[j][0] = this->dis(ocr_box, structure_box); dis_list[j][1] = 1 - Utility::iou(ocr_box, structure_box); dis_list[j][2] = j; } ```
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/10810
2023-09-02T03:33:26Z
2023-09-21T02:32:43Z
2023-09-21T02:32:43Z
2023-09-21T09:40:45Z
209
PaddlePaddle/PaddleOCR
42,659
Update expected values in CodeGen tests
diff --git a/tests/models/codegen/test_modeling_codegen.py b/tests/models/codegen/test_modeling_codegen.py index 37919b04398a7..b59adc78181d1 100644 --- a/tests/models/codegen/test_modeling_codegen.py +++ b/tests/models/codegen/test_modeling_codegen.py @@ -503,7 +503,7 @@ def test_codegen_sample(self): output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True) if torch_device == "cuda": - EXPECTED_OUTPUT_STR = 'def hello_world():\n print("Hello World")\n\n#' + EXPECTED_OUTPUT_STR = 'def hello_world():\n print("Hello World")\n return True\n\nresult =' else: EXPECTED_OUTPUT_STR = "def hello_world():\r\n print('Hello, World.')\r\n\r\n\r"
# What does this PR do? Update expected values in CodeGen test `test_codegen_sample`. The currently value works for other GPU, but for Nvidia T4, we need the values in this PR. Note that `do_sample` will call `self.sample` (in `generatioin_utils.py`) which uses `torch.multinomial`, which is not 100% reproducible across different accelerators.
https://api.github.com/repos/huggingface/transformers/pulls/17888
2022-06-27T06:17:39Z
2022-07-01T13:33:37Z
2022-07-01T13:33:37Z
2022-07-01T13:45:01Z
201
huggingface/transformers
12,132
Added a command for appending flows to server replay list
diff --git a/CHANGELOG.md b/CHANGELOG.md index 64292246ea..fc72f5d1ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ * ASGI/WSGI apps can now listen on all ports for a specific hostname. This makes it simpler to accept both HTTP and HTTPS. ([#5725](https://github.com/mitmproxy/mitmproxy/pull/5725), @mhils) +* Add `replay.server.add` command for adding flows to server replay buffer + ([#5851](https://github.com/mitmproxy/mitmproxy/pull/5851), @italankin) ### Breaking Changes diff --git a/mitmproxy/addons/serverplayback.py b/mitmproxy/addons/serverplayback.py index 7419f8ac5f..6602cb5f80 100644 --- a/mitmproxy/addons/serverplayback.py +++ b/mitmproxy/addons/serverplayback.py @@ -114,6 +114,13 @@ def load_flows(self, flows: Sequence[flow.Flow]) -> None: Replay server responses from flows. """ self.flowmap = {} + self.add_flows(flows) + + @command.command("replay.server.add") + def add_flows(self, flows: Sequence[flow.Flow]) -> None: + """ + Add responses from flows to server replay list. + """ for f in flows: if isinstance(f, http.HTTPFlow): lst = self.flowmap.setdefault(self._hash(f), []) diff --git a/test/mitmproxy/addons/test_serverplayback.py b/test/mitmproxy/addons/test_serverplayback.py index 77d4f28367..b538909fc5 100644 --- a/test/mitmproxy/addons/test_serverplayback.py +++ b/test/mitmproxy/addons/test_serverplayback.py @@ -58,6 +58,27 @@ def test_server_playback(): assert not sp.flowmap +def test_add_flows(): + sp = serverplayback.ServerPlayback() + with taddons.context(sp) as tctx: + tctx.configure(sp) + f1 = tflow.tflow(resp=True) + f2 = tflow.tflow(resp=True) + + sp.load_flows([f1]) + sp.add_flows([f2]) + + assert sp.next_flow(f1) + assert sp.flowmap + assert sp.next_flow(f2) + assert not sp.flowmap + + sp.add_flows([f1]) + assert sp.flowmap + assert sp.next_flow(f1) + assert not sp.flowmap + + def test_ignore_host(): sp = serverplayback.ServerPlayback() with taddons.context(sp) as tctx:
#### Description Added a command `replay.server.add` for appending flows to server replay responses. I find it easier to use when you need to add new flows for server playback instead of replacing them. #### Checklist - [x] I have updated tests where applicable. - [x] I have added an entry to the CHANGELOG.
https://api.github.com/repos/mitmproxy/mitmproxy/pulls/5851
2023-01-06T12:28:46Z
2023-01-06T15:49:55Z
2023-01-06T15:49:55Z
2023-01-06T15:49:55Z
633
mitmproxy/mitmproxy
28,310
Update export format docstrings
diff --git a/README.md b/README.md index fa0645d4fd2..59abd084572 100644 --- a/README.md +++ b/README.md @@ -62,15 +62,14 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr <details open> <summary>Install</summary> -[**Python>=3.6.0**](https://www.python.org/) is required with all -[requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including -[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): -<!-- $ sudo apt update && apt install -y libgl1-mesa-glx libsm6 libxext6 libxrender-dev --> +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a +[**Python>=3.6.0**](https://www.python.org/) environment, including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). ```bash -$ git clone https://github.com/ultralytics/yolov5 -$ cd yolov5 -$ pip install -r requirements.txt +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install ``` </details> @@ -78,8 +77,9 @@ $ pip install -r requirements.txt <details open> <summary>Inference</summary> -Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download -from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) +. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -104,17 +104,17 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc. <details> <summary>Inference with detect.py</summary> -`detect.py` runs inference on a variety of sources, downloading models automatically from -the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. 
+`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from +the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -$ python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - path/*.jpg # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +python detect.py --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` </details> @@ -122,16 +122,20 @@ $ python detect.py --source 0 # webcam <details> <summary>Training</summary> -Run commands below to reproduce results -on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on -first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the -largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) +results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) +and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +largest `--batch-size` possible, or pass `--batch-size -1` for +YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. 
```bash -$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` <img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png"> @@ -225,6 +229,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi ### Pretrained Checkpoints [assets]: https://github.com/ultralytics/yolov5/releases + [TTA]: https://github.com/ultralytics/yolov5/issues/303 |Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>CPU b1<br>(ms) |Speed<br><sup>V100 b1<br>(ms) |Speed<br><sup>V100 b32<br>(ms) |params<br><sup>(M) |FLOPs<br><sup>@640 (B) @@ -257,7 +262,6 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare <a href="https://github.com/ultralytics/yolov5/graphs/contributors"><img src="https://opencollective.com/ultralytics/contributors.svg?width=990" /></a> - ## <div align="center">Contact</div> For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or diff --git a/detect.py b/detect.py index e6e74ea7dfe..1393f79746f 100644 --- a/detect.py +++ b/detect.py @@ -2,14 +2,26 @@ """ Run inference on images, videos, directories, streams, etc. 
-Usage: - $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - path/*.jpg # glob +Usage - sources: + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s_openvino_model # OpenVINO (under development) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT """ import argparse diff --git a/export.py b/export.py index a0758010e81..67e32305ded 100644 --- a/export.py +++ b/export.py @@ -2,18 +2,19 @@ """ Export a YOLOv5 PyTorch model to other formats. 
TensorFlow exports authored by https://github.com/zldrobit -Format | Example | `--include ...` argument ---- | --- | --- -PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript | `torchscript` -ONNX | yolov5s.onnx | `onnx` -CoreML | yolov5s.mlmodel | `coreml` -OpenVINO | yolov5s_openvino_model/ | `openvino` -TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` -TensorFlow GraphDef | yolov5s.pb | `pb` -TensorFlow Lite | yolov5s.tflite | `tflite` -TensorFlow.js | yolov5s_web_model/ | `tfjs` -TensorRT | yolov5s.engine | `engine` +Format | Example | `--include ...` argument +--- | --- | --- +PyTorch | yolov5s.pt | - +TorchScript | yolov5s.torchscript | `torchscript` +ONNX | yolov5s.onnx | `onnx` +CoreML | yolov5s.mlmodel | `coreml` +OpenVINO | yolov5s_openvino_model/ | `openvino` +TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` +TensorFlow GraphDef | yolov5s.pb | `pb` +TensorFlow Lite | yolov5s.tflite | `tflite` +TensorFlow Edge TPU | yolov5s_edgetpu.tflite | `edgetpu` +TensorFlow.js | yolov5s_web_model/ | `tfjs` +TensorRT | yolov5s.engine | `engine` Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs @@ -27,6 +28,7 @@ yolov5s_saved_model yolov5s.pb yolov5s.tflite + yolov5s_edgetpu.tflite yolov5s.engine TensorFlow.js: diff --git a/train.py b/train.py index 304c001b654..bd2fb5898cb 100644 --- a/train.py +++ b/train.py @@ -1,10 +1,17 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Train a YOLOv5 model on a custom dataset +Train a YOLOv5 model on a custom dataset. + +Models and datasets download automatically from the latest YOLOv5 release. 
+Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data Usage: - $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 + $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED) + $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch """ + import argparse import math import os diff --git a/val.py b/val.py index c1fcf61b468..f7c9ef5e60d 100644 --- a/val.py +++ b/val.py @@ -3,7 +3,19 @@ Validate a trained YOLOv5 model accuracy on a custom dataset Usage: - $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 + $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640 + +Usage - formats: + $ python path/to/val.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s_openvino_model # OpenVINO (under development) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT """ import argparse
General docstring improvements. ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Simplification and expansion of installation, inference, and training instructions; add model format options for detect.py and val.py. ### 📊 Key Changes - Streamlined install instructions with a more concise format. - Inference instructions now clarify that models automatically download from the latest release. - detect.py supports a wider range of model formats, such as TorchScript, ONNX, TensorFlow SavedModel, etc. - Training instructions mention YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092) feature and provide an updated table for batch sizes on V100-16GB GPUs. - Added model format usage examples for val.py, mirroring the additions to detect.py. ### 🎯 Purpose & Impact - Simplified instructions aim to improve the user experience in setting up and using YOLOv5. - Expanded model format support in detect.py and val.py allows for more versatility and ease of deployment across different platforms and devices. - These changes make YOLOv5 more accessible to a broader audience, facilitating the adoption and integration of the model into various projects and applications.
https://api.github.com/repos/ultralytics/yolov5/pulls/6151
2022-01-02T00:04:32Z
2022-01-03T00:09:46Z
2022-01-03T00:09:46Z
2024-01-19T13:45:04Z
3,287
ultralytics/yolov5
25,166
🌐 Add German translation for `docs/de/docs/advanced/response-cookies.md`
diff --git a/docs/de/docs/advanced/response-cookies.md b/docs/de/docs/advanced/response-cookies.md new file mode 100644 index 0000000000000..0f09bd4441197 --- /dev/null +++ b/docs/de/docs/advanced/response-cookies.md @@ -0,0 +1,49 @@ +# Response-Cookies + +## Einen `Response`-Parameter verwenden + +Sie können einen Parameter vom Typ `Response` in Ihrer *Pfadoperation-Funktion* deklarieren. + +Und dann können Sie Cookies in diesem *vorübergehenden* Response-Objekt setzen. + +```Python hl_lines="1 8-9" +{!../../../docs_src/response_cookies/tutorial002.py!} +``` + +Anschließend können Sie wie gewohnt jedes gewünschte Objekt zurückgeben (ein `dict`, ein Datenbankmodell, usw.). + +Und wenn Sie ein `response_model` deklariert haben, wird es weiterhin zum Filtern und Konvertieren des von Ihnen zurückgegebenen Objekts verwendet. + +**FastAPI** verwendet diese *vorübergehende* Response, um die Cookies (auch Header und Statuscode) zu extrahieren und fügt diese in die endgültige Response ein, die den von Ihnen zurückgegebenen Wert enthält, gefiltert nach einem beliebigen `response_model`. + +Sie können den `Response`-Parameter auch in Abhängigkeiten deklarieren und darin Cookies (und Header) setzen. + +## Eine `Response` direkt zurückgeben + +Sie können Cookies auch erstellen, wenn Sie eine `Response` direkt in Ihrem Code zurückgeben. + +Dazu können Sie eine Response erstellen, wie unter [Eine Response direkt zurückgeben](response-directly.md){.internal-link target=_blank} beschrieben. + +Setzen Sie dann Cookies darin und geben Sie sie dann zurück: + +```Python hl_lines="10-12" +{!../../../docs_src/response_cookies/tutorial001.py!} +``` + +!!! tip "Tipp" + Beachten Sie, dass, wenn Sie eine Response direkt zurückgeben, anstatt den `Response`-Parameter zu verwenden, FastAPI diese direkt zurückgibt. + + Sie müssen also sicherstellen, dass Ihre Daten vom richtigen Typ sind. Z. B. sollten diese mit JSON kompatibel sein, wenn Sie eine `JSONResponse` zurückgeben. 
+ + Und auch, dass Sie keine Daten senden, die durch ein `response_model` hätten gefiltert werden sollen. + +### Mehr Informationen + +!!! note "Technische Details" + Sie können auch `from starlette.responses import Response` oder `from starlette.responses import JSONResponse` verwenden. + + **FastAPI** bietet dieselben `starlette.responses` auch via `fastapi.responses` an, als Annehmlichkeit für Sie, den Entwickler. Die meisten verfügbaren Responses kommen aber direkt von Starlette. + + Und da die `Response` häufig zum Setzen von Headern und Cookies verwendet wird, stellt **FastAPI** diese auch unter `fastapi.Response` bereit. + +Um alle verfügbaren Parameter und Optionen anzuzeigen, sehen Sie sich deren <a href="https://www.starlette.io/responses/#set-cookie" class="external-link" target="_blank">Dokumentation in Starlette</a> an.
← `advanced/additional-responses.md` (#10626) → `advanced/response-headers.md` (#10628) [German translation progress](https://github.com/tiangolo/fastapi/discussions/10582)
https://api.github.com/repos/tiangolo/fastapi/pulls/10627
2023-11-12T08:37:56Z
2024-03-30T20:19:18Z
2024-03-30T20:19:18Z
2024-04-01T01:22:00Z
753
tiangolo/fastapi
23,549
A few more README fixes
diff --git a/README.md b/README.md index 7535d6d5845..f44cc0fa89f 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,9 @@ To use keras-core, you should also install the backend of choice: `tensorflow`, Note that `tensorflow` is required for using certain Keras 3 features: certain preprocessing layers as well as `tf.data` pipelines. +**Note:** If you are using the `keras-core` package you also need to switch your Keras import. +Use `import keras_core as keras`. This is a temporary step until the release of Keras 3 on PyPI. + ### Local installation Keras 3 is compatible with Linux and MacOS systems. To install a local development version: @@ -61,7 +64,7 @@ In Colab, you can do: import os os.environ["KERAS_BACKEND"] = "jax" -import keras as keras +import keras ``` **Note:** The backend must be configured before importing `keras`, and the backend cannot be changed after @@ -70,8 +73,8 @@ the package has been imported. ## Backwards compatibility Keras 3 is intended to work as a drop-in replacement for `tf.keras` (when using the TensorFlow backend). Just take your -existing `tf.keras` code, change the `keras` imports to `keras`, make sure that your calls to `model.save()` are using -the up-to-date `.keras` format, and you're done. +existing `tf.keras` code, make sure that your calls to `model.save()` are using the up-to-date `.keras` format, and you're +done. If your `tf.keras` model does not include custom components, you can start running it on top of JAX or PyTorch immediately.
https://api.github.com/repos/keras-team/keras/pulls/18517
2023-09-27T21:22:46Z
2023-09-27T22:00:07Z
2023-09-27T22:00:07Z
2023-09-27T22:00:11Z
409
keras-team/keras
47,874
DOC: Follows ISO 639-1 code
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf new file mode 100644 index 0000000000000..daa65a944e68a Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx new file mode 100644 index 0000000000000..6270a71e20ee8 Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pdf deleted file mode 100644 index 746d1b6c980fe..0000000000000 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pdf and /dev/null differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pptx deleted file mode 100644 index f8b98a6f1f8e4..0000000000000 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet_JP.pptx and /dev/null differ
## Changes - change suffix `_JP` to `_JA` according to `ISO 639` - fixed typo in `Pandas_Cheat_Sheet_JA.pdf` - translated `Pandas_Cheat_Sheet_JA.pptx` in Japanese
https://api.github.com/repos/pandas-dev/pandas/pulls/22657
2018-09-10T08:51:47Z
2018-09-30T21:27:18Z
2018-09-30T21:27:18Z
2018-09-30T21:27:18Z
340
pandas-dev/pandas
45,719
minor changes to cartpole+acrobot, upgrade to v1
diff --git a/gym/envs/__init__.py b/gym/envs/__init__.py index a0edcec270e..31f8ff2554f 100644 --- a/gym/envs/__init__.py +++ b/gym/envs/__init__.py @@ -57,6 +57,13 @@ reward_threshold=195.0, ) +register( + id='CartPole-v1', + entry_point='gym.envs.classic_control:CartPoleEnv', + timestep_limit=500, + reward_threshold=475.0, +) + register( id='MountainCar-v0', entry_point='gym.envs.classic_control:MountainCarEnv', @@ -71,10 +78,9 @@ ) register( - id='Acrobot-v0', + id='Acrobot-v1', entry_point='gym.envs.classic_control:AcrobotEnv', - timestep_limit=200, - reward_threshold=-100 + timestep_limit=500, ) # Box2d diff --git a/gym/envs/classic_control/acrobot.py b/gym/envs/classic_control/acrobot.py index 55cae8cf343..ecc2ab8fe2b 100644 --- a/gym/envs/classic_control/acrobot.py +++ b/gym/envs/classic_control/acrobot.py @@ -2,6 +2,7 @@ from gym import core, spaces from gym.utils import seeding import numpy as np +from numpy import sin, cos, pi import time __copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy" @@ -80,7 +81,7 @@ class AcrobotEnv(core.Env): def __init__(self): self.viewer = None - high = np.array([np.pi, np.pi, self.MAX_VEL_1, self.MAX_VEL_2]) + high = np.array([1.0, 1.0, 1.0, 1.0, self.MAX_VEL_1, self.MAX_VEL_2]) low = -high self.observation_space = spaces.Box(low, high) self.action_space = spaces.Discrete(3) @@ -92,7 +93,7 @@ def _seed(self, seed=None): def _reset(self): self.state = self.np_random.uniform(low=-0.1, high=0.1, size=(4,)) - return self.state + return self._get_ob() def _step(self, a): s = self.state @@ -115,14 +116,18 @@ def _step(self, a): # self.s_continuous = ns_continuous[-1] # We only care about the state # at the ''final timestep'', self.dt - ns[0] = wrap(ns[0], -np.pi, np.pi) - ns[1] = wrap(ns[1], -np.pi, np.pi) + ns[0] = wrap(ns[0], -pi, pi) + ns[1] = wrap(ns[1], -pi, pi) ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1) ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2) - self.state = ns.copy() + self.state = ns 
terminal = self._terminal() reward = -1. if not terminal else 0. - return (np.array(self.state), reward, terminal, {}) + return (self._get_ob(), reward, terminal, {}) + + def _get_ob(self): + s = self.state + return np.array([cos(s[0]), np.sin(s[0]), cos(s[1]), sin(s[1]), s[2], s[3]]) def _terminal(self): s = self.state
cartpole: bump timestep limit to 500, so you need to have a decent balancing policy to solve it acrobot: change observation to use cos/sin of angles, and increase timestep limit so acrobot gets solved by random exploration
https://api.github.com/repos/openai/gym/pulls/260
2016-07-23T21:40:57Z
2016-08-06T07:15:50Z
2016-08-06T07:15:50Z
2017-01-04T17:57:43Z
874
openai/gym
5,823
Fix placeholder value in FastGen Blog
diff --git a/blogs/deepspeed-fastgen/2024-01-19/README.md b/blogs/deepspeed-fastgen/2024-01-19/README.md index b734b3f50dea..a98c9856164c 100644 --- a/blogs/deepspeed-fastgen/2024-01-19/README.md +++ b/blogs/deepspeed-fastgen/2024-01-19/README.md @@ -25,7 +25,7 @@ Today, we are happy to share that we are improving DeepSpeed-FastGen along three areas: i) three new model families, ii) performance optimizations, and iii) feature enhancements: - **New Model Families** - We introduce support for Mixtral (MoE), Falcon, and Phi-2 model families in DeepSpeed-FastGen. Our inference optimizations for these models provide up to a.bX improvement in latency and a.bX improvement in effective throughput over other state-of-the-art frameworks like vLLM. + We introduce support for Mixtral (MoE), Falcon, and Phi-2 model families in DeepSpeed-FastGen. Our inference optimizations for these models provide up to 2.5X improvement in effective throughput over other state-of-the-art frameworks like vLLM. - **Performance Optimizations**
https://api.github.com/repos/microsoft/DeepSpeed/pulls/5000
2024-01-23T17:55:36Z
2024-01-23T17:59:37Z
2024-01-23T17:59:37Z
2024-01-23T17:59:40Z
285
microsoft/DeepSpeed
10,411
Fix typo in flask_container_ci/README.md
diff --git a/exercises/flask_container_ci/README.md b/exercises/flask_container_ci/README.md index 35c85eea3..e4ac21db1 100644 --- a/exercises/flask_container_ci/README.md +++ b/exercises/flask_container_ci/README.md @@ -13,8 +13,8 @@ If any of the following steps is not working, it is expected from you to fix the 1. Move to `challenges/flask_container_ci` directory, if you are not already there 1. Run `export FLASK_APP=app/main.py` -1. To run the app execute `flask run`. If it doesn't works, fix it -3. Access `http://127.0.0.1:5000`. You should see the following +1. To run the app execute `flask run`. If it doesn't work, fix it +3. Access `http://127.0.0.1:5000`. You should see the following: ``` { @@ -31,7 +31,7 @@ If any of the following steps is not working, it is expected from you to fix the * /users - all users data * /users/<username> - data on the specific chosen user -5. When accessing /users, the data returned should not include the id of the user, only its name and description. Also, the data should be ordered by users names. +5. When accessing /users, the data returned should not include the id of the user, only its name and description. Also, the data should be ordered by usernames. ## Containers
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/123
2020-12-18T00:20:33Z
2020-12-18T11:29:47Z
2020-12-18T11:29:47Z
2020-12-18T11:29:47Z
355
bregman-arie/devops-exercises
17,668
handling transcribe exceptions.
diff --git a/whisper/transcribe.py b/whisper/transcribe.py index 509e322e..d5b3d433 100644 --- a/whisper/transcribe.py +++ b/whisper/transcribe.py @@ -1,5 +1,6 @@ import argparse import os +import traceback import warnings from typing import TYPE_CHECKING, Optional, Tuple, Union @@ -468,8 +469,12 @@ def valid_model_name(name): warnings.warn("--max_words_per_line has no effect with --max_line_width") writer_args = {arg: args.pop(arg) for arg in word_options} for audio_path in args.pop("audio"): - result = transcribe(model, audio_path, temperature=temperature, **args) - writer(result, audio_path, **writer_args) + try: + result = transcribe(model, audio_path, temperature=temperature, **args) + writer(result, audio_path, **writer_args) + except Exception as e: + traceback.print_exc() + print(f"Skipping {audio_path} due to {type(e).__name__}: {str(e)}") if __name__ == "__main__":
When processing multiple files through CLI, whisper dies on the first transcribe failure. This change allows the processing to keep going and displays which files failed processing.
https://api.github.com/repos/openai/whisper/pulls/1682
2023-09-28T17:01:45Z
2023-11-06T10:06:20Z
2023-11-06T10:06:20Z
2023-11-06T10:06:20Z
260
openai/whisper
45,788
fix(webhooks): Include event ID (autoincrementing) in request body
diff --git a/src/sentry/features/__init__.py b/src/sentry/features/__init__.py index 9166d44d5fe9a..989a9a75cf38b 100644 --- a/src/sentry/features/__init__.py +++ b/src/sentry/features/__init__.py @@ -76,6 +76,7 @@ default_manager.add('organizations:gitlab-integration', OrganizationFeature) # NOQA default_manager.add('organizations:jira-server-integration', OrganizationFeature) # NOQA default_manager.add('organizations:large-debug-files', OrganizationFeature) # NOQA +default_manager.add('organizations:legacy-event-id', OrganizationFeature) # NOQA # Project scoped features default_manager.add('projects:similarity-view', ProjectFeature) # NOQA diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py index 4a12a90fd6a3e..2be1793bfd467 100644 --- a/src/sentry/plugins/sentry_webhooks/plugin.py +++ b/src/sentry/plugins/sentry_webhooks/plugin.py @@ -8,7 +8,9 @@ from django.conf import settings from django.utils.translation import ugettext_lazy as _ +from sentry import features from sentry.exceptions import PluginError +from sentry.models import Event from sentry.plugins.bases import notify from sentry.http import is_valid_url, safe_urlopen from sentry.utils.safe import safe_execute @@ -90,7 +92,11 @@ def get_group_data(self, group, event, triggering_rules): data['event'] = dict(event.data or {}) data['event']['tags'] = event.get_tags() data['event']['event_id'] = event.event_id - data['event']['id'] = event.id + if features.has('organizations:legacy-event-id', group.project.organization): + data['event']['id'] = Event.objects.filter( + project_id=event.project_id, + event_id=event.event_id, + ).values_list('id', flat=True).get() return data def get_webhook_urls(self, project): diff --git a/tests/sentry/plugins/sentry_webhooks/test_plugin.py b/tests/sentry/plugins/sentry_webhooks/test_plugin.py index ce14ac06a68c0..3705d36bd8199 100644 --- a/tests/sentry/plugins/sentry_webhooks/test_plugin.py +++ 
b/tests/sentry/plugins/sentry_webhooks/test_plugin.py @@ -31,7 +31,8 @@ def test_simple_notification(self): notification = Notification(event=event, rule=rule) self.project.update_option('webhooks:urls', 'http://example.com') - self.plugin.notify(notification) + with self.feature('organizations:legacy-event-id'): + self.plugin.notify(notification) assert len(responses.calls) == 1
See ISSUE-237 for a lot more context. From that ticket: > This was "broken" as a result of post-processing changes that switched to using a Kafka consumer of the event stream as the mechanism for events being dispatched.
https://api.github.com/repos/getsentry/sentry/pulls/11086
2018-12-18T22:00:33Z
2018-12-18T23:35:18Z
2018-12-18T23:35:18Z
2020-12-21T00:02:32Z
636
getsentry/sentry
43,919
Missing Support for TLSv1.3 - Disabled hostname and certificate validation
diff --git a/lib/request/httpshandler.py b/lib/request/httpshandler.py index 05e1ccf8be8..03c4079dc48 100644 --- a/lib/request/httpshandler.py +++ b/lib/request/httpshandler.py @@ -69,6 +69,11 @@ def create_sock(): sock = create_sock() if protocol not in _contexts: _contexts[protocol] = ssl.SSLContext(protocol) + + # Disable certificate and hostname validation enabled by default with PROTOCOL_TLS_CLIENT + _contexts[protocol].check_hostname = False + _contexts[protocol].verify_mode = ssl.CERT_NONE + if getattr(self, "cert_file", None) and getattr(self, "key_file", None): _contexts[protocol].load_cert_chain(certfile=self.cert_file, keyfile=self.key_file) try:
Introduction of the `PROTOCOL_TLS_CLIENT` constant in Issue #5392 enables `ssl.CERT_REQUIRED` and `SSLContext.check_hostname`, which will restrict sqlmap from scanning endpoints that present unknown, self-signed, expired, or otherwise invalid certificates - including invalid Subject Names and Subject Alternative Names. To address this issue, I created a patch that will allow sqlmap to accept these certificates.
https://api.github.com/repos/sqlmapproject/sqlmap/pulls/5395
2023-04-17T22:19:38Z
2023-04-24T13:06:57Z
2023-04-24T13:06:57Z
2023-04-24T13:06:58Z
190
sqlmapproject/sqlmap
14,998
Add training scripts for Baichuan & Clean BOS/EOS tokens during data cleaning
diff --git a/fastchat/data/optional_replace.py b/fastchat/data/optional_replace.py new file mode 100644 index 0000000000..1114151a9b --- /dev/null +++ b/fastchat/data/optional_replace.py @@ -0,0 +1,82 @@ +""" +Do optional replace of bos/eos/pad/unk. + +Usage: +python3 -m fastchat.data.optional_replace --in input.json --out output.json --model-name-or-path <your_token_path> + +Requirement: +pip3 install transformers tqdm +""" +import argparse +import json +import traceback + +import transformers +from tqdm import tqdm + + +def replace_special_tokens( + tokenizer: transformers.PreTrainedTokenizer, text: str +) -> str: + if not text: + return text + + def _insert_vline(token: str) -> str: + if len(token) < 2: + return " " + elif len(token) == 2: + return f"{token[0]}|{token[1]}" + else: + return f"{token[:1]}|{token[1:-1]}|{token[-1:]}" + + if tokenizer.bos_token: + text = text.replace(tokenizer.bos_token, _insert_vline(tokenizer.bos_token)) + if tokenizer.eos_token: + text = text.replace(tokenizer.eos_token, _insert_vline(tokenizer.eos_token)) + if tokenizer.pad_token: + text = text.replace(tokenizer.pad_token, _insert_vline(tokenizer.pad_token)) + if tokenizer.unk_token: + text = text.replace(tokenizer.unk_token, _insert_vline(tokenizer.unk_token)) + return text + + +def replace(conv, tokenizer): + # Replace bos/eos/pad/unk tokens + if tokenizer: + try: + for sentence in conv["conversations"]: + sentence["value"] = replace_special_tokens(tokenizer, sentence["value"]) + except Exception as e: + traceback.print_exc() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--in-file", type=str, required=True) + parser.add_argument("--out-file", type=str) + parser.add_argument( + "--model-name-or-path", + type=str, + help="The directory or address where the model token is stored.", + ) + args = parser.parse_args() + + in_file = args.in_file + out_file = args.out_file + tokenizer = None + if args.model_name_or_path: + tokenizer = 
transformers.AutoTokenizer.from_pretrained( + args.model_name_or_path, + trust_remote_code=True, + use_fast=False, + ) + + if out_file is None: + out_file = f"{in_file}_replace.json" + + content = json.load(open(in_file, "r")) + + for conv in tqdm(content): + replace(conv, tokenizer) + + json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False) diff --git a/fastchat/data/prepare_all.py b/fastchat/data/prepare_all.py index 071e4e20a7..ed013bcac0 100644 --- a/fastchat/data/prepare_all.py +++ b/fastchat/data/prepare_all.py @@ -14,7 +14,8 @@ def run_cmd(cmd): cmd_list = [ f"python3 -m fastchat.data.clean_sharegpt --in {prefix}_html.json --out {prefix}_clean.json", f"python3 -m fastchat.data.optional_clean --in {prefix}_clean.json --out {prefix}_clean_lang.json --skip-lang ko", - f"python3 -m fastchat.data.split_long_conversation --in {prefix}_clean_lang.json --out {prefix}_clean_lang_split.json --model-name {llama_weights}", + f"python3 -m fastchat.data.optional_replace --in {prefix}_clean_lang.json --out {prefix}_clean_lang_replace.json --model-name-or-path {llama_weights}", + f"python3 -m fastchat.data.split_long_conversation --in {prefix}_clean_lang_replace.json --out {prefix}_clean_lang_split.json --model-name {llama_weights}", f"python3 -m fastchat.data.filter_wrong_format --in {prefix}_clean_lang_split.json --out {prefix}_clean_lang_split.json", f"python3 -m fastchat.data.split_train_test --in {prefix}_clean_lang_split.json --ratio 0.99", f"python3 -m fastchat.data.hardcoded_questions", diff --git a/fastchat/train/train_baichuan.py b/fastchat/train/train_baichuan.py new file mode 100644 index 0000000000..0eddd9512b --- /dev/null +++ b/fastchat/train/train_baichuan.py @@ -0,0 +1,327 @@ +# This code is based on tatsu-lab/stanford_alpaca. 
Below is the original copyright: +# +# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +import json +import jsonlines +import pathlib +from multiprocessing import Pool +from typing import Dict, Optional, Sequence + +import numpy as np +import torch +from torch.utils.data import Dataset +import transformers +from transformers import Trainer +from transformers.trainer_pt_utils import LabelSmoother + +from fastchat.conversation import SeparatorStyle +from fastchat.model.model_adapter import get_conversation_template + +IGNORE_TOKEN_ID = LabelSmoother.ignore_index + + +@dataclass +class ModelArguments: + model_name_or_path: Optional[str] = field(default="facebook/opt-125m") + + +@dataclass +class DataArguments: + data_path: str = field( + default=None, metadata={"help": "Path to the training data."} + ) + lazy_preprocess: bool = False + + +@dataclass +class TrainingArguments(transformers.TrainingArguments): + cache_dir: Optional[str] = field(default=None) + optim: str = field(default="adamw_torch") + model_max_length: int = field( + default=512, + metadata={ + "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)." 
+ }, + ) + + +local_rank = None + + +def rank0_print(*args): + if local_rank == 0: + print(*args) + + +def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str): + """Collects the state dict and dump to disk.""" + state_dict = trainer.model.state_dict() + if trainer.args.should_save: + cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()} + del state_dict + trainer._save(output_dir, state_dict=cpu_state_dict) # noqa + + +def apply_prompt_template(sources, systems=None): + conv = get_conversation_template("vicuna") + roles = {"human": conv.roles[0], "gpt": conv.roles[1]} + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]["from"]] != conv.roles[0]: + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence["from"]] + assert role == conv.roles[j % 2], f"{i}" + conv.append_message(role, sentence["value"]) + if systems and systems[i]: + conv.system = systems[i] + prompt = conv.get_prompt() + conversations.append(prompt) + return conversations, conv + + +def tokenize_conversations(conversations, tokenizer): + input_ids = tokenizer( + conversations, + return_tensors="pt", + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + targets = input_ids.clone() + return input_ids, targets + + +def mask_targets(conversations, targets, tokenizer, conv): + sep = conv.sep + conv.roles[1] + ": " + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + turns = conversation.split(conv.sep2) + cur_len = 0 + target[:cur_len] = IGNORE_TOKEN_ID + for i, turn in enumerate(turns): + if turn == "": + break + turn_len = len(tokenizer(turn + conv.sep2).input_ids) + + parts = turn.split(sep) + if len(parts) != 2: + break + parts[0] += sep + instruction_len = len(tokenizer(parts[0]).input_ids) - 1 + + target[cur_len : cur_len + instruction_len] = IGNORE_TOKEN_ID + 
cur_len += turn_len + + target[cur_len:] = IGNORE_TOKEN_ID + + if False: # Inspect and check the correctness of masking + z = target.clone() + z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z) + rank0_print(tokenizer.decode(z)) + + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_TOKEN_ID + rank0_print( + f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." + f" (ignored)" + ) + return targets + + +def preprocess(sources, tokenizer: transformers.PreTrainedTokenizer, **kwargs) -> Dict: + systems = None if not kwargs else kwargs.get("systems", None) + + # If the data volume is small, process it directly in the main thread + if len(sources) <= 1000: + conversations, conv = apply_prompt_template(sources, systems) + input_ids, targets = tokenize_conversations(conversations, tokenizer) + targets = mask_targets(conversations, targets, tokenizer, conv) + else: # If the data volume is large, use multithreading for processing + with Pool() as p: + conversations, conv = p.apply_async( + apply_prompt_template, (sources, tokenizer, systems) + ).get() + input_ids, targets = p.apply_async( + tokenize_conversations, (conversations, tokenizer) + ).get() + targets = p.apply_async( + mask_targets, (conversations, targets, tokenizer, conv) + ).get() + p.close() + p.join() + + return dict( + input_ids=input_ids, + labels=targets, + attention_mask=input_ids.ne(tokenizer.pad_token_id), + ) + + +class SupervisedDataset(Dataset): + """Dataset for supervised fine-tuning.""" + + def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer): + super(SupervisedDataset, self).__init__() + + rank0_print("Formatting inputs...") + systems = [example.get("system", "") for example in raw_data] + sources = [example["conversations"] for example in raw_data] + + data_dict = preprocess(sources, tokenizer, systems=systems) + + self.input_ids = data_dict["input_ids"] + self.labels = data_dict["labels"] + self.attention_mask = 
data_dict["attention_mask"] + + def __len__(self): + return len(self.input_ids) + + def __getitem__(self, i) -> Dict[str, torch.Tensor]: + return dict( + input_ids=self.input_ids[i], + labels=self.labels[i], + attention_mask=self.attention_mask[i], + ) + + +class LazySupervisedDataset(Dataset): + """Dataset for supervised fine-tuning.""" + + def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer): + super(LazySupervisedDataset, self).__init__() + self.tokenizer = tokenizer + + rank0_print("Formatting inputs...Skip in lazy mode") + self.raw_data = raw_data + self.cached_data_dict = {} + + def __len__(self): + return len(self.raw_data) + + def __getitem__(self, i) -> Dict[str, torch.Tensor]: + if i in self.cached_data_dict: + return self.cached_data_dict[i] + + ret = preprocess( + [self.raw_data[i]["conversations"]], + self.tokenizer, + systems=[self.raw_data[i].get("system", "")], + ) + ret = dict( + input_ids=ret["input_ids"][0], + labels=ret["labels"][0], + attention_mask=ret["attention_mask"][0], + ) + self.cached_data_dict[i] = ret + + return ret + + +def make_supervised_data_module( + tokenizer: transformers.PreTrainedTokenizer, data_args, train_ratio=0.98 +) -> Dict: + """Make dataset and collator for supervised fine-tuning.""" + train_ratio = min(train_ratio, 1.0) + dataset_cls = ( + LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset + ) + rank0_print("Loading data...") + data_path = data_args.data_path + if data_path.endswith(".json"): + raw_data = json.load(open(data_path, "r")) + elif data_path.endswith(".jsonl"): + with jsonlines.open(data_path, mode="r") as reader: + raw_data = [item for item in reader] + + # Split train/test + np.random.seed(0) + perm = np.random.permutation(len(raw_data)) + split = int(len(perm) * train_ratio) + train_indices = perm[:split] + if train_ratio < 1: + eval_indices = perm[split:] + else: + # if train_ratio==1, we use 5% of data as eval data, make sure trainer will not throw error 
when eval data is empty + eval_indices = perm[-int(len(perm) * 0.05) :] + train_raw_data = [raw_data[i] for i in train_indices] + eval_raw_data = [raw_data[i] for i in eval_indices] + rank0_print(f"#train {len(train_raw_data)}, #eval {len(eval_raw_data)}") + + train_dataset = dataset_cls(train_raw_data, tokenizer=tokenizer) + eval_dataset = dataset_cls(eval_raw_data, tokenizer=tokenizer) + return dict(train_dataset=train_dataset, eval_dataset=eval_dataset) + + +def train(): + global local_rank + + parser = transformers.HfArgumentParser( + (ModelArguments, DataArguments, TrainingArguments) + ) + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + local_rank = training_args.local_rank + config = transformers.AutoConfig.from_pretrained( + model_args.model_name_or_path, + trust_remote_code=True, + cache_dir=training_args.cache_dir, + ) + model = transformers.AutoModelForCausalLM.from_pretrained( + model_args.model_name_or_path, + config=config, + trust_remote_code=True, + cache_dir=training_args.cache_dir, + ) + model.config.use_cache = False + # Tie the weights + model.tie_weights() + + tokenizer = transformers.AutoTokenizer.from_pretrained( + model_args.model_name_or_path, + config=config, + trust_remote_code=True, + cache_dir=training_args.cache_dir, + model_max_length=training_args.model_max_length, + padding_side="right", + use_fast=False, + ) + # NOTE: if the token_id exceed the vocab_size will cause failing in training process! we need add special config and resize the embedding size! 
+ tokenizer.pad_token = tokenizer.unk_token + print(f"tokens len: {len(tokenizer)}") + model.resize_token_embeddings(len(tokenizer)) + + data_module = make_supervised_data_module( + tokenizer=tokenizer, train_ratio=0.98, data_args=data_args + ) + trainer = Trainer( + model=model, tokenizer=tokenizer, args=training_args, **data_module + ) + + if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")): + trainer.train(resume_from_checkpoint=True) + else: + trainer.train() + trainer.save_state() + safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir) + + +if __name__ == "__main__": + train()
<!-- Thank you for your contribution! --> <!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. --> ## Why are these changes needed? 1. Compared to llama, there are certain differences in the word segmentation methods of Baichuan, which makes the original logic less suitable. Therefore, separate a training task script; 2. Add the replacement logic of bos/eos/pad/unk token in the data sample preprocessing stage to avoid interference during intermediate processing <!-- Please give a short summary of the change and the problem this solves. --> ## Related issue number (if applicable) #1894 <!-- For example: "Closes #1234" --> ## Checks - [x] I've run `format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed. - [ ] I've made sure the relevant tests are passing (if applicable).
https://api.github.com/repos/lm-sys/FastChat/pulls/1940
2023-07-13T08:52:50Z
2023-07-16T21:02:17Z
2023-07-16T21:02:17Z
2023-07-16T21:09:18Z
3,907
lm-sys/FastChat
41,365
Revert using own cast app for media
diff --git a/homeassistant/components/cast/media_player.py b/homeassistant/components/cast/media_player.py index f62a73860f39dd..788da18e8bd059 100644 --- a/homeassistant/components/cast/media_player.py +++ b/homeassistant/components/cast/media_player.py @@ -39,7 +39,6 @@ SUPPORT_VOLUME_SET, ) from homeassistant.const import ( - CAST_APP_ID_HOMEASSISTANT, CONF_HOST, EVENT_HOMEASSISTANT_STOP, STATE_IDLE, @@ -292,7 +291,6 @@ async def async_set_cast_info(self, cast_info): ), ChromeCastZeroconf.get_zeroconf(), ) - chromecast.media_controller.app_id = CAST_APP_ID_HOMEASSISTANT self._chromecast = chromecast if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
## Proposed change <!-- Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue in the additional information section. --> We are reverting this for now so we can split the Lovelace and media app. This will make it easier to make a good user experience. ## Type of change <!-- What type of change does your PR introduce to Home Assistant? NOTE: Please, check only 1! box! If your PR requires multiple boxes to be checked, you'll most likely need to split it into multiple PRs. This makes things easier and faster to code review. --> - [ ] Dependency upgrade - [X] Bugfix (non-breaking change which fixes an issue) - [ ] New integration (thank you!) - [ ] New feature (which adds functionality to an existing integration) - [ ] Breaking change (fix/feature causing existing functionality to break) - [ ] Code quality improvements to existing code or addition of tests ## Example entry for `configuration.yaml`: <!-- Supplying a configuration snippet, makes it easier for a maintainer to test your PR. Furthermore, for new integrations, it gives an impression of how the configuration would look like. Note: Remove this section if this PR does not have an example entry. --> ```yaml # Example configuration.yaml ``` ## Additional information <!-- Details are important, and help maintainers processing your PR. Please be sure to fill out additional details, if applicable. --> - This PR fixes or closes issue: fixes # - This PR is related to issue: - Link to documentation pull request: ## Checklist <!-- Put an `x` in the boxes that apply. You can also fill these out after creating the PR. If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code. --> - [ ] The code change is tested and works locally. - [ ] Local tests pass. 
**Your PR cannot be merged unless tests pass** - [ ] There is no commented out code in this PR. - [ ] I have followed the [development checklist][dev-checklist] - [ ] The code has been formatted using Black (`black --fast homeassistant tests`) - [ ] Tests have been added to verify that the new code works. If user exposed functionality or configuration variables are added/changed: - [ ] Documentation added/updated for [www.home-assistant.io][docs-repository] If the code communicates with devices, web services, or third-party tools: - [ ] The [manifest file][manifest-docs] has all fields filled out correctly. Updated and included derived files by running: `python3 -m script.hassfest`. - [ ] New or updated dependencies have been added to `requirements_all.txt`. Updated by running `python3 -m script.gen_requirements_all`. - [ ] Untested files have been added to `.coveragerc`. The integration reached or maintains the following [Integration Quality Scale][quality-scale]: <!-- The Integration Quality Scale scores an integration on the code quality and user experience. Each level of the quality scale consists of a list of requirements. We highly recommend getting your integration scored! --> - [ ] No score or internal - [ ] 🥈 Silver - [ ] 🥇 Gold - [ ] 🏆 Platinum <!-- This project is very active and we have a high turnover of pull requests. Unfortunately, the number of incoming pull requests is higher than what our reviewers can review and merge so there is a long backlog of pull requests waiting for review. You can help here! By reviewing another pull request, you will help raise the code quality of that pull request and the final review will be faster. This way the general pace of pull request reviews will go up and your wait time will go down. When picking a pull request to review, try to choose one that hasn't yet been reviewed. Thanks for helping out! 
--> To help with the load of incoming pull requests: - [ ] I have reviewed two other [open pull requests][prs] in this repository. [prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-asc+-review%3Aapproved <!-- Thank you for contributing <3 Below, some useful links you could explore: --> [dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html [manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html [quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html [docs-repository]: https://github.com/home-assistant/home-assistant.io
https://api.github.com/repos/home-assistant/core/pulls/40937
2020-10-01T09:04:24Z
2020-10-01T09:05:01Z
2020-10-01T09:05:01Z
2020-10-01T19:58:20Z
189
home-assistant/core
39,446
Fix UPDATING.md
diff --git a/UPDATING.md b/UPDATING.md index e47917e002bb2..2ffc8348bfa9c 100644 --- a/UPDATING.md +++ b/UPDATING.md @@ -1710,9 +1710,9 @@ https://cloud.google.com/compute/docs/disks/performance Hence, the default value for `master_disk_size` in `DataprocCreateClusterOperator` has been changed from 500GB to 1TB. -#### `<airflow class="providers google c"></airflow>loud.operators.bigquery.BigQueryGetDatasetTablesOperator` +#### `airflow.providers.google.cloud.operators.bigquery.BigQueryGetDatasetTablesOperator` -We changed signature of BigQueryGetDatasetTablesOperator. +We changed signature of `BigQueryGetDatasetTablesOperator`. Before:
**Before**: ![image](https://user-images.githubusercontent.com/8811558/125213840-eee1ea00-e2ab-11eb-9fb8-86ecbea6de42.png) **After**: ![image](https://user-images.githubusercontent.com/8811558/125213858-0620d780-e2ac-11eb-8443-b48f88e69ea0.png) <!-- Thank you for contributing! Please make sure that your code changes are covered with tests. And in case of new features or big changes remember to adjust the documentation. Feel free to ping committers for the review! In case of existing issue, reference it using one of the following: closes: #ISSUE related: #ISSUE How to write a good git commit message: http://chris.beams.io/posts/git-commit/ --> --- **^ Add meaningful description above** Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information. In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed. In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x). In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
https://api.github.com/repos/apache/airflow/pulls/16933
2021-07-11T23:56:49Z
2021-07-12T00:21:01Z
2021-07-12T00:21:01Z
2021-07-12T21:22:57Z
187
apache/airflow
14,633
Expose font_aspect_ratio as parameter on SVG export
diff --git a/CHANGELOG.md b/CHANGELOG.md index d6eebd4fd..9a0d43253 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add support for `FORCE_COLOR` env var https://github.com/Textualize/rich/pull/2449 - Allow a `max_depth` argument to be passed to the `install()` hook https://github.com/Textualize/rich/issues/2486 - Document using `None` as name in `__rich_repr__` for tuple positional args https://github.com/Textualize/rich/pull/2379 +- Add `font_aspect_ratio` parameter in SVG export https://github.com/Textualize/rich/pull/2539/files ### Fixed diff --git a/rich/console.py b/rich/console.py index 8676e40b2..974a4e310 100644 --- a/rich/console.py +++ b/rich/console.py @@ -2261,18 +2261,22 @@ def export_svg( theme: Optional[TerminalTheme] = None, clear: bool = True, code_format: str = CONSOLE_SVG_FORMAT, + font_aspect_ratio: float = 0.61, ) -> str: """ Generate an SVG from the console contents (requires record=True in Console constructor). Args: path (str): The path to write the SVG to. - title (str): The title of the tab in the output image + title (str, optional): The title of the tab in the output image theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` - code_format (str): Format string used to generate the SVG. Rich will inject a number of variables + code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables into the string in order to form the final SVG output. The default template used and the variables injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. + font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format`` + string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font). 
+ If you aren't specifying a different font inside ``code_format``, you probably don't need this. """ from rich.cells import cell_len @@ -2316,7 +2320,7 @@ def get_svg_style(style: Style) -> str: width = self.width char_height = 20 - char_width = char_height * 0.61 + char_width = char_height * font_aspect_ratio line_height = char_height * 1.22 margin_top = 1 @@ -2505,23 +2509,28 @@ def save_svg( theme: Optional[TerminalTheme] = None, clear: bool = True, code_format: str = CONSOLE_SVG_FORMAT, + font_aspect_ratio: float = 0.61, ) -> None: """Generate an SVG file from the console contents (requires record=True in Console constructor). Args: path (str): The path to write the SVG to. - title (str): The title of the tab in the output image + title (str, optional): The title of the tab in the output image theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` - code_format (str): Format string used to generate the SVG. Rich will inject a number of variables + code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables into the string in order to form the final SVG output. The default template used and the variables injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. + font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format`` + string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font). + If you aren't specifying a different font inside ``code_format``, you probably don't need this. """ svg = self.export_svg( title=title, theme=theme, clear=clear, code_format=code_format, + font_aspect_ratio=font_aspect_ratio, ) with open(path, "wt", encoding="utf-8") as write_file: write_file.write(svg)
## Type of changes - [ ] Bug fix - [ ] New feature - [ ] Documentation / docstrings - [ ] Tests - [ ] Other ## Checklist - [x] I've run the latest [black](https://github.com/psf/black) with default args on new code. - [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate. - [ ] I've added tests for new code. - [x] I accept that @willmcgugan may be pedantic in the code review. ## Description Exposes font_aspect_ratio as a parameter on export_svg and save_svg rather than hardcoding 0.61. This is for users who would like more control over the rendering to accomodate different fonts used in their format string.
https://api.github.com/repos/Textualize/rich/pulls/2539
2022-09-23T10:55:03Z
2022-09-23T11:13:28Z
2022-09-23T11:13:28Z
2022-09-23T11:13:29Z
1,081
Textualize/rich
48,275
added --pdb command option to enable pdb debugger on failure.
diff --git a/scrapy/command.py b/scrapy/command.py index 872e59a5aff..9a80dae9f96 100644 --- a/scrapy/command.py +++ b/scrapy/command.py @@ -4,6 +4,7 @@ import os from optparse import OptionGroup +from twisted.python import failure from scrapy import log from scrapy.utils.conf import arglist_to_dict @@ -80,6 +81,7 @@ def add_options(self, parser): help="write process ID to FILE") group.add_option("-s", "--set", action="append", default=[], metavar="NAME=VALUE", \ help="set/override setting (may be repeated)") + group.add_option("--pdb", action="store_true", help="enable pdb on failure") parser.add_option_group(group) def process_options(self, args, opts): @@ -103,6 +105,9 @@ def process_options(self, args, opts): with open(opts.pidfile, "w") as f: f.write(str(os.getpid()) + os.linesep) + if opts.pdb: + failure.startDebugMode() + def run(self, args, opts): """ Entry point for running commands
This just enable twisted's debug feature on failure objects. A test for this feature it would go like twisted's tests does: https://github.com/twisted/twisted/blob/master/twisted/test/test_failure.py#L543 As I haven't seen a similar test in scrapy for options-only I didn't knew if it was worth to add a test.
https://api.github.com/repos/scrapy/scrapy/pulls/242
2013-02-11T16:50:49Z
2013-02-12T02:56:39Z
2013-02-12T02:56:39Z
2013-02-12T02:57:40Z
270
scrapy/scrapy
35,029
[extractor/TwitCasting] expand extractor regex
diff --git a/yt_dlp/extractor/twitcasting.py b/yt_dlp/extractor/twitcasting.py index dff353a4f9e..3890d5d8fb0 100644 --- a/yt_dlp/extractor/twitcasting.py +++ b/yt_dlp/extractor/twitcasting.py @@ -22,7 +22,7 @@ class TwitCastingIE(InfoExtractor): - _VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<uploader_id>[^/]+)/(?:movie|twplayer)/(?P<id>\d+)' + _VALID_URL = r'https?://(?:[^/?#]+\.)?twitcasting\.tv/(?P<uploader_id>[^/?#]+)/(?:movie|twplayer)/(?P<id>\d+)' _M3U8_HEADERS = { 'Origin': 'https://twitcasting.tv', 'Referer': 'https://twitcasting.tv/', @@ -231,7 +231,7 @@ def find_dmu(x): class TwitCastingLiveIE(InfoExtractor): - _VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<id>[^/]+)/?(?:[#?]|$)' + _VALID_URL = r'https?://(?:[^/?#]+\.)?twitcasting\.tv/(?P<id>[^/?#]+)/?(?:[#?]|$)' _TESTS = [{ 'url': 'https://twitcasting.tv/ivetesangalo', 'only_matching': True, @@ -265,8 +265,15 @@ def _real_extract(self, url): class TwitCastingUserIE(InfoExtractor): - _VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<id>[^/]+)/show/?(?:[#?]|$)' + _VALID_URL = r'https?://(?:[^/?#]+\.)?twitcasting\.tv/(?P<id>[^/?#]+)/(:?show|archive)/?(?:[#?]|$)' _TESTS = [{ + 'url': 'https://twitcasting.tv/natsuiromatsuri/archive/', + 'info_dict': { + 'id': 'natsuiromatsuri', + 'title': 'natsuiromatsuri - Live History', + }, + 'playlist_mincount': 235, + }, { 'url': 'https://twitcasting.tv/noriyukicas/show', 'only_matching': True, }]
**IMPORTANT**: PRs without the template will be CLOSED ### Description of your *pull request* and other information <!-- Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible --> ADD DESCRIPTION HERE Fixes #7597 Expand regex to match an alternative format of uploader VOD link, as is discussed in related issue. <details open><summary>Template</summary> <!-- OPEN is intentional --> <!-- # PLEASE FOLLOW THE GUIDE BELOW - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x]) - Use *Preview* tab to see how your *pull request* will actually look like --> ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). 
Check all of the following options that apply: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Fix or improvement to an extractor (Make sure to add/update tests) - [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy)) - [ ] Core bug fix/improvement - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes)) <!-- Do NOT edit/remove anything below this! --> </details><details><summary>Copilot Summary</summary> <!-- copilot:all --> ### <samp>🤖 Generated by Copilot at f7d6b5b</samp> ### Summary 📺🗃️🛠️ <!-- 1. 📺 - This emoji represents live streaming and video content, which is what TwitCasting provides. 2. 🗃️ - This emoji represents archives and collections, which is what the `/archive` URL shows. 3. 🛠️ - This emoji represents tools and improvements, which is what the modified regex pattern and the new test case are. --> Modify `TwitCastingUserIE` to support user archive URLs and add a test case. This enables downloading all past live streams of a user from `yt_dlp/extractor/twitcasting.py`. > _`TwitCastingUserIE`_ > _Matches archive URLs now_ > _Autumn of live streams_ ### Walkthrough * Allow the extractor to handle user archive pages by modifying the URL pattern and adding a test case ([link](https://github.com/yt-dlp/yt-dlp/pull/8120/files?diff=unified&w=0#diff-da40a341858d2a623c9ce4832d7be0117985c613125ccc6184b3e078f8251dd0L268-R276), yt_dlp/extractor/twitcasting.py) </details>
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/8120
2023-09-16T02:45:54Z
2023-09-16T20:43:12Z
2023-09-16T20:43:12Z
2023-09-16T21:14:15Z
576
yt-dlp/yt-dlp
7,317
Fix "module not found" error in Docker
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index b3eda7f62b..66aad3be60 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -8,7 +8,7 @@ services: image: fastchat:latest ports: - "21001:21001" - entrypoint: ["python3", "-m", "fastchat.serve.controller", "--host", "0.0.0.0", "--port", "21001"] + entrypoint: ["python3.9", "-m", "fastchat.serve.controller", "--host", "0.0.0.0", "--port", "21001"] fastchat-model-worker: build: context: . @@ -25,7 +25,7 @@ services: - driver: nvidia count: 1 capabilities: [gpu] - entrypoint: ["python3", "-m", "fastchat.serve.model_worker", "--model-name", 'fastchat-t5-3b-v1.0', "--model-path", "lmsys/fastchat-t5-3b-v1.0", "--worker-address", "http://fastchat-model-worker:21002", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "21002"] + entrypoint: ["python3.9", "-m", "fastchat.serve.model_worker", "--model-name", 'fastchat-t5-3b-v1.0', "--model-path", "lmsys/fastchat-t5-3b-v1.0", "--worker-address", "http://fastchat-model-worker:21002", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "21002"] fastchat-api-server: build: context: . @@ -35,6 +35,6 @@ services: image: fastchat:latest ports: - "8000:8000" - entrypoint: ["python3", "-m", "fastchat.serve.openai_api_server", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "8000"] + entrypoint: ["python3.9", "-m", "fastchat.serve.openai_api_server", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "8000"] volumes: huggingface:
<!-- Thank you for your contribution! --> <!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. --> ## Why are these changes needed? When attempting to run via Docker Compose on a fresh Linux install with GPU enabled, I got the following error. ![image](https://github.com/lm-sys/FastChat/assets/3521582/85dc23d1-388f-4951-97c2-ccb5213d024d) This was because by default, the container tries to use Python 3.8, though we want 3.9 to properly run. This enforces 3.9 for the containers in compose. <!-- Please give a short summary of the change and the problem this solves. --> ## Related issue number (if applicable) N/A <!-- For example: "Closes #1234" --> ## Checks - [x] I've run `format.sh` to lint the changes in this PR. - [x] I've included any doc changes needed. - [x] I've made sure the relevant tests are passing (if applicable).
https://api.github.com/repos/lm-sys/FastChat/pulls/1681
2023-06-13T19:53:12Z
2023-06-14T07:29:15Z
2023-06-14T07:29:15Z
2023-06-14T19:34:49Z
577
lm-sys/FastChat
41,755
Add sorting transformer for snapshots
diff --git a/CODEOWNERS b/CODEOWNERS index 50fd48aaee6d3..ca5d0d9e5fde2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -40,6 +40,11 @@ # Analytics client /localstack/utils/analytics/ @thrau +# Snapshot testing +/localstack/testing/snapshots/ @dominikschubert @steffyP +/localstack/testing/pytest/ @dominikschubert +/localstack/testing/pytest/snapshot.py @dominikschubert @steffyP + ###################### ### SERVICE OWNERS ### ###################### diff --git a/localstack/testing/snapshots/transformer.py b/localstack/testing/snapshots/transformer.py index 5cd88f825785f..f8285178c5ea3 100644 --- a/localstack/testing/snapshots/transformer.py +++ b/localstack/testing/snapshots/transformer.py @@ -3,7 +3,7 @@ import os import re from re import Pattern -from typing import Callable, Optional, Protocol +from typing import Any, Callable, Optional, Protocol from jsonpath_ng.ext import parse @@ -203,8 +203,29 @@ def transform(self, input_data: dict, *, ctx: TransformContext) -> dict: class GenericTransformer: - def __init__(self, fn: Callable[[dict], dict]): + def __init__(self, fn: Callable[[dict, TransformContext], dict]): self.fn = fn - def transform(self, input_data: dict) -> dict: - return self.fn(input_data) + def transform(self, input_data: dict, *, ctx: TransformContext) -> dict: + return self.fn(input_data, ctx) + + +class SortingTransformer: + key: str + sorting_fn: Callable[[...], Any] + + # TODO: add support for jsonpath + def __init__(self, key: str, sorting_fn: Callable[[...], Any]): + """Sorts a list at `key` with the given `sorting_fn` (argument for `sorted(list, key=sorting_fn)`)""" + self.key = key + self.sorting_fn = sorting_fn + + def transform(self, input_data: dict, *, ctx: TransformContext = None) -> dict: + for k, v in input_data.items(): + if k == self.key: + if not isinstance(v, list): + raise ValueError("SortingTransformer should only be applied to lists.") + input_data[k] = sorted(v, key=self.sorting_fn) + elif isinstance(v, dict): + input_data[k] = 
self.transform(v, ctx=ctx) + return input_data diff --git a/tests/unit/utils/testing/test_snapshots.py b/tests/unit/utils/testing/test_snapshots.py index 7695a7c5a7acd..8bfdbfb1f0fda 100644 --- a/tests/unit/utils/testing/test_snapshots.py +++ b/tests/unit/utils/testing/test_snapshots.py @@ -1,7 +1,7 @@ import pytest from localstack.testing.snapshots import SnapshotSession -from localstack.testing.snapshots.transformer import KeyValueBasedTransformer +from localstack.testing.snapshots.transformer import KeyValueBasedTransformer, SortingTransformer from localstack.testing.snapshots.transformer_utility import _resource_name_transformer @@ -119,3 +119,34 @@ def test_dot_in_skip_verification_path(self): skip_path_escaped = ["$..aab", "$..b.'a.aa'"] sm._assert_all(skip_verification_paths=skip_path_escaped) + + +def test_sorting_transformer(): + original_dict = { + "a": { + "b": [ + {"name": "c-123"}, + {"name": "a-123"}, + {"name": "b-123"}, + ] + }, + "a2": { + "b": [ + {"name": "b-123"}, + {"name": "a-123"}, + {"name": "c-123"}, + ] + }, + } + + sorted_items = [ + {"name": "a-123"}, + {"name": "b-123"}, + {"name": "c-123"}, + ] + + transformer = SortingTransformer("b", lambda x: x["name"]) + transformed_dict = transformer.transform(original_dict) + + assert transformed_dict["a"]["b"] == sorted_items + assert transformed_dict["a2"]["b"] == sorted_items
Makes it easier to reliably compare lists via snapshots by sorting them via a transformer instead of having to manually sorting the object before calling `match`. Usage: ```python snapshot.add_transformer(SortingTransformer("Aliases", lambda x: x["Name"])) ```
https://api.github.com/repos/localstack/localstack/pulls/6822
2022-09-05T17:49:00Z
2022-09-06T13:47:18Z
2022-09-06T13:47:18Z
2022-09-06T13:47:25Z
1,026
localstack/localstack
28,833
Correct an English grammatic error in README.md
diff --git a/README.md b/README.md index 934d16e6a7..4392886867 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -XX-Net - A Reborn of Goagent +XX-Net - A Reborn Goagent ======== 翻墙工具套件 A firewall circumvention toolkit * GAE proxy, 稳定、易用、快速
A small improvement: "A Reborn of Goagent" should be "A Reborn Goagent".
https://api.github.com/repos/XX-net/XX-Net/pulls/1856
2016-01-07T15:03:48Z
2016-01-08T00:59:04Z
2016-01-08T00:59:04Z
2016-01-08T00:59:04Z
105
XX-net/XX-Net
17,293
[ci] remove old prepare_docker
diff --git a/.buildkite/core.rayci.yml b/.buildkite/core.rayci.yml index e2bee4c6d3983..0cc3c37327d41 100644 --- a/.buildkite/core.rayci.yml +++ b/.buildkite/core.rayci.yml @@ -270,7 +270,6 @@ steps: instance_type: medium commands: - bazel run //ci/ray_ci:build_in_docker -- docker --python-version 3.8 --platform cpu --canonical-tag ha_integration - - docker tag rayproject/ray:ha_integration ray_ci:v1 - bazel run //ci/ray_ci:test_in_docker -- //python/ray/tests/... core --only-tags ha_integration depends_on: - manylinux diff --git a/.buildkite/serve.rayci.yml b/.buildkite/serve.rayci.yml index e0cc9d3f2d541..555151b7eebae 100644 --- a/.buildkite/serve.rayci.yml +++ b/.buildkite/serve.rayci.yml @@ -154,7 +154,6 @@ steps: instance_type: medium commands: - bazel run //ci/ray_ci:build_in_docker -- docker --python-version 3.8 --platform cpu --canonical-tag ha_integration - - docker tag rayproject/ray:ha_integration ray_ci:v1 - bazel run //ci/ray_ci:test_in_docker -- //python/ray/serve/tests/... serve --only-tags ha_integration depends_on: diff --git a/ci/ci.sh b/ci/ci.sh index 6aedb17507d55..8cfe4df863567 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -177,35 +177,6 @@ test_core() { bazel test --config=ci --build_tests_only $(./ci/run/bazel_export_options) -- "${args[@]}" } -prepare_docker() { - rm "${WORKSPACE_DIR}"/python/dist/* ||: - pushd "${WORKSPACE_DIR}/python" - pip install -e . --verbose - python setup.py bdist_wheel - tmp_dir="/tmp/prepare_docker_$RANDOM" - mkdir -p $tmp_dir - cp "${WORKSPACE_DIR}"/python/dist/*.whl $tmp_dir - wheel=$(ls "${WORKSPACE_DIR}"/python/dist/) - base_image=$(python -c "import sys; print(f'rayproject/ray-deps:nightly-py{sys.version_info[0]}{sys.version_info[1]}-cpu')") - echo " - FROM $base_image - - ENV LC_ALL=C.UTF-8 - ENV LANG=C.UTF-8 - COPY ./*.whl / - EXPOSE 8000 - EXPOSE 10001 - RUN pip install /${wheel}[serve] - RUN (sudo apt update || true) && sudo apt install curl -y - " > $tmp_dir/Dockerfile - - pushd $tmp_dir - docker build . 
-t ray_ci:v1 - popd - - popd -} - # For running Serve tests on Windows. test_serve() { if [ "${OSTYPE}" = msys ]; then diff --git a/python/ray/tests/conftest_docker.py b/python/ray/tests/conftest_docker.py index aae45ec68d18d..914f19732ff79 100644 --- a/python/ray/tests/conftest_docker.py +++ b/python/ray/tests/conftest_docker.py @@ -80,7 +80,7 @@ def print_logs(self): head_node_container_name = "gcs" + str(int(time.time())) head_node = container( - image="ray_ci:v1", + image="rayproject/ray:ha_integration", name=head_node_container_name, network="{gcs_network.name}", command=[ @@ -111,7 +111,7 @@ def print_logs(self): ) worker_node = container( - image="ray_ci:v1", + image="rayproject/ray:ha_integration", network="{gcs_network.name}", command=[ "ray",
and directly tag images as `ha_integration`
https://api.github.com/repos/ray-project/ray/pulls/41719
2023-12-08T03:50:38Z
2023-12-08T06:20:07Z
2023-12-08T06:20:07Z
2023-12-08T06:20:08Z
967
ray-project/ray
19,608
Bump rwkv from 0.7.2 to 0.7.3
diff --git a/requirements.txt b/requirements.txt index 882dc3000f..be51203212 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ markdown numpy peft==0.2.0 requests -rwkv==0.7.2 +rwkv==0.7.3 safetensors==0.3.0 sentencepiece pyyaml
Bumps [rwkv](https://github.com/BlinkDL/ChatRWKV) from 0.7.2 to 0.7.3. <details> <summary>Commits</summary> <ul> <li>See full diff in <a href="https://github.com/BlinkDL/ChatRWKV/commits">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=rwkv&package-manager=pip&previous-version=0.7.2&new-version=0.7.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. 
You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/oobabooga/text-generation-webui/pulls/842
2023-04-06T20:49:41Z
2023-04-06T20:52:27Z
2023-04-06T20:52:27Z
2023-04-06T20:52:35Z
102
oobabooga/text-generation-webui
26,499
Small docstring fix
diff --git a/deepspeed/runtime/fp16/unfused_optimizer.py b/deepspeed/runtime/fp16/unfused_optimizer.py index 7b2373927be7..14271255df2e 100755 --- a/deepspeed/runtime/fp16/unfused_optimizer.py +++ b/deepspeed/runtime/fp16/unfused_optimizer.py @@ -216,7 +216,7 @@ def step(self, closure=None): norm_group_value = get_weight_norm(grads_for_norm, mpu=self.mpu) norm_groups.append(norm_group_value) - # copying gradients to fp32 to wor k with fp32 parameters + # copying gradients to fp32 to work with fp32 parameters for fp32_param, fp16_param in zip(self.fp32_groups[i], self.fp16_groups[i]): if fp16_param.grad is None: fp32_param.grad = torch.zeros(fp16_param.size(), dtype=fp32_param.dtype, device=fp32_param.device) diff --git a/deepspeed/runtime/utils.py b/deepspeed/runtime/utils.py index 7024b93d6820..b066090265c8 100755 --- a/deepspeed/runtime/utils.py +++ b/deepspeed/runtime/utils.py @@ -394,7 +394,6 @@ def get_grad_norm(parameters, norm_type=2, mpu=None): Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized - max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. @@ -491,12 +490,12 @@ def get_weight_norm(parameters, norm_type=2, mpu=None): Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized - max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). + -1 if the norm value is NaN or Inf. """ if isinstance(parameters, torch.Tensor): parameters = [parameters]
https://api.github.com/repos/microsoft/DeepSpeed/pulls/4431
2023-10-01T11:40:06Z
2023-10-03T17:17:26Z
2023-10-03T17:17:26Z
2023-10-03T20:32:53Z
519
microsoft/DeepSpeed
10,679
add ut to check code saving
diff --git a/tests/data/rsp_cache.json b/tests/data/rsp_cache.json index 40d7d3953..75fc9ceb2 100644 --- a/tests/data/rsp_cache.json +++ b/tests/data/rsp_cache.json @@ -389,5 +389,11 @@ "reflection": "The implementation failed the test case where the input array is [1, 5, 2, 3, 4]. The issue arises because the code does not handle the sorting of elements with the same number of ones in their binary representation correctly. According to the context, for similar number of ones, the elements should be sorted based on their decimal value in ascending order. However, the output [1, 2, 4, 3, 5] shows that 3 and 4 are not in the correct order, indicating that the sorting by decimal value is not functioning as intended. To fix this issue, we should ensure that the sorting is stable, so that when two elements have the same number of ones, they maintain their relative order if their decimal values are already in ascending order.", "improved_impl": "def sort_array(arr):\n # Helper function to count the number of ones in the binary representation\n def count_ones(n):\n return bin(n).count('1')\n \n # Sort the array using a custom key function\n # The key function returns a tuple (number of ones, value) for each element\n # This ensures that if two elements have the same number of ones, they are sorted by their value\n # We use a stable sorting algorithm to maintain the relative order of elements with the same number of ones\n sorted_arr = sorted(arr, key=lambda x: (count_ones(x), x), reverse=True)\n sorted_arr = sorted(sorted_arr, key=count_ones)\n \n return sorted_arr\n" }, + "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. 
Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current Task\\n{\\\"task_id\\\":\\\"2\\\",\\\"dependent_task_ids\\\":[\\\"1\\\"],\\\"instruction\\\":\\\"Perform exploratory data analysis on the Iris dataset.\\\",\\\"task_type\\\":\\\"eda\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. 
And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": { + "code": "import pandas as pd\n\n# Convert the Iris dataset to a DataFrame for easier manipulation\niris_df = pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names)\niris_df['target'] = iris_data.target\niris_df['target_names'] = iris_df['target'].apply(lambda x: iris_data.target_names[x])\n\n# Display basic information about the dataset\niris_df.info()\n\n# Display statistical summary of the dataset\niris_df.describe()\n\n# Display the first few rows of the dataset\ndisplay(iris_df.head())" + }, + "[{\"role\": \"system\", \"content\": \"You are Code Interpreter, a world-class programmer that can complete any goal by executing code. Strictly follow the plan and generate code step by step. Each step of the code will be executed on the user's machine, and the user will provide the code execution results to you.**Notice: The code for the next step depends on the code for the previous step. Must reuse variables in the lastest other code directly, dont creat it again, it is very import for you. 
Use !pip install in a standalone block to install missing packages.Usually the libraries you need are already installed.Dont check if packages already imported.**\"}, {\"role\": \"user\", \"content\": \"\\n## User Requirement\\nRun data analysis on sklearn Iris dataset, include a plot\\n## Context\\n\\n## Current Plan\\n[\\n {\\n \\\"task_id\\\": \\\"1\\\",\\n \\\"dependent_task_ids\\\": [],\\n \\\"instruction\\\": \\\"Load the sklearn Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"from sklearn.datasets import load_iris\\\\niris_data = load_iris()\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"2\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"1\\\"\\n ],\\n \\\"instruction\\\": \\\"Perform exploratory data analysis on the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"eda\\\",\\n \\\"code\\\": \\\"import pandas as pd\\\\n\\\\n# Convert the Iris dataset to a DataFrame for easier manipulation\\\\niris_df = pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names)\\\\niris_df['target'] = iris_data.target\\\\niris_df['target_names'] = iris_df['target'].apply(lambda x: iris_data.target_names[x])\\\\n\\\\n# Display basic information about the dataset\\\\niris_df.info()\\\\n\\\\n# Display statistical summary of the dataset\\\\niris_df.describe()\\\\n\\\\n# Display the first few rows of the dataset\\\\ndisplay(iris_df.head())\\\",\\n \\\"result\\\": \\\"a successful run\\\",\\n \\\"is_success\\\": true,\\n \\\"is_finished\\\": true\\n },\\n {\\n \\\"task_id\\\": \\\"3\\\",\\n \\\"dependent_task_ids\\\": [\\n \\\"2\\\"\\n ],\\n \\\"instruction\\\": \\\"Create a plot visualizing the Iris dataset.\\\",\\n \\\"task_type\\\": \\\"other\\\",\\n \\\"code\\\": \\\"\\\",\\n \\\"result\\\": \\\"\\\",\\n \\\"is_success\\\": false,\\n \\\"is_finished\\\": false\\n }\\n]\\n## Current 
Task\\n{\\\"task_id\\\":\\\"3\\\",\\\"dependent_task_ids\\\":[\\\"2\\\"],\\\"instruction\\\":\\\"Create a plot visualizing the Iris dataset.\\\",\\\"task_type\\\":\\\"other\\\",\\\"code\\\":\\\"\\\",\\\"result\\\":\\\"\\\",\\\"is_success\\\":false,\\\"is_finished\\\":false}\\n\"}, {\"role\": \"user\", \"content\": \"\\n# Instruction\\nWrite complete code for 'Current Task'. And avoid duplicating code from finished tasks, such as repeated import of packages, reading data, etc.\\nSpecifically, \\n\\n# Capabilities\\n- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python Class.\\n- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..\\n\\n# Available Tools (can be empty):\\nEach Class tool is described in JSON format. When you call a tool, import the tool first.\\n{}\\n\\n# Constraints:\\n- Ensure the output new code is executable in the same Jupyter notebook with previous tasks code have been executed.\\n- Always prioritize using pre-defined tools for the same functionality.\\n\"}]": { + "code": "import matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Set the style of seaborn\nsns.set_style('whitegrid')\n\n# Pairplot to visualize the relationships between features\nsns.pairplot(iris_df, hue='target_names', markers='+')\nplt.show()" + }, "\n## context\n\n\n-----\n\n## format example\n[CONTENT]\n{\n \"invoice\": \"False\"\n}\n[/CONTENT]\n\n## nodes: \"<node>: <type> # <instruction>\"\n- invoice: <class 'bool'> # if it's a invoice file, return True else False\n\n\n## constraint\nLanguage: Please use the same language as Human INPUT.\nFormat: output wrapped inside [CONTENT][/CONTENT] like format example, nothing else.\n\n## action\nFollow instructions of nodes, generate output and make sure it follows the format example.\n": "[CONTENT]\n{\n \"invoice\": \"True\"\n}\n[/CONTENT]" } \ No newline at end of file diff --git a/tests/metagpt/roles/ci/test_code_interpreter.py 
b/tests/metagpt/roles/ci/test_code_interpreter.py index f23292965..9d2f2429b 100644 --- a/tests/metagpt/roles/ci/test_code_interpreter.py +++ b/tests/metagpt/roles/ci/test_code_interpreter.py @@ -17,3 +17,7 @@ async def test_code_interpreter(mocker, auto_run): rsp = await ci.run(requirement) logger.info(rsp) assert len(rsp.content) > 0 + + finished_tasks = ci.planner.plan.get_finished_tasks() + assert len(finished_tasks) > 0 + assert len(finished_tasks[0].code) > 0 # check one task to see if code is recorded
https://api.github.com/repos/geekan/MetaGPT/pulls/857
2024-02-06T16:00:39Z
2024-02-06T16:01:56Z
2024-02-06T16:01:56Z
2024-02-06T16:01:56Z
2,794
geekan/MetaGPT
16,936
Set WindowsProactorEventLoopPolicy after importing sk_function in python3.9+
diff --git a/metagpt/_compat.py b/metagpt/_compat.py index 91bc1e5a1..c442bd7de 100644 --- a/metagpt/_compat.py +++ b/metagpt/_compat.py @@ -2,19 +2,22 @@ import sys import warnings -if sys.implementation.name == "cpython" and platform.system() == "Windows" and sys.version_info[:2] == (3, 9): +if sys.implementation.name == "cpython" and platform.system() == "Windows": import asyncio - from asyncio.proactor_events import _ProactorBasePipeTransport - from semantic_kernel.orchestration import sk_function as _ # noqa: F401 + if sys.version_info[:2] == (3, 9): + from asyncio.proactor_events import _ProactorBasePipeTransport - # https://github.com/python/cpython/pull/92842 - def pacth_del(self, _warn=warnings.warn): - if self._sock is not None: - _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) - self._sock.close() + # https://github.com/python/cpython/pull/92842 + def pacth_del(self, _warn=warnings.warn): + if self._sock is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._sock.close() - _ProactorBasePipeTransport.__del__ = pacth_del + _ProactorBasePipeTransport.__del__ = pacth_del - # caused by https://github.com/microsoft/semantic-kernel/pull/1416 - asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy()) + if sys.version_info >= (3, 9, 0): + from semantic_kernel.orchestration import sk_function as _ # noqa: F401 + + # caused by https://github.com/microsoft/semantic-kernel/pull/1416 + asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
- fix #360
https://api.github.com/repos/geekan/MetaGPT/pulls/362
2023-09-24T17:00:41Z
2023-09-25T05:37:53Z
2023-09-25T05:37:53Z
2023-09-26T01:59:49Z
473
geekan/MetaGPT
16,815
[ie/tiktok] Restore `carrier_region` API param
diff --git a/yt_dlp/extractor/tiktok.py b/yt_dlp/extractor/tiktok.py index 295e14932a8..3f5261ad968 100644 --- a/yt_dlp/extractor/tiktok.py +++ b/yt_dlp/extractor/tiktok.py @@ -155,6 +155,7 @@ def _build_api_query(self, query): 'locale': 'en', 'ac2': 'wifi5g', 'uoo': '1', + 'carrier_region': 'US', 'op_region': 'US', 'build_number': self._APP_INFO['app_version'], 'region': 'US',
Avoids some geo-blocks, see https://github.com/yt-dlp/yt-dlp/issues/9506#issuecomment-2041044419 Thanks @oifj34f34f <details open><summary>Template</summary> <!-- OPEN is intentional --> ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) ### What is the purpose of your *pull request*? - [x] Fix or improvement to an extractor (Make sure to add/update tests) </details>
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/9637
2024-04-06T23:34:13Z
2024-04-07T15:32:11Z
2024-04-07T15:32:11Z
2024-04-07T15:32:11Z
157
yt-dlp/yt-dlp
7,374
`torch.split()` 1.7.0 compatibility fix
diff --git a/utils/loss.py b/utils/loss.py index a06330e034b..bf9b592d4ad 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -108,13 +108,15 @@ def __init__(self, model, autobalance=False): if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - det = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors self.device = device - for k in 'na', 'nc', 'nl', 'anchors': - setattr(self, k, getattr(det, k)) def __call__(self, p, targets): # predictions, targets lcls = torch.zeros(1, device=self.device) # class loss @@ -129,7 +131,8 @@ def __call__(self, p, targets): # predictions, targets n = b.shape[0] # number of targets if n: - pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # target-subset of predictions + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions # Regression pxy = pxy.sigmoid() * 2 - 0.5
Fix for https://github.com/ultralytics/yolov5/issues/7085#issuecomment-1075224274 ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Enhancement of the loss calculation functionality in the YOLOv5 model. ### 📊 Key Changes - Refactored variable names for clarity, changing `det` to `m` representing the `Detect()` module. - Direct assignment of the number of anchors (`na`), number of classes (`nc`), number of layers (`nl`), and anchors from the model `m` to the loss module for better readability and maintainability. - Modified the method for splitting prediction tensors to support a broader range of PyTorch versions by using `split` instead of `tensor_split`. ### 🎯 Purpose & Impact - These changes aim to make the code clearer and more maintainable, enabling easier future enhancements and debugging. - With the update to the tensor splitting method, this improvement increases compatibility with different versions of PyTorch, allowing more users to work with the model regardless of their PyTorch version. - The well-defined assignments and restructuring are expected to have a minimal direct impact on end-users but will provide a more robust foundation for future updates and potential new features.
https://api.github.com/repos/ultralytics/yolov5/pulls/7102
2022-03-22T15:58:36Z
2022-03-22T16:36:05Z
2022-03-22T16:36:05Z
2024-01-19T12:03:15Z
678
ultralytics/yolov5
25,598
keras_v2+python3, `load_weights_from_hdf5_group` character type bug
diff --git a/keras/engine/topology.py b/keras/engine/topology.py index 85687439d9f..0c00abc4afb 100644 --- a/keras/engine/topology.py +++ b/keras/engine/topology.py @@ -2839,11 +2839,11 @@ def load_weights_from_hdf5_group(f, layers): and weights file. """ if 'keras_version' in f.attrs: - original_keras_version = f.attrs['keras_version'] + original_keras_version = f.attrs['keras_version'].decode('utf8') else: original_keras_version = '1' if 'backend' in f.attrs: - original_backend = f.attrs['backend'] + original_backend = f.attrs['backend'].decode('utf8') else: original_backend = None @@ -2911,11 +2911,11 @@ def load_weights_from_hdf5_group_by_name(f, layers): and weights file. """ if 'keras_version' in f.attrs: - original_keras_version = f.attrs['keras_version'] + original_keras_version = f.attrs['keras_version'].decode('utf8') else: original_keras_version = '1' if 'backend' in f.attrs: - original_backend = f.attrs['backend'] + original_backend = f.attrs['backend'].decode('utf8') else: original_backend = None
Forgetting to decode bytes strings in `keras.engine.topology.load_weights_from_hdf5_group` and `load_weights_from_hdf5_group_by_name`. `h5py` returns string as `bytes` object in Python3. `load_weights_from_hdf5_group()` call `preprocess_weights_for_loading()` with `original_backend` as bytes string. https://github.com/fchollet/keras/blob/master/keras/engine/topology.py#L2879-L2882 Unnecessary weight preprocessing will be called, because string comparison between bytes and string is always false in python3. ```py >>> b'youjo' == 'youjo' False ``` https://github.com/fchollet/keras/blob/master/keras/engine/topology.py#L2817 ## test case ```py import keras x = keras.layers.Input(shape=(32, 32, 3)) h = keras.layers.Conv2D(10, (10, 10))(x) model = keras.models.Model(x, h) model.save('conv2d.h5') del model model = keras.models.load_model('./conv2d.h5') ``` In python2, no error. However in python3, following error occurs. ``` W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE3 instructions, but these are available on your machine and could speed up CPU computations. W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. Using TensorFlow backend. 
Traceback (most recent call last): File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/h5py/_hl/selections.py", line 85, in select int(a) TypeError: int() argument must be a string, a bytes-like object or a number, not 'list' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "hoge.py", line 10, in <module> model = keras.models.load_model('./conv2d.h5') File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/keras/models.py", line 235, in load_model topology.load_weights_from_hdf5_group(f['model_weights'], model.layers) File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/keras/engine/topology.py", line 2882, in load_weights_from_hdf5_group original_backend) File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/keras/engine/topology.py", line 2823, in preprocess_weights_for_loading weights[0] = conv_utils.convert_kernel(weights[0]) File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/keras/utils/conv_utils.py", line 86, in convert_kernel return np.copy(kernel[slices]) File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper (/tmp/pip-tnf92dft-build/h5py/_objects.c:2853) File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper (/tmp/pip-tnf92dft-build/h5py/_objects.c:2811) File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/h5py/_hl/dataset.py", line 462, in __getitem__ selection = sel.select(self.shape, args, dsid=self.id) File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/h5py/_hl/selections.py", line 88, in select sel[args] File "/home/cocuh/.virtualenvs/keras2/lib/python3.6/site-packages/h5py/_hl/selections.py", line 356, in __getitem__ if sorted(arg) != list(arg): TypeError: '<' not supported between instances of 'NoneType' and 'int' ```
https://api.github.com/repos/keras-team/keras/pulls/5762
2017-03-14T13:16:19Z
2017-03-14T15:32:00Z
2017-03-14T15:32:00Z
2017-03-14T23:18:21Z
322
keras-team/keras
47,220
BOTMETA: Update team_mysql
diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index c88cfb0c98b447..930f73bc439313 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1350,7 +1350,7 @@ macros: team_macos: dagwieers kyleabenson team_manageiq: gtanzillo abellotti zgalor yaacov cben team_meraki: dagwieers kbreit - team_mysql: [] + team_mysql: bmalynovytch dagwieers michaelcoburn oneiroi tolland Xyon team_netapp: hulquest lmprice ndswartz amit0701 schmots1 carchi8py lonico team_netbox: sieben anthony25 fragmentedpacket nikkytub pilou- team_netscaler: chiradeep giorgos-nikolopoulos
##### SUMMARY Add team members to the team. Welcome ! ##### ISSUE TYPE - Docs Pull Request ##### COMPONENT NAME BOTMETA.yml
https://api.github.com/repos/ansible/ansible/pulls/52212
2019-02-14T02:00:40Z
2019-02-14T02:47:14Z
2019-02-14T02:47:14Z
2019-07-25T16:48:49Z
213
ansible/ansible
49,161
delay imports for replace policies and fix missing req
diff --git a/deepspeed/module_inject/replace_module.py b/deepspeed/module_inject/replace_module.py index c58130f23185..31f16cce8109 100755 --- a/deepspeed/module_inject/replace_module.py +++ b/deepspeed/module_inject/replace_module.py @@ -387,6 +387,9 @@ def replace_module(model, orig_class, replace_fn, _replace_policy): policy.update({orig_class: (replace_fn, _replace_policy)}) else: for plcy in replace_policies: + # instantiate a throw-away policy in order to populate the _orig_layer_class + _ = plcy(None) + assert plcy._orig_layer_class != None policy.update({plcy._orig_layer_class: (replace_fn, plcy)}) replaced_module, _ = _replace_module(model, policy) diff --git a/deepspeed/module_inject/replace_policy.py b/deepspeed/module_inject/replace_policy.py index 2bbec0f9ff62..325bb37efd74 100755 --- a/deepspeed/module_inject/replace_policy.py +++ b/deepspeed/module_inject/replace_policy.py @@ -41,16 +41,18 @@ def layerNorm(self): class HFBertLayerPolicy(DSPolicy): - try: - import transformers - _orig_layer_class = transformers.models.bert.modeling_bert.BertLayer - except: - _orig_layer_class = None + _orig_layer_class = None def __init__(self, client_module, inference=False, preln=False): super().__init__(inference) self.client_module = client_module self.preln = preln + if HFBertLayerPolicy._orig_layer_class is None: + try: + import transformers + HFBertLayerPolicy._orig_layer_class = transformers.models.bert.modeling_bert.BertLayer + except: + HFBertLayerPolicy._orig_layer_class = None def get_hidden_heads(self): return self.client_module.attention.self.query.weight.data.shape[1], \ @@ -98,15 +100,16 @@ def layerNorm(self): class HFGPTNEOLayerPolicy(DSPolicy): - try: - import transformers - _orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock - except: - _orig_layer_class = None + _orig_layer_class = None def __init__(self, client_module, inference=True): super().__init__(inference, scale_attention=False) self.client_module = client_module + 
try: + import transformers + HFGPTNEOLayerPolicy._orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock + except: + HFGPTNEOLayerPolicy._orig_layer_class = None def get_hidden_heads(self): return self.client_module.attn.attention.q_proj.weight.data.shape[1], \ @@ -141,11 +144,7 @@ def layerNorm(self): class MegatronLayerPolicy(DSPolicy): - try: - import megatron - _orig_layer_class = megatron.model.ParallelTransformerLayer - except: - _orig_layer_class = None + _orig_layer_class = None def __init__(self, client_module, version=0, inference=True): super().__init__(inference) @@ -153,6 +152,13 @@ def __init__(self, client_module, version=0, inference=True): # we use megatron version to differentiate between the old and new # megatron-lm source code self.version = version + if MegatronLayerPolicy._orig_layer_class is None: + try: + import megatron + from megatron.model.transformer import ParallelTransformerLayer + MegatronLayerPolicy._orig_layer_class = ParallelTransformerLayer + except ImportError: + MegatronLayerPolicy._orig_layer_class = None def get_hidden_heads(self): return self.client_module.attention.query_key_value.weight.data.shape[1], \ @@ -187,16 +193,17 @@ def layerNorm(self): class HFGPT2LayerPolicy(DSPolicy): - try: - import transformers - _orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block - except: - _orig_layer_class = None + _orig_layer_class = None def __init__(self, client_module, inference=True): # HuggingFace GPT2 uses convolutional layer instead of linear layer super().__init__(inference, linear_layer=False) self.client_module = client_module + try: + import transformers + HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block + except ImportError: + HFGPT2LayerPolicy._orig_layer_class = None def get_hidden_heads(self): return self.client_module.attn.embed_dim, \ diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt index 
caedac15d709..1cf960afcfab 100644 --- a/requirements/requirements-dev.txt +++ b/requirements/requirements-dev.txt @@ -6,3 +6,5 @@ clang-format sphinx recommonmark sphinx-rtd-theme +megatron-lm==1.1.5 +importlib-metadata>=4
- [x] Add requirement for dev on importlib-metadata > 4, we were seeing pytest issues with importlib-metadata 2.0 - [x] Delay import of static class variables, this was causing our unit test framework to crash since importing megatron seems to initialize cuda which causes us python/pytorch multi-process issues :( - [x] fix incorrect import path for ParallelTransformerLayer
https://api.github.com/repos/microsoft/DeepSpeed/pulls/1100
2021-05-24T22:06:01Z
2021-05-24T23:43:37Z
2021-05-24T23:43:37Z
2021-05-24T23:43:39Z
1,231
microsoft/DeepSpeed
10,734
fix appveyor
diff --git a/Pipfile b/Pipfile index 51e8e8469f..3e0fd729eb 100644 --- a/Pipfile +++ b/Pipfile @@ -1,24 +1,24 @@ [[source]] url = "https://pypi.org/simple/" verify_ssl = true +name = "pypi" [dev-packages] - pytest = ">=2.8.0" codecov = "*" -"pytest-httpbin" = "==0.0.7" -"pytest-mock" = "*" -"pytest-cov" = "*" -"pytest-xdist" = "*" +pytest-httpbin = ">=0.0.7" +pytest-mock = "*" +pytest-cov = "*" +pytest-xdist = "*" alabaster = "*" -"readme-renderer" = "*" +readme-renderer = "*" sphinx = "<=1.5.5" pysocks = "*" docutils = "*" "flake8" = "*" tox = "*" detox = "*" -httpbin = "==0.5.0" +httpbin = ">=0.7.0" [packages] -"e1839a8" = {path = ".", editable = true, extras=["socks"]} \ No newline at end of file +"e1839a8" = {path = ".", editable = true, extras = ["socks"]} diff --git a/appveyor.yml b/appveyor.yml index 8c84a9f3d5..d3f3ca19b7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -43,6 +43,7 @@ install: # about it being out of date. - "python -m pip install --upgrade pip wheel" - "C:\\MinGW\\bin\\mingw32-make" + - "pipenv install -e .[socks] --skip-lock" test_script: - "C:\\MinGW\\bin\\mingw32-make coverage"
I don't know what's going on with Appveyor/Pipenv but it's no longer liking `-e .` installs out of the Pipfile. I went back to a version of Pipenv that we have successful builds for and it's still broken. I'd like to get builds fixed while we determine root cause.
https://api.github.com/repos/psf/requests/pulls/4647
2018-05-16T06:04:37Z
2018-05-16T13:14:38Z
2018-05-16T13:14:38Z
2021-09-03T00:10:58Z
444
psf/requests
32,629
Remove references to the `sel` object in shell.rst
diff --git a/docs/topics/shell.rst b/docs/topics/shell.rst index 5c1cfbd475f..8fce0cea18a 100644 --- a/docs/topics/shell.rst +++ b/docs/topics/shell.rst @@ -85,9 +85,6 @@ Those objects are: * ``response`` - a :class:`~scrapy.http.Response` object containing the last fetched page - * ``sel`` - a :class:`~scrapy.selector.Selector` object constructed - with the last response fetched - * ``settings`` - the current :ref:`Scrapy settings <topics-settings>` Example of shell session @@ -117,7 +114,6 @@ all start with the ``[s]`` prefix):: [s] item {} [s] request <GET http://scrapy.org> [s] response <200 http://scrapy.org> - [s] sel <Selector xpath=None data=u'<html>\n <head>\n <meta charset="utf-8'> [s] settings <scrapy.settings.Settings object at 0x2bfd650> [s] spider <Spider 'default' at 0x20c6f50> [s] Useful shortcuts: @@ -129,8 +125,8 @@ all start with the ``[s]`` prefix):: After that, we can start playing with the objects:: - >>> sel.xpath("//h2/text()").extract()[0] - u'Welcome to Scrapy' + >>> response.xpath("//h1/text()").extract()[0] + u'Meet Scrapy' >>> fetch("http://slashdot.org") [s] Available Scrapy objects: @@ -138,7 +134,6 @@ After that, we can start playing with the objects:: [s] item {} [s] request <GET http://slashdot.org> [s] response <200 http://slashdot.org> - [s] sel <Selector xpath=None data=u'<html lang="en">\n<head>\n\n\n\n\n<script id="'> [s] settings <scrapy.settings.Settings object at 0x2bfd650> [s] spider <Spider 'default' at 0x20c6f50> [s] Useful shortcuts: @@ -146,7 +141,7 @@ After that, we can start playing with the objects:: [s] fetch(req_or_url) Fetch request (or URL) and update local objects [s] view(response) View response in a browser - >>> sel.xpath('//title/text()').extract() + >>> response.xpath('//title/text()').extract() [u'Slashdot: News for nerds, stuff that matters'] >>> request = request.replace(method="POST") @@ -203,7 +198,7 @@ When you run the spider, you will get something similar to this:: Then, you can check if the 
extraction code is working:: - >>> sel.xpath('//h1[@class="fn"]') + >>> response.xpath('//h1[@class="fn"]') [] Nope, it doesn't. So you can open the response in your web browser and see if
The current documentation has references to the deprecated `sel` when interacting with the shell. I've removed them and replaced uses of `sel.xpath` with `response.xpath` instead.
https://api.github.com/repos/scrapy/scrapy/pulls/1154
2015-04-12T17:44:43Z
2015-04-13T00:55:36Z
2015-04-13T00:55:36Z
2015-04-13T00:55:36Z
730
scrapy/scrapy
34,761
Update common software development questions
diff --git a/topics/software_development/README.md b/topics/software_development/README.md index 022f8cd83..be91232b1 100644 --- a/topics/software_development/README.md +++ b/topics/software_development/README.md @@ -41,6 +41,8 @@ <details> <summary>What programming language do you prefer to use for DevOps related tasks? Why specifically this one?</summary><br><b> + +For example, Python. It's multipurpose, easy-to-learn, continuously-evolving, and open-source. And it's very popular today </b></details> <details> @@ -60,18 +62,30 @@ Statements are instructions executed by the interpreter like variable assignment <details> <summary>What is Object Oriented Programming? Why is it important?</summary><br><b> + +[educative.io](https://www.educative.io/blog/object-oriented-programming) "Object-Oriented Programming (OOP) is a programming paradigm in computer science that relies on the concept of classes and objects. It is used to structure a software program into simple, reusable pieces of code blueprints (usually called classes), which are used to create individual instances of objects." + +OOP is the mainstream paradigm today. Most of the big services are wrote with OOP </b></details> <details> <summary>Explain Composition</summary><br><b> -</b></details> -<details> -<summary>What is a compiler?</summary><br><b> +Composition - ability to build a complex object from other objects </b></details> <details> -<summary>What is an interpreter?</summary><br><b> +<summary>What is a compiler and interpreter?</summary><br><b> + +[bzfar.org](https://www.bzfar.org/publ/algorithms_programming/programming_languages/translators_compiler_vs_interpetator/42-1-0-50) + +Compiler: + +"A compiler is a translator used to convert high-level programming language to low-level programming language. It converts the whole program in one session and reports errors detected after the conversion. 
Compiler takes time to do its work as it translates high-level code to lower-level code all at once and then saves it to memory." + +Interpreter: + +"Just like a compiler, is a translator used to convert high-level programming language to low-level programming language. It converts the program line by line and reports errors detected at once, while doing the conversion. With this, it is easier to detect errors than in a compiler." </b></details> <details> @@ -84,35 +98,52 @@ SOLID design principles are about: SOLID is: -* Single Responsibility - A class should only have a single responsibility -* Open-Closed - An entity should be open for extension, but closed for modification. What this practically means is that you should extend functionality by adding a new code and not by modifying it. Your system should be separated into components so it can be easily extended without breaking everything. +* Single Responsibility - A class* should have one ~responsibility~ reason to change. It was edited by Robert Martin due to wrong understanding of principle +* Open-Closed - A class should be open for extension, but closed for modification. What this practically means is that you should extend functionality by adding a new code and not by modifying it. Your system should be separated into components so it can be easily extended without breaking everything * Liskov Substitution - Any derived class should be able to substitute the its parent without altering its corrections. Practically, every part of the code will get the expected result no matter which part is using it -* Interface segregation - A client should never depend on anything it doesn't uses +* Interface Segregation - A client should never depend on anything it doesn't uses. Big interfaces must be splitted to smaller interfaces if needed * Dependency Inversion - High level modules should depend on abstractions, not low level modules + +*there also can be module, component, entity, etc. 
Depends on project structure and programming language </b></details> <details> <summary>What is YAGNI? What is your opinion on it?</summary><br><b> + +YAGNI - You aren't gonna need it. You must add functionality that will be used. No need to add functionality that is not directly needed </b></details> <details> <summary>What is DRY? What is your opinion on it?</summary><br><b> + +DRY - Don't repeat yourself. Actually it means that you shouldn't duplicate logic and use functions/classes instead. But this must be done smartly and pay attention to the domain logic. Same code lines don't always mean duplication </b></details> <details> <summary>What are the four pillars of object oriented programming?</summary><br><b> + +* Abstraction - you don't need to know how this class implemented. You need to know what functionality does it provide (interface) and how to use it +* Encapsulation - keep fields for class purposes private (or protected) and provide public methods if needed. We must keep the data and code safe within the class itself +* Inheritance - gives ability to create class that shares some of attributes of existing classes +* Polymorphism - same methods in different contexts can do different things. Method overloading and overriding are some forms of polymorphism </b></details> <details> <summary>Explain recursion</summary><br><b> + +Recursion - process (or strategy), when function calls itself. It has recursive case and exit case. In recursive case we call function again, in exit case we finish function without calling it again. If we don't have exit case - function will work infinite, until memory overload or call stack limit </b></details> <details> -<summary>Explain Inversion of Control</summary><br><b> +<summary>Explain Inversion of Control (IoC)</summary><br><b> + +Inversion of Control - design principle, used to achieve loose coupling. 
You must use some abstraction layer to access some functionality (similar to SOLID Dependency Inversion) </b></details> <details> -<summary>Explain Dependency Injection</summary><br><b> +<summary>Explain Dependency Injection (DI)</summary><br><b> + +Dependency Injection - deisgn pattern, used with IoC. Our object fields (dependecies) must be configurated by external objects </b></details> <details> @@ -129,15 +160,29 @@ True <details> <summary>Explain big O notation</summary><br><b> + +[habr.com](https://habr.com/ru/post/559518/) "We can use Big O notation to compare and search different solutions to find which solution is best. The best solution is one that consumes less amount of time and space. Generally, time and space are two parameters that determine the efficiency of the algorithm. + + Big O Notation tells accurately how long an algorithm takes to run. It is a basic analysis of algorithm efficiency. It describes the execution time required. It depends on the size of input data that essentially passes in. Big O notation gives us algorithm complexity in terms of input size. For the large size of input data, the execution time will be slow as compared to the small size of input data. Big O notation is used to analyze space and time." </b></details> <details> <summary>What is "Duck Typing"?</summary><br><b> + +"When I see a bird that walks like a duck and swims like a duck and quacks like a duck, I call that bird a duck." + +This is direction in programming, where we are checking properties of object, but not it's type </b></details> <details> <summary>Explain string interpolation</summary><br><b> -</b></details> + +String interpolation - process of evaluating of string literal. For example (JS):</b> +```js +const messages = 5; +console.log(`You have ${messages} new messages`); // You have 5 new messages +``` +</details> ##### Common algorithms
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/342
2023-01-31T12:20:43Z
2023-02-02T11:00:42Z
2023-02-02T11:00:42Z
2023-02-02T14:12:37Z
1,769
bregman-arie/devops-exercises
17,647
Add EUI-48 and EUI-64 (MAC address) highlighting
diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a05e6811..0913a7906 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [8.0.1] - unreleased +## [8.1.0] - unreleased + +### Added + +- Added highlighting of EUI-48 and EUI-64 (MAC addresses) ### Changed diff --git a/rich/default_styles.py b/rich/default_styles.py index e320e3cad..ce1614373 100644 --- a/rich/default_styles.py +++ b/rich/default_styles.py @@ -61,6 +61,8 @@ "repr.comma": Style(bold=True), "repr.ipv4": Style(bold=True, color="bright_green"), "repr.ipv6": Style(bold=True, color="bright_green"), + "repr.eui48": Style(bold=True, color="bright_green"), + "repr.eui64": Style(bold=True, color="bright_green"), "repr.tag_start": Style(bold=True), "repr.tag_name": Style(color="bright_magenta", bold=True), "repr.tag_contents": Style(color="default"), diff --git a/rich/highlighter.py b/rich/highlighter.py index 9fe859556..02a606bdd 100644 --- a/rich/highlighter.py +++ b/rich/highlighter.py @@ -80,6 +80,12 @@ class ReprHighlighter(RegexHighlighter): r"(?P<path>\B(\/[\w\.\-\_\+]+)*\/)(?P<filename>[\w\.\-\_\+]*)?", r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-gt9]{1,3}\.[0-9]{1,3})", r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})", + r"(?P<eui48>([0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2})", # EUI-48 6x2 hyphen + r"(?P<eui64>([0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2})", # EUI-64 8x2 hyphen + r"(?P<eui48>([0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2})", # EUI-48 6x2 colon + r"(?P<eui64>([0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2})", # EUI-64 8x2 colon + r"(?P<eui48>([0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})", # EUI-48 3x4 dot + r"(?P<eui64>([0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})", # EUI-64 4x4 dot r"(?<!\\)(?P<str>b?\'\'\'.*?(?<!\\)\'\'\'|b?\'.*?(?<!\\)\'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")", 
r"(?P<url>https?:\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*)", r"(?P<uuid>[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})", diff --git a/tests/test_highlighter.py b/tests/test_highlighter.py index 5e3798e86..242452dba 100644 --- a/tests/test_highlighter.py +++ b/tests/test_highlighter.py @@ -1,8 +1,34 @@ +"""Tests for the higlighter classes.""" import pytest -from rich.highlighter import NullHighlighter + +from rich.highlighter import NullHighlighter, ReprHighlighter +from rich.text import Span, Text def test_wrong_type(): highlighter = NullHighlighter() with pytest.raises(TypeError): highlighter([]) + + +@pytest.mark.parametrize( + "style_name, test_str", + [ + ("repr.eui48", "01-23-45-67-89-AB"), # 6x2 hyphen + ("repr.eui64", "01-23-45-FF-FE-67-89-AB"), # 8x2 hyphen + ("repr.eui48", "01:23:45:67:89:AB"), # 6x2 colon + ("repr.eui64", "01:23:45:FF:FE:67:89:AB"), # 8x2 colon + ("repr.eui48", "0123.4567.89AB"), # 3x4 dot + ("repr.eui64", "0123.45FF.FE67.89AB"), # 4x4 dot + ("repr.eui48", "ed-ed-ed-ed-ed-ed"), # lowercase + ("repr.eui48", "ED-ED-ED-ED-ED-ED"), # uppercase + ("repr.eui48", "Ed-Ed-Ed-Ed-Ed-Ed"), # mixed case + ("repr.eui48", "0-00-1-01-2-02"), # dropped zero + ], +) +def test_highlight_regex(style_name: str, test_str: str): + """Tests for the regular expressions used in ReprHighlighter.""" + text = Text(test_str) + highlighter = ReprHighlighter() + highlighter.highlight(text) + assert text._spans[-1] == Span(0, len(test_str), style_name)
## Type of changes - [ ] Bug fix - [x] New feature - [ ] Documentation / docstrings - [ ] Tests - [ ] Other ## Checklist - [x] I've run the latest [black](https://github.com/psf/black) with default args on new code. - [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate. - [x] I've added tests for new code. - [x] I accept that @willmcgugan may be pedantic in the code review. ## Description Fixes #279. Adds highlighting for IEEE EUI-48 and EUI-64 (mac addresses) accepting three formats: * Six (EUI-48) or eight (EUI-64) groups of two hex digits with hyphen separators: `01-23-45-67-89-AB` or `01-23-45-FF-FE-67-89-AB` * Six (EUI-48) or eight (EUI-64) groups of two hex digits with colon separators: `01:23:45:67:89:AB` or `01:23:45:FF:FE:67:89:AB` * Three (EUI-48) or four (EUI-64) groups of four hex digits with dot separators `0123.4567.89AB` or `0123.45FF.FE67.89AB` The first two forms are seemingly the most common, and are recommended in this IEEE document: https://standards.ieee.org/content/dam/ieee-standards/standards/web/documents/tutorials/eui.pdf I've found the third form scattered around places, although don't have an "authoritative" reference. The highlighting accepts case-insensitive hex digits (e.g. `aa`. `aA`, and `AA`), and unpadded hex groups (e.g. `00:01` and `0:1`).
https://api.github.com/repos/Textualize/rich/pulls/369
2020-10-08T19:52:37Z
2020-10-09T14:35:24Z
2020-10-09T14:35:24Z
2020-10-09T14:35:24Z
1,520
Textualize/rich
48,140
dialog display on squeeze (fixes #280)
diff --git a/docs/using.rst b/docs/using.rst index eb53fc54a9b..463b6524bc6 100644 --- a/docs/using.rst +++ b/docs/using.rst @@ -36,7 +36,6 @@ For squezze you will need to: (``sudo`` is not installed by default) before running the bootstrap script. - Use ``virtualenv --no-site-packages -p python`` instead of ``-p python2``. -- Use text mode ``sudo ./venv/bin/letsencrypt --text`` (`#280`_) .. _`#280`: https://github.com/letsencrypt/lets-encrypt-preview/issues/280 diff --git a/setup.py b/setup.py index c399179e44f..e25c914c426 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ def read_file(filename, encoding='utf8'): 'PyOpenSSL', 'pyrfc3339', 'python-augeas', - 'python2-pythondialog', + 'python2-pythondialog>=3.2.2rc1', # Debian squeeze support, cf. #280 'pytz', 'requests', 'werkzeug',
@jdkasten, @pavgup does this work for you?
https://api.github.com/repos/certbot/certbot/pulls/337
2015-04-05T07:59:39Z
2015-04-14T20:02:29Z
2015-04-14T20:02:29Z
2016-05-06T19:21:28Z
288
certbot/certbot
2,004
Adjust `--preview` documentation
diff --git a/src/black/__init__.py b/src/black/__init__.py index 6192f5c0f8..6a703e4504 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -258,7 +258,7 @@ def validate_regex( "--preview", is_flag=True, help=( - "Enable potentially disruptive style changes that will be added to Black's main" + "Enable potentially disruptive style changes that may be added to Black's main" " functionality in the next major release." ), )
We may not adopt all preview changes immediately.
https://api.github.com/repos/psf/black/pulls/2833
2022-01-30T19:27:46Z
2022-01-30T19:53:46Z
2022-01-30T19:53:46Z
2022-01-30T19:54:20Z
130
psf/black
23,938
Add webview docs and examples, Set webview as default
diff --git a/README.md b/README.md index 451ec57de3..c07a1d4b22 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ As per the survey, here is a list of improvements to come - [ ] 🚧 Improve Documentation (in /docs & Guides, Howtos, & Do video tutorials) - [x] Improve the provider status list & updates - [ ] Tutorials on how to reverse sites to write your own wrapper (PoC only ofc) -- [ ] Improve the Bing wrapper. (might write a new wrapper in golang as it is very fast) +- [x] Improve the Bing wrapper. (Wait and Retry or reuse conversation) - [ ] Write a standard provider performance test to improve the stability - [ ] Potential support and development of local models - [ ] 🚧 Improve compatibility and error handling @@ -170,7 +170,33 @@ image_url = response.data[0].url - New Client API like the OpenAI Python library: [/docs/client](/docs/client.md) - Legacy API with python modules: [/docs/legacy](/docs/legacy.md) -#### Web UI +### Webview GUI + +Open the GUI in a window of your OS. Runs on a local/static/ssl server with a js api. Supports login into the OpenAI Chat, Image Upload and streamed Text Generation. + +Supports all platforms, but only Linux tested. + +1. Install all requirements with: + +```bash +pip install g4f[webview] +``` + +2. Follow the OS specific steps here: + [pywebview installation](https://pywebview.flowrl.com/guide/installation.html#dependencies) + +3. 
Run the app with: + +```python +from g4f.gui.webview import run_webview +run_webview(debug=True) +``` +or execute the following command: +```bash +python -m g4f.gui.webview -debug +``` + +#### Webserver To start the web interface, type the following codes in python: @@ -237,7 +263,7 @@ set G4F_PROXY=http://host:port | [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | | [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | | [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | +| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | | [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | | [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | | [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py index a1d14d8785..f8b06dd1f8 100644 --- a/g4f/Provider/Bing.py +++ b/g4f/Provider/Bing.py @@ -414,7 +414,7 @@ async def stream_generate( image_request = await upload_image( session, image, - "Balanced" if Tones.copilot == "Copilot" else tone, + "Balanced" if tone == Tones.copilot else tone, headers ) if image else None async with session.ws_connect( diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index bcef4a78a5..f9bc456852 100644 --- 
a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -240,26 +240,26 @@ async function add_message_chunk(message) { } } -cameraInput?.addEventListener("click", (e) => { - if (window?.pywebview) { - e.preventDefault(); - pywebview.api.choose_file(); - } -}) +// fileInput?.addEventListener("click", (e) => { +// if (window?.pywebview) { +// e.preventDefault(); +// pywebview.api.choose_file(); +// } +// }); cameraInput?.addEventListener("click", (e) => { if (window?.pywebview) { e.preventDefault(); pywebview.api.take_picture(); } -}) +}); imageInput?.addEventListener("click", (e) => { if (window?.pywebview) { e.preventDefault(); pywebview.api.choose_image(); } -}) +}); const ask_gpt = async () => { regenerate.classList.add(`regenerate-hidden`); diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py index 3adb88f433..e7683812fb 100644 --- a/g4f/gui/server/api.py +++ b/g4f/gui/server/api.py @@ -19,12 +19,12 @@ filters=[["Image", "*.jpg", "*.jpeg", "*.png", "*.webp", "*.svg"]], ) has_plyer = True -except (ImportError, NameError): +except ImportError: has_plyer = False try: from android.runnable import run_on_ui_thread - from android.storage import app_storage_path - from android.permissions import request_permissions, Permission + import android.permissions + from android.permissions import Permission from android.permissions import _RequestPermissionsManager _RequestPermissionsManager.register_callback() from .android_gallery import user_select_image @@ -161,7 +161,7 @@ def set_selected(self, input_id: str = None): def request_permissions(self): if has_android: - request_permissions([ + android.permissions.request_permissions([ Permission.CAMERA, Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE diff --git a/g4f/gui/webview.py b/g4f/gui/webview.py index 36ad0e6002..b015dbed94 100644 --- a/g4f/gui/webview.py +++ b/g4f/gui/webview.py @@ -16,6 +16,7 @@ def run_webview( debug: bool = False, + ssl: bool = True, storage_path: 
str = None ): if getattr(sys, 'frozen', False): @@ -36,7 +37,7 @@ def run_webview( private_mode=False, storage_path=storage_path, debug=debug, - ssl=True + ssl=ssl ) if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index def8c7e35d..671b23945d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,11 +14,9 @@ platformdirs fastapi uvicorn flask -py-arkose-generator -undetected-chromedriver>=3.5.5 brotli beautifulsoup4 -setuptools aiohttp_socks -selenium-wire -gpt4all \ No newline at end of file +gpt4all +pywebview +plyer \ No newline at end of file diff --git a/setup.py b/setup.py index b6c106c4ed..fa997b5060 100644 --- a/setup.py +++ b/setup.py @@ -18,23 +18,24 @@ 'all': [ "curl_cffi>=0.6.2", "certifi", - "async-property", # openai - "py-arkose-generator", # openai + #"py-arkose-generator", # not working "browser_cookie3", # get_cookies "PyExecJS", # GptForLove "duckduckgo-search>=5.0" ,# internet.search "beautifulsoup4", # internet.search and bing.create_images "brotli", # openai - "platformdirs", # webdriver - "undetected-chromedriver>=3.5.5", # webdriver - "setuptools", # webdriver + #"undetected-chromedriver>=3.5.5", # webdriver + #"setuptools", # webdriver + "pywebview", + "platformdirs", + "plyer", "aiohttp_socks", # proxy "pillow", # image "cairosvg", # svg image "werkzeug", "flask", # gui "loguru", "fastapi", "uvicorn", "nest_asyncio", # api - "selenium-wire" + #"selenium-wire" ], "image": [ "pillow", @@ -47,6 +48,11 @@ "setuptools", "selenium-wire" ], + "webview": [ + "webview", + "platformdirs", + "plyer" + ], "openai": [ "async-property", "py-arkose-generator",
https://api.github.com/repos/xtekky/gpt4free/pulls/1742
2024-03-22T11:49:17Z
2024-03-22T12:01:48Z
2024-03-22T12:01:48Z
2024-03-22T12:01:55Z
2,335
xtekky/gpt4free
38,028
🔥 Removed support for Python 3.6
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 88c791d5a..dd94314b9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -21,8 +21,8 @@ jobs: python: ["3.7", "3.8", "3.9", "3.10", "3.11"] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v3 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} - name: Setup Graphviz diff --git a/README.md b/README.md index 0b15594d4..73511f7a6 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ Diagrams lets you draw the cloud system architecture **in Python code**. It was ## Getting Started -It requires **Python 3.6** or higher, check your Python version first. +It requires **Python 3.7** or higher, check your Python version first. It uses [Graphviz](https://www.graphviz.org/) to render the diagram, so you need to [install Graphviz](https://graphviz.gitlab.io/download/) to use **diagrams**. After installing graphviz (or already have it), install the **diagrams**. diff --git a/pyproject.toml b/pyproject.toml index 05eb30082..93705dcd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,6 @@ include = ["resources/**/*"] python = "^3.7" graphviz = ">=0.13.2,<0.21.0" jinja2 = ">=2.10,<4.0" -contextvars = { version = "^2.4", python = "~3.6" } typed-ast = "^1.5.4" [tool.poetry.dev-dependencies]
Related #855
https://api.github.com/repos/mingrammer/diagrams/pulls/856
2023-02-22T10:09:03Z
2023-10-30T14:07:32Z
2023-10-30T14:07:32Z
2023-10-30T14:22:16Z
487
mingrammer/diagrams
52,678
Update sites.md
diff --git a/sites.md b/sites.md index 612c18ab6..f546b6940 100644 --- a/sites.md +++ b/sites.md @@ -1,4 +1,4 @@ -## List Of Supported Sites (304 Sites In Total!) +## List Of Supported Sites (305 Sites In Total!) 1. [2Dimensions](https://2Dimensions.com/) 2. [3dnews](http://forum.3dnews.ru/) 3. [4pda](https://4pda.ru/)
https://api.github.com/repos/sherlock-project/sherlock/pulls/742
2020-09-01T07:50:42Z
2020-09-01T11:58:40Z
2020-09-01T11:58:40Z
2020-09-01T11:58:40Z
117
sherlock-project/sherlock
36,313
moved Neon from C++ to Python
diff --git a/README.md b/README.md index 47fd2521..a7c83d59 100644 --- a/README.md +++ b/README.md @@ -127,7 +127,6 @@ For a list of free machine learning books available for download, go [here](http * [Stan](http://mc-stan.org/) - A probabilistic programming language implementing full Bayesian statistical inference with Hamiltonian Monte Carlo sampling * [BanditLib](https://github.com/jkomiyama/banditlib) - A simple Multi-armed Bandit library. * [Timbl](http://ilk.uvt.nl/timbl) - A software package/C++ library implementing several memory-based learning algorithms, among which IB1-IG, an implementation of k-nearest neighbor classification, and IGTree, a decision-tree approximation of IB1-IG. Commonly used for NLP. -* [neon](https://github.com/NervanaSystems/neon) - Nervana's [high-performance](https://github.com/soumith/convnet-benchmarks) Python-based Deep Learning framework [DEEP LEARNING] <a name="cpp-nlp" /> #### Natural Language Processing @@ -643,6 +642,7 @@ on MNIST digits[DEEP LEARNING] * [deap](https://github.com/deap/deap) - Evolutionary algorithm framework. * [pydeep](https://github.com/andersbll/deeppy) - Deep Learning In Python * [mlxtend](https://github.com/rasbt/mlxtend) - A library consisting of useful tools for data science and machine learning tasks. +* [neon](https://github.com/NervanaSystems/neon) - Nervana's [high-performance](https://github.com/soumith/convnet-benchmarks) Python-based Deep Learning framework [DEEP LEARNING] <a name="python-data-analysis" /> #### Data Analysis / Data Visualization
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/153
2015-06-04T00:33:15Z
2015-06-04T01:06:23Z
2015-06-04T01:06:23Z
2015-06-04T01:06:26Z
422
josephmisiti/awesome-machine-learning
52,079
PostgreSQL integration test suite: really generate locales on Debian
diff --git a/test/integration/targets/setup_postgresql_db/tasks/main.yml b/test/integration/targets/setup_postgresql_db/tasks/main.yml index 10f26c11aab29b..c811f1dd6c87b1 100644 --- a/test/integration/targets/setup_postgresql_db/tasks/main.yml +++ b/test/integration/targets/setup_postgresql_db/tasks/main.yml @@ -95,12 +95,13 @@ group: "{{ pg_group }}" mode: "0644" -- name: Generate pt_BR locale (Debian) - command: locale-gen pt_BR - when: ansible_os_family == 'Debian' - -- name: Generate es_ES locale (Debian) - command: locale-gen es_ES +- name: Generate locales (Debian) + locale_gen: + name: '{{ item }}' + state: present + with_items: + - pt_BR + - es_ES when: ansible_os_family == 'Debian' - name: install i18ndata
##### SUMMARY With Debian, PostgreSQL integration test suite doesn't correctly setup test environment: required locales aren't generated. `/usr/sbin/locale-gen` behaves differently on Debian and Ubuntu. This pull-request propose to use the `locale-gen` ansible module instead of the `locale-gen` command. ##### ISSUE TYPE <!--- Pick one below and delete the rest: --> - Bugfix Pull Request ##### COMPONENT NAME PostgreSQL integration test suite. ##### ANSIBLE VERSION ``` ansible 2.4.0 (devel c2370f14dd) ``` ##### ADDITIONAL INFORMATION Tested using: ``` ANSIBLE_REMOTE_USER=root ANSIBLE_ROLES_PATH=test/integration/targets ansible-playbook -i 'testhost,' -e ansible_host=$HOST_IP --tags=test_postgresql,test_postgresql_db,test_postgresql_privs,test_postgresql_user,needs_privileged test/integration/destructive.yml ```
https://api.github.com/repos/ansible/ansible/pulls/23613
2017-04-14T15:48:01Z
2017-04-28T01:10:21Z
2017-04-28T01:10:21Z
2019-04-26T21:09:03Z
229
ansible/ansible
49,224
[extractor/mx3] Add extractor
diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py index 62103f13c14..c0f6b32b697 100644 --- a/yt_dlp/extractor/_extractors.py +++ b/yt_dlp/extractor/_extractors.py @@ -1124,6 +1124,11 @@ MusicdexArtistIE, MusicdexPlaylistIE, ) +from .mx3 import ( + Mx3IE, + Mx3NeoIE, + Mx3VolksmusikIE, +) from .mxplayer import ( MxplayerIE, MxplayerShowIE, diff --git a/yt_dlp/extractor/mx3.py b/yt_dlp/extractor/mx3.py new file mode 100644 index 00000000000..cb9f50e0cfe --- /dev/null +++ b/yt_dlp/extractor/mx3.py @@ -0,0 +1,171 @@ +import re + +from .common import InfoExtractor +from ..networking import HEADRequest +from ..utils import ( + get_element_by_class, + int_or_none, + try_call, + url_or_none, + urlhandle_detect_ext, +) +from ..utils.traversal import traverse_obj + + +class Mx3BaseIE(InfoExtractor): + _VALID_URL_TMPL = r'https?://(?:www\.)?%s/t/(?P<id>\w+)' + _FORMATS = [{ + 'url': 'player_asset', + 'format_id': 'default', + 'quality': 0, + }, { + 'url': 'player_asset?quality=hd', + 'format_id': 'hd', + 'quality': 1, + }, { + 'url': 'download', + 'format_id': 'download', + 'quality': 2, + }, { + 'url': 'player_asset?quality=source', + 'format_id': 'source', + 'quality': 2, + }] + + def _extract_formats(self, track_id): + formats = [] + for fmt in self._FORMATS: + format_url = f'https://{self._DOMAIN}/tracks/{track_id}/{fmt["url"]}' + urlh = self._request_webpage( + HEADRequest(format_url), track_id, fatal=False, expected_status=404, + note=f'Checking for format {fmt["format_id"]}') + if urlh and urlh.status == 200: + formats.append({ + **fmt, + 'url': format_url, + 'ext': urlhandle_detect_ext(urlh), + 'filesize': int_or_none(urlh.headers.get('Content-Length')), + }) + return formats + + def _real_extract(self, url): + track_id = self._match_id(url) + webpage = self._download_webpage(url, track_id) + more_info = get_element_by_class('single-more-info', webpage) + data = self._download_json(f'https://{self._DOMAIN}/t/{track_id}.json', track_id, 
fatal=False) + + def get_info_field(name): + return self._html_search_regex( + rf'<dt[^>]*>\s*{name}\s*</dt>\s*<dd[^>]*>(.*?)</dd>', + more_info, name, default=None, flags=re.DOTALL) + + return { + 'id': track_id, + 'formats': self._extract_formats(track_id), + 'genre': self._html_search_regex( + r'<div\b[^>]+class="single-band-genre"[^>]*>([^<]+)</div>', webpage, 'genre', default=None), + 'release_year': int_or_none(get_info_field('Year of creation')), + 'description': get_info_field('Description'), + 'tags': try_call(lambda: get_info_field('Tag').split(', '), list), + **traverse_obj(data, { + 'title': ('title', {str}), + 'artist': (('performer_name', 'artist'), {str}), + 'album_artist': ('artist', {str}), + 'composer': ('composer_name', {str}), + 'thumbnail': (('picture_url_xlarge', 'picture_url'), {url_or_none}), + }, get_all=False), + } + + +class Mx3IE(Mx3BaseIE): + _DOMAIN = 'mx3.ch' + _VALID_URL = Mx3BaseIE._VALID_URL_TMPL % re.escape(_DOMAIN) + _TESTS = [{ + 'url': 'https://mx3.ch/t/1Cru', + 'md5': '7ba09e9826b4447d4e1ce9d69e0e295f', + 'info_dict': { + 'id': '1Cru', + 'ext': 'wav', + 'artist': 'Godina', + 'album_artist': 'Tortue Tortue', + 'composer': 'Olivier Godinat', + 'genre': 'Rock', + 'thumbnail': 'https://mx3.ch/pictures/mx3/file/0101/4643/square_xlarge/1-s-envoler-1.jpg?1630272813', + 'title': "S'envoler", + 'release_year': 2021, + 'tags': [], + } + }, { + 'url': 'https://mx3.ch/t/1LIY', + 'md5': '48293cb908342547827f963a5a2e9118', + 'info_dict': { + 'id': '1LIY', + 'ext': 'mov', + 'artist': 'Tania Kimfumu', + 'album_artist': 'The Broots', + 'composer': 'Emmanuel Diserens', + 'genre': 'Electro', + 'thumbnail': 'https://mx3.ch/pictures/mx3/file/0110/0003/video_xlarge/frame_0000.png?1686963670', + 'title': 'The Broots-Larytta remix "Begging For Help"', + 'release_year': 2023, + 'tags': ['the broots', 'cassata records', 'larytta'], + 'description': '"Begging for Help" Larytta Remix Official Video\nRealized By Kali Donkilie in 2023', + } + }, { + 'url': 
'https://mx3.ch/t/1C6E', + 'md5': '1afcd578493ddb8e5008e94bb6d97e25', + 'info_dict': { + 'id': '1C6E', + 'ext': 'wav', + 'artist': 'Alien Bubblegum', + 'album_artist': 'Alien Bubblegum', + 'composer': 'Alien Bubblegum', + 'genre': 'Punk', + 'thumbnail': 'https://mx3.ch/pictures/mx3/file/0101/1551/square_xlarge/pandora-s-box-cover-with-title.png?1627054733', + 'title': 'Wide Awake', + 'release_year': 2021, + 'tags': ['alien bubblegum', 'bubblegum', 'alien', 'pop punk', 'poppunk'], + } + }] + + +class Mx3NeoIE(Mx3BaseIE): + _DOMAIN = 'neo.mx3.ch' + _VALID_URL = Mx3BaseIE._VALID_URL_TMPL % re.escape(_DOMAIN) + _TESTS = [{ + 'url': 'https://neo.mx3.ch/t/1hpd', + 'md5': '6d9986bbae5cac3296ec8813bf965eb2', + 'info_dict': { + 'id': '1hpd', + 'ext': 'wav', + 'artist': 'Baptiste Lopez', + 'album_artist': 'Kammerorchester Basel', + 'composer': 'Jannik Giger', + 'genre': 'Composition, Orchestra', + 'title': 'Troisième œil. Für Kammerorchester (2023)', + 'thumbnail': 'https://neo.mx3.ch/pictures/neo/file/0000/0241/square_xlarge/kammerorchester-basel-group-photo-2_c_-lukasz-rajchert.jpg?1560341252', + 'release_year': 2023, + 'tags': [], + } + }] + + +class Mx3VolksmusikIE(Mx3BaseIE): + _DOMAIN = 'volksmusik.mx3.ch' + _VALID_URL = Mx3BaseIE._VALID_URL_TMPL % re.escape(_DOMAIN) + _TESTS = [{ + 'url': 'https://volksmusik.mx3.ch/t/Zx', + 'md5': 'dd967a7b0c1ef898f3e072cf9c2eae3c', + 'info_dict': { + 'id': 'Zx', + 'ext': 'mp3', + 'artist': 'Ländlerkapelle GrischArt', + 'album_artist': 'Ländlerkapelle GrischArt', + 'composer': 'Urs Glauser', + 'genre': 'Instrumental, Graubünden', + 'title': 'Chämilouf', + 'thumbnail': 'https://volksmusik.mx3.ch/pictures/vxm/file/0000/3815/square_xlarge/grischart1.jpg?1450530120', + 'release_year': 2012, + 'tags': [], + } + }]
**IMPORTANT**: PRs without the template will be CLOSED ### Description of your *pull request* and other information <!-- Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible --> Add a simple, basic extractor for mx3.ch. (mx3.ch is a site that hosts music uploaded by bands from or in Switzerland. As a first approximation I'd say it's government funded.) <details open><summary>Template</summary> <!-- OPEN is intentional --> <!-- # PLEASE FOLLOW THE GUIDE BELOW - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x]) - Use *Preview* tab to see how your *pull request* will actually look like --> ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). 
Check all of the following options that apply: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [ ] Fix or improvement to an extractor (Make sure to add/update tests) - [x] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy)) - [ ] Core bug fix/improvement - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes)) </details>
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/8736
2023-12-09T17:30:22Z
2024-01-21T02:45:38Z
2024-01-21T02:45:38Z
2024-01-21T02:45:38Z
2,219
yt-dlp/yt-dlp
7,702
A third-party environment named RiverSwim
diff --git a/docs/environments.md b/docs/environments.md index 7451b704ee1..30bf8c905b2 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -336,6 +336,12 @@ An environment for automated rule-based deductive program verification in the Ke Learn more here: https://github.com/Flunzmas/gym-autokey +### gym-riverswim: A hard-exploration environment + +A simple environment for benchmarking reinforcement learning exploration techniques in a simplified setting. + +Learn more here: https://github.com/erfanMhi/gym-riverswim + ### gym-ccc: Continuous classic control environments Environments that extend gym's classic control and add more problems.
RiverSwim which is a simple hard exploration environment has been added to the list of third-party environments.
https://api.github.com/repos/openai/gym/pulls/2086
2020-11-04T12:38:45Z
2021-07-26T19:51:08Z
2021-07-26T19:51:08Z
2021-07-26T19:51:08Z
168
openai/gym
5,883
Properly fix dagrun update state endpoint
diff --git a/airflow/api/common/experimental/mark_tasks.py b/airflow/api/common/experimental/mark_tasks.py index 945a9cc4a3596..28e733dd96a89 100644 --- a/airflow/api/common/experimental/mark_tasks.py +++ b/airflow/api/common/experimental/mark_tasks.py @@ -131,7 +131,6 @@ def set_state( if sub_dag_run_ids: qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates) tis_altered += qry_sub_dag.all() - return tis_altered diff --git a/airflow/api_connexion/endpoints/dag_run_endpoint.py b/airflow/api_connexion/endpoints/dag_run_endpoint.py index 93278049ba342..edd4f787fe9bf 100644 --- a/airflow/api_connexion/endpoints/dag_run_endpoint.py +++ b/airflow/api_connexion/endpoints/dag_run_endpoint.py @@ -21,6 +21,10 @@ from sqlalchemy import or_ from airflow._vendor.connexion import NoContent +from airflow.api.common.experimental.mark_tasks import ( + set_dag_run_state_to_failed, + set_dag_run_state_to_success, +) from airflow.api_connexion import security from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound from airflow.api_connexion.parameters import apply_sorting, check_limit, format_datetime, format_parameters @@ -34,7 +38,7 @@ from airflow.models import DagModel, DagRun from airflow.security import permissions from airflow.utils.session import provide_session -from airflow.utils.state import DagRunState, State +from airflow.utils.state import State from airflow.utils.types import DagRunType @@ -302,6 +306,10 @@ def update_dag_run_state(dag_id: str, dag_run_id: str, session) -> dict: raise BadRequest(detail=str(err)) state = post_body['state'] - dag_run.set_state(state=DagRunState(state)) - session.merge(dag_run) + dag = current_app.dag_bag.get_dag(dag_id) + if state == State.SUCCESS: + set_dag_run_state_to_success(dag, dag_run.execution_date, commit=True) + else: + set_dag_run_state_to_failed(dag, dag_run.execution_date, commit=True) + dag_run = session.query(DagRun).get(dag_run.id) return dagrun_schema.dump(dag_run) 
diff --git a/tests/api_connexion/endpoints/test_dag_run_endpoint.py b/tests/api_connexion/endpoints/test_dag_run_endpoint.py index 69c06b79dc80d..54410b9d5a217 100644 --- a/tests/api_connexion/endpoints/test_dag_run_endpoint.py +++ b/tests/api_connexion/endpoints/test_dag_run_endpoint.py @@ -27,10 +27,11 @@ from airflow.security import permissions from airflow.utils import timezone from airflow.utils.session import create_session, provide_session +from airflow.utils.state import State from airflow.utils.types import DagRunType from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_roles, delete_user from tests.test_utils.config import conf_vars -from tests.test_utils.db import clear_db_dags, clear_db_runs +from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags @pytest.fixture(scope="module") @@ -103,11 +104,13 @@ def setup_attrs(self, configured_app) -> None: self.app = configured_app self.client = self.app.test_client() # type:ignore clear_db_runs() + clear_db_serialized_dags() clear_db_dags() def teardown_method(self) -> None: clear_db_runs() clear_db_dags() + clear_db_serialized_dags() def _create_dag(self, dag_id): dag_instance = DagModel(dag_id=dag_id) @@ -1175,15 +1178,20 @@ def test_should_raises_403_unauthorized(self, username): assert response.status_code == 403 -class TestPostSetDagRunState(TestDagRunEndpoint): +class TestPatchDagRunState(TestDagRunEndpoint): @pytest.mark.parametrize("state", ["failed", "success"]) - @freeze_time(TestDagRunEndpoint.default_time) - def test_should_respond_200(self, state, dag_maker): + def test_should_respond_200(self, state, dag_maker, session): dag_id = "TEST_DAG_ID" dag_run_id = 'TEST_DAG_RUN_ID' - with dag_maker(dag_id): - DummyOperator(task_id='task_id') - dag_maker.create_dagrun(run_id=dag_run_id) + with dag_maker(dag_id) as dag: + task = DummyOperator(task_id='task_id', dag=dag) + self.app.dag_bag.bag_dag(dag, root_dag=dag) + dr = 
dag_maker.create_dagrun(run_id=dag_run_id) + ti = dr.get_task_instance(task_id='task_id') + ti.task = task + ti.state = State.RUNNING + session.merge(ti) + session.commit() request_json = {"state": state} @@ -1193,16 +1201,19 @@ def test_should_respond_200(self, state, dag_maker): environ_overrides={"REMOTE_USER": "test"}, ) + ti.refresh_from_db() + assert ti.state == state + dr = session.query(DagRun).filter(DagRun.run_id == dr.run_id).first() assert response.status_code == 200 assert response.json == { 'conf': {}, 'dag_id': dag_id, 'dag_run_id': dag_run_id, - 'end_date': self.default_time, - 'execution_date': dag_maker.start_date.isoformat(), + 'end_date': dr.end_date.isoformat(), + 'execution_date': dr.execution_date.isoformat(), 'external_trigger': False, - 'logical_date': dag_maker.start_date.isoformat(), - 'start_date': dag_maker.start_date.isoformat(), + 'logical_date': dr.execution_date.isoformat(), + 'start_date': dr.start_date.isoformat(), 'state': state, }
The dagrun update state endpoint was recently added but is not working as expected. This PR fixes it to work exactly like the UI mark dagrun state API Closes: https://github.com/apache/airflow/issues/18363 --- **^ Add meaningful description above** Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information. In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed. In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x). In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
https://api.github.com/repos/apache/airflow/pulls/18370
2021-09-20T09:45:49Z
2021-09-22T20:16:35Z
2021-09-22T20:16:35Z
2021-09-22T20:16:39Z
1,406
apache/airflow
14,569
bitFlyer: Add getboardstate endpoint
diff --git a/js/bitflyer.js b/js/bitflyer.js index af40744acc8b..4310528ce504 100644 --- a/js/bitflyer.js +++ b/js/bitflyer.js @@ -40,6 +40,7 @@ module.exports = class bitflyer extends Exchange { 'getticker', 'getexecutions', 'gethealth', + 'getboardstate', 'getchats', ], },
`getboardstate` allows to determine the current status of the board. It is described only in Japanese documentation. https://lightning.bitflyer.jp/docs?lang=ja#%E6%9D%BF%E3%81%AE%E7%8A%B6%E6%85%8B It can be used in the playground. https://lightning.bitflyer.jp/docs/playground?lang=en
https://api.github.com/repos/ccxt/ccxt/pulls/2443
2018-04-03T08:33:08Z
2018-04-03T08:41:47Z
2018-04-03T08:41:47Z
2018-04-03T08:41:47Z
103
ccxt/ccxt
13,847
`ChatPromptTemplate` is not an `ABC`, it's instantiated directly.
diff --git a/libs/langchain/langchain/prompts/chat.py b/libs/langchain/langchain/prompts/chat.py index b6cd9e0c036109..3c34b9fd974595 100644 --- a/libs/langchain/langchain/prompts/chat.py +++ b/libs/langchain/langchain/prompts/chat.py @@ -337,7 +337,7 @@ def format_messages(self, **kwargs: Any) -> List[BaseMessage]: ] -class ChatPromptTemplate(BaseChatPromptTemplate, ABC): +class ChatPromptTemplate(BaseChatPromptTemplate): """A prompt template for chat models. Use to create flexible templated prompts for chat models.
Its own `__add__` method constructs `ChatPromptTemplate` objects directly, it cannot be abstract. Found while debugging something else with @nfcampos.
https://api.github.com/repos/langchain-ai/langchain/pulls/9468
2023-08-18T18:32:33Z
2023-08-18T18:37:10Z
2023-08-18T18:37:10Z
2023-08-18T18:37:11Z
143
langchain-ai/langchain
43,656
Allow preparing of Requests from Session settings without sending.
diff --git a/requests/models.py b/requests/models.py index 2439153371..f2d8e5fd3a 100644 --- a/requests/models.py +++ b/requests/models.py @@ -217,19 +217,17 @@ def __repr__(self): def prepare(self): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" p = PreparedRequest() - - p.prepare_method(self.method) - p.prepare_url(self.url, self.params) - p.prepare_headers(self.headers) - p.prepare_cookies(self.cookies) - p.prepare_body(self.data, self.files) - p.prepare_auth(self.auth, self.url) - # Note that prepare_auth must be last to enable authentication schemes - # such as OAuth to work on a fully prepared request. - - # This MUST go after prepare_auth. Authenticators could add a hook - p.prepare_hooks(self.hooks) - + p.prepare( + method=self.method, + url=self.url, + headers=self.headers, + files=self.files, + data=self.data, + params=self.params, + auth=self.auth, + cookies=self.cookies, + hooks=self.hooks, + ) return p @@ -264,6 +262,22 @@ def __init__(self): #: dictionary of callback hooks, for internal usage. self.hooks = default_hooks() + def prepare(self, method=None, url=None, headers=None, files=None, + data=None, params=None, auth=None, cookies=None, hooks=None): + """Prepares the the entire request with the given parameters.""" + + self.prepare_method(method) + self.prepare_url(url, params) + self.prepare_headers(headers) + self.prepare_cookies(cookies) + self.prepare_body(data, files) + self.prepare_auth(auth, url) + # Note that prepare_auth must be last to enable authentication schemes + # such as OAuth to work on a fully prepared request. + + # This MUST go after prepare_auth. 
Authenticators could add a hook + self.prepare_hooks(hooks) + def __repr__(self): return '<PreparedRequest [%s]>' % (self.method) diff --git a/requests/sessions.py b/requests/sessions.py index 664465d84b..b87bd86416 100644 --- a/requests/sessions.py +++ b/requests/sessions.py @@ -228,6 +228,46 @@ def __enter__(self): def __exit__(self, *args): self.close() + def prepare_request(self, request): + """Constructs a :class:`PreparedRequest <PreparedRequest>` for + transmission and returns it. The :class:`PreparedRequest` has settings + merged from the :class:`Request <Request>` instance and those of the + :class:`Session`. + + :param request: :class:`Request` instance to prepare with this + session's settings. + """ + cookies = request.cookies or {} + + # Bootstrap CookieJar. + if not isinstance(cookies, cookielib.CookieJar): + cookies = cookiejar_from_dict(cookies) + + # Merge with session cookies + merged_cookies = RequestsCookieJar() + merged_cookies.update(self.cookies) + merged_cookies.update(cookies) + + + # Set environment's basic authentication if not explicitly set. + auth = request.auth + if self.trust_env and not auth and not self.auth: + auth = get_netrc_auth(request.url) + + p = PreparedRequest() + p.prepare( + method=request.method.upper(), + url=request.url, + files=request.files, + data=request.data, + headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), + params=merge_setting(request.params, self.params), + auth=merge_setting(auth, self.auth), + cookies=merged_cookies, + hooks=merge_setting(request.hooks, self.hooks), + ) + return p + def request(self, method, url, params=None, data=None, @@ -271,20 +311,22 @@ def request(self, method, url, :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. """ + # Create the Request. 
+ req = Request( + method = method.upper(), + url = url, + headers = headers, + files = files, + data = data or {}, + params = params or {}, + auth = auth, + cookies = cookies, + hooks = hooks, + ) + prep = self.prepare_request(req) - cookies = cookies or {} proxies = proxies or {} - # Bootstrap CookieJar. - if not isinstance(cookies, cookielib.CookieJar): - cookies = cookiejar_from_dict(cookies) - - # Merge with session cookies - merged_cookies = RequestsCookieJar() - merged_cookies.update(self.cookies) - merged_cookies.update(cookies) - cookies = merged_cookies - # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. @@ -292,10 +334,6 @@ def request(self, method, url, for (k, v) in env_proxies.items(): proxies.setdefault(k, v) - # Set environment's basic authentication if not explicitly set. - if not auth and not self.auth: - auth = get_netrc_auth(url) - # Look for configuration. if not verify and verify is not False: verify = os.environ.get('REQUESTS_CA_BUNDLE') @@ -305,30 +343,11 @@ def request(self, method, url, verify = os.environ.get('CURL_CA_BUNDLE') # Merge all the kwargs. - params = merge_setting(params, self.params) - headers = merge_setting(headers, self.headers, dict_class=CaseInsensitiveDict) - auth = merge_setting(auth, self.auth) proxies = merge_setting(proxies, self.proxies) - hooks = merge_setting(hooks, self.hooks) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) - # Create the Request. - req = Request() - req.method = method.upper() - req.url = url - req.headers = headers - req.files = files - req.data = data - req.params = params - req.auth = auth - req.cookies = cookies - req.hooks = hooks - - # Prepare the Request. - prep = req.prepare() - # Send the request. send_kwargs = { 'stream': stream, @@ -422,7 +441,7 @@ def send(self, request, **kwargs): # It's possible that users might accidentally send a Request object. 
# Guard against that specific failure case. - if getattr(request, 'prepare', None): + if not isinstance(request, PreparedRequest): raise ValueError('You can only send PreparedRequests.') # Set up variables needed for resolve_redirects and dispatching of diff --git a/test_requests.py b/test_requests.py index 27d5e676e0..2fe8589829 100755 --- a/test_requests.py +++ b/test_requests.py @@ -458,6 +458,23 @@ def hook(resp, **kwargs): self.assertTrue(hasattr(resp, 'hook_working')) + def test_prepared_from_session(self): + class DummyAuth(requests.auth.AuthBase): + def __call__(self, r): + r.headers['Dummy-Auth-Test'] = 'dummy-auth-test-ok' + return r + + req = requests.Request('GET', httpbin('headers')) + self.assertEqual(req.auth, None) + + s = requests.Session() + s.auth = DummyAuth() + + prep = s.prepare_request(req) + resp = s.send(prep) + + self.assertTrue(resp.json()['headers']['Dummy-Auth-Test'], 'dummy-auth-test-ok') + def test_links(self): r = requests.Response() r.headers = {
Attempt to address #1445. All tests pass. Note that I think it could likely make sense to change `Session.update_request` to be an internal method, since `Session.prepare_request` is really the only public use I can think of having use in practice.
https://api.github.com/repos/psf/requests/pulls/1507
2013-07-31T04:51:20Z
2013-08-01T01:21:28Z
2013-08-01T01:21:28Z
2021-09-09T00:01:13Z
1,837
psf/requests
32,269
feat: generate_repo return project repo
diff --git a/metagpt/startup.py b/metagpt/startup.py index 000b3c5d4..4a077cab7 100644 --- a/metagpt/startup.py +++ b/metagpt/startup.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- + import asyncio import shutil from pathlib import Path @@ -9,6 +10,7 @@ from metagpt.config2 import config from metagpt.const import CONFIG_ROOT, METAGPT_ROOT from metagpt.context import Context +from metagpt.utils.project_repo import ProjectRepo app = typer.Typer(add_completion=False, pretty_exceptions_show_locals=False) @@ -26,7 +28,7 @@ def generate_repo( reqa_file, max_auto_summarize_code, recover_path, -): +) -> ProjectRepo: """Run the startup logic. Can be called from CLI or other Python scripts.""" from metagpt.roles import ( Architect, @@ -67,6 +69,8 @@ def generate_repo( company.run_project(idea) asyncio.run(company.run(n_round=n_round)) + return ctx.repo + @app.command("", help="Start a new project.") def startup(
**Features** - `generate_repo` returns `ProjectRepo` type.
https://api.github.com/repos/geekan/MetaGPT/pulls/803
2024-01-29T02:21:26Z
2024-01-30T02:05:27Z
2024-01-30T02:05:27Z
2024-03-19T03:43:47Z
283
geekan/MetaGPT
16,730
sillychange:)
diff --git a/PDF/images.py b/PDF/images.py index 5566344df0..de31ab8c31 100644 --- a/PDF/images.py +++ b/PDF/images.py @@ -8,7 +8,7 @@ # Example to Append all the images inside a folder to pdf pdf = FPDF() -# Size of a A4 Page in mm Where P is for Potrail and L is for Landscape +# Size of a A4 Page in mm Where P is for Potrait and L is for Landscape A4_SIZE = {'P': {'w': 210, 'h': 297}, 'L': {'w': 297, 'h': 210}} # pdf may produce empty page so we need to set auto page break as false pdf.set_auto_page_break(0)
https://api.github.com/repos/geekcomputers/Python/pulls/849
2020-09-30T21:00:52Z
2020-10-02T17:59:47Z
2020-10-02T17:59:47Z
2020-10-02T17:59:47Z
178
geekcomputers/Python
31,159
don't iDisplay if logging
diff --git a/letsencrypt-apache/letsencrypt_apache/configurator.py b/letsencrypt-apache/letsencrypt_apache/configurator.py index ff12055ac32..2a9fb025043 100644 --- a/letsencrypt-apache/letsencrypt_apache/configurator.py +++ b/letsencrypt-apache/letsencrypt_apache/configurator.py @@ -1302,6 +1302,7 @@ def restart(self): """ self.config_test() + logger.debug(self.reverter.view_config_changes(for_logging=True)) self._reload() def _reload(self): diff --git a/letsencrypt-apache/letsencrypt_apache/tls_sni_01.py b/letsencrypt-apache/letsencrypt_apache/tls_sni_01.py index 2049eb57485..ca7985f3563 100644 --- a/letsencrypt-apache/letsencrypt_apache/tls_sni_01.py +++ b/letsencrypt-apache/letsencrypt_apache/tls_sni_01.py @@ -1,12 +1,14 @@ """A class that performs TLS-SNI-01 challenges for Apache""" import os +import logging from letsencrypt.plugins import common from letsencrypt_apache import obj from letsencrypt_apache import parser +logger = logging.getLogger(__name__) class ApacheTlsSni01(common.TLSSNI01): """Class that performs TLS-SNI-01 challenges within the Apache configurator @@ -104,6 +106,7 @@ def _mod_config(self): self.configurator.reverter.register_file_creation( True, self.challenge_conf) + logger.debug("writing a config file with text: %s", config_text) with open(self.challenge_conf, "w") as new_conf: new_conf.write(config_text) diff --git a/letsencrypt/reverter.py b/letsencrypt/reverter.py index d5114ae7100..863074374ce 100644 --- a/letsencrypt/reverter.py +++ b/letsencrypt/reverter.py @@ -94,7 +94,7 @@ def rollback_checkpoints(self, rollback=1): "Unable to load checkpoint during rollback") rollback -= 1 - def view_config_changes(self): + def view_config_changes(self, for_logging=False): """Displays all saved checkpoints. 
All checkpoints are printed by @@ -144,6 +144,8 @@ def view_config_changes(self): output.append(os.linesep) + if for_logging: + return os.linesep.join(output) zope.component.getUtility(interfaces.IDisplay).notification( os.linesep.join(output), display_util.HEIGHT)
add a flag variable to view_config_changes which returns the changes as a string if we're logging it. I'm unsure how to test since the comments left in https://github.com/letsencrypt/letsencrypt/pull/2009 don't specify where this issue was arising
https://api.github.com/repos/certbot/certbot/pulls/2116
2016-01-08T10:46:53Z
2016-01-08T19:02:15Z
2016-01-08T19:02:15Z
2016-05-06T19:22:01Z
589
certbot/certbot
2,627
wazirx - fetchOHLCV
diff --git a/js/wazirx.js b/js/wazirx.js index aa771f68d915..8e2f96db9f35 100644 --- a/js/wazirx.js +++ b/js/wazirx.js @@ -42,7 +42,7 @@ module.exports = class wazirx extends Exchange { 'fetchMarkets': true, 'fetchMarkOHLCV': false, 'fetchMyTrades': false, - 'fetchOHLCV': false, + 'fetchOHLCV': true, 'fetchOpenInterestHistory': false, 'fetchOpenOrders': true, 'fetchOrder': true, @@ -85,6 +85,7 @@ module.exports = class wazirx extends Exchange { 'ticker/24hr': 1, 'time': 1, 'trades': 1, + 'klines': 1, }, }, 'private': { @@ -126,6 +127,18 @@ module.exports = class wazirx extends Exchange { '94001': InvalidOrder, // {"code":94001,"message":"Stop price not found."} }, }, + 'timeframes': { + '1m': '1m', + '5m': '5m', + '30m': '30m', + '1h': '1h', + '2h': '2h', + '4h': '4h', + '6h': '6h', + '12h': '12h', + '1d': '1d', + '1w': '1w', + }, 'options': { // 'fetchTradesMethod': 'privateGetHistoricalTrades', 'recvWindow': 10000, @@ -247,6 +260,60 @@ module.exports = class wazirx extends Exchange { return result; } + async fetchOHLCV (symbol, timeframe = '1m', since = undefined, limit = undefined, params = {}) { + /** + * @method + * @name wazirx#fetchOHLCV + * @description fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market + * @param {string} symbol unified symbol of the market to fetch OHLCV data for + * @param {string} timeframe the length of time each candle represents. 
Available values [1m,5m,15m,30m,1h,2h,4h,6h,12h,1d,1w] + * @param {int|undefined} since timestamp in ms of the earliest candle to fetch + * @param {int|undefined} limit the maximum amount of candles to fetch + * @param {object} params extra parameters specific to the wazirx api endpoint + * @param {int|undefined} params.until timestamp in s of the latest candle to fetch + * @returns {[[int]]} A list of candles ordered as timestamp, open, high, low, close, volume + */ + await this.loadMarkets (); + const market = this.market (symbol); + const request = { + 'symbol': market['id'], + 'interval': this.timeframes[timeframe], + }; + if (limit !== undefined) { + request['limit'] = limit; + } + const until = this.safeInteger (params, 'until'); + params = this.omit (params, [ 'until' ]); + if (since !== undefined) { + request['startTime'] = parseInt (since / 1000); + } + if (until !== undefined) { + request['endTime'] = until; + } + const response = await this.publicGetKlines (this.extend (request, params)); + // + // [ + // [1669014360,1402001,1402001,1402001,1402001,0], + // ... + // ] + // + return this.parseOHLCVs (response, market, timeframe, since, limit); + } + + parseOHLCV (ohlcv, market = undefined) { + // + // [1669014300,1402001,1402001,1402001,1402001,0], + // + return [ + this.safeTimestamp (ohlcv, 0), + this.safeNumber (ohlcv, 1), + this.safeNumber (ohlcv, 2), + this.safeNumber (ohlcv, 3), + this.safeNumber (ohlcv, 4), + this.safeNumber (ohlcv, 5), + ]; + } + async fetchOrderBook (symbol, limit = undefined, params = {}) { /** * @method
https://api.github.com/repos/ccxt/ccxt/pulls/15778
2022-11-21T16:33:01Z
2022-11-21T20:51:44Z
2022-11-21T20:51:44Z
2022-11-21T20:51:56Z
1,093
ccxt/ccxt
13,462
[NFC] polish colossalai/builder/builder.py code style
diff --git a/colossalai/builder/builder.py b/colossalai/builder/builder.py index f4ccebfc7a2f..812ab78d7ab6 100644 --- a/colossalai/builder/builder.py +++ b/colossalai/builder/builder.py @@ -7,6 +7,7 @@ from colossalai.registry import * + def build_from_config(module, config: dict): """Returns an object of :class:`module` constructed from `config`. @@ -64,6 +65,7 @@ def build_from_registry(config, registry: Registry): return obj + def build_layer(config): """Returns a layer object of :class:`nn.Module` constructed from `config`. @@ -243,7 +245,6 @@ def build_lr_scheduler(config, optimizer): config_['optimizer'] = optimizer return build_from_registry(config_, LR_SCHEDULERS) - def build_schedule(config): """Returns a schedule of :class:`colossalai.engine.schedule.BaseSchedule`.
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/662
2022-04-03T03:59:03Z
2022-04-03T03:59:57Z
2022-04-03T03:59:57Z
2022-04-03T03:59:57Z
230
hpcaitech/ColossalAI
11,185
Rename ignore_hash to allow_output_mutation
diff --git a/lib/streamlit/caching.py b/lib/streamlit/caching.py index 2c75c996ff11..23bf7147408f 100644 --- a/lib/streamlit/caching.py +++ b/lib/streamlit/caching.py @@ -235,7 +235,7 @@ def _build_caching_func_error_message(persisted, func, caller_frame): {copy_code} ``` - 2. Add `ignore_hash=True` to the `@streamlit.cache` decorator for + 2. Add `allow_output_mutation=True` to the `@streamlit.cache` decorator for `{name}`. This is an escape hatch for advanced users who really know what they're doing. @@ -282,7 +282,7 @@ def _build_caching_block_error_message(persisted, code, line_number_range): 1. *Preferred:* fix the code by removing the mutation. The simplest way to do this is to copy the cached value to a new variable, which you are allowed to mutate. - 2. Add `ignore_hash=True` to the constructor of `streamlit.Cache`. This + 2. Add `allow_output_mutation=True` to the constructor of `streamlit.Cache`. This is an escape hatch for advanced users who really know what they're doing. @@ -317,11 +317,11 @@ def _build_args_mutated_message(func): return message.format(name=func.__name__) -def _read_from_mem_cache(key, ignore_hash): +def _read_from_mem_cache(key, allow_output_mutation): if key in _mem_cache: entry = _mem_cache[key] - if ignore_hash or get_hash(entry.value) == entry.hash: + if allow_output_mutation or get_hash(entry.value) == entry.hash: LOGGER.debug("Memory cache HIT: %s", type(entry.value)) return entry.value, entry.args_mutated else: @@ -332,10 +332,10 @@ def _read_from_mem_cache(key, ignore_hash): raise CacheKeyNotFoundError("Key not found in mem cache") -def _write_to_mem_cache(key, value, ignore_hash, args_mutated): +def _write_to_mem_cache(key, value, allow_output_mutation, args_mutated): _mem_cache[key] = CacheEntry( value=value, - hash=None if ignore_hash else get_hash(value), + hash=None if allow_output_mutation else get_hash(value), args_mutated=args_mutated, ) @@ -374,7 +374,7 @@ def _write_to_disk_cache(key, value, args_mutated): raise 
CacheError("Unable to write to cache: %s" % e) -def _read_from_cache(key, persisted, ignore_hash, func_or_code, message_opts): +def _read_from_cache(key, persisted, allow_output_mutation, func_or_code, message_opts): """ Read the value from the cache. Our goal is to read from memory if possible. If the data was mutated (hash changed), we show a @@ -382,7 +382,7 @@ def _read_from_cache(key, persisted, ignore_hash, func_or_code, message_opts): or rerun the code. """ try: - return _read_from_mem_cache(key, ignore_hash) + return _read_from_mem_cache(key, allow_output_mutation) except (CacheKeyNotFoundError, CachedObjectWasMutatedError) as e: if isinstance(e, CachedObjectWasMutatedError): if inspect.isroutine(func_or_code): @@ -397,13 +397,13 @@ def _read_from_cache(key, persisted, ignore_hash, func_or_code, message_opts): if persisted: value, args_mutated = _read_from_disk_cache(key) - _write_to_mem_cache(key, value, ignore_hash, args_mutated) + _write_to_mem_cache(key, value, allow_output_mutation, args_mutated) return value, args_mutated raise e -def _write_to_cache(key, value, persist, ignore_hash, args_mutated): - _write_to_mem_cache(key, value, ignore_hash, args_mutated) +def _write_to_cache(key, value, persist, allow_output_mutation, args_mutated): + _write_to_mem_cache(key, value, allow_output_mutation, args_mutated) if persist: _write_to_disk_cache(key, value, args_mutated) @@ -411,9 +411,10 @@ def _write_to_cache(key, value, persist, ignore_hash, args_mutated): def cache( func=None, persist=False, - ignore_hash=False, + allow_output_mutation=False, show_spinner=True, suppress_st_warning=False, + **kwargs ): """Function decorator to memoize function executions. @@ -427,9 +428,11 @@ def cache( persist : boolean Whether to persist the cache on disk. - ignore_hash : boolean - Disable hashing return values. These hash values are otherwise - used to validate that return values are not mutated. 
+ allow_output_mutation : boolean + Streamlit normally shows a warning when return values are not mutated, as that + can have unintended consequences. This is done by hashing the return value internally. + + If you know what you're doing and would like to override this warning, set this to True. show_spinner : boolean Enable the spinner. Default is True to show a spinner when there is @@ -464,21 +467,28 @@ def cache( ... # Fetch data from URL here, and then clean it up. ... return data - To disable hashing return values, set the `ignore_hash` parameter to `True`: + To disable hashing return values, set the `allow_output_mutation` parameter to `True`: - >>> @st.cache(ignore_hash=True) + >>> @st.cache(allow_output_mutation=True) ... def fetch_and_clean_data(url): ... # Fetch data from URL here, and then clean it up. ... return data """ + # Help users migrate to the new kwarg + # Remove this warning after 2020-03-16. + if "ignore_hash" in kwargs: + raise Exception( + "The `ignore_hash` argument has been renamed to `allow_output_mutation`." + ) + # Support passing the params via function decorator, e.g. 
- # @st.cache(persist=True, ignore_hash=True) + # @st.cache(persist=True, allow_output_mutation=True) if func is None: return lambda f: cache( func=f, persist=persist, - ignore_hash=ignore_hash, + allow_output_mutation=allow_output_mutation, show_spinner=show_spinner, suppress_st_warning=suppress_st_warning, ) @@ -519,7 +529,7 @@ def get_or_set_cache(): caller_frame = inspect.currentframe().f_back try: return_value, args_mutated = _read_from_cache( - key, persist, ignore_hash, func, caller_frame + key, persist, allow_output_mutation, func, caller_frame ) except (CacheKeyNotFoundError, CachedObjectWasMutatedError): with _calling_cached_function(): @@ -533,7 +543,9 @@ def get_or_set_cache(): args_hasher_after.update([args, kwargs]) args_mutated = args_digest_before != args_hasher_after.digest() - _write_to_cache(key, return_value, persist, ignore_hash, args_mutated) + _write_to_cache( + key, return_value, persist, allow_output_mutation, args_mutated + ) if args_mutated: # If we're inside a _nested_ cached function, our @@ -587,9 +599,9 @@ class Cache(dict): """ - def __init__(self, persist=False, ignore_hash=False): + def __init__(self, persist=False, allow_output_mutation=False): self._persist = persist - self._ignore_hash = ignore_hash + self._allow_output_mutation = allow_output_mutation dict.__init__(self) @@ -643,20 +655,20 @@ def has_changes(self): value, _ = _read_from_cache( key, self._persist, - self._ignore_hash, + self._allow_output_mutation, code, [caller_lineno + 1, caller_lineno + len(lines)], ) self.update(value) except (CacheKeyNotFoundError, CachedObjectWasMutatedError): - if self._ignore_hash and not self._persist: + if self._allow_output_mutation and not self._persist: # If we don't hash the results, we don't need to use exec and just return True. # This way line numbers will be correct. 
_write_to_cache(key, self, False, True, None) return True exec(code, caller_frame.f_globals, caller_frame.f_locals) - _write_to_cache(key, self, self._persist, self._ignore_hash, None) + _write_to_cache(key, self, self._persist, self._allow_output_mutation, None) # Return False so that we have control over the execution. return False diff --git a/lib/streamlit/hashing.py b/lib/streamlit/hashing.py index f3b2185a0f2c..10977fe01d81 100644 --- a/lib/streamlit/hashing.py +++ b/lib/streamlit/hashing.py @@ -175,7 +175,7 @@ def _hashing_error_message(start): following: * **Preferred:** modify your code to avoid using this type of object. - * Or add the argument `ignore_hash=True` to the `st.cache` decorator. + * Or add the argument `allow_output_mutation=True` to the `st.cache` decorator. """ % {"start": start} ).strip("\n") diff --git a/lib/tests/streamlit/caching_test.py b/lib/tests/streamlit/caching_test.py index 86006ac9ce1f..fcb30b70ee6f 100644 --- a/lib/tests/streamlit/caching_test.py +++ b/lib/tests/streamlit/caching_test.py @@ -16,6 +16,7 @@ """st.caching unit tests.""" import threading import unittest +import pytest from mock import patch @@ -39,6 +40,18 @@ def foo(): self.assertEqual(foo(), 42) self.assertEqual(foo(), 42) + def test_deprecated_kwarg(self): + with pytest.raises(Exception) as e: + + @st.cache(ignore_hash=True) + def foo(): + return 42 + + assert ( + "The `ignore_hash` argument has been renamed to `allow_output_mutation`." 
+ in str(e.value) + ) + @patch.object(st, "warning") def test_args(self, warning): called = [False] @@ -207,11 +220,11 @@ def off_test_simple(self): self.assertEqual(c.value, val) - def off_test_ignore_hash(self): + def off_test_allow_output_mutation(self): val = 42 for _ in range(2): - c = st.Cache(ignore_hash=True) + c = st.Cache(allow_output_mutation=True) if c: c.value = val diff --git a/lib/tests/streamlit/help_test.py b/lib/tests/streamlit/help_test.py index 66b78586d28b..b0adb4c7f122 100644 --- a/lib/tests/streamlit/help_test.py +++ b/lib/tests/streamlit/help_test.py @@ -80,12 +80,10 @@ def test_deltagenerator_func(self): self.assertEqual("streamlit", ds.module) if is_python_2: self.assertEqual("<type 'function'>", ds.type) - self.assertEqual("(data, format=u'audio/wav', start_time=0)", - ds.signature) + self.assertEqual("(data, format=u'audio/wav', start_time=0)", ds.signature) else: self.assertEqual("<class 'function'>", ds.type) - self.assertEqual("(data, format='audio/wav', start_time=0)", - ds.signature) + self.assertEqual("(data, format='audio/wav', start_time=0)", ds.signature) self.assertTrue(ds.doc_string.startswith("Display an audio player")) def test_unwrapped_deltagenerator_func(self): @@ -117,7 +115,8 @@ def test_st_cache(self): ds.signature, ( "(func=None, persist=False, " - "ignore_hash=False, show_spinner=True, suppress_st_warning=False)" + "allow_output_mutation=False, show_spinner=True, suppress_st_warning=False, " + "**kwargs)" ), ) self.assertTrue(ds.doc_string.startswith("Function decorator to"))
**Issue:** #412 - Rename `ignore_hash` to `allow_output_mutation` - Help users migrate to the new name by throwing an error if `ignore_hash` is used - Only added to `@st.cache` and not `class Cache` since only the decorator is exposed to the user right now
https://api.github.com/repos/streamlit/streamlit/pulls/422
2019-10-15T19:54:25Z
2019-10-17T16:29:51Z
2019-10-17T16:29:50Z
2019-10-17T16:29:58Z
2,850
streamlit/streamlit
21,956
Fix #13796
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 334efeef317..86b7acb50a8 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -5,7 +5,7 @@ from typing import List import lark -# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" +# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][: in background:0.25] [shoddy:masterful:0.5]" # will be represented with prompt_schedule like this (assuming steps=100): # [25, 'fantasy landscape with a mountain and an oak in foreground shoddy'] # [50, 'fantasy landscape with a lake and an oak in foreground in background shoddy']
Fix comment error that makes understanding scheduling more confusing. ## Description Fixes a confusing comment. ## Screenshots/videos: ## Checklist: - [x] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) - [x] I have performed a self-review of my own code - [x] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style) - [x] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/13797
2023-10-29T22:37:50Z
2023-11-03T17:11:55Z
2023-11-03T17:11:55Z
2023-11-03T17:11:55Z
251
AUTOMATIC1111/stable-diffusion-webui
40,488
Update PyPI links
diff --git a/certbot/docs/packaging.rst b/certbot/docs/packaging.rst index 75349ad14e9..89e22bfc1d9 100644 --- a/certbot/docs/packaging.rst +++ b/certbot/docs/packaging.rst @@ -7,21 +7,21 @@ Releases We release packages and upload them to PyPI (wheels and source tarballs). -- https://pypi.python.org/pypi/acme -- https://pypi.python.org/pypi/certbot -- https://pypi.python.org/pypi/certbot-apache -- https://pypi.python.org/pypi/certbot-nginx -- https://pypi.python.org/pypi/certbot-dns-cloudflare -- https://pypi.python.org/pypi/certbot-dns-digitalocean -- https://pypi.python.org/pypi/certbot-dns-dnsimple -- https://pypi.python.org/pypi/certbot-dns-dnsmadeeasy -- https://pypi.python.org/pypi/certbot-dns-google -- https://pypi.python.org/pypi/certbot-dns-linode -- https://pypi.python.org/pypi/certbot-dns-luadns -- https://pypi.python.org/pypi/certbot-dns-nsone -- https://pypi.python.org/pypi/certbot-dns-ovh -- https://pypi.python.org/pypi/certbot-dns-rfc2136 -- https://pypi.python.org/pypi/certbot-dns-route53 +- https://pypi.org/project/acme/ +- https://pypi.org/project/certbot/ +- https://pypi.org/project/certbot-apache/ +- https://pypi.org/project/certbot-nginx/ +- https://pypi.org/project/certbot-dns-cloudflare/ +- https://pypi.org/project/certbot-dns-digitalocean/ +- https://pypi.org/project/certbot-dns-dnsimple/ +- https://pypi.org/project/certbot-dns-dnsmadeeasy/ +- https://pypi.org/project/certbot-dns-google/ +- https://pypi.org/project/certbot-dns-linode/ +- https://pypi.org/project/certbot-dns-luadns/ +- https://pypi.org/project/certbot-dns-nsone/ +- https://pypi.org/project/certbot-dns-ovh/ +- https://pypi.org/project/certbot-dns-rfc2136/ +- https://pypi.org/project/certbot-dns-route53/ The following scripts are used in the process:
Switch from the legacy pypi.python.org/pypi/ to the canonical pypi.org/project/; the former redirects to the latter.
https://api.github.com/repos/certbot/certbot/pulls/9733
2023-07-14T16:26:52Z
2023-07-15T22:58:00Z
2023-07-15T22:58:00Z
2023-07-15T23:10:15Z
587
certbot/certbot
3,610
Update C3 module
diff --git a/models/common.py b/models/common.py index fcd87cbcb81..c3b51a46f14 100644 --- a/models/common.py +++ b/models/common.py @@ -29,7 +29,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k super(Conv, self).__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = nn.Hardswish() if act else nn.Identity() + self.act = nn.Hardswish() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) def forward(self, x): return self.act(self.bn(self.conv(x))) @@ -70,6 +70,21 @@ def forward(self, x): return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(C3, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) + + def forward(self, x): + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) + + class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13)): diff --git a/models/experimental.py b/models/experimental.py index a2908a15cf3..136e86d7f8e 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -22,25 +22,6 @@ def forward(self, x): return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) -class C3(nn.Module): - # Cross Convolution CSP - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(C3, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = nn.Conv2d(c1, c_, 1, 
1, bias=False) - self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) - self.cv4 = Conv(2 * c_, c2, 1, 1) - self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.LeakyReLU(0.1, inplace=True) - self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) - - class Sum(nn.Module): # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 def __init__(self, n, weight=False): # n: number of inputs diff --git a/models/yolo.py b/models/yolo.py index dacb035ed6f..4ad44afe536 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -11,8 +11,8 @@ sys.path.append('./') # to run '$ python *.py' files in subdirectories logger = logging.getLogger(__name__) -from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, NMS, autoShape -from models.experimental import MixConv2d, CrossConv, C3 +from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, C3, Concat, NMS, autoShape +from models.experimental import MixConv2d, CrossConv from utils.autoanchor import check_anchor_order from utils.general import make_divisible, check_file, set_logging from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
This PR updates the C3 module and moves it from experimental.py to common.py. ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Refactoring of the YOLOv5 architecture by modifying the activation function and restructuring the C3 module. ### 📊 Key Changes - Updated the `Conv` class activation to accept custom activation functions. - Moved the `C3` class from `experimental.py` to `common.py` with modifications. - Removed the old `C3` definition from `experimental.py`. - Adjusted import statements in `yolo.py` to reflect the `C3` class relocation. ### 🎯 Purpose & Impact - **Customizable Activation**: Allows passing custom activation functions to the `Conv` class, offering greater flexibility for model customization. - **C3 Module Enhancement**: The introduced changes to the `C3` class, including a simplification and rearrangement, could lead to performance optimizations and make the model more streamlined. - **Impact on Users**: Users should see the YOLOv5 model as more modular and adaptable to different scenarios. The cleanup also potentially improves maintainability and collaboration on the codebase. 🚀
https://api.github.com/repos/ultralytics/yolov5/pulls/1705
2020-12-16T06:09:45Z
2020-12-16T06:13:08Z
2020-12-16T06:13:08Z
2024-01-19T20:06:59Z
1,223
ultralytics/yolov5
24,873
a leftover for.15 compatibility
diff --git a/scrapy/core/engine.py b/scrapy/core/engine.py index bd1a9f04b2e..4ef1d0fc639 100644 --- a/scrapy/core/engine.py +++ b/scrapy/core/engine.py @@ -263,10 +263,8 @@ def close_spider(self, spider, reason='cancelled'): dfd.addBoth(lambda _: slot.scheduler.close(reason)) dfd.addErrback(log.err, spider=spider) - # XXX: spider_stats argument was added for backwards compatibility with - # stats collection refactoring added in 0.15. it should be removed in 0.17. - dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(signal=signals.spider_closed, \ - spider=spider, reason=reason, spider_stats=self.crawler.stats.get_stats())) + dfd.addBoth(lambda _: self.signals.send_catch_log_deferred( + signal=signals.spider_closed, spider=spider, reason=reason)) dfd.addErrback(log.err, spider=spider) dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
https://api.github.com/repos/scrapy/scrapy/pulls/925
2014-10-22T19:03:41Z
2014-10-22T20:23:19Z
2014-10-22T20:23:19Z
2014-10-22T20:23:19Z
260
scrapy/scrapy
34,577