title stringlengths 2 169 | diff stringlengths 235 19.5k | body stringlengths 0 30.5k | url stringlengths 48 84 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 | updated_at stringlengths 20 20 | diff_len float64 101 3.99k | repo_name stringclasses 83
values | __index_level_0__ int64 15 52.7k |
|---|---|---|---|---|---|---|---|---|---|---|
[3.8] bpo-38698: Prevent UnboundLocalError to pop up in parse_message_id (GH-17277) | diff --git a/Lib/email/_header_value_parser.py b/Lib/email/_header_value_parser.py
index 1668b4a14e9b91..abdef8189ca6fb 100644
--- a/Lib/email/_header_value_parser.py
+++ b/Lib/email/_header_value_parser.py
@@ -2113,7 +2113,8 @@ def parse_message_id(value):
except errors.HeaderParseError:
message_id.defects.append(errors.InvalidHeaderDefect(
"Expected msg-id but found {!r}".format(value)))
- message_id.append(token)
+ else:
+ message_id.append(token)
return message_id
#
diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py
index e442c44a2a74d0..2f63a3b3e05246 100644
--- a/Lib/test/test_email/test__header_value_parser.py
+++ b/Lib/test/test_email/test__header_value_parser.py
@@ -2638,6 +2638,12 @@ def test_get_msg_id_no_id_right_part(self):
)
self.assertEqual(msg_id.token_type, 'msg-id')
+ def test_get_msg_id_invalid_expected_msg_id_not_found(self):
+ text = "Message-Id: 935-XPB-567:0:86089:180874:0:45327:9:90305:17843586-40@example.com"
+ msg_id = parser.parse_message_id(text)
+ self.assertDefectsEqual(msg_id.all_defects,
+ [errors.InvalidHeaderDefect])
+
def test_get_msg_id_no_angle_start(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_msg_id("msgwithnoankle")
diff --git a/Misc/NEWS.d/next/Library/2019-12-02-10-35-19.bpo-38698.WZnAPQ.rst b/Misc/NEWS.d/next/Library/2019-12-02-10-35-19.bpo-38698.WZnAPQ.rst
new file mode 100644
index 00000000000000..e606acb5dcf573
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-12-02-10-35-19.bpo-38698.WZnAPQ.rst
@@ -0,0 +1,5 @@
+Prevent UnboundLocalError to pop up in parse_message_id
+
+parse_message_id() was improperly using a token defined inside an exception
+handler, which was raising `UnboundLocalError` on parsing an invalid value.
+Patch by Claudiu Popa.
| parse_message_id() was improperly using a token defined inside an exception
handler, which was raising `UnboundLocalError` on parsing an invalid value.
https://bugs.python.org/issue38698
(cherry picked from commit bb815499af855b1759c02535f8d7a9d0358e74e8)
Co-authored-by: Claudiu Popa <pcmanticore@gmail.com>
<!-- issue-number: [bpo-38698](https://bugs.python.org/issue38698) -->
https://bugs.python.org/issue38698
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/17476 | 2019-12-05T17:22:44Z | 2019-12-05T17:42:02Z | 2019-12-05T17:42:02Z | 2019-12-05T18:03:42Z | 591 | python/cpython | 4,480 |
fix repeated api call bug | diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 8f3b71c42..e99707ab9 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -142,7 +142,7 @@ async def acompletion(self, messages: list[dict], timeout=3) -> ChatCompletion:
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
"""when streaming, print each token in place."""
if stream:
- await self._achat_completion_stream(messages, timeout=timeout)
+ return await self._achat_completion_stream(messages, timeout=timeout)
rsp = await self._achat_completion(messages, timeout=timeout)
return self.get_choice_text(rsp)
| https://api.github.com/repos/geekan/MetaGPT/pulls/1012 | 2024-03-15T07:32:56Z | 2024-03-15T07:33:05Z | 2024-03-15T07:33:05Z | 2024-03-15T07:33:05Z | 186 | geekan/MetaGPT | 16,822 | |
feat: Build the detailed integration view for Sentry Apps | diff --git a/src/sentry/static/sentry/app/routes.jsx b/src/sentry/static/sentry/app/routes.jsx
index c27c0a7de3576..7a4f0b03979b5 100644
--- a/src/sentry/static/sentry/app/routes.jsx
+++ b/src/sentry/static/sentry/app/routes.jsx
@@ -829,6 +829,19 @@ function routes() {
</Route>
</Route>
+ <Redirect from="sentry-apps/" to="integrations/" />
+ <Route name="Sentry Apps" path="sentry-apps/">
+ <Route
+ name="Details"
+ path=":appSlug"
+ componentPromise={() =>
+ import(
+ /* webpackChunkName: "ConfigureIntegration" */ 'app/views/organizationIntegrations/sentryAppDetailedView'
+ )
+ }
+ component={errorHandler(LazyLoad)}
+ />
+ </Route>
<Route name="Integrations" path="integrations/">
<IndexRoute
componentPromise={() =>
diff --git a/src/sentry/static/sentry/app/views/organizationIntegrations/sentryAppDetailedView.tsx b/src/sentry/static/sentry/app/views/organizationIntegrations/sentryAppDetailedView.tsx
new file mode 100644
index 0000000000000..828c8e598c4ab
--- /dev/null
+++ b/src/sentry/static/sentry/app/views/organizationIntegrations/sentryAppDetailedView.tsx
@@ -0,0 +1,343 @@
+import React from 'react';
+import styled from '@emotion/styled';
+import {RouteComponentProps} from 'react-router/lib/Router';
+
+import {addErrorMessage, addSuccessMessage} from 'app/actionCreators/indicator';
+import Access from 'app/components/acl/access';
+import Button from 'app/components/button';
+import PluginIcon from 'app/plugins/components/pluginIcon';
+import space from 'app/styles/space';
+import {t, tct} from 'app/locale';
+import {addQueryParamsToExistingUrl} from 'app/utils/queryString';
+import {
+ installSentryApp,
+ uninstallSentryApp,
+} from 'app/actionCreators/sentryAppInstallations';
+import AsyncComponent from 'app/components/asyncComponent';
+import HookStore from 'app/stores/hookStore';
+import marked, {singleLineRenderer} from 'app/utils/marked';
+import InlineSvg from 'app/components/inlineSvg';
+import Tag from 'app/views/settings/components/tag';
+import {toPermissions} from 'app/utils/consolidatedScopes';
+import CircleIndicator from 'app/components/circleIndicator';
+import {Hooks} from 'app/types/hooks';
+import {
+ IntegrationFeature,
+ SentryApp,
+ Organization,
+ SentryAppInstallation,
+} from 'app/types';
+import withOrganization from 'app/utils/withOrganization';
+import {UninstallButton} from '../settings/organizationDeveloperSettings/sentryApplicationRow/installButtons';
+
+type State = {
+ sentryApp: SentryApp;
+ featureData: IntegrationFeature[];
+};
+
+type Props = {
+ organization: Organization;
+} & RouteComponentProps<{appSlug: string}, {}>;
+
+const defaultFeatureGateComponents = {
+ IntegrationFeatures: p =>
+ p.children({
+ disabled: false,
+ disabledReason: null,
+ ungatedFeatures: p.features,
+ gatedFeatureGroups: [],
+ }),
+ FeatureList: p => (
+ <ul>
+ {p.features.map((f, i) => (
+ <li key={i}>{f.description}</li>
+ ))}
+ </ul>
+ ),
+} as ReturnType<Hooks['integrations:feature-gates']>;
+
+class SentryAppDetailedView extends AsyncComponent<
+ Props & AsyncComponent['props'],
+ State & AsyncComponent['state']
+> {
+ getEndpoints(): ([string, string, any] | [string, string])[] {
+ const {
+ organization,
+ params: {appSlug},
+ } = this.props;
+ const baseEndpoints: ([string, string, any] | [string, string])[] = [
+ ['sentryApp', `/sentry-apps/${appSlug}/`],
+ ['featureData', `/sentry-apps/${appSlug}/features/`],
+ ['appInstalls', `/organizations/${organization.slug}/sentry-app-installations/`],
+ ];
+
+ return baseEndpoints;
+ }
+
+ featureTags(features: IntegrationFeature[]) {
+ return features.map(feature => {
+ const feat = feature.featureGate.replace(/integrations/g, '');
+ return <StyledTag key={feat}>{feat.replace(/-/g, ' ')}</StyledTag>;
+ });
+ }
+
+ get permissions() {
+ return toPermissions(this.state.sentryApp.scopes);
+ }
+
+ isInstalled = () => {
+ return this.state.appInstalls.find(i => i.app.slug === this.state.sentryApp.slug);
+ };
+
+ redirectUser = (install: SentryAppInstallation) => {
+ const {organization} = this.props;
+ const {sentryApp} = this.state;
+ const queryParams = {
+ installationId: install.uuid,
+ code: install.code,
+ orgSlug: organization.slug,
+ };
+ if (sentryApp.redirectUrl) {
+ const redirectUrl = addQueryParamsToExistingUrl(sentryApp.redirectUrl, queryParams);
+ window.location.assign(redirectUrl);
+ }
+ // TODO: Add SplitInstallationIdModal
+ };
+
+ handleInstall = async () => {
+ const {organization} = this.props;
+ const {sentryApp} = this.state;
+
+ // installSentryApp adds a message on failure
+ const install = await installSentryApp(this.api, organization.slug, sentryApp);
+ if (!sentryApp.redirectUrl) {
+ addSuccessMessage(t(`${sentryApp.slug} successfully installed.`));
+ this.setState({appInstalls: [install, ...this.state.appInstalls]});
+ } else {
+ this.redirectUser(install);
+ }
+ };
+
+ handleUninstall = async (install: SentryAppInstallation) => {
+ try {
+ await uninstallSentryApp(this.api, install);
+ const appInstalls = this.state.appInstalls.filter(
+ i => i.app.slug !== this.state.sentryApp.slug
+ );
+ return this.setState({appInstalls});
+ } catch (error) {
+ return addErrorMessage(t(`Unable to uninstall ${this.state.sentryApp.name}`));
+ }
+ };
+
+ renderPermissions() {
+ const permissions = this.permissions;
+ if (!Object.keys(permissions).some(scope => permissions[scope].length > 0)) {
+ return null;
+ }
+
+ return (
+ <React.Fragment>
+ <Title>Permissions</Title>
+ {permissions.read.length > 0 && (
+ <Permission>
+ <Indicator />
+ <Text key="read">
+ {tct('[read] access to [resources] resources', {
+ read: <strong>Read</strong>,
+ resources: permissions.read.join(', '),
+ })}
+ </Text>
+ </Permission>
+ )}
+ {permissions.write.length > 0 && (
+ <Permission>
+ <Indicator />
+ <Text key="write">
+ {tct('[read] and [write] access to [resources] resources', {
+ read: <strong>Read</strong>,
+ write: <strong>Write</strong>,
+ resources: permissions.read.join(', '),
+ })}
+ </Text>
+ </Permission>
+ )}
+ {permissions.admin.length > 0 && (
+ <Permission>
+ <Indicator />
+ <Text key="admin">
+ {tct('[admin] access to [resources] resources', {
+ admin: <strong>Admin</strong>,
+ resources: permissions.read.join(', '),
+ })}
+ </Text>
+ </Permission>
+ )}
+ </React.Fragment>
+ );
+ }
+
+ renderBody() {
+ const {organization} = this.props;
+ const {featureData, sentryApp} = this.state;
+
+ // Prepare the features list
+ const features = (featureData || []).map(f => ({
+ featureGate: f.featureGate,
+ description: (
+ <span dangerouslySetInnerHTML={{__html: singleLineRenderer(f.description)}} />
+ ),
+ }));
+
+ const defaultHook = () => defaultFeatureGateComponents;
+ const featureHook = HookStore.get('integrations:feature-gates')[0] || defaultHook;
+ const {FeatureList, IntegrationFeatures} = featureHook();
+
+ const overview = sentryApp.overview || '';
+ const featureProps = {organization, features};
+
+ return (
+ <React.Fragment>
+ <Flex style={{flexDirection: 'column'}}>
+ <Flex>
+ <PluginIcon pluginId={sentryApp.slug} size={50} />
+ <NameContainer>
+ <Name>{sentryApp.name}</Name>
+ <Flex>{features.length && this.featureTags(features)}</Flex>
+ </NameContainer>
+ <IntegrationFeatures {...featureProps}>
+ {({disabled, disabledReason}) => (
+ <div
+ style={{
+ marginLeft: 'auto',
+ alignSelf: 'center',
+ }}
+ >
+ {disabled && <DisabledNotice reason={disabledReason} />}
+
+ <Access organization={organization} access={['org:integrations']}>
+ {({hasAccess}) => {
+ return !this.isInstalled() ? (
+ <Button
+ size="small"
+ priority="primary"
+ disabled={!hasAccess || disabled}
+ onClick={() => this.handleInstall()}
+ style={{marginLeft: space(1)}}
+ data-test-id="install"
+ >
+ {t('Accept & Install')}
+ </Button>
+ ) : (
+ <UninstallButton
+ install={this.isInstalled()}
+ app={this.state.sentryApp}
+ onClickUninstall={this.handleUninstall}
+ onUninstallModalOpen={() => {}} //TODO: Implement tracking analytics
+ />
+ );
+ }}
+ </Access>
+ </div>
+ )}
+ </IntegrationFeatures>
+ </Flex>
+ <ul className="nav nav-tabs border-bottom" style={{paddingTop: '30px'}}>
+ <li className="active">
+ <a>Information</a>
+ </li>
+ </ul>
+ <Description dangerouslySetInnerHTML={{__html: marked(overview)}} />
+ <FeatureList {...featureProps} provider={{...sentryApp, key: sentryApp.slug}} />
+
+ {this.renderPermissions()}
+ <Footer>
+ <Author>{t('Authored By %s', sentryApp.author)}</Author>
+ </Footer>
+ </Flex>
+ </React.Fragment>
+ );
+ }
+}
+
+const Flex = styled('div')`
+ display: flex;
+`;
+
+const NameContainer = styled('div')`
+ display: flex;
+ align-items: flex-start;
+ flex-direction: column;
+ justify-content: center;
+ padding-left: ${space(2)};
+`;
+
+const Name = styled('div')`
+ font-weight: bold;
+ font-size: 1.4em;
+ margin-bottom: ${space(1)};
+`;
+
+const Description = styled('div')`
+ font-size: 1.5rem;
+ line-height: 2.1rem;
+ margin-bottom: ${space(2)};
+
+ li {
+ margin-bottom: 6px;
+ }
+`;
+
+const Author = styled('div')`
+ color: ${p => p.theme.gray2};
+`;
+
+const DisabledNotice = styled(({reason, ...p}: {reason: React.ReactNode}) => (
+ <div
+ style={{
+ flex: 1,
+ alignItems: 'center',
+ }}
+ {...p}
+ >
+ <InlineSvg src="icon-circle-exclamation" size="1.5em" />
+ <div style={{marginLeft: `${space(1)}`}}>{reason}</div>
+ </div>
+))`
+ color: ${p => p.theme.red};
+ font-size: 0.9em;
+`;
+
+const StyledTag = styled(Tag)`
+ &:not(:first-child) {
+ margin-left: ${space(0.5)};
+ }
+`;
+
+const Text = styled('p')`
+ margin: 0px 6px;
+`;
+
+const Permission = styled('div')`
+ display: flex;
+`;
+
+const Footer = styled('div')`
+ display: flex;
+ padding: 20px 30px;
+ border-top: 1px solid #e2dee6;
+ margin: 20px -30px -30px;
+ justify-content: space-between;
+`;
+
+const Title = styled('p')`
+ margin-bottom: ${space(1)};
+ font-weight: bold;
+`;
+
+const Indicator = styled(p => <CircleIndicator size={7} {...p} />)`
+ margin-top: 7px;
+ color: ${p => p.theme.success};
+`;
+
+export default withOrganization(SentryAppDetailedView);
| ## Problem
Show a detailed description and feature listsentry apps to integration directory and remove buttons from the right side of the list.
## Solution
Similar design to first-party detailed view except no tab for configurations
- Create a new route for detailed view.
- Create a new view component `sentryAppDetailedView.tsx`
# UI
*New App view*
<img width="1440" alt="Screen Shot 2020-02-03 at 6 28 38 PM" src="https://user-images.githubusercontent.com/10491193/73708451-43df0100-46b3-11ea-84dc-f82768302c97.png">
*Fresh Install*
<img width="1440" alt="Screen Shot 2020-02-03 at 6 28 53 PM" src="https://user-images.githubusercontent.com/10491193/73708483-5a855800-46b3-11ea-851b-25d1a753f4e7.png">
*Uninstall Popup*
<img width="1440" alt="Screen Shot 2020-02-03 at 6 29 13 PM" src="https://user-images.githubusercontent.com/10491193/73708506-6a04a100-46b3-11ea-94a4-f28b421eb511.png">
*Successful Uninstall*
<img width="1440" alt="Screen Shot 2020-02-03 at 6 29 30 PM" src="https://user-images.githubusercontent.com/10491193/73708526-7b4dad80-46b3-11ea-850e-d9a226fc870f.png">
| https://api.github.com/repos/getsentry/sentry/pulls/16789 | 2020-02-04T02:32:40Z | 2020-02-04T19:13:32Z | 2020-02-04T19:13:32Z | 2023-05-17T22:06:19Z | 3,027 | getsentry/sentry | 44,222 |
Adds mistral 7b instruct v0.1 to available anyscale models | diff --git a/llama_index/llms/anyscale_utils.py b/llama_index/llms/anyscale_utils.py
index ff674f7da236b..6c7953c8c220a 100644
--- a/llama_index/llms/anyscale_utils.py
+++ b/llama_index/llms/anyscale_utils.py
@@ -9,8 +9,13 @@
"codellama/CodeLlama-34b-Instruct-hf": 16384,
}
+MISTRAL_MODELS = {
+ "mistralai/Mistral-7B-Instruct-v0.1": 4096,
+}
+
ALL_AVAILABLE_MODELS = {
**LLAMA_MODELS,
+ **MISTRAL_MODELS,
}
DISCONTINUED_MODELS: Dict[str, int] = {}
| # Description
Anyscale recently made a `Mistral-7B-Instruct-v0.1` endpoint available. However, it's not possible to use with `llama_index.llms.anyscale` because `anyscale_utils.py` expects the model to be explicitly specified.
See here: https://github.com/run-llama/llama_index/blob/main/llama_index/llms/anyscale_utils.py#L42-L48
Fixes # (issue)
I simply updated the list of models to include the relevant one.
This
## Type of Change
Please delete options that are not relevant.
- [x ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ] Added new unit/integration tests
- [ ] Added new notebook (that tests end-to-end)
- [ x] I stared at the code and made sure it makes sense
# Suggested Checklist:
- [ x] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added Google Colab support for the newly added notebooks.
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] I ran `make format; make lint` to appease the lint gods
| https://api.github.com/repos/run-llama/llama_index/pulls/8652 | 2023-11-02T21:44:40Z | 2023-11-02T23:37:40Z | 2023-11-02T23:37:40Z | 2023-11-02T23:37:40Z | 184 | run-llama/llama_index | 6,392 |
Add Llama2 and NoowAi Provider | diff --git a/README.md b/README.md
index 9d0b150f3c..ac52abeaee 100644
--- a/README.md
+++ b/README.md
@@ -325,12 +325,12 @@ asyncio.run(run_all())
##### Proxy Support:
-All providers support specifying a proxy in the create function.
+All providers support specifying a proxy in the create functions.
```py
import g4f
-response = await g4f.ChatCompletion.create(
+response = g4f.ChatCompletion.create(
model=g4f.models.default,
messages=[{"role": "user", "content": "Hello"}],
proxy="http://host:port",
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index c93e76eef9..718affeb49 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -44,7 +44,7 @@ async def create_async_generator(
**kwargs
},
"botSettings": {},
- "prompt": prompt,
+ "prompt": prompt,
"messages": messages,
"timestamp": timestamp,
"sign": generate_signature(timestamp, prompt, conversation_id)
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama2.py
new file mode 100644
index 0000000000..b59fde12df
--- /dev/null
+++ b/g4f/Provider/Llama2.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+models = {
+ "7B": {"name": "Llama 2 7B", "version": "d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381", "shortened":"7B"},
+ "13B": {"name": "Llama 2 13B", "version": "9dff94b1bed5af738655d4a7cbcdcde2bd503aa85c94334fe1f42af7f3dd5ee3", "shortened":"13B"},
+ "70B": {"name": "Llama 2 70B", "version": "2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf", "shortened":"70B"},
+ "Llava": {"name": "Llava 13B", "version": "6bc1c7bb0d2a34e413301fee8f7cc728d2d4e75bfab186aa995f63292bda92fc", "shortened":"Llava"}
+}
+
+class Llama2(AsyncGeneratorProvider):
+ url = "https://www.llama2.ai"
+ supports_gpt_35_turbo = True
+ working = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "70B"
+ if model not in models:
+ raise ValueError(f"Model are not supported: {model}")
+ version = models[model]["version"]
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Origin": cls.url,
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "version": version,
+ "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
+ "temperature": kwargs.get("temperature", 0.75),
+ "topP": kwargs.get("top_p", 0.9),
+ "maxTokens": kwargs.get("max_tokens", 1024),
+ "image": None
+ }
+ started = False
+ async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if not started:
+ chunk = chunk.lstrip()
+ started = True
+ yield chunk.decode()
+
+def format_prompt(messages: Messages):
+ messages = [
+ f"[INST]{message['content']}[/INST]"
+ if message["role"] == "user"
+ else message["content"]
+ for message in messages
+ ]
+ return "\n".join(messages)
\ No newline at end of file
diff --git a/g4f/Provider/NoowAi.py b/g4f/Provider/NoowAi.py
new file mode 100644
index 0000000000..93748258b3
--- /dev/null
+++ b/g4f/Provider/NoowAi.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import random, string, json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+
+class NoowAi(AsyncGeneratorProvider):
+ url = "https://noowai.com"
+ supports_gpt_35_turbo = True
+ working = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "noowai.com",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "botId": "default",
+ "customId": "d49bc3670c3d858458576d75c8ea0f5d",
+ "session": "N/A",
+ "chatId": random_string(),
+ "contextId": 25,
+ "messages": messages,
+ "newMessage": messages[-1]["content"],
+ "stream": True
+ }
+ async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ try:
+ line = json.loads(line[6:])
+ assert "type" in line
+ except:
+ raise RuntimeError(f"Broken line: {line.decode()}")
+ if line["type"] == "live":
+ yield line["data"]
+ elif line["type"] == "end":
+ break
+
+def random_string(length: int = 10):
+ return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 26f523c72a..ae6ca996f7 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -23,9 +23,10 @@
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
-from .H2o import H2o
from .Liaobots import Liaobots
+from .Llama2 import Llama2
from .Myshell import Myshell
+from .NoowAi import NoowAi
from .Opchatgpts import Opchatgpts
from .Phind import Phind
from .Vercel import Vercel
@@ -82,9 +83,11 @@ class ProviderUtils:
'HuggingChat': HuggingChat,
'Komo': Komo,
'Liaobots': Liaobots,
+ 'Llama2': Llama2,
'Lockchat': Lockchat,
'MikuChat': MikuChat,
'Myshell': Myshell,
+ 'NoowAi': NoowAi,
'Opchatgpts': Opchatgpts,
'OpenAssistant': OpenAssistant,
'OpenaiChat': OpenaiChat,
@@ -148,8 +151,10 @@ class ProviderUtils:
'H2o',
'HuggingChat',
'Liaobots',
+ 'Llama2',
'Lockchat',
'Myshell',
+ 'NoowAi',
'Opchatgpts',
'Raycast',
'OpenaiChat',
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/deprecated/H2o.py
similarity index 95%
rename from g4f/Provider/H2o.py
rename to g4f/Provider/deprecated/H2o.py
index 9fac92a437..47290a3ea1 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/deprecated/H2o.py
@@ -5,13 +5,12 @@
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
class H2o(AsyncGeneratorProvider):
url = "https://gpt-gm.h2o.ai"
- working = False
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
@classmethod
@@ -86,7 +85,6 @@ async def create_async_generator(
async with session.delete(
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
- json=data
) as response:
response.raise_for_status()
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index b37b7edd8f..db48c3fb7b 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -11,4 +11,5 @@
from .V50 import V50
from .FastGpt import FastGpt
from .Aivvm import Aivvm
-from .Vitalentum import Vitalentum
\ No newline at end of file
+from .Vitalentum import Vitalentum
+from .H2o import H2o
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index c2d9b89b79..a0b35ff69c 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -16,6 +16,7 @@
Yqcloud,
Myshell,
FreeGpt,
+ NoowAi,
Vercel,
Aichat,
GPTalk,
@@ -51,8 +52,9 @@ class Model:
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- AiAsk, Aichat, ChatgptDemo, FreeGpt, GptGo, Liaobots, You,
+ AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You,
GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts,
+ NoowAi,
])
)
| https://api.github.com/repos/xtekky/gpt4free/pulls/1081 | 2023-10-15T17:11:01Z | 2023-10-15T22:41:49Z | 2023-10-15T22:41:48Z | 2023-10-25T16:21:42Z | 3,065 | xtekky/gpt4free | 38,105 | |
Update atari_preprocessing.py | diff --git a/gym/wrappers/atari_preprocessing.py b/gym/wrappers/atari_preprocessing.py
index 03a33175453..e839b4a09f4 100644
--- a/gym/wrappers/atari_preprocessing.py
+++ b/gym/wrappers/atari_preprocessing.py
@@ -98,7 +98,6 @@ def __init__(
np.empty(env.observation_space.shape, dtype=np.uint8),
]
- self.ale = env.unwrapped.ale
self.lives = 0
self.game_over = False
@@ -111,6 +110,11 @@ def __init__(
self.observation_space = Box(
low=_low, high=_high, shape=_shape, dtype=_obs_dtype
)
+
+ @property
+ def ale(self):
+ """Make ale as a class property to avoid serialization error"""
+ return self.env.unwrapped.ale
def step(self, action):
"""Applies the preprocessing for an :meth:`env.step`."""
| # Description
Fix AtariPreprocessing pickle error
Fixes # (issue)
## Type of change
Please delete options that are not relevant.
- [x] Bug fix (non-breaking change which fixes an issue)
# Checklist:
- [ ] I have run the [`pre-commit` checks](https://pre-commit.com/) with `pre-commit run --all-files` (see `CONTRIBUTING.md` instructions to set it up)
- [x] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [x] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [x] New and existing unit tests pass locally with my changes
<!--
As you go through the checklist above, you can mark something as done by putting an x character in it
For example,
- [x] I have done this task
- [ ] I have not done this task
-->
| https://api.github.com/repos/openai/gym/pulls/3105 | 2022-09-30T22:30:43Z | 2022-10-04T16:12:19Z | 2022-10-04T16:12:18Z | 2022-10-04T16:12:19Z | 232 | openai/gym | 5,021 |
Detect Orca 2 template | diff --git a/instruction-templates/ChatML.yaml b/instruction-templates/ChatML.yaml
index 5197855dba..9ea2b9c9c1 100644
--- a/instruction-templates/ChatML.yaml
+++ b/instruction-templates/ChatML.yaml
@@ -1,7 +1,6 @@
-user: "user"
-bot: "assistant"
+user: user
+bot: assistant
+turn_template: <|im_start|><|user|>\n<|user-message|><|im_end|>\n<|im_start|><|bot|>\n<|bot-message|><|im_end|>\n
context: |
- <|im_start|><|system-message|>
- <|im_end|>
-turn_template: "<|im_start|><|user|>\n<|user-message|><|im_end|>\n<|im_start|><|bot|>\n<|bot-message|><|im_end|>\n"
-system_message: "system"
+ <|im_start|>system
+ <|system-message|><|im_end|>
diff --git a/models/config.yaml b/models/config.yaml
index 00db01d1bd..b39e0eb4a3 100644
--- a/models/config.yaml
+++ b/models/config.yaml
@@ -180,3 +180,5 @@
instruction_template: 'ChatML'
.*(WizardCoder-Python-34B-V1.0|Phind-CodeLlama-34B-v2|CodeBooga-34B-v0.1):
instruction_template: 'Alpaca'
+.*orca-2-(13|7)b:
+ instruction_template: 'ChatML'
| https://api.github.com/repos/oobabooga/text-generation-webui/pulls/4697 | 2023-11-21T18:26:22Z | 2023-11-21T18:26:43Z | 2023-11-21T18:26:43Z | 2023-11-21T18:31:05Z | 373 | oobabooga/text-generation-webui | 26,319 | |
More nxos integration fixes | diff --git a/test/integration/targets/nxos_banner/tasks/nxapi.yaml b/test/integration/targets/nxos_banner/tasks/nxapi.yaml
new file mode 100644
index 00000000000000..ea525379f7f098
--- /dev/null
+++ b/test/integration/targets/nxos_banner/tasks/nxapi.yaml
@@ -0,0 +1,28 @@
+---
+- name: collect all nxapi test cases
+ find:
+ paths: "{{ role_path }}/tests/nxapi"
+ patterns: "{{ testcase }}.yaml"
+ register: test_cases
+
+- name: set test_items
+ set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
+
+- name: enable nxapi
+ nxos_config:
+ lines:
+ - feature nxapi
+ - nxapi http port 80
+ provider: "{{ cli }}"
+
+- name: run test case
+ include: "{{ test_case_to_run }}"
+ with_items: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
+
+- name: disable nxapi
+ nxos_config:
+ lines:
+ - no feature nxapi
+ provider: "{{ cli }}"
diff --git a/test/integration/targets/nxos_user/tests/cli/basic.yaml b/test/integration/targets/nxos_user/tests/cli/basic.yaml
index 86069baba5046b..2adce306157d6c 100644
--- a/test/integration/targets/nxos_user/tests/cli/basic.yaml
+++ b/test/integration/targets/nxos_user/tests/cli/basic.yaml
@@ -4,7 +4,6 @@
name: netend
roles: network-operator
state: present
- authorize: yes
provider: "{{ cli }}"
register: result
@@ -18,7 +17,6 @@
aggregate:
- name: test1
- name: test2
- authorize: yes
state: present
roles: network-admin
provider: "{{ cli }}"
@@ -32,7 +30,6 @@
- name: tearDown
nxos_user:
purge: yes
- authorize: yes
provider: "{{ cli }}"
register: result
diff --git a/test/integration/targets/nxos_user/tests/nxapi/basic.yaml b/test/integration/targets/nxos_user/tests/nxapi/basic.yaml
index f47fc5e40f9ad9..f3bc1ed1a412e4 100644
--- a/test/integration/targets/nxos_user/tests/nxapi/basic.yaml
+++ b/test/integration/targets/nxos_user/tests/nxapi/basic.yaml
@@ -4,7 +4,6 @@
name: netend
roles: network-operator
state: present
- authorize: yes
provider: "{{ nxapi }}"
register: result
@@ -18,7 +17,6 @@
aggregate:
- name: test1
- name: test2
- authorize: yes
state: present
roles: network-admin
provider: "{{ nxapi }}"
@@ -32,7 +30,6 @@
- name: tearDown
nxos_user:
purge: yes
- authorize: yes
provider: "{{ nxapi }}"
register: result
| ##### SUMMARY
<!--- Describe the change, including rationale and design decisions -->
<!---
If you are fixing an existing issue, please include "Fixes #nnn" in your
commit message and your description; but you should still explain what
the change does.
-->
Fix a subset of tests in nxos_user and nxos_banner
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bugfix Pull Request
##### COMPONENT NAME
<!--- Name of the module/plugin/module/task -->
nxos
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
2.4
``` | https://api.github.com/repos/ansible/ansible/pulls/27507 | 2017-07-31T17:40:03Z | 2017-07-31T18:13:32Z | 2017-07-31T18:13:32Z | 2019-04-26T22:07:06Z | 740 | ansible/ansible | 48,879 |
ctransformers: add mlock and no-mmap options | diff --git a/modules/ctransformers_model.py b/modules/ctransformers_model.py
index 5e0f347c15..8b8b5c4d90 100644
--- a/modules/ctransformers_model.py
+++ b/modules/ctransformers_model.py
@@ -19,7 +19,9 @@ def from_pretrained(self, path):
gpu_layers=shared.args.n_gpu_layers,
batch_size=shared.args.n_batch,
context_length=shared.args.n_ctx,
- stream=True
+ stream=True,
+ mmap=not shared.args.no_mmap,
+ mlock=shared.args.mlock
)
self.model = AutoModelForCausalLM.from_pretrained(
diff --git a/modules/loaders.py b/modules/loaders.py
index 7444555f96..472e8ddb5c 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -101,7 +101,9 @@
'n_gpu_layers',
'n_batch',
'threads',
- 'model_type'
+ 'model_type',
+ 'no_mmap',
+ 'mlock'
]
})
diff --git a/requirements.txt b/requirements.txt
index 05a3ec0e03..25c953ee52 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -42,4 +42,4 @@ https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_
https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
# ctransformers
-https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.22+cu117-py3-none-any.whl
+https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.23+cu117-py3-none-any.whl
| Updated to support mlock/mmap.
## Checklist:
- [X] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).
| https://api.github.com/repos/oobabooga/text-generation-webui/pulls/3649 | 2023-08-22T14:33:25Z | 2023-08-22T19:51:35Z | 2023-08-22T19:51:35Z | 2023-08-22T19:51:35Z | 515 | oobabooga/text-generation-webui | 26,481 |
Sync parameter order in boston_housing with reuters | diff --git a/keras/datasets/boston_housing.py b/keras/datasets/boston_housing.py
index 0b3967e35bf..7eed6e98cca 100644
--- a/keras/datasets/boston_housing.py
+++ b/keras/datasets/boston_housing.py
@@ -2,15 +2,15 @@
import numpy as np
-def load_data(path='boston_housing.npz', seed=113, test_split=0.2):
+def load_data(path='boston_housing.npz', test_split=0.2, seed=113):
"""Loads the Boston Housing dataset.
# Arguments
path: path where to cache the dataset locally
(relative to ~/.keras/datasets).
+ test_split: fraction of the data to reserve as test set.
seed: Random seed for shuffling the data
before computing the test split.
- test_split: fraction of the data to reserve as test set.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
| Those are the only two that have `test_split`. Syncing in that direction, because `boston_housing` is not referenced anywhere in Keras code, while `reuters` is. I think there was some discussion of datasets API in tensorflow, but can't find it now. Should `test_split` be renamed `validation_split` for consistency with the rest of codebase? | https://api.github.com/repos/keras-team/keras/pulls/8326 | 2017-10-31T17:38:27Z | 2017-10-31T19:37:33Z | 2017-10-31T19:37:33Z | 2017-10-31T21:01:18Z | 245 | keras-team/keras | 47,009 |
Fix compatibility with pytest 8 | diff --git a/requirements-dev.txt b/requirements-dev.txt
index 13173f3ae5..e80b18581e 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,5 +1,5 @@
-e .[socks]
-pytest>=2.8.0,<=6.2.5
+pytest>=2.8.0,<9
pytest-cov
pytest-httpbin==2.0.0
httpbin~=0.10.0
diff --git a/tests/test_requests.py b/tests/test_requests.py
index 4de47bc693..d05febeef5 100644
--- a/tests/test_requests.py
+++ b/tests/test_requests.py
@@ -1003,7 +1003,7 @@ def test_https_warnings(self, nosan_server):
"SubjectAltNameWarning",
)
- with pytest.warns(None) as warning_records:
+ with pytest.warns() as warning_records:
warnings.simplefilter("always")
requests.get(f"https://localhost:{port}/", verify=ca_bundle)
| `pytest.warns(None)` has been deprecated in pytest 7.0.0 and it's no longer working in pytest 8.
Resolves: https://github.com/psf/requests/issues/6679 | https://api.github.com/repos/psf/requests/pulls/6682 | 2024-04-11T19:10:21Z | 2024-04-11T19:49:35Z | 2024-04-11T19:49:35Z | 2024-04-11T19:49:35Z | 241 | psf/requests | 32,179 |
Allow passing a list of file paths | diff --git a/gpt_index/readers/file/base.py b/gpt_index/readers/file/base.py
index d8acc1d439bb0..93453fea26cde 100644
--- a/gpt_index/readers/file/base.py
+++ b/gpt_index/readers/file/base.py
@@ -32,6 +32,7 @@ class SimpleDirectoryReader(BaseReader):
Args:
input_dir (str): Path to the directory.
+ input_files (List): List of file paths to read (Optional; overrides input_dir)
exclude_hidden (bool): Whether to exclude hidden files (dotfiles).
errors (str): how encoding and decoding errors are to be handled,
see https://docs.python.org/3/library/functions.html#open
@@ -51,7 +52,8 @@ class SimpleDirectoryReader(BaseReader):
def __init__(
self,
- input_dir: str,
+ input_dir: Optional[str] = None,
+ input_files: Optional[List] = None,
exclude_hidden: bool = True,
errors: str = "ignore",
recursive: bool = False,
@@ -63,7 +65,10 @@ def __init__(
) -> None:
"""Initialize with parameters."""
super().__init__(verbose=verbose)
- self.input_dir = Path(input_dir)
+
+ if not input_dir and not input_files:
+ raise ValueError("Must provide either `input_dir` or `input_files`.")
+
self.errors = errors
self.recursive = recursive
@@ -71,7 +76,15 @@ def __init__(
self.required_exts = required_exts
self.num_files_limit = num_files_limit
- self.input_files = self._add_files(self.input_dir)
+ if input_files:
+ self.input_files = []
+ for path in input_files:
+ input_file = Path(path)
+ self.input_files.append(input_file)
+ elif input_dir:
+ self.input_dir = Path(input_dir)
+ self.input_files = self._add_files(self.input_dir)
+
self.file_extractor = file_extractor or DEFAULT_FILE_EXTRACTOR
self.file_metadata = file_metadata
| SimpleDirectoryReader can take a path to a directory. Allow passing a list of file paths as well. | https://api.github.com/repos/run-llama/llama_index/pulls/324 | 2023-01-27T22:52:36Z | 2023-01-28T17:57:30Z | 2023-01-28T17:57:30Z | 2023-01-28T17:57:30Z | 467 | run-llama/llama_index | 6,143 |
Update val_batch*.jpg for Chinese fonts | diff --git a/utils/general.py b/utils/general.py
index 86e3b3c1c54..fce5e38c6c9 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -37,6 +37,7 @@
ROOT = FILE.parents[1] # YOLOv5 root directory
NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode
+FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
@@ -55,6 +56,21 @@ def is_kaggle():
return False
+def is_writeable(dir, test=False):
+ # Return True if directory has write permissions, test opening a file with write permissions if test=True
+ if test: # method 1
+ file = Path(dir) / 'tmp.txt'
+ try:
+ with open(file, 'w'): # open file with write permissions
+ pass
+ file.unlink() # remove file
+ return True
+ except OSError:
+ return False
+ else: # method 2
+ return os.access(dir, os.R_OK) # possible issues on Windows
+
+
def set_logging(name=None, verbose=VERBOSE):
# Sets level and returns logger
if is_kaggle():
@@ -68,6 +84,22 @@ def set_logging(name=None, verbose=VERBOSE):
LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.)
+def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
+ # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
+ env = os.getenv(env_var)
+ if env:
+ path = Path(env) # use environment variable
+ else:
+ cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs
+ path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir
+ path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable
+ path.mkdir(exist_ok=True) # make if required
+ return path
+
+
+CONFIG_DIR = user_config_dir() # Ultralytics settings dir
+
+
class Profile(contextlib.ContextDecorator):
# Usage: @Profile() decorator or 'with Profile():' context manager
def __enter__(self):
@@ -152,34 +184,6 @@ def get_latest_run(search_dir='.'):
return max(last_list, key=os.path.getctime) if last_list else ''
-def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
- # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
- env = os.getenv(env_var)
- if env:
- path = Path(env) # use environment variable
- else:
- cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs
- path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir
- path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable
- path.mkdir(exist_ok=True) # make if required
- return path
-
-
-def is_writeable(dir, test=False):
- # Return True if directory has write permissions, test opening a file with write permissions if test=True
- if test: # method 1
- file = Path(dir) / 'tmp.txt'
- try:
- with open(file, 'w'): # open file with write permissions
- pass
- file.unlink() # remove file
- return True
- except OSError:
- return False
- else: # method 2
- return os.access(dir, os.R_OK) # possible issues on Windows
-
-
def is_docker():
# Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
@@ -207,7 +211,7 @@ def is_ascii(s=''):
def is_chinese(s='人工智能'):
# Is string composed of any Chinese characters?
- return re.search('[\u4e00-\u9fff]', s)
+ return True if re.search('[\u4e00-\u9fff]', str(s)) else False
def emojis(str=''):
@@ -378,6 +382,15 @@ def check_file(file, suffix=''):
return files[0] # return file
+def check_font(font=FONT):
+ # Download font to CONFIG_DIR if necessary
+ font = Path(font)
+ if not font.exists() and not (CONFIG_DIR / font.name).exists():
+ url = "https://ultralytics.com/assets/" + font.name
+ LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...')
+ torch.hub.download_url_to_file(url, str(font), progress=False)
+
+
def check_dataset(data, autodownload=True):
# Download and/or unzip dataset if not found locally
# Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
diff --git a/utils/plots.py b/utils/plots.py
index 74868403edc..be70ac8a030 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -17,12 +17,11 @@
import torch
from PIL import Image, ImageDraw, ImageFont
-from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese,
- try_except, user_config_dir, xywh2xyxy, xyxy2xywh)
+from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords,
+ increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh)
from utils.metrics import fitness
# Settings
-CONFIG_DIR = user_config_dir() # Ultralytics settings dir
RANK = int(os.getenv('RANK', -1))
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg') # for writing to files only
@@ -49,16 +48,14 @@ def hex2rgb(h): # rgb order (PIL)
colors = Colors() # create instance for 'from utils.plots import colors'
-def check_font(font='Arial.ttf', size=10):
+def check_pil_font(font=FONT, size=10):
# Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
font = Path(font)
font = font if font.exists() else (CONFIG_DIR / font.name)
try:
return ImageFont.truetype(str(font) if font.exists() else font.name, size)
except Exception as e: # download if missing
- url = "https://ultralytics.com/assets/" + font.name
- LOGGER.info(f'Downloading {url} to {font}...')
- torch.hub.download_url_to_file(url, str(font), progress=False)
+ check_font(font)
try:
return ImageFont.truetype(str(font), size)
except TypeError:
@@ -67,7 +64,7 @@ def check_font(font='Arial.ttf', size=10):
class Annotator:
if RANK in (-1, 0):
- check_font() # download TTF if necessary
+ check_pil_font() # download TTF if necessary
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
@@ -76,8 +73,8 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Fa
if self.pil: # use PIL
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)
- self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
- size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
+ self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
+ size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
else: # use cv2
self.im = im
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
@@ -89,10 +86,10 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2
if label:
w, h = self.font.getsize(label) # text width, height
outside = box[1] - h >= 0 # label fits outside box
- self.draw.rectangle([box[0],
+ self.draw.rectangle((box[0],
box[1] - h if outside else box[1],
box[0] + w + 1,
- box[1] + 1 if outside else box[1] + h + 1], fill=color)
+ box[1] + 1 if outside else box[1] + h + 1), fill=color)
# self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
else: # cv2
@@ -210,7 +207,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
# Annotate
fs = int((h + w) * ns * 0.01) # font size
- annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
+ annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
for i in range(i + 1):
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
| Partial fix for #6522
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Improved font handling and directory access in YOLOv5 utilities.
### 📊 Key Changes
- Added a default `FONT` variable specifying the 'Arial.ttf' font.
- Introduced `is_writeable()` function to check directory write permissions.
- Moved `user_config_dir()` function for configuring user-specific settings to an earlier part of the code.
- Created `check_font()` function to ensure the desired font is available, downloading if necessary.
- Modified `check_pil_font()` in `plots.py` to utilize the new `check_font()` from `general.py`.
- Annotator class and `plot_images()` method in `plots.py` were adjusted to use these new font-checking utilities.
### 🎯 Purpose & Impact
- Ensures consistent font usage across different operating environments. 🖋️
- Simplifies checking directory writing permissions, improving file handling. 📁
- Reduces code duplication by consolidating font-related checks. 🛠️
- Enhancements potentially make the software more robust and user-friendly, particularly regarding font dependencies and configuration file management. 👍
- End-users benefit from more reliable output appearance, especially when custom fonts are required. 🌐 | https://api.github.com/repos/ultralytics/yolov5/pulls/6526 | 2022-02-03T11:38:11Z | 2022-02-03T18:09:24Z | 2022-02-03T18:09:24Z | 2024-01-19T13:05:00Z | 2,552 | ultralytics/yolov5 | 25,007 |
Don't write media_dir | diff --git a/manimlib/constants.py b/manimlib/constants.py
index a014a93b8b..366b37afed 100644
--- a/manimlib/constants.py
+++ b/manimlib/constants.py
@@ -19,8 +19,6 @@
f"Media will be stored in {MEDIA_DIR + os.sep}. You can change "
"this behavior by writing a different directory to media_dir.txt."
)
-with open("media_dir.txt", 'w') as media_file:
- media_file.write(MEDIA_DIR)
VIDEO_DIR = os.path.join(MEDIA_DIR, "videos")
RASTER_IMAGE_DIR = os.path.join(MEDIA_DIR, "designs", "raster_images")
| Currently, manim will always write `media_dir.txt` to the directory where it runs, even if the user sets the `FILE_DIR` and `MEDIA_DIR` environment variables. This is more of a security inconvenience than a security problem, but it's still rather impolite for manim to modify part of the filesystem without allowing the user to specify a different location.
Besides, this write isn't needed. If the media directory is read from `media_dir.txt`, then the file is already there. And if it is read from an environment variable, it can be passed via environment variable again.
| https://api.github.com/repos/3b1b/manim/pulls/460 | 2019-02-28T21:33:35Z | 2019-02-28T21:36:14Z | 2019-02-28T21:36:14Z | 2019-02-28T21:36:18Z | 154 | 3b1b/manim | 18,450 |
[autoparallel] bypass MetaInfo when unavailable and modify BCAST_FUNC_OP metainfo | diff --git a/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py b/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py
index 15c3063b759b..281a92c0d4f1 100644
--- a/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py
+++ b/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py
@@ -24,26 +24,25 @@ def binary_elementwise_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, Train
Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs
"""
- input_op_data, other_op_data = [arg for arg in args if arg.type != OperationDataType.OUTPUT]
+ input_op_data = [arg for arg in args if arg.type != OperationDataType.OUTPUT]
output_op_data = next(filter(lambda arg: arg.type == OperationDataType.OUTPUT, args))
# construct forward args for flop mapping
- fwd_in_args = [input_op_data.data, other_op_data.data]
+ fwd_in_args = [opdata.data for opdata in input_op_data]
fwd_out_args = [output_op_data.data]
# calculate cost
# calculate compute cost
# NOTE: we set bwd_compute_cost two times of fwd_compute_cost in this case
- fwd_compute_cost = flop_mapping[torch.ops.aten._adaptive_avg_pool2d.default](fwd_in_args, fwd_out_args)
+ fwd_compute_cost = flop_mapping[torch.ops.aten.add.Tensor](fwd_in_args, fwd_out_args)
bwd_compute_cost = fwd_compute_cost * 2
compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)
# calculate memory cost
- param_mem_cost = activation_size(
- [arg.data for arg in [input_op_data, other_op_data] if arg.type == OperationDataType.PARAM])
+ param_mem_cost = activation_size([arg.data for arg in input_op_data if arg.type == OperationDataType.PARAM])
fwd_mem_cost = MemoryCost(
- activation=activation_size([input_op_data.data, output_op_data.data]),
+ activation=activation_size(output_op_data.data),
parameter=param_mem_cost,
)
bwd_mem_cost = MemoryCost(
diff --git a/colossalai/auto_parallel/tensor_shard/node_handler/node_handler.py b/colossalai/auto_parallel/tensor_shard/node_handler/node_handler.py
index af3cb5810d11..78dc58c905ec 100644
--- a/colossalai/auto_parallel/tensor_shard/node_handler/node_handler.py
+++ b/colossalai/auto_parallel/tensor_shard/node_handler/node_handler.py
@@ -4,7 +4,7 @@
import torch
from torch.fx.node import Node
-from colossalai.auto_parallel.meta_profiler.metainfo import MetaInfo
+from colossalai.auto_parallel.meta_profiler.metainfo import MetaInfo, meta_register
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
@@ -234,15 +234,19 @@ def register_strategy(self, compute_resharding_cost: bool = True) -> StrategiesV
"""
super().register_strategy(compute_resharding_cost=compute_resharding_cost)
target = self.get_target_function()
- metainfo_vector = []
- for strategy in self.strategies_vector:
- metainfo = MetaInfo(strategy, target)
- strategy.compute_cost = metainfo.compute_cost
- strategy.memory_cost = metainfo.memory_cost
- metainfo_vector.append(metainfo)
-
- # attach metainfos to the handler
- setattr(self, "metainfo_vector", metainfo_vector)
+ # Currently we haven't patched all the torch functions and modules, so if the target
+ # is not patched, we will use the default cost model to compute the cost.
+ # TODO: patch all torch functions and modules to make it clean
+ if meta_register.has(target.__class__) or meta_register.has(target):
+ metainfo_vector = []
+ for strategy in self.strategies_vector:
+ metainfo = MetaInfo(strategy, target)
+ strategy.compute_cost = metainfo.compute_cost
+ strategy.memory_cost = metainfo.memory_cost
+ metainfo_vector.append(metainfo)
+
+ # attach metainfos to the handler
+ setattr(self, "metainfo_vector", metainfo_vector)
return self.strategies_vector
@@ -281,14 +285,18 @@ def register_strategy(self, compute_resharding_cost: bool = True) -> StrategiesV
"""
super().register_strategy(compute_resharding_cost=compute_resharding_cost)
target = self.get_target_function()
- metainfo_vector = []
- for strategy in self.strategies_vector:
- metainfo = MetaInfo(strategy, target)
- strategy.compute_cost = metainfo.compute_cost
- strategy.memory_cost = metainfo.memory_cost
- metainfo_vector.append(metainfo)
-
- # attach metainfos to the handler
- setattr(self, "metainfo_vector", metainfo_vector)
+ # Currently we haven't patched all the torch functions and modules, so if the target
+ # is not patched, we will use the default cost model to compute the cost.
+ # TODO: patch all torch functions and modules to make it clean
+ if meta_register.has(target.__class__) or meta_register.has(target):
+ metainfo_vector = []
+ for strategy in self.strategies_vector:
+ metainfo = MetaInfo(strategy, target)
+ strategy.compute_cost = metainfo.compute_cost
+ strategy.memory_cost = metainfo.memory_cost
+ metainfo_vector.append(metainfo)
+
+ # attach metainfos to the handler
+ setattr(self, "metainfo_vector", metainfo_vector)
return self.strategies_vector
| # What's New?
In this PR I modify the meta information for `BCAST_FUNC_OP` to fix the case like `torch.true_divide` that only has one input tensor. I also bypass `MetaInfo` when the operation is not patched in the meta registry. | https://api.github.com/repos/hpcaitech/ColossalAI/pulls/2293 | 2023-01-03T12:27:19Z | 2023-01-03T12:28:01Z | 2023-01-03T12:28:01Z | 2023-01-03T12:28:02Z | 1,352 | hpcaitech/ColossalAI | 11,761 |
Fixed #30763 -- Fixed management commands when using required mutually exclusive groups. | diff --git a/django/core/management/__init__.py b/django/core/management/__init__.py
index 5f2e64761ee08..adc7d173eb5ce 100644
--- a/django/core/management/__init__.py
+++ b/django/core/management/__init__.py
@@ -130,11 +130,19 @@ def get_actions(parser):
yield opt
parser_actions = list(get_actions(parser))
+ mutually_exclusive_required_options = {
+ opt
+ for group in parser._mutually_exclusive_groups
+ for opt in group._group_actions if group.required
+ }
# Any required arguments which are passed in via **options must be passed
# to parse_args().
parse_args += [
'{}={}'.format(min(opt.option_strings), arg_options[opt.dest])
- for opt in parser_actions if opt.required and opt.dest in options
+ for opt in parser_actions if (
+ opt.dest in options and
+ (opt.required or opt in mutually_exclusive_required_options)
+ )
]
defaults = parser.parse_args(args=parse_args)
defaults = dict(defaults._get_kwargs(), **arg_options)
diff --git a/tests/user_commands/management/commands/mutually_exclusive_required.py b/tests/user_commands/management/commands/mutually_exclusive_required.py
new file mode 100644
index 0000000000000..e5df17edb0ac5
--- /dev/null
+++ b/tests/user_commands/management/commands/mutually_exclusive_required.py
@@ -0,0 +1,12 @@
+from django.core.management.base import BaseCommand
+
+
+class Command(BaseCommand):
+
+ def add_arguments(self, parser):
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--foo-id', type=int, nargs='?', default=None)
+ group.add_argument('--foo-name', type=str, nargs='?', default=None)
+
+ def handle(self, *args, **options):
+ self.stdout.write(','.join(options))
diff --git a/tests/user_commands/tests.py b/tests/user_commands/tests.py
index a53c781ac644d..4e730472f53ad 100644
--- a/tests/user_commands/tests.py
+++ b/tests/user_commands/tests.py
@@ -214,6 +214,16 @@ def test_command_add_arguments_after_common_arguments(self):
management.call_command('common_args', stdout=out)
self.assertIn('Detected that --version already exists', out.getvalue())
+ def test_mutually_exclusive_group_required_options(self):
+ out = StringIO()
+ management.call_command('mutually_exclusive_required', foo_id=1, stdout=out)
+ self.assertIn('foo_id', out.getvalue())
+ management.call_command('mutually_exclusive_required', foo_name='foo', stdout=out)
+ self.assertIn('foo_name', out.getvalue())
+ msg = 'Error: one of the arguments --foo-id --foo-name is required'
+ with self.assertRaisesMessage(CommandError, msg):
+ management.call_command('mutually_exclusive_required', stdout=out)
+
def test_subparser(self):
out = StringIO()
management.call_command('subparser', 'foo', 12, stdout=out)
| [ticket 30763](https://code.djangoproject.com/ticket/30763)
Thanks to @felixxm for preparing the test case. | https://api.github.com/repos/django/django/pulls/11749 | 2019-09-05T21:47:44Z | 2019-09-06T09:53:11Z | 2019-09-06T09:53:11Z | 2019-09-06T09:53:11Z | 707 | django/django | 50,977 |
DOC: Fix rst formatting in dev environment docs | diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst
index 38e354d8c57d6..8bc15d6968afc 100644
--- a/doc/source/development/contributing_environment.rst
+++ b/doc/source/development/contributing_environment.rst
@@ -225,7 +225,7 @@ To compile pandas with meson, run::
# Build and install pandas
python -m pip install -ve . --no-build-isolation
-** Build options **
+**Build options**
It is possible to pass options from the pip frontend to the meson backend if you would like to configure your
install. Occasionally, you'll want to use this to adjust the build directory, and/or toggle debug/optimization levels.
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This isn't formatted correctly in the [currently deployed version of the docs](https://pandas.pydata.org/docs/dev/development/contributing_environment.html#step-3-build-and-install-pandas). | https://api.github.com/repos/pandas-dev/pandas/pulls/53187 | 2023-05-11T18:04:54Z | 2023-05-11T20:17:25Z | 2023-05-11T20:17:25Z | 2023-05-11T20:17:42Z | 176 | pandas-dev/pandas | 45,544 |
Implement Perp-Neg | diff --git a/comfy/samplers.py b/comfy/samplers.py
index 39bc3774a4..35c9ccf059 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -251,7 +251,8 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option
cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options)
if "sampler_cfg_function" in model_options:
- args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep}
+ args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep,
+ "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options}
cfg_result = x - model_options["sampler_cfg_function"](args)
else:
cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale
diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py
new file mode 100644
index 0000000000..36f2eb01a5
--- /dev/null
+++ b/comfy_extras/nodes_perpneg.py
@@ -0,0 +1,58 @@
+import torch
+import comfy.model_management
+import comfy.sample
+import comfy.samplers
+import comfy.utils
+
+
+class PerpNeg:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"model": ("MODEL", ),
+ "clip": ("CLIP", ),
+ "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "_for_testing"
+
+ def patch(self, model, clip, neg_scale):
+ m = model.clone()
+
+ tokens = clip.tokenize("")
+ nocond, nocond_pooled = clip.encode_from_tokens(tokens, return_pooled=True)
+ nocond = [[nocond, {"pooled_output": nocond_pooled}]]
+ nocond = comfy.sample.convert_cond(nocond)
+
+ def cfg_function(args):
+ model = args["model"]
+ noise_pred_pos = args["cond_denoised"]
+ noise_pred_neg = args["uncond_denoised"]
+ cond_scale = args["cond_scale"]
+ x = args["input"]
+ sigma = args["sigma"]
+ model_options = args["model_options"]
+
+ (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond, None, x, sigma, model_options)
+
+ pos = noise_pred_pos - noise_pred_nocond
+ neg = noise_pred_neg - noise_pred_nocond
+ perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
+ perp_neg = perp * neg_scale
+ cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
+ cfg_result = x - cfg_result
+ return cfg_result
+
+ m.set_model_sampler_cfg_function(cfg_function)
+
+ return (m, )
+
+
+NODE_CLASS_MAPPINGS = {
+ "PerpNeg": PerpNeg,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "PerpNeg": "Perp-Neg",
+}
diff --git a/nodes.py b/nodes.py
index 3d24750cbf..3031b10aad 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1868,6 +1868,7 @@ def init_custom_nodes():
"nodes_images.py",
"nodes_video_model.py",
"nodes_sag.py",
+ "nodes_perpneg.py",
]
for node_file in extras_files:
| Implement Perp-Neg as a sampler_cfg_function patch | https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/2303 | 2023-12-15T19:01:50Z | 2023-12-15T19:54:08Z | 2023-12-15T19:54:08Z | 2023-12-15T19:54:08Z | 956 | comfyanonymous/ComfyUI | 17,882 |
Remove outdated BERT tips | diff --git a/docs/source/model_doc/bert.rst b/docs/source/model_doc/bert.rst
index cbc1c8aa77417..1666260f96e56 100644
--- a/docs/source/model_doc/bert.rst
+++ b/docs/source/model_doc/bert.rst
@@ -27,13 +27,8 @@ Tips:
- BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
-- BERT was trained with a masked language modeling (MLM) objective. It is therefore efficient at predicting masked
- tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language
- modeling (CLM) objective are better in that regard.
-- Alongside MLM, BERT was trained using a next sentence prediction (NSP) objective using the [CLS] token as a sequence
- approximate. The user may use this token (the first token in a sequence built with special tokens) to get a sequence
- prediction rather than a token prediction. However, averaging over the sequence may yield better results than using
- the [CLS] token.
+- BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is efficient at predicting masked
+ tokens and at NLU in general, but is not optimal for text generation.
The original code can be found `here <https://github.com/google-research/bert>`_.
diff --git a/src/transformers/modeling_outputs.py b/src/transformers/modeling_outputs.py
index 3a91d17904d5a..1c36dc2d81ac4 100644
--- a/src/transformers/modeling_outputs.py
+++ b/src/transformers/modeling_outputs.py
@@ -45,10 +45,6 @@ class BaseModelOutputWithPooling(ModelOutput):
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pretraining.
-
- This output is usually *not* a good summary
- of the semantic content of the input, you're often better with averaging or pooling
- the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
| Why remove the tips:
> - BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on
> the right rather than the left.
Yes but since we don't provide an option to pad from the left I think it's not necessary.
> - BERT was trained with a masked language modeling (MLM) objective. It is therefore efficient at predicting masked
> tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language
> modeling (CLM) objective are better in that regard.
No. T5 & BART proved it wrong.
> - Alongside MLM, BERT was trained using a next sentence prediction (NSP) objective using the [CLS] token as a sequence
> approximate. The user may use this token (the first token in a sequence built with special tokens) to get a sequence
> prediction rather than a token prediction. However, averaging over the sequence may yield better results than using
> the [CLS] token.
No. [CLS] can do learnable self-attention pooling, which is way much better than parameter-free average pooling especially when fine-tuned. (w.r.t. SentenceBERT)
| https://api.github.com/repos/huggingface/transformers/pulls/6217 | 2020-08-03T15:31:24Z | 2020-08-03T17:17:56Z | 2020-08-03T17:17:56Z | 2020-08-03T17:17:57Z | 575 | huggingface/transformers | 12,281 |
Added Machine Learning for Drug Discovery | diff --git a/books.md b/books.md
index a78c7179..72535a83 100644
--- a/books.md
+++ b/books.md
@@ -69,6 +69,7 @@ The following is a list of free and/or open source books on machine learning, st
- [Practical Gradient Boosting](https://www.amazon.com/dp/B0BL1HRD6Z) by Guillaume Saupin
- [Machine Learning System Design](https://www.manning.com/books/machine-learning-system-design) - Valerii Babushkin and Arseny Kravchenko - A book about planning and designing successful ML applications.
- [Fight Fraud with Machine Learning](https://www.manning.com/books/fight-fraud-with-machine-learning) - by Ashish Ranjan Jha - A book about developing scalable and tunable models that can spot and stop fraudulent activity.
+- [Machine Learning for Drug Discovery](https://www.manning.com/books/machine-learning-for-drug-discovery) - by Noah Flynn - A book that introduces the machine learning and deep learning techniques that drive modern medical research.
## Deep Learning
| Hi,
Stjepan from Manning here. Please add this title to the list.
Thank you.
Best, | https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/986 | 2024-04-11T14:17:17Z | 2024-04-11T14:32:38Z | 2024-04-11T14:32:38Z | 2024-04-12T09:49:16Z | 241 | josephmisiti/awesome-machine-learning | 51,715 |
calculus by ron larson | diff --git a/books.md b/books.md
index c3acb539..77c3e8e1 100644
--- a/books.md
+++ b/books.md
@@ -99,3 +99,4 @@ The following is a list of free, open source books on machine learning, statisti
## Calculus
* [Calculus Made Easy](https://github.com/lahorekid/Calculus/blob/master/Calculus%20Made%20Easy.pdf)
+* [calculus by ron larson](https://www.spps.org/cms/lib/MN01910242/Centricity/Domain/860/%20CalculusTextbook.pdf)
| This is a great book for introduction to ML/deeplearninng/datascience | https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/599 | 2019-03-30T15:56:00Z | 2019-04-09T18:54:06Z | 2019-04-09T18:54:06Z | 2019-04-09T18:54:14Z | 140 | josephmisiti/awesome-machine-learning | 52,278 |
gh-80109: io.TextIOWrapper drops the internal buffer during write() | diff --git a/Lib/_pyio.py b/Lib/_pyio.py
index 32698abac78d25..df2c29bfa9caee 100644
--- a/Lib/_pyio.py
+++ b/Lib/_pyio.py
@@ -2198,8 +2198,9 @@ def write(self, s):
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
- self._set_decoded_chars('')
- self._snapshot = None
+ if self._snapshot is not None:
+ self._set_decoded_chars('')
+ self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
@@ -2513,8 +2514,9 @@ def read(self, size=None):
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
- self._set_decoded_chars('')
- self._snapshot = None
+ if self._snapshot is not None:
+ self._set_decoded_chars('')
+ self._snapshot = None
return result
else:
# Keep reading chunks until we have size characters to return.
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index 1d78876f2a1c84..ca31b9dad2631a 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -3880,6 +3880,14 @@ def test_issue25862(self):
t.write('x')
t.tell()
+ def test_issue35928(self):
+ p = self.BufferedRWPair(self.BytesIO(b'foo\nbar\n'), self.BytesIO())
+ f = self.TextIOWrapper(p)
+ res = f.readline()
+ self.assertEqual(res, 'foo\n')
+ f.write(res)
+ self.assertEqual(res + f.readline(), 'foo\nbar\n')
+
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
diff --git a/Misc/NEWS.d/next/Library/2020-10-03-23-47-28.bpo-35928.E0iPAa.rst b/Misc/NEWS.d/next/Library/2020-10-03-23-47-28.bpo-35928.E0iPAa.rst
new file mode 100644
index 00000000000000..c63e616458a356
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2020-10-03-23-47-28.bpo-35928.E0iPAa.rst
@@ -0,0 +1,2 @@
+:class:`io.TextIOWrapper` now correctly handles the decoding buffer after
+``read()`` and ``write()``.
diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c
index 4507930c14bb50..e93c3e091549db 100644
--- a/Modules/_io/textio.c
+++ b/Modules/_io/textio.c
@@ -1762,8 +1762,10 @@ _io_TextIOWrapper_write_impl(textio *self, PyObject *text)
}
}
- textiowrapper_set_decoded_chars(self, NULL);
- Py_CLEAR(self->snapshot);
+ if (self->snapshot != NULL) {
+ textiowrapper_set_decoded_chars(self, NULL);
+ Py_CLEAR(self->snapshot);
+ }
if (self->decoder) {
ret = PyObject_CallMethodNoArgs(self->decoder, &_Py_ID(reset));
@@ -1999,8 +2001,10 @@ _io_TextIOWrapper_read_impl(textio *self, Py_ssize_t n)
if (result == NULL)
goto fail;
- textiowrapper_set_decoded_chars(self, NULL);
- Py_CLEAR(self->snapshot);
+ if (self->snapshot != NULL) {
+ textiowrapper_set_decoded_chars(self, NULL);
+ Py_CLEAR(self->snapshot);
+ }
return result;
}
else {
| io.TextIOWrapper was dropping the internal decoding buffer
during read() and write() calls.
<!-- issue-number: [bpo-35928](https://bugs.python.org/issue35928) -->
https://bugs.python.org/issue35928
<!-- /issue-number -->
<!-- gh-issue-number: gh-80109 -->
* Issue: gh-80109
<!-- /gh-issue-number -->
| https://api.github.com/repos/python/cpython/pulls/22535 | 2020-10-04T05:50:56Z | 2024-01-08T10:33:34Z | 2024-01-08T10:33:34Z | 2024-01-08T12:19:28Z | 936 | python/cpython | 4,559 |
Remove "remember-last-answer" from Breeze/CI | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e9e3954e34751..f880e3b1d9c20 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -735,7 +735,7 @@ repos:
# The below pre-commits are those requiring CI image to be built
- id: build
name: Check if image build is needed
- entry: ./scripts/ci/pre_commit/pre_commit_ci_build.sh 3.7 false
+ entry: ./scripts/ci/pre_commit/pre_commit_ci_build.sh 3.7
language: system
always_run: true
pass_filenames: false
diff --git a/breeze b/breeze
index 93ac65ab253c6..269cf9077e5b5 100755
--- a/breeze
+++ b/breeze
@@ -3605,8 +3605,6 @@ breeze::parse_arguments "${@}"
breeze::print_header_line
-build_images::forget_last_answer
-
breeze::check_and_save_all_params
build_images::determine_docker_cache_strategy
diff --git a/dev/breeze/src/airflow_breeze/global_constants.py b/dev/breeze/src/airflow_breeze/global_constants.py
index b8db092157adf..5ae0843578b49 100644
--- a/dev/breeze/src/airflow_breeze/global_constants.py
+++ b/dev/breeze/src/airflow_breeze/global_constants.py
@@ -24,7 +24,6 @@
FORCE_PULL_IMAGES = False
CHECK_IF_BASE_PYTHON_IMAGE_UPDATED = False
FORCE_BUILD_IMAGES = False
-# LAST_FORCE_ANSWER_FILE = f"{BUILD_CACHE_DIR}/last_force_answer.sh"
FORCE_ANSWER_TO_QUESTION = ""
SKIP_CHECK_REMOTE_IMAGE = False
PUSH_PYTHON_BASE_IMAGE = False
diff --git a/scripts/ci/libraries/_build_images.sh b/scripts/ci/libraries/_build_images.sh
index f098169577875..796fac02002dc 100644
--- a/scripts/ci/libraries/_build_images.sh
+++ b/scripts/ci/libraries/_build_images.sh
@@ -87,36 +87,6 @@ function build_images::get_airflow_version_from_production_image() {
docker run --entrypoint /bin/bash "${AIRFLOW_PROD_IMAGE}" -c 'echo "${AIRFLOW_VERSION}"'
}
-# Removes the "Forced answer" (yes/no/quit) given previously, unless you specifically want to remember it.
-#
-# This is the default behaviour of all rebuild scripts to ask independently whether you want to
-# rebuild the image or not. Sometimes however we want to remember answer previously given. For
-# example if you answered "no" to rebuild the image, the assumption is that you do not
-# want to rebuild image also for other rebuilds in the same pre-commit execution.
-#
-# All the pre-commit checks therefore have `export REMEMBER_LAST_ANSWER="true"` set
-# So that in case they are run in a sequence of commits they will not rebuild. Similarly if your most
-# recent answer was "no" and you run `pre-commit run mypy` (for example) it will also reuse the
-# "no" answer given previously. This happens until you run any of the breeze commands or run all
-# pre-commits `pre-commit run` - then the "LAST_FORCE_ANSWER_FILE" will be removed and you will
-# be asked again.
-function build_images::forget_last_answer() {
- if [[ ${REMEMBER_LAST_ANSWER:="false"} != "true" ]]; then
- verbosity::print_info
- verbosity::print_info "Forgetting last answer from ${LAST_FORCE_ANSWER_FILE}:"
- verbosity::print_info
- rm -f "${LAST_FORCE_ANSWER_FILE}"
- else
- if [[ -f "${LAST_FORCE_ANSWER_FILE}" ]]; then
- verbosity::print_info
- verbosity::print_info "Still remember last answer from ${LAST_FORCE_ANSWER_FILE}:"
- verbosity::print_info "$(cat "${LAST_FORCE_ANSWER_FILE}")"
- verbosity::print_info
- fi
- fi
-}
-
-
function build_images::reconfirm_rebuilding_if_not_rebased() {
local latest_main_commit_sha
latest_main_commit_sha=$(curl -s -H "Accept: application/vnd.github.VERSION.sha" \
@@ -178,12 +148,6 @@ function build_images::confirm_rebuilding_on_modified_files() {
# So that the script works also from within pre-commit run via git hooks - where stdin is not
# available - it tries to find usable terminal and ask the user via this terminal.
function build_images::confirm_image_rebuild() {
- if [[ -f "${LAST_FORCE_ANSWER_FILE}" ]]; then
- # set variable from last answered response given in the same pre-commit run - so that it can be
- # answered in the first pre-commit check (build) and then used in another (mypy/flake8 etc).
- # shellcheck disable=SC1090
- source "${LAST_FORCE_ANSWER_FILE}"
- fi
set +e
local RES
if [[ ${CI:="false"} == "true" ]]; then
@@ -235,7 +199,6 @@ function build_images::confirm_image_rebuild() {
# Force "no" also to subsequent questions so that if you answer it once, you are not asked
# For all other pre-commits and you will continue using the images you already have
export FORCE_ANSWER_TO_QUESTIONS="no"
- echo 'export FORCE_ANSWER_TO_QUESTIONS="no"' >"${LAST_FORCE_ANSWER_FILE}"
elif [[ ${RES} == "2" ]]; then
echo
echo "${COLOR_RED}ERROR: The ${THE_IMAGE_TYPE} needs to be rebuilt - it is outdated. ${COLOR_RESET}"
diff --git a/scripts/ci/libraries/_initialization.sh b/scripts/ci/libraries/_initialization.sh
index 5c9d239ef75a3..7809b4b2bab0b 100644
--- a/scripts/ci/libraries/_initialization.sh
+++ b/scripts/ci/libraries/_initialization.sh
@@ -294,11 +294,6 @@ function initialization::initialize_force_variables() {
# Can be overridden by '--force-build-images' flag.
export FORCE_BUILD_IMAGES=${FORCE_BUILD_IMAGES:="false"}
- # File to keep the last forced answer. This is useful for pre-commits where you need to
- # only answer once if the image should be rebuilt or not and your answer is used for
- # All the subsequent questions
- export LAST_FORCE_ANSWER_FILE="${BUILD_CACHE_DIR}/last_force_answer.sh"
-
# Can be set to "yes/no/quit" in order to force specified answer to all questions asked to the user.
export FORCE_ANSWER_TO_QUESTIONS=${FORCE_ANSWER_TO_QUESTIONS:=""}
diff --git a/scripts/ci/pre_commit/pre_commit_bat_tests.sh b/scripts/ci/pre_commit/pre_commit_bat_tests.sh
index 58078623552cb..499972609da6d 100755
--- a/scripts/ci/pre_commit/pre_commit_bat_tests.sh
+++ b/scripts/ci/pre_commit/pre_commit_bat_tests.sh
@@ -16,7 +16,6 @@
# specific language governing permissions and limitations
# under the License.
export FORCE_ANSWER_TO_QUESTIONS=${FORCE_ANSWER_TO_QUESTIONS:="quit"}
-export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
diff --git a/scripts/ci/pre_commit/pre_commit_check_license.sh b/scripts/ci/pre_commit/pre_commit_check_license.sh
index 5eb42b6d985ab..b9ab60067bbbc 100755
--- a/scripts/ci/pre_commit/pre_commit_check_license.sh
+++ b/scripts/ci/pre_commit/pre_commit_check_license.sh
@@ -17,7 +17,6 @@
# under the License.
set -euo pipefail
export FORCE_ANSWER_TO_QUESTIONS=${FORCE_ANSWER_TO_QUESTIONS:="quit"}
-export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
diff --git a/scripts/ci/pre_commit/pre_commit_ci_build.sh b/scripts/ci/pre_commit/pre_commit_ci_build.sh
index 066380107714f..55d0aac23a8e9 100755
--- a/scripts/ci/pre_commit/pre_commit_ci_build.sh
+++ b/scripts/ci/pre_commit/pre_commit_ci_build.sh
@@ -16,7 +16,6 @@
# specific language governing permissions and limitations
# under the License.
export PYTHON_MAJOR_MINOR_VERSION="${1}"
-export REMEMBER_LAST_ANSWER="${2}"
export PRINT_INFO_FROM_SCRIPTS="false"
# shellcheck source=scripts/ci/libraries/_script_init.sh
@@ -50,8 +49,6 @@ function build_images::rebuild_ci_image_if_confirmed_for_pre_commit() {
fi
}
-build_images::forget_last_answer
-
build_images::prepare_ci_build
build_images::rebuild_ci_image_if_confirmed_for_pre_commit
diff --git a/scripts/ci/pre_commit/pre_commit_flake8.sh b/scripts/ci/pre_commit/pre_commit_flake8.sh
index cbef9d08bce23..4138478b7ae93 100755
--- a/scripts/ci/pre_commit/pre_commit_flake8.sh
+++ b/scripts/ci/pre_commit/pre_commit_flake8.sh
@@ -17,7 +17,6 @@
# under the License.
export PYTHON_MAJOR_MINOR_VERSION="3.7"
export FORCE_ANSWER_TO_QUESTIONS=${FORCE_ANSWER_TO_QUESTIONS:="quit"}
-export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
# shellcheck source=scripts/ci/static_checks/flake8.sh
diff --git a/scripts/ci/pre_commit/pre_commit_helm_lint.sh b/scripts/ci/pre_commit/pre_commit_helm_lint.sh
index 56a57f3555d95..c8f2a5befdd99 100755
--- a/scripts/ci/pre_commit/pre_commit_helm_lint.sh
+++ b/scripts/ci/pre_commit/pre_commit_helm_lint.sh
@@ -15,7 +15,6 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
diff --git a/scripts/ci/pre_commit/pre_commit_in_container_bats_test.sh b/scripts/ci/pre_commit/pre_commit_in_container_bats_test.sh
index b699a54361234..fe1d7e7cba1c5 100755
--- a/scripts/ci/pre_commit/pre_commit_in_container_bats_test.sh
+++ b/scripts/ci/pre_commit/pre_commit_in_container_bats_test.sh
@@ -16,7 +16,6 @@
# specific language governing permissions and limitations
# under the License.
export FORCE_ANSWER_TO_QUESTIONS=${FORCE_ANSWER_TO_QUESTIONS:="quit"}
-export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
diff --git a/scripts/ci/pre_commit/pre_commit_lint_dockerfile.sh b/scripts/ci/pre_commit/pre_commit_lint_dockerfile.sh
index 1c0697803ca29..90dc7e0cdbb45 100755
--- a/scripts/ci/pre_commit/pre_commit_lint_dockerfile.sh
+++ b/scripts/ci/pre_commit/pre_commit_lint_dockerfile.sh
@@ -15,7 +15,6 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
diff --git a/scripts/ci/pre_commit/pre_commit_mypy.sh b/scripts/ci/pre_commit/pre_commit_mypy.sh
index 5f3198e15b0b4..7abd6b4bb796b 100755
--- a/scripts/ci/pre_commit/pre_commit_mypy.sh
+++ b/scripts/ci/pre_commit/pre_commit_mypy.sh
@@ -17,7 +17,6 @@
# under the License.
export PYTHON_MAJOR_MINOR_VERSION="3.7"
export FORCE_ANSWER_TO_QUESTIONS=${FORCE_ANSWER_TO_QUESTIONS:="quit"}
-export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
# shellcheck source=scripts/ci/static_checks/mypy.sh
| This feature had been removed when recent BUILDX improvements
were added. This PR removes the remnants of it.
Follow up after #20664
<!--
Thank you for contributing! Please make sure that your code changes
are covered with tests. And in case of new features or big changes
remember to adjust the documentation.
Feel free to ping committers for the review!
In case of existing issue, reference it using one of the following:
closes: #ISSUE
related: #ISSUE
How to write a good git commit message:
http://chris.beams.io/posts/git-commit/
-->
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
| https://api.github.com/repos/apache/airflow/pulls/21186 | 2022-01-28T14:00:38Z | 2022-01-28T18:27:51Z | 2022-01-28T18:27:51Z | 2022-07-29T20:08:27Z | 2,773 | apache/airflow | 14,582 |
[3.10] bpo-45494: Fix error location in EOF tokenizer errors (GH-29108) | diff --git a/Parser/pegen.c b/Parser/pegen.c
index 98f07a13c92cbb..464a902173dfb9 100644
--- a/Parser/pegen.c
+++ b/Parser/pegen.c
@@ -403,8 +403,12 @@ _PyPegen_raise_error(Parser *p, PyObject *errtype, const char *errmsg, ...)
Py_ssize_t col_offset;
Py_ssize_t end_col_offset = -1;
if (t->col_offset == -1) {
- col_offset = Py_SAFE_DOWNCAST(p->tok->cur - p->tok->buf,
- intptr_t, int);
+ if (p->tok->cur == p->tok->buf) {
+ col_offset = 0;
+ } else {
+ const char* start = p->tok->buf ? p->tok->line_start : p->tok->buf;
+ col_offset = Py_SAFE_DOWNCAST(p->tok->cur - start, intptr_t, int);
+ }
} else {
col_offset = t->col_offset + 1;
}
@@ -431,6 +435,7 @@ get_error_line(Parser *p, Py_ssize_t lineno)
assert(p->tok->fp == NULL || p->tok->fp == stdin);
char *cur_line = p->tok->fp_interactive ? p->tok->interactive_src_start : p->tok->str;
+ assert(cur_line != NULL);
for (int i = 0; i < lineno - 1; i++) {
cur_line = strchr(cur_line, '\n') + 1;
| (cherry picked from commit 79ff0d1687e3f823fb121a19f0297ad052871b1b)
Co-authored-by: Pablo Galindo Salgado <Pablogsal@gmail.com>
<!-- issue-number: [bpo-45494](https://bugs.python.org/issue45494) -->
https://bugs.python.org/issue45494
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/29672 | 2021-11-20T17:41:19Z | 2021-11-20T17:59:34Z | 2021-11-20T17:59:34Z | 2021-11-20T17:59:40Z | 364 | python/cpython | 4,370 |
CLN: de-kludge NDFrame.interpolate | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5348040808e63..16bed91daa7d2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6835,27 +6835,16 @@ def interpolate(
axis = self._get_axis_number(axis)
if axis == 0:
- ax = self._info_axis_name
- _maybe_transposed_self = self
- elif axis == 1:
- _maybe_transposed_self = self.T
- ax = 1
-
- ax = _maybe_transposed_self._get_axis_number(ax)
-
- if _maybe_transposed_self.ndim == 2:
- alt_ax = 1 - ax
+ df = self
else:
- alt_ax = ax
+ df = self.T
- if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
+ if isinstance(df.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
- if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
- _maybe_transposed_self.T
- ):
+ if df.ndim == 2 and np.all(df.dtypes == np.dtype(object)):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
@@ -6865,9 +6854,9 @@ def interpolate(
# create/use the index
if method == "linear":
# prior default
- index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
+ index = np.arange(len(df.index))
else:
- index = _maybe_transposed_self._get_axis(alt_ax)
+ index = df.index
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
@@ -6888,10 +6877,10 @@ def interpolate(
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
- data = _maybe_transposed_self._data
+ data = df._data
new_data = data.interpolate(
method=method,
- axis=ax,
+ axis=self._info_axis_number,
index=index,
limit=limit,
limit_direction=limit_direction,
@@ -6901,15 +6890,13 @@ def interpolate(
**kwargs,
)
+ result = self._constructor(new_data)
+ if axis == 1:
+ result = result.T
if inplace:
- if axis == 1:
- new_data = self._constructor(new_data).T._data
- self._update_inplace(new_data)
+ return self._update_inplace(result)
else:
- res = self._constructor(new_data).__finalize__(self)
- if axis == 1:
- res = res.T
- return res
+ return result.__finalize__(self)
# ----------------------------------------------------------------------
# Timeseries methods Methods
| i think i was responsible for these particular kludges a while back. | https://api.github.com/repos/pandas-dev/pandas/pulls/33084 | 2020-03-27T23:29:36Z | 2020-03-29T15:16:18Z | 2020-03-29T15:16:18Z | 2020-03-29T15:20:56Z | 703 | pandas-dev/pandas | 45,575 |
Upgrade arrow to include numpy bool fix | diff --git a/src/thirdparty/download_thirdparty.sh b/src/thirdparty/download_thirdparty.sh
index 89cb34e61cdf9..8c2086b9d8aa1 100755
--- a/src/thirdparty/download_thirdparty.sh
+++ b/src/thirdparty/download_thirdparty.sh
@@ -13,4 +13,4 @@ fi
cd $TP_DIR/arrow
git fetch origin master
-git checkout a6a97a9d4c07873266a71d8c87069dc4d168e4d2
+git checkout a8f518588fda471b2e3cc8e0f0064e7c4bb99899
| this is a workaround for https://github.com/ray-project/ray/issues/1121 | https://api.github.com/repos/ray-project/ray/pulls/1148 | 2017-10-20T17:01:07Z | 2017-10-21T00:25:16Z | 2017-10-21T00:25:16Z | 2017-10-21T00:25:18Z | 153 | ray-project/ray | 19,599 |
bump version | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7b6662b37..c2bebeff4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [9.8.0] - Unreleased
+## [9.8.0] - 2021-01-11
### Added
@@ -16,11 +16,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- The `style` argument on Align now applies to background only
- Changed display of progress bars in no_color mode for clarity
+- Console property `size` will fall back to getting the terminal size of stdout it stdin fails, this allows size to be correctly determined when piping
### Fixed
- Fixed panel cropping when shrunk too bar
- Allow passing markdown over STDIN when using `python -m rich.markdown`
+- Fix printing MagicMock.mock_calls https://github.com/willmcgugan/rich/issues/903
## [9.7.0] - 2021-01-09
diff --git a/rich/console.py b/rich/console.py
index 3be4da2cc..d2c3b10b4 100644
--- a/rich/console.py
+++ b/rich/console.py
@@ -716,7 +716,10 @@ def size(self) -> ConsoleDimensions:
try:
width, height = os.get_terminal_size(sys.stdin.fileno())
except (AttributeError, ValueError, OSError):
- pass
+ try:
+ width, height = os.get_terminal_size(sys.stdout.fileno())
+ except (AttributeError, ValueError, OSError):
+ pass
# get_terminal_size can report 0, 0 if run from pseudo-terminal
width = width or 80
@@ -1053,6 +1056,12 @@ def check_text() -> None:
del text[:]
for renderable in objects:
+ # I promise this is sane
+ # This detects an object which claims to have all attributes, such as MagicMock.mock_calls
+ if hasattr(
+ renderable, "jwevpw_eors4dfo6mwo345ermk7kdnfnwerwer"
+ ): # pragma: no cover
+ renderable = repr(renderable)
rich_cast = getattr(renderable, "__rich__", None)
if rich_cast:
renderable = rich_cast()
diff --git a/rich/syntax.py b/rich/syntax.py
index 6dfc8a630..5485fd37a 100644
--- a/rich/syntax.py
+++ b/rich/syntax.py
@@ -590,7 +590,7 @@ def __rich_console__(
help="Overide background color",
)
parser.add_argument(
- "-L",
+ "-x",
"--lexer",
default="default",
dest="lexer_name",
@@ -602,17 +602,24 @@ def __rich_console__(
console = Console(force_terminal=args.force_color, width=args.width)
- kwargs = dict(
- line_numbers=args.line_numbers,
- word_wrap=args.word_wrap,
- theme=args.theme,
- background_color=args.background_color,
- indent_guides=args.indent_guides,
- )
-
if not args.path or args.path == "-":
code = sys.stdin.read()
- syntax = Syntax(code=code, lexer_name=args.lexer_name, **kwargs)
+ syntax = Syntax(
+ code=code,
+ lexer_name=args.lexer_name,
+ line_numbers=args.line_numbers,
+ word_wrap=args.word_wrap,
+ theme=args.theme,
+ background_color=args.background_color,
+ indent_guides=args.indent_guides,
+ )
else:
- syntax = Syntax.from_path(args.path, **kwargs)
+ syntax = Syntax.from_path(
+ args.path,
+ line_numbers=args.line_numbers,
+ word_wrap=args.word_wrap,
+ theme=args.theme,
+ background_color=args.background_color,
+ indent_guides=args.indent_guides,
+ )
console.print(syntax, soft_wrap=args.soft_wrap)
| ## Type of changes
- [ ] Bug fix
- [ ] New feature
- [ ] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [ ] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [ ] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [ ] I've added tests for new code.
- [ ] I accept that @willmcgugan may be pedantic in the code review.
## Description
Please describe your changes here. If this fixes a bug, please link to the issue, if possible.
| https://api.github.com/repos/Textualize/rich/pulls/908 | 2021-01-11T21:28:49Z | 2021-01-11T21:41:08Z | 2021-01-11T21:41:08Z | 2021-01-11T21:41:14Z | 985 | Textualize/rich | 48,083 |
Changed ELU implementation to use native ops | diff --git a/keras/activations.py b/keras/activations.py
index e4a194c1d83..a5f5eb77b73 100644
--- a/keras/activations.py
+++ b/keras/activations.py
@@ -15,6 +15,9 @@ def softmax(x):
'Here, ndim=' + str(ndim))
+def elu(x, alpha=1.0):
+ return K.elu(x, alpha)
+
def softplus(x):
return K.softplus(x)
diff --git a/keras/backend/tensorflow_backend.py b/keras/backend/tensorflow_backend.py
index 11c96e5c65f..e3cf1e2cf06 100644
--- a/keras/backend/tensorflow_backend.py
+++ b/keras/backend/tensorflow_backend.py
@@ -1348,6 +1348,19 @@ def relu(x, alpha=0., max_value=None):
return x
+def elu(x, alpha=1.):
+ """ Exponential linear unit
+
+ # Arguments
+ x: Tensor to compute the activation function for.
+ alpha: scalar
+ """
+ res = tf.nn.elu(x)
+ if alpha == 1:
+ return res
+ else:
+ return tf.select(x > 0, res, alpha*res)
+
def softmax(x):
'''Softmax of a tensor.
'''
diff --git a/keras/backend/theano_backend.py b/keras/backend/theano_backend.py
index d7af89a8a0e..0cefdabd5d5 100644
--- a/keras/backend/theano_backend.py
+++ b/keras/backend/theano_backend.py
@@ -931,11 +931,26 @@ def in_test_phase(x, alt):
# NN OPERATIONS
+def _assert_has_capability(module, func):
+ assert hasattr(module, func), ('It looks like like your version of '
+ 'Theano is out of date. '
+ 'Install the latest version with:\n'
+ 'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
+
+
+def elu(x, alpha=1.0):
+ """ Exponential linear unit
+
+ # Arguments
+ x: Tensor to compute the activation function for.
+ alpha: scalar
+ """
+ _assert_has_capability(T.nnet, 'elu')
+ return T.nnet.elu(x, alpha)
+
+
def relu(x, alpha=0., max_value=None):
- assert hasattr(T.nnet, 'relu'), ('It looks like like your version of '
- 'Theano is out of date. '
- 'Install the latest version with:\n'
- 'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
+ _assert_has_capability(T.nnet, 'relu')
x = T.nnet.relu(x, alpha)
if max_value is not None:
x = T.minimum(x, max_value)
diff --git a/keras/layers/advanced_activations.py b/keras/layers/advanced_activations.py
index a3cb0728c86..ad5ce8162d3 100644
--- a/keras/layers/advanced_activations.py
+++ b/keras/layers/advanced_activations.py
@@ -107,9 +107,7 @@ def __init__(self, alpha=1.0, **kwargs):
super(ELU, self).__init__(**kwargs)
def call(self, x, mask=None):
- pos = K.relu(x)
- neg = (x - abs(x)) * 0.5
- return pos + self.alpha * (K.exp(neg) - 1.)
+ return K.elu(x, self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
diff --git a/tests/keras/backend/test_backends.py b/tests/keras/backend/test_backends.py
index 83ca00c6fcd..7cc7227b6a6 100644
--- a/tests/keras/backend/test_backends.py
+++ b/tests/keras/backend/test_backends.py
@@ -492,6 +492,7 @@ def test_nn_operations(self):
check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
check_single_tensor_operation('softmax', (4, 10))
check_single_tensor_operation('softplus', (4, 10))
+ check_single_tensor_operation('elu', (4, 10), alpha=0.5)
check_single_tensor_operation('sigmoid', (4, 2))
check_single_tensor_operation('hard_sigmoid', (4, 2))
diff --git a/tests/keras/test_activations.py b/tests/keras/test_activations.py
index 223bde02415..d4a08e8d497 100644
--- a/tests/keras/test_activations.py
+++ b/tests/keras/test_activations.py
@@ -131,6 +131,23 @@ def test_relu():
assert_allclose(result, test_values, rtol=1e-05)
+def test_elu():
+ x = K.placeholder(ndim=2)
+ f = K.function([x], [activations.elu(x, 0.5)])
+
+ test_values = get_standard_values()
+ result = f([test_values])[0]
+
+ # because no negatives in test values
+ assert_allclose(result, test_values, rtol=1e-05)
+
+ negative_values = np.array([[-1, -2]], dtype=K.floatx())
+ result = f([negative_values])[0]
+ true_result = (np.exp(negative_values) - 1) / 2
+
+ assert_allclose(result, true_result)
+
+
def test_tanh():
test_values = get_standard_values()
| https://api.github.com/repos/keras-team/keras/pulls/3845 | 2016-09-22T09:24:13Z | 2016-09-22T18:08:21Z | 2016-09-22T18:08:21Z | 2016-11-18T15:02:31Z | 1,335 | keras-team/keras | 47,349 | |
Wrap RemBert integration test forward passes with torch.no_grad() | diff --git a/tests/models/rembert/test_modeling_rembert.py b/tests/models/rembert/test_modeling_rembert.py
index b431aff86e098..180b71b17ade1 100644
--- a/tests/models/rembert/test_modeling_rembert.py
+++ b/tests/models/rembert/test_modeling_rembert.py
@@ -464,7 +464,8 @@ def test_inference_model(self):
model = RemBertModel.from_pretrained("google/rembert")
input_ids = torch.tensor([[312, 56498, 313, 2125, 313]])
segment_ids = torch.tensor([[0, 0, 0, 1, 1]])
- output = model(input_ids, token_type_ids=segment_ids, output_hidden_states=True)
+ with torch.no_grad():
+ output = model(input_ids, token_type_ids=segment_ids, output_hidden_states=True)
hidden_size = 1152
| # What does this PR do?
Fixes #14642. Wrapped the forward pass inside RemBert's integration test with `with torch.no_grad()` to ensure no gradients are computed during inference.
## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [X] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
Pull Request section?
- [X] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the
[documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and
[here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?
## Who can review?
@LysandreJik Please let me know if this fix works. Thank you:) | https://api.github.com/repos/huggingface/transformers/pulls/21503 | 2023-02-07T22:29:07Z | 2023-02-08T13:00:53Z | 2023-02-08T13:00:52Z | 2023-02-08T13:04:01Z | 208 | huggingface/transformers | 12,352 |
Updated Humanoid left_hip_y range to match right_hip_y | diff --git a/gym/envs/mujoco/assets/humanoid.xml b/gym/envs/mujoco/assets/humanoid.xml
index ebc2d267a5f..c8d5fc61a18 100755
--- a/gym/envs/mujoco/assets/humanoid.xml
+++ b/gym/envs/mujoco/assets/humanoid.xml
@@ -53,7 +53,7 @@
<body name="left_thigh" pos="0 0.1 -0.04">
<joint armature="0.01" axis="-1 0 0" damping="5" name="left_hip_x" pos="0 0 0" range="-25 5" stiffness="10" type="hinge"/>
<joint armature="0.01" axis="0 0 -1" damping="5" name="left_hip_z" pos="0 0 0" range="-60 35" stiffness="10" type="hinge"/>
- <joint armature="0.01" axis="0 1 0" damping="5" name="left_hip_y" pos="0 0 0" range="-120 20" stiffness="20" type="hinge"/>
+ <joint armature="0.01" axis="0 1 0" damping="5" name="left_hip_y" pos="0 0 0" range="-110 20" stiffness="20" type="hinge"/>
<geom fromto="0 0 0 0 -0.01 -.34" name="left_thigh1" size="0.06" type="capsule"/>
<body name="left_shin" pos="0 -0.01 -0.403">
<joint armature="0.0060" axis="0 -1 0" name="left_knee" pos="0 0 .02" range="-160 -2" stiffness="1" type="hinge"/>
| right_hip_y has range -110 to 20. left_hip_y should have the same range. | https://api.github.com/repos/openai/gym/pulls/932 | 2018-03-06T04:50:54Z | 2019-03-01T22:23:08Z | 2019-03-01T22:23:08Z | 2019-03-01T22:23:08Z | 431 | openai/gym | 5,409 |
Catch edge cases were delimiters are wrongfully caught | diff --git a/lib/cheat_wrapper.py b/lib/cheat_wrapper.py
index 59e6b921..606f3513 100644
--- a/lib/cheat_wrapper.py
+++ b/lib/cheat_wrapper.py
@@ -26,9 +26,25 @@ def _add_section_name(query):
if '/' in query:
return query
if ' ' in query:
- # for standalone queries only that may contain ' '
- return "%s/%s" % tuple(query.split(' ', 1))
- return "%s/%s" % tuple(query.split('+', 1))
+ delim = " "
+ elif '+' in query:
+ delim = "+"
+
+ index = 0
+ length = len(query)
+ while index != length:
+
+ index = query.index(delim, index) + 1
+
+ try:
+ comparison = query.index(delim, index)
+ except ValueError:
+ comparison = -1
+
+ if (index != comparison and index != length):
+ return "%s/%s" % (query[:index-1], query[index:])
+
+ return query
def cheat_wrapper(query, request_options=None, output_format='ansi'):
"""
diff --git a/lib/cheat_wrapper_test.py b/lib/cheat_wrapper_test.py
index ab826cec..5533edc8 100644
--- a/lib/cheat_wrapper_test.py
+++ b/lib/cheat_wrapper_test.py
@@ -3,6 +3,10 @@
unchanged = """
python/:list
ls
++
+g++
+g/+
+clang++
btrfs~volume
:intro
:cht.sh
@@ -14,6 +18,9 @@
split = """
python copy file
python/copy file
+
+g++ -O1
+g++/-O1
"""
def test_header_split():
| related to #308
implements more robust logic that splits on the first delim it finds that isn't followed by the same delim | https://api.github.com/repos/chubin/cheat.sh/pulls/312 | 2021-10-13T09:18:03Z | 2021-11-13T12:50:22Z | 2021-11-13T12:50:22Z | 2021-11-13T15:32:23Z | 412 | chubin/cheat.sh | 15,249 |
Improve bad preferred challenge error message | diff --git a/certbot/cli.py b/certbot/cli.py
index d505334a01b..7df3fe05d13 100644
--- a/certbot/cli.py
+++ b/certbot/cli.py
@@ -1295,7 +1295,7 @@ def __call__(self, parser, namespace, pref_challs, option_string=None):
try:
challs = parse_preferred_challenges(pref_challs.split(","))
except errors.Error as error:
- raise argparse.ArgumentTypeError(str(error))
+ raise argparse.ArgumentError(self, str(error))
namespace.pref_challs.extend(challs)
diff --git a/certbot/tests/cli_test.py b/certbot/tests/cli_test.py
index d29f7c1b6e0..f18da240a3e 100644
--- a/certbot/tests/cli_test.py
+++ b/certbot/tests/cli_test.py
@@ -211,7 +211,10 @@ def test_preferred_challenges(self):
self.assertEqual(namespace.pref_challs, expected)
short_args = ['--preferred-challenges', 'jumping-over-the-moon']
- self.assertRaises(argparse.ArgumentTypeError, self.parse, short_args)
+ # argparse.ArgumentError makes argparse print more information
+ # to stderr and call sys.exit()
+ with mock.patch('sys.stderr'):
+ self.assertRaises(SystemExit, self.parse, short_args)
def test_server_flag(self):
namespace = self.parse('--server example.com'.split())
| Using ArgumentTypeError causes Certbot to report an unexpected error occurred while using ArgumentError causes argparse to print more usage information and call sys.exit(). | https://api.github.com/repos/certbot/certbot/pulls/4761 | 2017-05-31T23:36:36Z | 2017-06-01T17:42:35Z | 2017-06-01T17:42:35Z | 2017-06-07T16:46:20Z | 326 | certbot/certbot | 732 |
Fixed spelling -- super set -> superset. | diff --git a/docs/ref/contrib/gis/geos.txt b/docs/ref/contrib/gis/geos.txt
index 206a24c8aa834..bf8595811fef2 100644
--- a/docs/ref/contrib/gis/geos.txt
+++ b/docs/ref/contrib/gis/geos.txt
@@ -302,7 +302,7 @@ another object.
.. attribute:: GEOSGeometry.ewkt
Returns the "extended" Well-Known Text of the geometry. This representation
-is specific to PostGIS and is a super set of the OGC WKT standard. [#fnogc]_
+is specific to PostGIS and is a superset of the OGC WKT standard. [#fnogc]_
Essentially the SRID is prepended to the WKT representation, for example
``SRID=4326;POINT(5 23)``.
| https://api.github.com/repos/django/django/pulls/6095 | 2016-02-06T05:00:43Z | 2016-02-06T13:28:20Z | 2016-02-06T13:28:20Z | 2016-02-19T16:16:48Z | 190 | django/django | 50,845 | |
refactor: Reflect changes in Cluster class | diff --git a/diagrams/__init__.py b/diagrams/__init__.py
index 1fb33db73..33520175e 100644
--- a/diagrams/__init__.py
+++ b/diagrams/__init__.py
@@ -258,11 +258,8 @@ def __exit__(self, exc_type, exc_value, traceback):
self._diagram.subgraph(self.dot)
setcluster(self._parent)
- def _validate_direction(self, direction: str):
- direction = direction.upper()
- if direction in self.__directions:
- return True
- return False
+ def _validate_direction(self, direction: str) -> bool:
+ return direction.upper() in self.__directions
def node(self, nodeid: str, label: str, **attrs) -> None:
"""Create a new node in the cluster."""
| Refactor the _validate_direction in Cluster class I mess it when I made the changes in the 592 PR https://github.com/mingrammer/diagrams/pull/592 | https://api.github.com/repos/mingrammer/diagrams/pulls/653 | 2022-02-12T14:35:21Z | 2022-11-04T10:57:08Z | 2022-11-04T10:57:08Z | 2022-11-04T10:57:09Z | 195 | mingrammer/diagrams | 52,574 |
Fix typos | diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index bdf01dc9c..22da416fd 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -2589,7 +2589,7 @@ Small simple functions are easily inlined where the cost of a function call is s
* Flag functions that do not "fit on a screen."
How big is a screen? Try 60 lines by 140 characters; that's roughly the maximum that's comfortable for a book page.
* Flag functions that are too complex. How complex is too complex?
- You could use cyclomatic complexity. Try "more than 10 logical path through." Count a simple switch as one path.
+ You could use cyclomatic complexity. Try "more than 10 logical paths through." Count a simple switch as one path.
### <a name="Rf-constexpr"></a>F.4: If a function might have to be evaluated at compile time, declare it `constexpr`
@@ -4732,7 +4732,7 @@ This is known as "the rule of zero".
##### Enforcement
(Not enforceable) While not enforceable, a good static analyzer can detect patterns that indicate a possible improvement to meet this rule.
-For example, a class with a (pointer, size) pair of member and a destructor that `delete`s the pointer could probably be converted to a `vector`.
+For example, a class with a (pointer, size) pair of members and a destructor that `delete`s the pointer could probably be converted to a `vector`.
### <a name="Rc-five"></a>C.21: If you define or `=delete` any copy, move, or destructor function, define or `=delete` them all
@@ -4821,7 +4821,7 @@ Relying on an implicitly generated copy operation in a class with a destructor i
##### Note
-Writing these functions can be error prone.
+Writing these functions can be error-prone.
Note their argument types:
class X {
@@ -7485,9 +7485,9 @@ Another (related) technique for separating interface and implementation is [Pimp
##### Note
-There is often a choice between offering common functionality as (implemented) base class functions and free-standing functions
+There is often a choice between offering common functionality as (implemented) base class functions and freestanding functions
(in an implementation namespace).
-Base classes gives a shorter notation and easier access to shared data (in the base)
+Base classes give a shorter notation and easier access to shared data (in the base)
at the cost of the functionality being available only to users of the hierarchy.
##### Enforcement
@@ -8869,7 +8869,7 @@ C++17 introduced a distinct type `std::byte` to facilitate operations on raw obj
# <a name="S-enum"></a>Enum: Enumerations
Enumerations are used to define sets of integer values and for defining types for such sets of values.
-There are two kind of enumerations, "plain" `enum`s and `class enum`s.
+There are two kinds of enumerations, "plain" `enum`s and `class enum`s.
Enumeration rule summary:
@@ -10896,7 +10896,7 @@ For containers, there is a tradition for using `{...}` for a list of elements an
vector<int> v2{10}; // vector of 1 element with the value 10
vector<int> v3(1, 2); // vector of 1 element with the value 2
- vector<int> v4{1, 2}; // vector of 2 element with the values 1 and 2
+ vector<int> v4{1, 2}; // vector of 2 elements with the values 1 and 2
##### Note
@@ -12693,7 +12693,7 @@ Don't use expensive copies of the loop variable of a range-`for` loop:
for (string s : vs) // ...
-This will copy each elements of `vs` into `s`. Better:
+This will copy each element of `vs` into `s`. Better:
for (string& s : vs) // ...
@@ -13737,7 +13737,7 @@ With C++20, we can do better still
The key is to pass sufficient information for a good implementation to be chosen.
In this, the `sort` interfaces shown here still have a weakness:
They implicitly rely on the element type having less-than (`<`) defined.
-To complete the interface, we need a second version that accepts a comparison criteria:
+To complete the interface, we need a second version that accepts a comparison criterion:
// compare elements of c using p
template<random_access_range R, class C> requires sortable<R, C>
@@ -14378,7 +14378,7 @@ but it can only identify races seen in a given execution.
##### Enforcement
-It is up to an application builder to choose which support tools are valuable for a particular applications.
+It is up to an application builder to choose which support tools are valuable for a particular application.
## <a name="SScp-con"></a>CP.con: Concurrency
@@ -15626,7 +15626,7 @@ The rules are designed to help avoid several kinds of errors:
* Type violations (e.g., misuse of `union`s and casts)
* Resource leaks (including memory leaks)
* Bounds errors
-* Lifetime errors (e.g., accessing an object after is has been `delete`d)
+* Lifetime errors (e.g., accessing an object after it has been `delete`d)
* Complexity errors (logical errors made likely by overly complex expression of ideas)
* Interface errors (e.g., an unexpected value is passed through an interface)
@@ -15684,7 +15684,7 @@ To make error handling systematic, robust, and non-repetitive.
// ...
}
-Here, `vector` and `string`s constructors might not be able to allocate sufficient memory for their elements, `vector`s constructor might not be able copy the `Thing`s in its initializer list, and `File_handle` might not be able to open the required file.
+Here, `vector` and `string`s constructors might not be able to allocate sufficient memory for their elements, `vector`s constructor might not be able to copy the `Thing`s in its initializer list, and `File_handle` might not be able to open the required file.
In each case, they throw an exception for `use()`'s caller to handle.
If `use()` could handle the failure to construct `bar` it can take control using `try`/`catch`.
In either case, `Foo`'s constructor correctly destroys constructed members before passing control to whatever tried to create a `Foo`.
@@ -15799,7 +15799,7 @@ Not all member functions can be called.
The class invariant - here stated as a comment - is established by the constructors.
`new` throws if it cannot allocate the required memory.
-The operators, notably the subscript operator, relies on the invariant.
+The operators, notably the subscript operator, rely on the invariant.
**See also**: [If a constructor cannot construct a valid object, throw an exception](#Rc-throw)
@@ -16634,7 +16634,7 @@ Flag all "hiding handlers".
You can't have a race condition on a constant.
It is easier to reason about a program when many of the objects cannot change their values.
-Interfaces that promises "no change" of objects passed as arguments greatly increase readability.
+Interfaces that promise "no change" of objects passed as arguments greatly increase readability.
Constant rule summary:
@@ -20703,7 +20703,7 @@ and errors (when we didn't deal correctly with semi-constructed objects consiste
if (!picture.Init()) {
puts("Error, invalid picture");
}
- // now have a invalid picture object instance.
+ // now have an invalid picture object instance.
##### Example, good
@@ -20936,7 +20936,7 @@ A textbook for beginners and relative novices.
## <a name="SS-core"></a>RF.core: Core Guidelines materials
-This section contains materials that has been useful for presenting the core guidelines and the ideas behind them:
+This section contains materials that have been useful for presenting the core guidelines and the ideas behind them:
* [Our documents directory](https://github.com/isocpp/CppCoreGuidelines/tree/master/docs)
* Stroustrup, Sutter, and Dos Reis: [A brief introduction to C++'s model for type- and resource-safety](http://www.stroustrup.com/resource-model.pdf). A paper with lots of examples.
@@ -21206,8 +21206,8 @@ Use `not_null<zstring>` for C-style strings that cannot be `nullptr`. ??? Do we
* `unique_ptr<T>` // unique ownership: `std::unique_ptr<T>`
* `shared_ptr<T>` // shared ownership: `std::shared_ptr<T>` (a counted pointer)
-* `stack_array<T>` // A stack-allocated array. The number of elements are determined at construction and fixed thereafter. The elements are mutable unless `T` is a `const` type.
-* `dyn_array<T>` // ??? needed ??? A heap-allocated array. The number of elements are determined at construction and fixed thereafter.
+* `stack_array<T>` // A stack-allocated array. The number of elements is determined at construction and fixed thereafter. The elements are mutable unless `T` is a `const` type.
+* `dyn_array<T>` // ??? needed ??? A heap-allocated array. The number of elements is determined at construction and fixed thereafter.
The elements are mutable unless `T` is a `const` type. Basically a `span` that allocates and owns its elements.
## <a name="SS-assertions"></a>GSL.assert: Assertions
@@ -22678,7 +22678,7 @@ More information on many topics about C++ can be found on the [Standard C++ Foun
* *constructor*: an operation that initializes ("constructs") an object.
Typically a constructor establishes an invariant and often acquires resources needed for an object to be used (which are then typically released by a destructor).
* *container*: an object that holds elements (other objects).
-* *copy*: an operation that makes two object have values that compare equal. See also move.
+* *copy*: an operation that makes two objects have values that compare equal. See also move.
* *correctness*: a program or a piece of a program is correct if it meets its specification.
Unfortunately, a specification can be incomplete or inconsistent, or can fail to meet users' reasonable expectations.
Thus, to produce acceptable code, we sometimes have to do more than just follow the formal specification.
@@ -22721,7 +22721,7 @@ More information on many topics about C++ can be found on the [Standard C++ Foun
* *iteration*: the act of repeatedly executing a piece of code; see recursion.
* *iterator*: an object that identifies an element of a sequence.
* *ISO*: International Organization for Standardization. The C++ language is an ISO standard, ISO/IEC 14882. More information at [iso.org](http://iso.org).
-* *library*: a collection of types, functions, classes, etc. implementing a set of facilities (abstractions) meant to be potentially used as part of more that one program.
+* *library*: a collection of types, functions, classes, etc. implementing a set of facilities (abstractions) meant to be potentially used as part of more than one program.
* *lifetime*: the time from the initialization of an object until it becomes unusable (goes out of scope, is deleted, or the program terminates).
* *linker*: a program that combines object code files and libraries into an executable program.
* *literal*: a notation that directly specifies a value, such as 12 specifying the integer value "twelve."
@@ -22761,7 +22761,7 @@ In particular, an object of a regular type can be copied and the result of a cop
* *rounding*: conversion of a value to the mathematically nearest value of a less precise type.
* *RTTI*: Run-Time Type Information. ???
* *scope*: the region of program text (source code) in which a name can be referred to.
-* *semiregular*: a concrete type that is copyable (including movable) and default-constructible (see `std::semiregular` concept). The result of a copy is an independent object with the same value as the original. A semiregular type behaves roughly like an built-in type like `int`, but possibly without a `==` operator. See also *regular type*.
+* *semiregular*: a concrete type that is copyable (including movable) and default-constructible (see `std::semiregular` concept). The result of a copy is an independent object with the same value as the original. A semiregular type behaves roughly like a built-in type like `int`, but possibly without a `==` operator. See also *regular type*.
* *sequence*: elements that can be visited in a linear order.
* *software*: a collection of pieces of code and associated data; often used interchangeably with program.
* *source code*: code as produced by a programmer and (in principle) readable by other programmers.
| https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/1970 | 2022-09-11T07:34:56Z | 2022-09-12T03:52:09Z | 2022-09-12T03:52:09Z | 2022-09-12T03:52:44Z | 2,915 | isocpp/CppCoreGuidelines | 15,899 | |
fix bug where when using prompt composition, hijack_comments generated before the final AND will be dropped | diff --git a/modules/processing.py b/modules/processing.py
index f773a30ef84..e6a3129ae69 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -312,6 +312,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
os.makedirs(p.outpath_grids, exist_ok=True)
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
+ modules.sd_hijack.model_hijack.clear_comments()
comments = {}
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index d68f89cc2fb..6a51ab29524 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -82,6 +82,9 @@ def apply_circular(self, enable):
for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
layer.padding_mode = 'circular' if enable else 'zeros'
+ def clear_comments(self):
+ self.comments = []
+
def tokenize(self, text):
max_length = self.clip.max_length - 2
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
@@ -263,7 +266,7 @@ def forward(self, text):
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
self.hijack.fixes = hijack_fixes
- self.hijack.comments = hijack_comments
+ self.hijack.comments += hijack_comments
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
| e.g. if using `embedding1 AND embedding2`, the messages below the image will only show that embedding2 is being used, when in fact both are being used. | https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/1915 | 2022-10-07T21:49:49Z | 2022-10-08T12:48:04Z | 2022-10-08T12:48:04Z | 2022-10-08T12:48:04Z | 424 | AUTOMATIC1111/stable-diffusion-webui | 39,717 |
Use GH action version when version argument not specified | diff --git a/.git_archival.txt b/.git_archival.txt
new file mode 100644
index 00000000000..8fb235d7045
--- /dev/null
+++ b/.git_archival.txt
@@ -0,0 +1,4 @@
+node: $Format:%H$
+node-date: $Format:%cI$
+describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
+ref-names: $Format:%D$
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000000..00a7b00c94e
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+.git_archival.txt export-subst
diff --git a/CHANGES.md b/CHANGES.md
index 2071eb3f800..b10cfc9888a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -42,6 +42,9 @@
<!-- For example, Docker, GitHub Actions, pre-commit, editors -->
+- Update GitHub Action to use the version of Black equivalent to action's version if
+ version input is not specified (#3543)
+
### Documentation
<!-- Major changes to documentation and policies. Small docs changes
diff --git a/action/main.py b/action/main.py
index ff9d4112aed..23c3a652194 100644
--- a/action/main.py
+++ b/action/main.py
@@ -22,12 +22,34 @@
extra_deps = "[colorama,jupyter]"
else:
extra_deps = "[colorama]"
-req = f"black{extra_deps}{version_specifier}"
+if version_specifier:
+ req = f"black{extra_deps}{version_specifier}"
+else:
+ describe_name = ""
+ with open(ACTION_PATH / ".git_archival.txt", encoding="utf-8") as fp:
+ for line in fp:
+ if line.startswith("describe-name: "):
+ describe_name = line[len("describe-name: ") :].rstrip()
+ break
+ if not describe_name:
+ print("::error::Failed to detect action version.", flush=True)
+ sys.exit(1)
+ # expected format is one of:
+ # - 23.1.0
+ # - 23.1.0-51-g448bba7
+ if describe_name.count("-") < 2:
+ # the action's commit matches a tag exactly, install exact version from PyPI
+ req = f"black{extra_deps}=={describe_name}"
+ else:
+ # the action's commit does not match any tag, install from the local git repo
+ req = f".{extra_deps}"
+print(f"Installing {req}...", flush=True)
pip_proc = run(
[str(ENV_BIN / "python"), "-m", "pip", "install", req],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
+ cwd=ACTION_PATH,
)
if pip_proc.returncode:
print(pip_proc.stdout)
| ### Description
Resolves #3382
There are 2 things going on here:
- addition of `.git_archival.txt` set with `export-subst` git attribute so that black can be installed from a git archive, see: https://github.com/pypa/setuptools_scm#git-archives
- update to GH action changing it to use GH action version when the version argument is not specified; this means:
- when the action's commit is tagged, install Black from PyPI using that tag's name as the pinned version
- when the action's commit is not tagged, install Black from the action's local directory
A couple of tests:
- `Jackenmen/black@22.12.0` - 22.12.0 on my repo is not the real commit of 22.12.0 as then it wouldn't have this feature; however it does allow you to see that the code gets 2022 formatting:
- https://github.com/Jackenmen/black/actions/runs/4076830464/jobs/7025089166
- `Jackenmen/black@e29e12c973d0e294a0a905b1fb4d8f4962bfa408` - the exact same commit that you get with 22.12.0, just confirming that it still correctly detects it as a tagged release in such a case:
- https://github.com/Jackenmen/black/actions/runs/4076898146/jobs/7025240425
- `Jackenmen/black@9a168472de3144674dd7db3c6b514b7e5f5d5c20` - an untagged commit *after* the fake 22.12.0 tag, this causes the action to install Black from the local source; you can see that this causes it to use 2023 formatting:
- https://github.com/Jackenmen/black/actions/runs/4076901054/jobs/7025246892
- `Jackenmen/black@stable` - the exact same commit that you get with 22.12.0, just confirming that it correctly fetches 22.12.0:
- https://github.com/Jackenmen/black/actions/runs/4077049374/jobs/7025578668
### Checklist - did you ...
- [x] Add an entry in `CHANGES.md` if necessary?
- [x] Add / update tests if necessary?
- [ ] Add new / update outdated documentation?
| https://api.github.com/repos/psf/black/pulls/3543 | 2023-02-02T17:25:27Z | 2023-03-28T01:40:28Z | 2023-03-28T01:40:27Z | 2023-03-28T02:59:02Z | 693 | psf/black | 23,875 |
Optimization - avoid temporary list objects, unnecessary function call | diff --git a/scrapy/commands/check.py b/scrapy/commands/check.py
index 2917b8ba726..017595f04f7 100644
--- a/scrapy/commands/check.py
+++ b/scrapy/commands/check.py
@@ -62,7 +62,7 @@ def run(self, args, opts):
self.settings['SPIDER_CONTRACTS_BASE'],
self.settings['SPIDER_CONTRACTS'],
)
- conman = ContractsManager([load_object(c) for c in contracts])
+ conman = ContractsManager(load_object(c) for c in contracts)
runner = TextTestRunner(verbosity=2 if opts.verbose else 1)
result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)
diff --git a/scrapy/commands/genspider.py b/scrapy/commands/genspider.py
index 2c14b3c1f37..f0d4353798e 100644
--- a/scrapy/commands/genspider.py
+++ b/scrapy/commands/genspider.py
@@ -88,8 +88,8 @@ def _genspider(self, module, name, domain, template_name, template_file):
'module': module,
'name': name,
'domain': domain,
- 'classname': '%sSpider' % ''.join([s.capitalize() \
- for s in module.split('_')])
+ 'classname': '%sSpider' % ''.join(s.capitalize() \
+ for s in module.split('_'))
}
spiders_module = import_module(self.settings['NEWSPIDER_MODULE'])
spiders_dir = abspath(dirname(spiders_module.__file__))
diff --git a/scrapy/crawler.py b/scrapy/crawler.py
index 2f1a92d3190..c7e3bb528f1 100644
--- a/scrapy/crawler.py
+++ b/scrapy/crawler.py
@@ -173,7 +173,7 @@ def stop(self):
Returns a deferred that is fired when they all have ended.
"""
- return defer.DeferredList([c.stop() for c in list(self.crawlers)])
+ return defer.DeferredList([c.stop() for c in self.crawlers])
@defer.inlineCallbacks
def join(self):
diff --git a/scrapy/loader/__init__.py b/scrapy/loader/__init__.py
index 84640f0b6c8..6c2ff968e45 100644
--- a/scrapy/loader/__init__.py
+++ b/scrapy/loader/__init__.py
@@ -69,7 +69,7 @@ def get_value(self, value, *processors, **kw):
regex = kw.get('re', None)
if regex:
value = arg_to_iter(value)
- value = flatten([extract_regex(regex, x) for x in value])
+ value = flatten(extract_regex(regex, x) for x in value)
for proc in processors:
if value is None:
@@ -149,7 +149,7 @@ def _get_values(self, xpaths, **kw):
def _get_xpathvalues(self, xpaths, **kw):
self._check_selector_method()
xpaths = arg_to_iter(xpaths)
- return flatten([self.selector.xpath(xpath).extract() for xpath in xpaths])
+ return flatten(self.selector.xpath(xpath).extract() for xpath in xpaths)
def add_css(self, field_name, css, *processors, **kw):
values = self._get_cssvalues(css, **kw)
@@ -166,7 +166,7 @@ def get_css(self, css, *processors, **kw):
def _get_cssvalues(self, csss, **kw):
self._check_selector_method()
csss = arg_to_iter(csss)
- return flatten([self.selector.css(css).extract() for css in csss])
+ return flatten(self.selector.css(css).extract() for css in csss)
XPathItemLoader = create_deprecated_class('XPathItemLoader', ItemLoader)
diff --git a/scrapy/shell.py b/scrapy/shell.py
index f008ce39a2f..099e1af0a26 100644
--- a/scrapy/shell.py
+++ b/scrapy/shell.py
@@ -146,7 +146,7 @@ def get_help(self):
"update local objects")
b.append(" view(response) View response in a browser")
- return "\n".join(["[s] %s" % l for l in b])
+ return "\n".join("[s] %s" % l for l in b)
def _is_relevant(self, value):
return isinstance(value, self.relevant_classes)
diff --git a/scrapy/utils/defer.py b/scrapy/utils/defer.py
index 8f3824abf67..bb4c74a6e9a 100644
--- a/scrapy/utils/defer.py
+++ b/scrapy/utils/defer.py
@@ -61,7 +61,7 @@ def parallel(iterable, count, callable, *args, **named):
"""
coop = task.Cooperator()
work = (callable(elem, *args, **named) for elem in iterable)
- return defer.DeferredList([coop.coiterate(work) for i in range(count)])
+ return defer.DeferredList([coop.coiterate(work) for _ in range(count)])
def process_chain(callbacks, input, *a, **kw):
"""Return a Deferred built by chaining the given callbacks"""
diff --git a/scrapy/utils/request.py b/scrapy/utils/request.py
index e361b74332e..a98ca61cef3 100644
--- a/scrapy/utils/request.py
+++ b/scrapy/utils/request.py
@@ -44,8 +44,8 @@ def request_fingerprint(request, include_headers=None):
"""
if include_headers:
- include_headers = tuple([to_bytes(h.lower())
- for h in sorted(include_headers)])
+ include_headers = tuple(to_bytes(h.lower())
+ for h in sorted(include_headers))
cache = _fingerprint_cache.setdefault(request, {})
if include_headers not in cache:
fp = hashlib.sha1()
| Please review.
| https://api.github.com/repos/scrapy/scrapy/pulls/1481 | 2015-09-04T17:37:33Z | 2015-09-14T19:36:50Z | 2015-09-14T19:36:50Z | 2015-09-14T21:03:40Z | 1,352 | scrapy/scrapy | 34,250 |
Error/Warning with build docker container from Dockerfile | diff --git a/Dockerfile b/Dockerfile
index 3e4c9430e3d..d42b632d4d9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -21,6 +21,7 @@ WORKDIR /opt/certbot
# If <dest> doesn't exist, it is created along with all missing
# directories in its path.
+ENV DEBIAN_FRONTEND=noninteractive
COPY letsencrypt-auto-source/letsencrypt-auto /opt/certbot/src/letsencrypt-auto-source/letsencrypt-auto
RUN /opt/certbot/src/letsencrypt-auto-source/letsencrypt-auto --os-packages-only && \
| When I try to build container I see in logs
```
debconf: unable to initialize frontend: Dialog
debconf: (TERM is not set, so the dialog frontend is not usable.)
debconf: falling back to frontend: Readline
debconf: unable to initialize frontend: Readline
debconf: (This frontend requires a controlling tty.)
debconf: falling back to frontend: Teletype
```
`DEBIAN_FRONTEND=noninteractive` fixed this warning
docker version - 1.11.1.
| https://api.github.com/repos/certbot/certbot/pulls/3004 | 2016-05-16T09:45:01Z | 2016-05-18T23:35:17Z | 2016-05-18T23:35:17Z | 2016-05-18T23:35:17Z | 149 | certbot/certbot | 3,144 |
Fix stray ellipsis | diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index 2ad06fdef..0c1052d1e 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -3152,7 +3152,7 @@ Usually you forward the entire parameter (or parameter pack, using `...`) exactl
Sometimes you may forward a composite parameter piecewise, each subobject once on every static control flow path:
template<class PairLike>
- inline auto test(PairLike&&... pairlike)
+ inline auto test(PairLike&& pairlike)
{
// ...
f1(some, args, and, forward<PairLike>(pairlike).first); // forward .first
| This PR removes a stray ellipsis where the author clearly didn't intend to use a parameter pack.
The code is ill-formed without this fix. | https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/2091 | 2023-06-23T16:24:44Z | 2023-06-23T17:08:51Z | 2023-06-23T17:08:51Z | 2023-06-23T17:08:52Z | 169 | isocpp/CppCoreGuidelines | 15,584 |
Add default headers when calling API destination target and fix ARN | diff --git a/localstack/utils/aws/aws_stack.py b/localstack/utils/aws/aws_stack.py
index 20b8606880243..5e1853887db95 100644
--- a/localstack/utils/aws/aws_stack.py
+++ b/localstack/utils/aws/aws_stack.py
@@ -665,44 +665,48 @@ def _resource_arn(name, pattern, account_id=None, region_name=None):
return pattern % (region_name, account_id, name)
-def send_event_to_target(arn, event, target_attributes=None, asynchronous=True):
- region = arn.split(":")[3]
+def send_event_to_target(target_arn, event, target_attributes=None, asynchronous=True):
+ region = target_arn.split(":")[3]
- if ":lambda:" in arn:
+ if ":lambda:" in target_arn:
from localstack.services.awslambda import lambda_api
- lambda_api.run_lambda(func_arn=arn, event=event, context={}, asynchronous=asynchronous)
+ lambda_api.run_lambda(
+ func_arn=target_arn, event=event, context={}, asynchronous=asynchronous
+ )
- elif ":sns:" in arn:
+ elif ":sns:" in target_arn:
sns_client = connect_to_service("sns", region_name=region)
- sns_client.publish(TopicArn=arn, Message=json.dumps(event))
+ sns_client.publish(TopicArn=target_arn, Message=json.dumps(event))
- elif ":sqs:" in arn:
+ elif ":sqs:" in target_arn:
sqs_client = connect_to_service("sqs", region_name=region)
- queue_url = get_sqs_queue_url(arn)
+ queue_url = get_sqs_queue_url(target_arn)
msg_group_id = dict_utils.get_safe(target_attributes, "$.SqsParameters.MessageGroupId")
kwargs = {"MessageGroupId": msg_group_id} if msg_group_id else {}
sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event), **kwargs)
- elif ":states:" in arn:
+ elif ":states:" in target_arn:
stepfunctions_client = connect_to_service("stepfunctions", region_name=region)
- stepfunctions_client.start_execution(stateMachineArn=arn, input=json.dumps(event))
+ stepfunctions_client.start_execution(stateMachineArn=target_arn, input=json.dumps(event))
- elif ":firehose:" in arn:
- delivery_stream_name = firehose_name(arn)
+ elif ":firehose:" in target_arn:
+ delivery_stream_name = firehose_name(target_arn)
firehose_client = connect_to_service("firehose", region_name=region)
firehose_client.put_record(
DeliveryStreamName=delivery_stream_name,
Record={"Data": to_bytes(json.dumps(event))},
)
- elif ":events:" in arn:
+ elif ":events:" in target_arn:
events_client = connect_to_service("events", region_name=region)
- arn_suffix_parts = arn.split(":")[-1].split("/")
- target_name = arn_suffix_parts[-1]
- if ":destination/" in arn or ":api-destination/" in arn:
- target_name = arn_suffix_parts[1] # extract name from ...:api-destination/<name>/<uuid>
- destination = events_client.describe_api_destination(Name=target_name)
+ if ":api-destination/" in target_arn:
+ # API destination support
+ # see https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-api-destinations.html
+ api_destination_name = target_arn.split(":")[-1].split("/")[
+ 1
+ ] # ...:api-destination/{name}/{uuid}
+ destination = events_client.describe_api_destination(Name=api_destination_name)
method = destination.get("HttpMethod", "GET")
endpoint = destination.get("InvocationEndpoint")
state = destination.get("ApiDestinationState") or "ACTIVE"
@@ -710,17 +714,34 @@ def send_event_to_target(arn, event, target_attributes=None, asynchronous=True):
'Calling EventBridge API destination (state "%s"): %s %s'
% (state, method, endpoint)
)
- result = requests.request(method=method, url=endpoint, data=json.dumps(event or {}))
+ # TODO: support connection/auth (BASIC AUTH, API KEY, OAUTH)
+ # connection_arn = destination.get("ConnectionArn")
+ headers = {
+ # default headers AWS sends with every api destination call
+ "User-Agent": "Amazon/EventBridge/ApiDestinations",
+ "Content-Type": "application/json; charset=utf-8",
+ "Range": "bytes=0-1048575",
+ "Accept-Encoding": "gzip,deflate",
+ "Connection": "close",
+ }
+ # TODO: consider option to disable the actual network call to avoid unintended side effects
+ # TODO: InvocationRateLimitPerSecond (needs some form of thread-safety, scoped to the api destination)
+ result = requests.request(
+ method=method, url=endpoint, data=json.dumps(event or {}), headers=headers
+ )
if result.status_code >= 400:
LOG.debug(
"Received code %s forwarding events: %s %s"
% (result.status_code, method, endpoint)
)
+ if result.status_code == 429 or 500 <= result.status_code <= 600:
+ pass # TODO: retry logic (only retry on 429 and 5xx response status)
else:
+ eventbus_name = target_arn.split(":")[-1].split("/")[-1]
events_client.put_events(
Entries=[
{
- "EventBusName": target_name,
+ "EventBusName": eventbus_name,
"Source": event.get("source"),
"DetailType": event.get("detail-type"),
"Detail": event.get("detail"),
@@ -728,14 +749,14 @@ def send_event_to_target(arn, event, target_attributes=None, asynchronous=True):
]
)
- elif ":kinesis:" in arn:
+ elif ":kinesis:" in target_arn:
partition_key_path = dict_utils.get_safe(
target_attributes,
"$.KinesisParameters.PartitionKeyPath",
default_value="$.id",
)
- stream_name = arn.split("/")[-1]
+ stream_name = target_arn.split("/")[-1]
partition_key = dict_utils.get_safe(event, partition_key_path, event["id"])
kinesis_client = connect_to_service("kinesis", region_name=region)
@@ -746,7 +767,7 @@ def send_event_to_target(arn, event, target_attributes=None, asynchronous=True):
)
else:
- LOG.warning('Unsupported Events rule target ARN: "%s"' % arn)
+ LOG.warning('Unsupported Events rule target ARN: "%s"' % target_arn)
def get_events_target_attributes(target):
| Implements enhancement from #4274
Adds default headers AWS sends when invoking the API destination target and fixes the api destination ARN structure.
Corresponding ARN fixes are available in the moto-ext release 2.0.3.37 | https://api.github.com/repos/localstack/localstack/pulls/4280 | 2021-07-09T11:09:22Z | 2021-07-09T14:51:24Z | 2021-07-09T14:51:24Z | 2021-07-09T14:51:27Z | 1,543 | localstack/localstack | 29,196 |
[requires.io] dependency update on master branch | diff --git a/setup.py b/setup.py
index f3aecd57e6..11a72a0017 100644
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,7 @@
"h2>=3.0, <4",
"hyperframe>=5.0, <6",
"kaitaistruct>=0.7, <0.8",
- "ldap3>=2.2.0, <2.4",
+ "ldap3>=2.4,<2.5",
"passlib>=1.6.5, <1.8",
"pyasn1>=0.3.1, <0.4",
"pyOpenSSL>=17.2,<17.4",
| https://api.github.com/repos/mitmproxy/mitmproxy/pulls/2629 | 2017-11-15T01:31:05Z | 2017-11-15T11:56:39Z | 2017-11-15T11:56:39Z | 2017-11-15T11:56:43Z | 169 | mitmproxy/mitmproxy | 28,137 | |
Added PokerStrategy support | diff --git a/data.json b/data.json
index c51943632..566a3b89e 100644
--- a/data.json
+++ b/data.json
@@ -1363,6 +1363,14 @@
"username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
},
+ "PokerStrategy": {
+ "errorType": "status_code",
+ "rank": 11226535,
+ "url": "http://www.pokerstrategy.net/user/{}/profile/",
+ "urlMain": "http://www.pokerstrategy.net",
+ "username_claimed": "blue",
+ "username_unclaimed": "noonewouldeverusethis7"
+ },
"Polygon": {
"errorType": "status_code",
"rank": 1056,
| Added support for [PokerStrategy](www.pokerstrategy.net), closes #509. | https://api.github.com/repos/sherlock-project/sherlock/pulls/534 | 2020-01-31T09:37:01Z | 2020-02-04T15:27:08Z | 2020-02-04T15:27:08Z | 2020-02-04T15:27:09Z | 191 | sherlock-project/sherlock | 36,420 |
Update bitflyer doc link | diff --git a/js/bitflyer.js b/js/bitflyer.js
index bb934b081d35..830bd1d7e500 100644
--- a/js/bitflyer.js
+++ b/js/bitflyer.js
@@ -28,7 +28,7 @@ module.exports = class bitflyer extends Exchange {
'logo': 'https://user-images.githubusercontent.com/1294454/28051642-56154182-660e-11e7-9b0d-6042d1e6edd8.jpg',
'api': 'https://api.bitflyer.jp',
'www': 'https://bitflyer.jp',
- 'doc': 'https://bitflyer.jp/API',
+ 'doc': 'https://lightning.bitflyer.com/docs?lang=en',
},
'api': {
'public': {
| https://api.github.com/repos/ccxt/ccxt/pulls/4597 | 2019-01-31T16:43:12Z | 2019-01-31T16:44:35Z | 2019-01-31T16:44:35Z | 2019-01-31T16:45:45Z | 188 | ccxt/ccxt | 13,455 | |
Kia Ceed: display lane lines in HUD | diff --git a/selfdrive/car/hyundai/hyundaican.py b/selfdrive/car/hyundai/hyundaican.py
index f23d9d53286009..dc5a5b62868353 100644
--- a/selfdrive/car/hyundai/hyundaican.py
+++ b/selfdrive/car/hyundai/hyundaican.py
@@ -37,7 +37,7 @@ def create_lkas11(packer, frame, car_fingerprint, apply_steer, steer_req,
CAR.IONIQ_EV_2020, CAR.IONIQ_PHEV, CAR.KIA_SELTOS, CAR.ELANTRA_2021, CAR.GENESIS_G70_2020,
CAR.ELANTRA_HEV_2021, CAR.SONATA_HYBRID, CAR.KONA_EV, CAR.KONA_HEV, CAR.KONA_EV_2022,
CAR.SANTA_FE_2022, CAR.KIA_K5_2021, CAR.IONIQ_HEV_2022, CAR.SANTA_FE_HEV_2022,
- CAR.SANTA_FE_PHEV_2022, CAR.KIA_STINGER_2022, CAR.KIA_K5_HEV_2020):
+ CAR.SANTA_FE_PHEV_2022, CAR.KIA_STINGER_2022, CAR.KIA_K5_HEV_2020, CAR.KIA_CEED):
values["CF_Lkas_LdwsActivemode"] = int(left_lane) + (int(right_lane) << 1)
values["CF_Lkas_LdwsOpt_USM"] = 2
| Seems to be the only platform that is missing (only one dongle though): e0e98335f3ebc58f
Verified by checking rlogs and seen `CF_Lkas_LdwsActivemode` change like it is lane lines, not constant | https://api.github.com/repos/commaai/openpilot/pulls/28357 | 2023-06-01T06:01:55Z | 2023-06-01T07:04:08Z | 2023-06-01T07:04:08Z | 2023-06-01T07:04:09Z | 348 | commaai/openpilot | 8,911 |
bpo-39947: Remove old private trashcan C API functions | diff --git a/Include/cpython/object.h b/Include/cpython/object.h
index 84c60e55d5c9df..75cd0f9002215b 100644
--- a/Include/cpython/object.h
+++ b/Include/cpython/object.h
@@ -493,8 +493,8 @@ without deallocating anything (and so unbounded call-stack depth is avoided).
When the call stack finishes unwinding again, code generated by the END macro
notices this, and calls another routine to deallocate all the objects that
may have been added to the list of deferred deallocations. In effect, a
-chain of N deallocations is broken into (N-1)/(PyTrash_UNWIND_LEVEL-1) pieces,
-with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
+chain of N deallocations is broken into (N-1)/(_PyTrash_UNWIND_LEVEL-1) pieces,
+with the call stack never exceeding a depth of _PyTrash_UNWIND_LEVEL.
Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
class, we need to ensure that the trashcan is only triggered on the tp_dealloc
@@ -503,16 +503,6 @@ partially-deallocated object. To check this, the tp_dealloc function must be
passed as second argument to Py_TRASHCAN_BEGIN().
*/
-/* This is the old private API, invoked by the macros before 3.2.4.
- Kept for binary compatibility of extensions using the stable ABI. */
-PyAPI_FUNC(void) _PyTrash_deposit_object(PyObject*);
-PyAPI_FUNC(void) _PyTrash_destroy_chain(void);
-
-/* This is the old private API, invoked by the macros before 3.9.
- Kept for binary compatibility of extensions using the stable ABI. */
-PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyObject*);
-PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void);
-
/* Forward declarations for PyThreadState */
struct _ts;
@@ -522,8 +512,6 @@ PyAPI_FUNC(void) _PyTrash_end(struct _ts *tstate);
/* Python 3.10 private API, invoked by the Py_TRASHCAN_BEGIN(). */
PyAPI_FUNC(int) _PyTrash_cond(PyObject *op, destructor dealloc);
-#define PyTrash_UNWIND_LEVEL 50
-
#define Py_TRASHCAN_BEGIN_CONDITION(op, cond) \
do { \
PyThreadState *_tstate = NULL; \
diff --git a/Misc/NEWS.d/next/C API/2021-06-23-10-31-45.bpo-39947.je_HMo.rst b/Misc/NEWS.d/next/C API/2021-06-23-10-31-45.bpo-39947.je_HMo.rst
new file mode 100644
index 00000000000000..43adbffc7cce24
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2021-06-23-10-31-45.bpo-39947.je_HMo.rst
@@ -0,0 +1,20 @@
+Remove 4 private trashcan C API functions which were only kept for the backward
+compatibility of the stable ABI with Python 3.8 and older, since the trashcan
+API was not usable with the limited C API on Python 3.8 and older. The
+trashcan API was excluded from the limited C API in Python 3.9.
+
+Removed functions:
+
+* _PyTrash_deposit_object()
+* _PyTrash_destroy_chain()
+* _PyTrash_thread_deposit_object()
+* _PyTrash_thread_destroy_chain()
+
+The trashcan C API was never usable with the limited C API, since old trashcan
+macros accessed directly :c:type:`PyThreadState` members like
+``_tstate->trash_delete_nesting``, whereas the :c:type:`PyThreadState`
+structure is opaque in the limited C API.
+
+Exclude also the the ``PyTrash_UNWIND_LEVEL`` constant from the C API.
+
+Patch by Victor Stinner.
diff --git a/Misc/stable_abi.txt b/Misc/stable_abi.txt
index 24c71d12e3ba75..f104f84e451da1 100644
--- a/Misc/stable_abi.txt
+++ b/Misc/stable_abi.txt
@@ -1602,12 +1602,6 @@ function _PyThreadState_Init
function _PyThreadState_Prealloc
added 3.2
abi_only
-function _PyTrash_deposit_object
- added 3.2
- abi_only
-function _PyTrash_destroy_chain
- added 3.2
- abi_only
data _PyWeakref_CallableProxyType
added 3.2
abi_only
@@ -1920,12 +1914,6 @@ function Py_EncodeLocale
added 3.7 # (and 3.6.1 and 3.5.3)
function Py_SetPath
added 3.7 # (and 3.6.1 and 3.5.3)
-function _PyTrash_thread_deposit_object
- added 3.7 # (and 3.6.1 and 3.5.3)
- abi_only
-function _PyTrash_thread_destroy_chain
- added 3.7 # (and 3.6.1 and 3.5.3)
- abi_only
function PyErr_SetExcFromWindowsErr
added 3.7 # (and 3.6.1 and 3.5.3)
ifdef MS_WINDOWS
diff --git a/Objects/object.c b/Objects/object.c
index 854cc85b1cfa46..c87a83f225f14b 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2092,25 +2092,13 @@ Py_ReprLeave(PyObject *obj)
/* Trashcan support. */
-/* Add op to the _PyTrash_delete_later list. Called when the current
+#define _PyTrash_UNWIND_LEVEL 50
+
+/* Add op to the gcstate->trash_delete_later list. Called when the current
* call-stack depth gets large. op must be a currently untracked gc'ed
* object, with refcount 0. Py_DECREF must already have been called on it.
*/
-void
-_PyTrash_deposit_object(PyObject *op)
-{
- PyInterpreterState *interp = _PyInterpreterState_GET();
- struct _gc_runtime_state *gcstate = &interp->gc;
-
- _PyObject_ASSERT(op, _PyObject_IS_GC(op));
- _PyObject_ASSERT(op, !_PyObject_GC_IS_TRACKED(op));
- _PyObject_ASSERT(op, Py_REFCNT(op) == 0);
- _PyGCHead_SET_PREV(_Py_AS_GC(op), gcstate->trash_delete_later);
- gcstate->trash_delete_later = op;
-}
-
-/* The equivalent API, using per-thread state recursion info */
-void
+static void
_PyTrash_thread_deposit_object(PyObject *op)
{
PyThreadState *tstate = _PyThreadState_GET();
@@ -2121,37 +2109,9 @@ _PyTrash_thread_deposit_object(PyObject *op)
tstate->trash_delete_later = op;
}
-/* Deallocate all the objects in the _PyTrash_delete_later list. Called when
- * the call-stack unwinds again.
- */
-void
-_PyTrash_destroy_chain(void)
-{
- PyInterpreterState *interp = _PyInterpreterState_GET();
- struct _gc_runtime_state *gcstate = &interp->gc;
-
- while (gcstate->trash_delete_later) {
- PyObject *op = gcstate->trash_delete_later;
- destructor dealloc = Py_TYPE(op)->tp_dealloc;
-
- gcstate->trash_delete_later =
- (PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
-
- /* Call the deallocator directly. This used to try to
- * fool Py_DECREF into calling it indirectly, but
- * Py_DECREF was already called on this object, and in
- * assorted non-release builds calling Py_DECREF again ends
- * up distorting allocation statistics.
- */
- _PyObject_ASSERT(op, Py_REFCNT(op) == 0);
- ++gcstate->trash_delete_nesting;
- (*dealloc)(op);
- --gcstate->trash_delete_nesting;
- }
-}
-
-/* The equivalent API, using per-thread state recursion info */
-void
+/* Deallocate all the objects in the gcstate->trash_delete_later list.
+ * Called when the call-stack unwinds again. */
+static void
_PyTrash_thread_destroy_chain(void)
{
PyThreadState *tstate = _PyThreadState_GET();
@@ -2192,7 +2152,7 @@ _PyTrash_thread_destroy_chain(void)
int
_PyTrash_begin(PyThreadState *tstate, PyObject *op)
{
- if (tstate->trash_delete_nesting >= PyTrash_UNWIND_LEVEL) {
+ if (tstate->trash_delete_nesting >= _PyTrash_UNWIND_LEVEL) {
/* Store the object (to be deallocated later) and jump past
* Py_TRASHCAN_END, skipping the body of the deallocator */
_PyTrash_thread_deposit_object(op);
diff --git a/PC/python3dll.c b/PC/python3dll.c
index 378669c27f0544..0ebb56efaecb2c 100755
--- a/PC/python3dll.c
+++ b/PC/python3dll.c
@@ -37,10 +37,6 @@ EXPORT_FUNC(_PyObject_NewVar)
EXPORT_FUNC(_PyState_AddModule)
EXPORT_FUNC(_PyThreadState_Init)
EXPORT_FUNC(_PyThreadState_Prealloc)
-EXPORT_FUNC(_PyTrash_deposit_object)
-EXPORT_FUNC(_PyTrash_destroy_chain)
-EXPORT_FUNC(_PyTrash_thread_deposit_object)
-EXPORT_FUNC(_PyTrash_thread_destroy_chain)
EXPORT_FUNC(Py_AddPendingCall)
EXPORT_FUNC(Py_AtExit)
EXPORT_FUNC(Py_BuildValue)
| Remove 4 C API private trashcan functions which were only kept for
the backward compatibility of the stable ABI with Python 3.8 and
older, since the trashcan API was not usable with the limited C API
on Python 3.8 and older. The trashcan API was excluded from the
limited C API in Python 3.9.
The trashcan C API was never usable with the limited C API, since old
trashcan macros accessed directly PyThreadState members like
"_tstate->trash_delete_nesting", whereas the PyThreadState structure
is opaque in the limited C API.
Exclude also the PyTrash_UNWIND_LEVEL constant from the C API.
The trashcan C API was modified in Python 3.9 by commit
38965ec5411da60d312b59be281f3510d58e0cf1 and in Python 3.10 by commit
ed1a5a5baca8f61e9a99c5be3adc16b1801514fe to hide implementation
details.
<!--
Thanks for your contribution!
Please read this comment in its entirety. It's quite important.
# Pull Request title
It should be in the following format:
```
bpo-NNNN: Summary of the changes made
```
Where: bpo-NNNN refers to the issue number in the https://bugs.python.org.
Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue.
# Backport Pull Request title
If this is a backport PR (PR made against branches other than `main`),
please ensure that the PR title is in the following format:
```
[X.Y] <title from the original PR> (GH-NNNN)
```
Where: [X.Y] is the branch name, e.g. [3.6].
GH-NNNN refers to the PR number from `main`.
-->
<!-- issue-number: [bpo-39947](https://bugs.python.org/issue39947) -->
https://bugs.python.org/issue39947
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/26869 | 2021-06-23T08:49:33Z | 2021-06-23T13:51:47Z | 2021-06-23T13:51:47Z | 2021-06-23T13:52:01Z | 2,245 | python/cpython | 4,082 |
Update Ansible release version to v2.14.11.post0. | diff --git a/lib/ansible/release.py b/lib/ansible/release.py
index 6f0f794fc04a3c..9937080ab94ba0 100644
--- a/lib/ansible/release.py
+++ b/lib/ansible/release.py
@@ -19,6 +19,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__version__ = '2.14.11'
+__version__ = '2.14.11.post0'
__author__ = 'Ansible, Inc.'
__codename__ = "C'mon Everybody"
| ##### SUMMARY
Update Ansible release version to v2.14.11.post0.
##### ISSUE TYPE
Feature Pull Request
| https://api.github.com/repos/ansible/ansible/pulls/81937 | 2023-10-09T15:55:12Z | 2023-10-09T16:39:31Z | 2023-10-09T16:39:31Z | 2023-12-14T14:01:42Z | 135 | ansible/ansible | 49,042 |
Fix mypy errors at mst_kruskal | diff --git a/graphs/minimum_spanning_tree_kruskal.py b/graphs/minimum_spanning_tree_kruskal.py
index a51f970341f7..f21a87a7d534 100644
--- a/graphs/minimum_spanning_tree_kruskal.py
+++ b/graphs/minimum_spanning_tree_kruskal.py
@@ -1,15 +1,14 @@
-from typing import List, Tuple
-
-
-def kruskal(num_nodes: int, num_edges: int, edges: List[Tuple[int, int, int]]) -> int:
+def kruskal(
+ num_nodes: int, edges: list[tuple[int, int, int]]
+) -> list[tuple[int, int, int]]:
"""
- >>> kruskal(4, 3, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
+ >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
[(2, 3, 1), (0, 1, 3), (1, 2, 5)]
- >>> kruskal(4, 5, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
+ >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
[(2, 3, 1), (0, 2, 1), (0, 1, 3)]
- >>> kruskal(4, 6, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2),
+ >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2),
... (2, 1, 1)])
[(2, 3, 1), (0, 2, 1), (2, 1, 1)]
"""
@@ -44,4 +43,4 @@ def find_parent(i):
node1, node2, cost = [int(x) for x in input().strip().split()]
edges.append((node1, node2, cost))
- kruskal(num_nodes, num_edges, edges)
+ kruskal(num_nodes, edges)
diff --git a/graphs/tests/test_min_spanning_tree_kruskal.py b/graphs/tests/test_min_spanning_tree_kruskal.py
index 3a527aef384f..d6df242ec6d1 100644
--- a/graphs/tests/test_min_spanning_tree_kruskal.py
+++ b/graphs/tests/test_min_spanning_tree_kruskal.py
@@ -2,7 +2,7 @@
def test_kruskal_successful_result():
- num_nodes, num_edges = 9, 14
+ num_nodes = 9
edges = [
[0, 1, 4],
[0, 7, 8],
@@ -20,7 +20,7 @@ def test_kruskal_successful_result():
[1, 7, 11],
]
- result = kruskal(num_nodes, num_edges, edges)
+ result = kruskal(num_nodes, edges)
expected = [
[7, 6, 1],
| ### **Describe your change:**
Fix mypy errors at mst_kruskal
### **Checklist:**
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| https://api.github.com/repos/TheAlgorithms/Python/pulls/4581 | 2021-07-29T13:30:04Z | 2021-08-02T12:40:48Z | 2021-08-02T12:40:48Z | 2021-08-02T12:40:48Z | 851 | TheAlgorithms/Python | 30,382 |
Fix for the CORS check | diff --git a/lib/streamlit/server/Server.py b/lib/streamlit/server/Server.py
index 5d0e34fdb8b2..1d0bac06e3f0 100644
--- a/lib/streamlit/server/Server.py
+++ b/lib/streamlit/server/Server.py
@@ -486,7 +486,7 @@ def initialize(self, server):
def check_origin(self, origin):
"""Set up CORS."""
- return is_url_from_allowed_origins(origin)
+ return super().check_origin(origin) or is_url_from_allowed_origins(origin)
def open(self):
self._session = self._server._add_browser_connection(self)
| **Issue:** #964
**Description:**
Streamlit is using tornado for WebSocket. It has overwritten the origin check for CORS, which only allows the configured list of URLs.
However, if the HOST and ORIGIN are the same, it shouldn't invoke the check for the URL list at all. If they are the same value, it should allow the connection.
**Changes**
This will add back the torndado implementation, checking whether HOST and ORIGIN are equal, otherwise then proceed for URL list checks.
---
**Contribution License Agreement**
By submiting this pull request you agree that all contributions to this project are made under the Apache 2.0 license.
| https://api.github.com/repos/streamlit/streamlit/pulls/965 | 2020-01-14T09:59:09Z | 2020-02-09T22:13:18Z | 2020-02-09T22:13:18Z | 2020-02-09T22:13:18Z | 146 | streamlit/streamlit | 21,976 |
ref: Remove dead code from post process forwarder | diff --git a/src/sentry/eventstream/kafka/backend.py b/src/sentry/eventstream/kafka/backend.py
index 6839bd6461c9e5..630a8f6f75bd1e 100644
--- a/src/sentry/eventstream/kafka/backend.py
+++ b/src/sentry/eventstream/kafka/backend.py
@@ -14,10 +14,9 @@
PostProcessForwarderWorker,
TransactionsPostProcessForwarderWorker,
)
-from sentry.eventstream.kafka.protocol import get_task_kwargs_for_message
from sentry.eventstream.snuba import KW_SKIP_SEMANTIC_PARTITIONING, SnubaProtocolEventStream
from sentry.killswitches import killswitch_matches_context
-from sentry.utils import json, kafka, metrics
+from sentry.utils import json, kafka
from sentry.utils.batching_kafka_consumer import BatchingKafkaConsumer
logger = logging.getLogger(__name__)
@@ -264,24 +263,6 @@ def handler(signum, frame):
consumer.run()
- def _get_task_kwargs_and_dispatch(self, message) -> None:
- with metrics.timer("eventstream.duration", instance="get_task_kwargs_for_message"):
- task_kwargs = get_task_kwargs_for_message(message.value())
-
- if task_kwargs is not None:
- if task_kwargs["group_id"] is None:
- metrics.incr(
- "eventstream.messages",
- tags={"partition": message.partition(), "type": "transactions"},
- )
- else:
- metrics.incr(
- "eventstream.messages",
- tags={"partition": message.partition(), "type": "errors"},
- )
- with metrics.timer("eventstream.duration", instance="dispatch_post_process_group_task"):
- self._dispatch_post_process_group_task(**task_kwargs)
-
def run_post_process_forwarder(
self,
entity: Union[Literal["all"], Literal["errors"], Literal["transactions"]],
| https://api.github.com/repos/getsentry/sentry/pulls/37119 | 2022-07-27T15:22:42Z | 2022-07-27T18:15:25Z | 2022-07-27T18:15:25Z | 2022-08-12T00:02:21Z | 412 | getsentry/sentry | 44,054 | |
Update README.md: remove duplicates within the same section/category | diff --git a/README.md b/README.md
index dcd4bc6..3b93f94 100644
--- a/README.md
+++ b/README.md
@@ -740,7 +740,6 @@ Practice:
### Functional programming (FP)
-- [Jargon from the functional programming world](https://github.com/hemanth/functional-programming-jargon)
- [Goodbye, Object Oriented Programming](https://medium.com/@cscalfani/goodbye-object-oriented-programming-a59cda4c0e53#.39ax09e4k)
- [Functional Programming & Haskell](https://www.youtube.com/watch?v=LnX3B9oaKzw) 🎞: some good reasons to learn FP!
- [Functional Programming Fundamentals](https://www.matthewgerstman.com/functional-programming-fundamentals/): short introduction to FP and its advantages.
@@ -1211,7 +1210,6 @@ List of resources:
### Shell (command line)
- 🧰 [alebcay/awesome-shell](https://github.com/alebcay/awesome-shell)
-- [Bash Hackers Wiki](http://wiki.bash-hackers.org/)
- [dylanaraps/pure-bash-bible: a collection of pure bash alternatives to external processes.](https://github.com/dylanaraps/pure-bash-bible)
- [The Bash Hackers Wiki](https://wiki.bash-hackers.org/) provides a gentler way to learn about bash than its manages.
- [Awk in 20 Minutes](https://ferd.ca/awk-in-20-minutes.html)
| 👋😀
Besides these two, there are a few more duplicated links throughout the README **but** in different sections/categories:
- amazon.com/The-Linux-Programming-Interface-Handbook/dp/1593272200
- en.wikipedia.org/wiki/Domain-driven_design
- github.com/charlax/dotfiles/tree/master/vim
- github.com/charlax/engineering-management
- github.com/jwasham/coding-interview-university
- github.com/kdeldycke/awesome-falsehood
- highscalability.com/
- smile.amazon.com/Scalability-Rules-Principles-Scaling-Sites/dp/013443160X
Maybe they should be cleaned up as well? | https://api.github.com/repos/charlax/professional-programming/pulls/48 | 2022-07-21T10:20:27Z | 2022-07-26T11:50:36Z | 2022-07-26T11:50:36Z | 2022-07-26T11:51:16Z | 352 | charlax/professional-programming | 21,539 |
Switch to new MySQL public key | diff --git a/scripts/docker/install_mysql.sh b/scripts/docker/install_mysql.sh
index 52aca493a2df5..955f13028b872 100755
--- a/scripts/docker/install_mysql.sh
+++ b/scripts/docker/install_mysql.sh
@@ -44,7 +44,7 @@ install_mysql_client() {
exit 1
fi
- local key="A4A9406876FCBD3C456770C88C718D3B5072E1F5"
+ local key="467B942D3A79BD29"
readonly key
GNUPGHOME="$(mktemp -d)"
| MySQL changed key used to sign their apt packages. This caused
docker building failing for prod images as MySQL could not be
installed.
New Public Key is used instead.
Fixes: #20911
<!--
Thank you for contributing! Please make sure that your code changes
are covered with tests. And in case of new features or big changes
remember to adjust the documentation.
Feel free to ping committers for the review!
In case of existing issue, reference it using one of the following:
closes: #ISSUE
related: #ISSUE
How to write a good git commit message:
http://chris.beams.io/posts/git-commit/
-->
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
| https://api.github.com/repos/apache/airflow/pulls/20912 | 2022-01-17T22:50:01Z | 2022-01-18T03:35:42Z | 2022-01-18T03:35:42Z | 2022-07-29T20:15:01Z | 137 | apache/airflow | 14,399 |
Update license year | diff --git a/LICENSE.md b/LICENSE.md
index d79db85f0..ec1d1e23c 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,7 +1,7 @@
The MIT License (MIT)
=====================
-Copyright (c) 2015 Vladimir Iakovlev
+Copyright (c) 2015-2018 Vladimir Iakovlev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
| TO DO:
- [x] Wait to the next year (100% done, depends on timezone)
- [ ] Merge!
:octocat: | https://api.github.com/repos/nvbn/thefuck/pulls/752 | 2017-12-31T23:58:42Z | 2018-01-01T17:53:35Z | 2018-01-01T17:53:35Z | 2018-01-01T18:37:39Z | 119 | nvbn/thefuck | 30,551 |
Some cleaning | diff --git a/README.md b/README.md
index 96553b24..6643112e 100644
--- a/README.md
+++ b/README.md
@@ -1226,10 +1226,8 @@ be
* [Summing Bird](https://github.com/twitter/summingbird) - Streaming MapReduce with Scalding and Storm
* [Algebird](https://github.com/twitter/algebird) - Abstract Algebra for Scala
* [xerial](https://github.com/xerial/xerial) - Data management utilities for Scala
-* [simmer](https://github.com/avibryant/simmer) - Reduce your data. A unix filter for algebird-powered aggregation.
* [PredictionIO](https://github.com/apache/incubator-predictionio) - PredictionIO, a machine learning server for software developers and data engineers.
* [BIDMat](https://github.com/BIDData/BIDMat) - CPU and GPU-accelerated matrix library intended to support large-scale exploratory data analysis.
-* [Wolfe](http://www.wolfe.ml/) Declarative Machine Learning
* [Flink](http://flink.apache.org/) - Open source platform for distributed stream and batch data processing.
* [Spark Notebook](http://spark-notebook.io) - Interactive and Reactive Data Science using Scala and Spark.
| - Simmer: not updated for ages
- Wolfe: link is broken | https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/393 | 2017-06-28T10:11:43Z | 2017-07-01T20:37:53Z | 2017-07-01T20:37:53Z | 2017-07-01T20:37:55Z | 293 | josephmisiti/awesome-machine-learning | 52,145 |
More richer reacher | diff --git a/gym/envs/mujoco/assets/reacher.xml b/gym/envs/mujoco/assets/reacher.xml
index 39bca2d71df..151d1f85113 100644
--- a/gym/envs/mujoco/assets/reacher.xml
+++ b/gym/envs/mujoco/assets/reacher.xml
@@ -1,4 +1,4 @@
-<mujoco model="richer">
+<mujoco model="reacher">
<compiler angle="radian" inertiafromgeom="true"/>
<default>
<joint armature="1" damping="1" limited="true"/>
@@ -36,4 +36,4 @@
<motor ctrllimited="true" ctrlrange="-1.0 1.0" gear="200.0" joint="joint0"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" gear="200.0" joint="joint1"/>
</actuator>
-</mujoco>
\ No newline at end of file
+</mujoco>
diff --git a/gym/envs/mujoco/reacher.py b/gym/envs/mujoco/reacher.py
index 2d018596aae..cb3c29d83aa 100644
--- a/gym/envs/mujoco/reacher.py
+++ b/gym/envs/mujoco/reacher.py
@@ -111,7 +111,7 @@ class ReacherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
- mujoco_env.MujocoEnv.__init__(self, "richer.xml", 2)
+ mujoco_env.MujocoEnv.__init__(self, "reacher.xml", 2)
def step(self, a):
vec = self.get_body_com("fingertip") - self.get_body_com("target")
| https://api.github.com/repos/openai/gym/pulls/2591 | 2022-02-03T03:20:42Z | 2022-02-03T13:13:26Z | 2022-02-03T13:13:26Z | 2022-02-03T13:13:27Z | 435 | openai/gym | 5,406 | |
enhance top description | diff --git a/append_output.sh b/append_output.sh
index 8e576d0f..3bb9202c 100755
--- a/append_output.sh
+++ b/append_output.sh
@@ -10,7 +10,7 @@ output_marker='OUTPUT = """'
# get everything (excluding part between `output_marker` and the end of the file)
# into `src` var
src=$(sed -n -e "/$output_marker/,\$!p" "$1")
-output=$(python "$1")
+output=$(python3 "$1")
echo "$src" > $1
echo -e "\n" >> $1
diff --git a/patterns/structural/proxy.py b/patterns/structural/proxy.py
index 8dcb7687..560643ee 100644
--- a/patterns/structural/proxy.py
+++ b/patterns/structural/proxy.py
@@ -1,55 +1,87 @@
"""
+*What is this pattern about?
+Proxy is used in places where you want to add functionality to a class without
+changing its interface. The main class is called `Real Subject`. A client should
+use the proxy or the real subject without any code change, so both must have the
+same interface. Logging and controlling access to the real subject are some of
+the proxy pattern usages.
+
+*References:
+https://refactoring.guru/design-patterns/proxy/python/example
+https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Fronting.html
+
*TL;DR
-Provides an interface to resource that is expensive to duplicate.
+Add functionality or logic (e.g. logging, caching, authorization) to a resource
+without changing its interface.
"""
-import time
+class Subject:
+ """
+ As mentioned in the document, interfaces of both RealSubject and Proxy should
+ be the same, because the client should be able to use RealSubject or Proxy with
+ no code change.
+
+ Not all times this interface is necessary. The point is the client should be
+ able to use RealSubject or Proxy interchangeably with no change in code.
+ """
+
+ def do_the_job(self, user):
+ raise NotImplementedError()
-class SalesManager:
- def talk(self):
- print("Sales Manager ready to talk")
+class RealSubject(Subject):
+ """
+ This is the main job doer. External services like payment gateways can be a
+ good example.
+ """
-class Proxy:
+ def do_the_job(self, user):
+ print(f'I am doing the job for {user}')
+
+
+class Proxy(Subject):
def __init__(self):
- self.busy = 'No'
- self.sales = None
-
- def talk(self):
- print("Proxy checking for Sales Manager availability")
- if self.busy == 'No':
- self.sales = SalesManager()
- time.sleep(0.1)
- self.sales.talk()
+ self._real_subject = RealSubject()
+
+ def do_the_job(self, user):
+ """
+ logging and controlling access are some examples of proxy usages.
+ """
+
+ print(f'[log] Doing the job for {user} is requested.')
+
+ if user == 'admin':
+ self._real_subject.do_the_job(user)
else:
- time.sleep(0.1)
- print("Sales Manager is busy")
+ print(f'[log] I can do the job just for `admins`.')
+
+
+def client(job_doer, user):
+ job_doer.do_the_job(user)
+
+def main():
+ """
+ >>> proxy = Proxy()
+
+ >>> real_subject = RealSubject()
+
+ >>> client(proxy, 'admin')
+ [log] Doing the job for admin is requested.
+ I am doing the job for admin
+
+ >>> client(proxy, 'anonymous')
+ [log] Doing the job for anonymous is requested.
+ [log] I can do the job just for `admins`.
+ >>> client(real_subject, 'admin')
+ I am doing the job for admin
-class NoTalkProxy(Proxy):
- def talk(self):
- print("Proxy checking for Sales Manager availability")
- time.sleep(0.1)
- print("This Sales Manager will not talk to you", "whether he/she is busy or not")
+ >>> client(real_subject, 'anonymous')
+ I am doing the job for anonymous
+ """
if __name__ == '__main__':
- p = Proxy()
- p.talk()
- p.busy = 'Yes'
- p.talk()
- p = NoTalkProxy()
- p.talk()
- p.busy = 'Yes'
- p.talk()
-
-### OUTPUT ###
-# Proxy checking for Sales Manager availability
-# Sales Manager ready to talk
-# Proxy checking for Sales Manager availability
-# Sales Manager is busy
-# Proxy checking for Sales Manager availability
-# This Sales Manager will not talk to you whether he/she is busy or not
-# Proxy checking for Sales Manager availability
-# This Sales Manager will not talk to you whether he/she is busy or not
+ import doctest
+ doctest.testmod()
\ No newline at end of file
| add a little more description and references | https://api.github.com/repos/faif/python-patterns/pulls/321 | 2020-03-09T06:28:24Z | 2020-03-10T22:52:48Z | 2020-03-10T22:52:48Z | 2020-03-11T06:03:46Z | 1,175 | faif/python-patterns | 33,467 |
Remove --sdp-attention, --xformers flags | diff --git a/README.md b/README.md
index d0a347c795..15cca711c3 100644
--- a/README.md
+++ b/README.md
@@ -231,8 +231,6 @@ List of command-line flags
| `--load-in-8bit` | Load the model with 8-bit precision (using bitsandbytes). |
| `--bf16` | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
| `--no-cache` | Set `use_cache` to `False` while generating text. This reduces VRAM usage slightly, but it comes at a performance cost. |
-| `--xformers` | Use xformer's memory efficient attention. This is really old and probably doesn't do anything. |
-| `--sdp-attention` | Use PyTorch 2.0's SDP attention. Same as above. |
| `--trust-remote-code` | Set `trust_remote_code=True` while loading the model. Necessary for some models. |
| `--no_use_fast` | Set use_fast=False while loading the tokenizer (it's True by default). Use this if you have any problems related to use_fast. |
| `--use_flash_attention_2` | Set use_flash_attention_2=True while loading the model. |
diff --git a/modules/llama_attn_hijack.py b/modules/llama_attn_hijack.py
deleted file mode 100644
index 00436fb2ed..0000000000
--- a/modules/llama_attn_hijack.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import math
-import sys
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-
-import modules.shared as shared
-from modules.logging_colors import logger
-
-if shared.args.xformers:
- try:
- import xformers.ops
- except Exception:
- logger.error("xformers not found! Please install it before trying to use it.", file=sys.stderr)
-
-
-def hijack_llama_attention():
- import transformers.models.llama.modeling_llama
- if shared.args.xformers:
- transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward
- logger.info("Replaced attention with xformers_attention")
- elif shared.args.sdp_attention:
- transformers.models.llama.modeling_llama.LlamaAttention.forward = sdp_attention_forward
- logger.info("Replaced attention with sdp_attention")
-
-
-def xformers_forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
- output_attentions: bool = False,
- use_cache: bool = False,
-) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
- bsz, q_len, _ = hidden_states.size()
-
- query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
- key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
- value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-
- kv_seq_len = key_states.shape[-2]
- if past_key_value is not None:
- kv_seq_len += past_key_value[0].shape[-2]
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
- query_states, key_states = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
- # [bsz, nh, t, hd]
-
- if past_key_value is not None:
- # reuse k, v, self_attention
- key_states = torch.cat([past_key_value[0], key_states], dim=2)
- value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
- past_key_value = (key_states, value_states) if use_cache else None
-
- # We only apply xformers optimizations if we don't need to output the whole attention matrix
- if not output_attentions:
- query_states = query_states.transpose(1, 2)
- key_states = key_states.transpose(1, 2)
- value_states = value_states.transpose(1, 2)
-
- # This is a nasty hack. We know attention_mask in transformers is either LowerTriangular or all Zeros.
- # We therefore check if one element in the upper triangular portion is zero. If it is, then the mask is all zeros.
- if attention_mask is None or attention_mask[0, 0, 0, 1] == 0:
- # input and output should be of form (bsz, q_len, num_heads, head_dim)
- attn_output = xformers.ops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=None)
- else:
- # input and output should be of form (bsz, q_len, num_heads, head_dim)
- attn_output = xformers.ops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=xformers.ops.LowerTriangularMask())
- attn_weights = None
- else:
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
- raise ValueError(
- f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
- f" {attn_weights.size()}"
- )
-
- if attention_mask is not None:
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
- raise ValueError(
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
- )
- attn_weights = attn_weights + attention_mask
- attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
-
- # upcast attention to fp32
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
- attn_output = torch.matmul(attn_weights, value_states)
-
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
- raise ValueError(
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
- f" {attn_output.size()}"
- )
-
- attn_output = attn_output.transpose(1, 2)
-
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
- attn_output = self.o_proj(attn_output)
- return attn_output, attn_weights, past_key_value
-
-
-def sdp_attention_forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
- output_attentions: bool = False,
- use_cache: bool = False,
-) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
- bsz, q_len, _ = hidden_states.size()
-
- query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
- key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
- value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-
- kv_seq_len = key_states.shape[-2]
- if past_key_value is not None:
- kv_seq_len += past_key_value[0].shape[-2]
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
- query_states, key_states = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
- # [bsz, nh, t, hd]
-
- if past_key_value is not None:
- # reuse k, v, self_attention
- key_states = torch.cat([past_key_value[0], key_states], dim=2)
- value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
- past_key_value = (key_states, value_states) if use_cache else None
-
- # We only apply sdp attention if we don't need to output the whole attention matrix
- if not output_attentions:
- attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, is_causal=False)
- attn_weights = None
- else:
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
- raise ValueError(
- f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
- f" {attn_weights.size()}"
- )
-
- if attention_mask is not None:
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
- raise ValueError(
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
- )
- attn_weights = attn_weights + attention_mask
- attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
-
- # upcast attention to fp32
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
- attn_output = torch.matmul(attn_weights, value_states)
-
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
- raise ValueError(
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
- f" {attn_output.size()}"
- )
-
- attn_output = attn_output.transpose(1, 2)
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-
- attn_output = self.o_proj(attn_output)
-
- return attn_output, attn_weights, past_key_value
diff --git a/modules/models.py b/modules/models.py
index e166f737a4..5235f10857 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -21,7 +21,7 @@
)
import modules.shared as shared
-from modules import RoPE, llama_attn_hijack, sampler_hijack
+from modules import RoPE, sampler_hijack
from modules.logging_colors import logger
from modules.models_settings import get_model_metadata
from modules.relative_imports import RelativeImport
@@ -97,10 +97,6 @@ def load_model(model_name, loader=None):
else:
tokenizer = load_tokenizer(model_name, model)
- # Hijack attention with xformers
- if any((shared.args.xformers, shared.args.sdp_attention)):
- llama_attn_hijack.hijack_llama_attention()
-
shared.settings.update({k: v for k, v in metadata.items() if k in shared.settings})
if loader.lower().startswith('exllama'):
shared.settings['truncation_length'] = shared.args.max_seq_len
diff --git a/modules/shared.py b/modules/shared.py
index f98343b866..36ace23c2d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -98,8 +98,6 @@
group.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision (using bitsandbytes).')
group.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
group.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.')
-group.add_argument('--xformers', action='store_true', help='Use xformer\'s memory efficient attention. This is really old and probably doesn\'t do anything.')
-group.add_argument('--sdp-attention', action='store_true', help='Use PyTorch 2.0\'s SDP attention. Same as above.')
group.add_argument('--trust-remote-code', action='store_true', help='Set trust_remote_code=True while loading the model. Necessary for some models.')
group.add_argument('--force-safetensors', action='store_true', help='Set use_safetensors=True while loading the model. This prevents arbitrary code execution.')
group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')
| These flags were implemented in a PR a long time ago and have no effect, as far as I'm aware. | https://api.github.com/repos/oobabooga/text-generation-webui/pulls/5126 | 2023-12-31T04:06:48Z | 2023-12-31T04:36:51Z | 2023-12-31T04:36:51Z | 2024-04-04T02:48:18Z | 3,060 | oobabooga/text-generation-webui | 26,314 |
Document hook error handling | diff --git a/docs/using.rst b/docs/using.rst
index 549a3479c74..a325ff41317 100644
--- a/docs/using.rst
+++ b/docs/using.rst
@@ -378,10 +378,16 @@ then restart it after the plugin is finished. Example::
certbot renew --pre-hook "service nginx stop" --post-hook "service nginx start"
-The hooks will only be
-run if a certificate is due for renewal, so you can run this command
-frequently without unnecessarily stopping your webserver. More
-information about renewal hooks can be found by running
+If a hook exits with a non-zero exit code, the error will be printed
+to ``stderr`` but renewal will be attempted anyway. A failing hook
+doesn't directly cause Certbot to exit with a non-zero exit code, but
+since Certbot exits with a non-zero exit code when renewals fail, a
+failed hook causing renewal failures will indirectly result in a
+non-zero exit code. Hooks will only be run if a certificate is due for
+renewal, so you can run the above command frequently without
+unnecessarily stopping your webserver.
+
+ More information about renewal hooks can be found by running
``certbot --help renew``.
If you're sure that this command executes successfully without human
| Ref https://github.com/certbot/certbot/issues/4253#issuecomment-283733385; /cc @SwartzCr for review :) | https://api.github.com/repos/certbot/certbot/pulls/4418 | 2017-03-27T18:04:17Z | 2017-04-07T17:17:00Z | 2017-04-07T17:17:00Z | 2017-04-07T19:34:30Z | 294 | certbot/certbot | 3,678 |
Small fix to export script | diff --git a/inference/server/export.py b/inference/server/export.py
index 36f938e186..054b6f8341 100644
--- a/inference/server/export.py
+++ b/inference/server/export.py
@@ -52,7 +52,7 @@ def prepare_export_events(
message: DbMessage,
anonymizer: Anonymizer | None = None,
) -> dict[str, list[ExportMessageEvent]]:
- export_events: dict[str, list[ExportMessageEvent]] = []
+ export_events: dict[str, list[ExportMessageEvent]] = {}
if message.reports:
export_events["report"] = [
| https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/3198 | 2023-05-19T16:12:44Z | 2023-05-20T13:42:09Z | 2023-05-20T13:42:09Z | 2023-05-20T13:42:10Z | 142 | LAION-AI/Open-Assistant | 37,779 | |
Dictionary creation could be rewritten as litteral | diff --git a/sklearn/metrics/tests/test_pairwise.py b/sklearn/metrics/tests/test_pairwise.py
index 122d316c0e22a..95833ff646fc4 100644
--- a/sklearn/metrics/tests/test_pairwise.py
+++ b/sklearn/metrics/tests/test_pairwise.py
@@ -233,8 +233,7 @@ def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
- kwds = {}
- kwds['gamma'] = 0.1
+ kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
| https://api.github.com/repos/scikit-learn/scikit-learn/pulls/5629 | 2015-10-30T16:29:35Z | 2015-10-30T19:45:51Z | 2015-10-30T19:45:51Z | 2015-10-30T19:45:51Z | 211 | scikit-learn/scikit-learn | 46,199 | |
added regex check to BuyMeACoffee | diff --git a/sherlock/resources/data.json b/sherlock/resources/data.json
index 5f98c3399..da4ffd0aa 100644
--- a/sherlock/resources/data.json
+++ b/sherlock/resources/data.json
@@ -309,6 +309,7 @@
},
"BuyMeACoffee": {
"errorType": "status_code",
+ "regexCheck": "[a-zA-Z0-9]{3,15}",
"url": "https://buymeacoff.ee/{}",
"urlMain": "https://www.buymeacoffee.com/",
"urlProbe": "https://www.buymeacoffee.com/{}",
| https://api.github.com/repos/sherlock-project/sherlock/pulls/1733 | 2023-03-13T10:38:04Z | 2023-03-13T10:44:13Z | 2023-03-13T10:44:13Z | 2023-08-29T13:06:56Z | 152 | sherlock-project/sherlock | 36,648 | |
🌐 Add German translation for `docs/de/docs/reference/dependencies.md` | diff --git a/docs/de/docs/reference/dependencies.md b/docs/de/docs/reference/dependencies.md
new file mode 100644
index 0000000000000..2ed5b5050146b
--- /dev/null
+++ b/docs/de/docs/reference/dependencies.md
@@ -0,0 +1,29 @@
+# Abhängigkeiten – `Depends()` und `Security()`
+
+## `Depends()`
+
+Abhängigkeiten werden hauptsächlich mit der speziellen Funktion `Depends()` behandelt, die ein Callable entgegennimmt.
+
+Hier finden Sie deren Referenz und Parameter.
+
+Sie können sie direkt von `fastapi` importieren:
+
+```python
+from fastapi import Depends
+```
+
+::: fastapi.Depends
+
+## `Security()`
+
+In vielen Szenarien können Sie die Sicherheit (Autorisierung, Authentifizierung usw.) mit Abhängigkeiten handhaben, indem Sie `Depends()` verwenden.
+
+Wenn Sie jedoch auch OAuth2-Scopes deklarieren möchten, können Sie `Security()` anstelle von `Depends()` verwenden.
+
+Sie können `Security()` direkt von `fastapi` importieren:
+
+```python
+from fastapi import Security
+```
+
+::: fastapi.Security
| ← `reference/exceptions.md` (#10817)
→ `reference/apirouter.md` (#10819)
[German translation progress](https://github.com/tiangolo/fastapi/discussions/10582) | https://api.github.com/repos/tiangolo/fastapi/pulls/10818 | 2023-12-23T02:23:58Z | 2024-03-30T18:16:45Z | 2024-03-30T18:16:45Z | 2024-04-01T01:11:06Z | 299 | tiangolo/fastapi | 23,018 |
Fix GUI tooltips on configuration setting window in Original/ Trainer/ Color Augmentation section | diff --git a/plugins/train/trainer/original_defaults.py b/plugins/train/trainer/original_defaults.py
index be760eff44..943cd40b4d 100755
--- a/plugins/train/trainer/original_defaults.py
+++ b/plugins/train/trainer/original_defaults.py
@@ -88,7 +88,7 @@
color_lightness=dict(
default=30,
info="Percentage amount to randomly alter the lightness of each training image.\n"
- "NB: This is ignored if the 'no-flip' option is enabled",
+ "NB: This is ignored if the 'no-augment-color' option is enabled",
datatype=int,
rounding=1,
min_max=(0, 75),
@@ -96,8 +96,8 @@
color_ab=dict(
default=8,
info="Percentage amount to randomly alter the 'a' and 'b' colors of the L*a*b* color "
- "space of each training image.\nNB: This is ignored if the 'no-flip' option is "
- "enabled",
+ "space of each training image.\nNB: This is ignored if the 'no-augment-color' option"
+ "is enabled",
datatype=int,
rounding=1,
min_max=(0, 50),
| https://api.github.com/repos/deepfakes/faceswap/pulls/1252 | 2022-07-30T05:32:38Z | 2022-08-02T09:29:48Z | 2022-08-02T09:29:48Z | 2022-08-02T09:29:48Z | 279 | deepfakes/faceswap | 18,825 | |
Bump aiohttp from 3.6.2 to 3.6.3 | diff --git a/python/setup.py b/python/setup.py
index 311b805317af..472dfb57c43a 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -79,7 +79,7 @@
extras_require={
':python_version>="3.5.2"': [
- 'aiohttp==3.6.2',
+ 'aiohttp==3.6.3',
'aiodns==1.1.1',
'yarl==1.1.0',
],
| https://api.github.com/repos/ccxt/ccxt/pulls/7765 | 2020-10-13T09:02:26Z | 2020-10-13T11:49:01Z | 2020-10-13T11:49:01Z | 2020-10-13T14:50:29Z | 124 | ccxt/ccxt | 13,500 | |
Better document plugins and reversion | diff --git a/certbot/interfaces.py b/certbot/interfaces.py
index 37835462e01..e4e62e0a2fb 100644
--- a/certbot/interfaces.py
+++ b/certbot/interfaces.py
@@ -180,6 +180,9 @@ def perform(achalls):
def cleanup(achalls):
"""Revert changes and shutdown after challenges complete.
+ This method should be able to revert all changes made by
+ perform, even if perform exited abnormally.
+
:param list achalls: Non-empty (guaranteed) list of
:class:`~certbot.achallenges.AnnotatedChallenge`
instances, a subset of those previously passed to :func:`perform`.
@@ -238,6 +241,14 @@ class IInstaller(IPlugin):
Represents any server that an X509 certificate can be placed.
+ It is assumed that :func:`save` is the only method that finalizes a
+ checkpoint. This is important to ensure that checkpoints are
+ restored in a consistent manner if requested by the user or in case
+ of an error.
+
+ Using :class:`certbot.reverter.Reverter` to implement checkpoints,
+ rollback, and recovery can dramatically simplify plugin development.
+
"""
def get_all_names():
@@ -304,8 +315,11 @@ def save(title=None, temporary=False):
Both title and temporary are needed because a save may be
intended to be permanent, but the save is not ready to be a full
- checkpoint. If an exception is raised, it is assumed a new
- checkpoint was not created.
+ checkpoint.
+
+ It is assumed that at most one checkpoint is finalized by this
+ method. Additionally, if an exception is raised, it is assumed a
+ new checkpoint was not finalized.
:param str title: The title of the save. If a title is given, the
configuration will be saved as a new checkpoint and put in a
diff --git a/certbot/reverter.py b/certbot/reverter.py
index f8140d60da3..6dde050777d 100644
--- a/certbot/reverter.py
+++ b/certbot/reverter.py
@@ -24,6 +24,39 @@
class Reverter(object):
"""Reverter Class - save and revert configuration checkpoints.
+ This class can be used by the plugins, especially Installers, to
+ undo changes made to the user's system. Modifications to files and
+ commands to do undo actions taken by the plugin should be registered
+ with this class before the action is taken.
+
+ Once a change has been registered with this class, there are three
+ states the change can be in. First, the change can be a temporary
+ change. This should be used for changes that will soon be reverted,
+ such as config changes for the purpose of solving a challenge.
+ Changes are added to this state through calls to
+ :func:`~add_to_temp_checkpoint` and reverted when
+ :func:`~revert_temporary_config` or :func:`~recovery_routine` is
+ called.
+
+ The second state a change can be in is in progress. These changes
+ are not temporary, however, they also have not been finalized in a
+ checkpoint. A change must become in progress before it can be
+ finalized. Changes are added to this state through calls to
+ :func:`~add_to_checkpoint` and reverted when
+ :func:`~recovery_routine` is called.
+
+ The last state a change can be in is finalized in a checkpoint. A
+ change is put into this state by first becoming an in progress
+ change and then calling :func:`~finalize_checkpoint`. Changes
+ in this state can be reverted through calls to
+ :func:`~rollback_checkpoints`.
+
+ As a final note, creating new files and registering undo commands
+ are handled specially and use the methods
+ :func:`~register_file_creation` and :func:`~register_undo_command`
+ respectively. Both of these methods can be used to create either
+ temporary or in progress changes.
+
.. note:: Consider moving everything over to CSV format.
:param config: Configuration.
| https://api.github.com/repos/certbot/certbot/pulls/3208 | 2016-06-24T00:17:48Z | 2016-06-24T23:54:15Z | 2016-06-24T23:54:15Z | 2016-10-06T01:21:38Z | 942 | certbot/certbot | 2,417 | |
[Core] Show_webui segfault fix. | diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx
index 930e45ecb0903..7173860ec53d3 100644
--- a/python/ray/_raylet.pyx
+++ b/python/ray/_raylet.pyx
@@ -634,6 +634,12 @@ cdef shared_ptr[CBuffer] string_to_buffer(c_string& c_str):
<uint8_t*>(c_str.data()), c_str.size(), True))
+cdef void terminate_asyncio_thread() nogil:
+ with gil:
+ core_worker = ray.worker.global_worker.core_worker
+ core_worker.destroy_event_loop_if_exists()
+
+
cdef class CoreWorker:
def __cinit__(self, is_driver, store_socket, raylet_socket,
@@ -667,6 +673,7 @@ cdef class CoreWorker:
options.is_local_mode = local_mode
options.num_workers = 1
options.kill_main = kill_main_task
+ options.terminate_asyncio_thread = terminate_asyncio_thread
CCoreWorkerProcess.Initialize(options)
@@ -1171,7 +1178,8 @@ cdef class CoreWorker:
if self.async_thread is None:
self.async_thread = threading.Thread(
- target=lambda: self.async_event_loop.run_forever()
+ target=lambda: self.async_event_loop.run_forever(),
+ name="AsyncIO Thread"
)
# Making the thread a daemon causes it to exit
# when the main thread exits.
@@ -1192,8 +1200,12 @@ cdef class CoreWorker:
.YieldCurrentFiber(event))
return future.result()
- def destory_event_loop_if_exists(self):
+ def destroy_event_loop_if_exists(self):
if self.async_event_loop is not None:
+ # We should stop the monitor first because otherwise,
+ # loop.stop() will continue forever as monitor
+ # main loop will not be terminated.
+ self.async_event_loop.monitor_state.kill()
self.async_event_loop.stop()
if self.async_thread is not None:
self.async_thread.join()
diff --git a/python/ray/async_compat.py b/python/ray/async_compat.py
index df103f5df3961..4bfe7bffadcda 100644
--- a/python/ray/async_compat.py
+++ b/python/ray/async_compat.py
@@ -107,7 +107,8 @@ def __init__(self, loop):
self.names_lock = threading.Lock()
self.sleep_time = 1.0
- asyncio.ensure_future(self.monitor(), loop=loop)
+ self.monitor_loop_future = asyncio.ensure_future(
+ self.monitor(), loop=loop)
async def monitor(self):
while True:
@@ -131,3 +132,11 @@ def get_all_task_names(self):
with self.names_lock:
names = list(self.names.values())
return names
+
+ def kill(self):
+ """Kill the monitor's loop
+
+ This should be called in order to clean an event loop
+ that this monitor is running.
+ """
+ self.monitor_loop_future.cancel()
diff --git a/python/ray/includes/libcoreworker.pxd b/python/ray/includes/libcoreworker.pxd
index 6524139e097b7..a2698ee51093b 100644
--- a/python/ray/includes/libcoreworker.pxd
+++ b/python/ray/includes/libcoreworker.pxd
@@ -217,6 +217,7 @@ cdef extern from "ray/core_worker/core_worker.h" nogil:
int num_workers
(c_bool() nogil) kill_main
CCoreWorkerOptions()
+ (void() nogil) terminate_asyncio_thread
cdef cppclass CCoreWorkerProcess "ray::CoreWorkerProcess":
@staticmethod
diff --git a/python/ray/ray_perf.py b/python/ray/ray_perf.py
index eae62e396fd34..55c3c100c4e1d 100644
--- a/python/ray/ray_perf.py
+++ b/python/ray/ray_perf.py
@@ -92,6 +92,7 @@ def timeit(name, fn, multiplier=1):
def main():
print("Tip: set TESTS_TO_RUN='pattern' to run a subset of benchmarks")
ray.init()
+
value = ray.put(0)
arr = np.zeros(100 * 1024 * 1024, dtype=np.int64)
diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc
index a3e823a11e359..7739ee867ec9a 100644
--- a/src/ray/core_worker/core_worker.cc
+++ b/src/ray/core_worker/core_worker.cc
@@ -590,6 +590,13 @@ void CoreWorker::WaitForShutdown() {
}
if (options_.worker_type == WorkerType::WORKER) {
RAY_CHECK(task_execution_service_.stopped());
+ // Asyncio coroutines could still run after CoreWorker is removed because it is
+ // running in a different thread. This can cause segfault because coroutines try to
+ // access CoreWorker methods that are already garbage collected. We should complete
+ // all coroutines before shutting down in order to prevent this.
+ if (worker_context_.CurrentActorIsAsync()) {
+ options_.terminate_asyncio_thread();
+ }
}
}
diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h
index 27c374ccf4169..5b6ea601b948d 100644
--- a/src/ray/core_worker/core_worker.h
+++ b/src/ray/core_worker/core_worker.h
@@ -114,6 +114,8 @@ struct CoreWorkerOptions {
bool is_local_mode;
/// The number of workers to be started in the current process.
int num_workers;
+ /// The function to destroy asyncio event and loops.
+ std::function<void()> terminate_asyncio_thread;
};
/// Lifecycle management of one or more `CoreWorker` instances in a process.
| <!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. -->
## Why are these changes needed?
This will resolve https://github.com/ray-project/ray/issues/8309
The problem was that CoreWorker removes its C++ instance before default_worker.py's shutdown is called (where threads are joined). That means that the async thread's coroutines are still running and using a CoreWorker that was already GC'ed. This PR makes sure those coroutines are cleaned up properly before CoreWorker is GC'ed.
This also happens, rarely, in sync cases (https://github.com/ray-project/ray/issues/8291) due to import threads (see Simon's comment). It is much rarer than the async cases because there is a mechanism to "stop threads" before they are joined on shutdown. I didn't handle this in this PR for two reasons.
- It hasn't been an issue for a long time (although I am sure it was there from before). This is very rare (it was detected only once out of 8 long-running tests over 3 days, whereas async cases occur on almost every microbenchmark script run), and fixing it probably won't have a big impact compared to the effort required.
- Fixing this will require an uncertain amount of refactoring because we need to change the cleanup mechanism. Moving the thread-join logic to CoreWorker didn't work. It would also break Java code, because we would need to run Python code inside C++ code. So, I decided to deprioritize this for now. I will keep the other issue open (https://github.com/ray-project/ray/issues/8291) so I can revisit it later.
This has been manually tested. I ran the reproducible script 10 times and verified the error didn't occur. It doesn't yet properly handle this issue https://github.com/ray-project/ray/issues/8291 because it was a bit tricky to fix.
## Related issue number
https://github.com/ray-project/ray/issues/8309
## Checks
- [x] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/latest/.
- [x] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failure rates at https://ray-travis-tracker.herokuapp.com/.
- Testing Strategy
- [ ] Unit tests
- [ ] Release tests
- [ ] This PR is not tested (please justify below)
| https://api.github.com/repos/ray-project/ray/pulls/8323 | 2020-05-05T07:40:56Z | 2020-05-06T16:45:08Z | 2020-05-06T16:45:07Z | 2022-02-17T00:29:46Z | 1,335 | ray-project/ray | 19,867 |
Remove hard certifi dependency and document. | diff --git a/docs/user/advanced.rst b/docs/user/advanced.rst
index 0de6b1b178..df653d0882 100644
--- a/docs/user/advanced.rst
+++ b/docs/user/advanced.rst
@@ -741,3 +741,21 @@ coffee.
r = requests.get('https://github.com', timeout=None)
.. _`connect()`: http://linux.die.net/man/2/connect
+
+CA Certificates
+---------------
+
+By default Requests bundles a set of root CAs that it trusts, sourced from the
+`Mozilla trust store`_. However, these are only updated once for each Requests
+version. This means that if you pin a Requests version your certificates can
+become extremely out of date.
+
+From Requests version 2.4.0 onwards, Requests will attempt to use certificates
+from `certifi`_ if it is present on the system. This allows for users to update
+their trusted certificates without having to change the code that runs on their
+system.
+
+For the sake of security we recommend upgrading certifi frequently!
+
+.. _certifi: http://certifi.io/
+.. _Mozilla trust store: https://hg.mozilla.org/mozilla-central/raw-file/tip/security/nss/lib/ckfw/builtins/certdata.txt
diff --git a/setup.py b/setup.py
index 9540a47ccc..b790ec71ab 100755
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@
'requests.packages.urllib3.packages.ssl_match_hostname',
]
-requires = ['certifi']
+requires = []
with open('README.rst') as f:
readme = f.read()
| As discussed in #2124.
| https://api.github.com/repos/psf/requests/pulls/2203 | 2014-09-04T18:41:19Z | 2014-09-05T15:11:57Z | 2014-09-05T15:11:57Z | 2021-09-08T10:01:23Z | 382 | psf/requests | 32,331 |
Add BinarySensorEntity to pylint checks | diff --git a/pylint/plugins/hass_enforce_type_hints.py b/pylint/plugins/hass_enforce_type_hints.py
index f3c7a01a10ff13..f08e6d932e78f0 100644
--- a/pylint/plugins/hass_enforce_type_hints.py
+++ b/pylint/plugins/hass_enforce_type_hints.py
@@ -678,6 +678,25 @@ class ClassTypeHintMatch:
],
),
],
+ "binary_sensor": [
+ ClassTypeHintMatch(
+ base_class="Entity",
+ matches=_ENTITY_MATCH,
+ ),
+ ClassTypeHintMatch(
+ base_class="BinarySensorEntity",
+ matches=[
+ TypeHintMatch(
+ function_name="device_class",
+ return_type=["BinarySensorDeviceClass", "str", None],
+ ),
+ TypeHintMatch(
+ function_name="is_on",
+ return_type=["bool", None],
+ ),
+ ],
+ ),
+ ],
"cover": [
ClassTypeHintMatch(
base_class="Entity",
| ## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Add BinarySensorEntity to pylint checks
Can be tested with
```console
pylint --disable=all --enable=hass_enforce_type_hints --ignore-missing-annotations=n homeassistant/components/**/binary_sensor.py
************* Module homeassistant.components.zoneminder.binary_sensor
homeassistant/components/zoneminder/binary_sensor.py:48:4: W7432: Return type should be ['BinarySensorDeviceClass', 'str', None] (hass-return-type)
homeassistant/components/zoneminder/binary_sensor.py:43:4: W7432: Return type should be ['bool', None] (hass-return-type)
homeassistant/components/zoneminder/binary_sensor.py:38:4: W7432: Return type should be ['str', None] (hass-return-type)
homeassistant/components/zoneminder/binary_sensor.py:48:4: W7432: Return type should be ['***DeviceClass', 'str', None] (hass-return-type)
homeassistant/components/zoneminder/binary_sensor.py:52:4: W7432: Return type should be None (hass-return-type)
```
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
| https://api.github.com/repos/home-assistant/core/pulls/74131 | 2022-06-28T13:18:23Z | 2022-06-29T09:41:56Z | 2022-06-29T09:41:56Z | 2022-06-30T10:02:00Z | 240 | home-assistant/core | 39,400 |
Add excluded_search_path_prefixes setting - improves perf in WSL | diff --git a/README.md b/README.md
index 46a1d24b9..4f396ce95 100644
--- a/README.md
+++ b/README.md
@@ -434,6 +434,7 @@ Several *The Fuck* parameters can be changed in the file `$XDG_CONFIG_HOME/thefu
* `wait_slow_command` – max amount of time in seconds for getting previous command output if it in `slow_commands` list;
* `slow_commands` – list of slow commands;
* `num_close_matches` – maximum number of close matches to suggest, by default `3`.
+* `excluded_search_path_prefixes` – path prefixes to ignore when searching for commands, by default `[]`.
An example of `settings.py`:
@@ -466,6 +467,7 @@ rule with lower `priority` will be matched first;
* `THEFUCK_WAIT_SLOW_COMMAND` – max amount of time in seconds for getting previous command output if it in `slow_commands` list;
* `THEFUCK_SLOW_COMMANDS` – list of slow commands, like `lein:gradle`;
* `THEFUCK_NUM_CLOSE_MATCHES` – maximum number of close matches to suggest, like `5`.
+* `THEFUCK_EXCLUDED_SEARCH_PATH_PREFIXES` – path prefixes to ignore when searching for commands, by default `[]`.
For example:
diff --git a/tests/test_conf.py b/tests/test_conf.py
index 657e47556..e03473ab5 100644
--- a/tests/test_conf.py
+++ b/tests/test_conf.py
@@ -54,7 +54,8 @@ def test_from_env(self, os_environ, settings):
'THEFUCK_PRIORITY': 'bash=10:lisp=wrong:vim=15',
'THEFUCK_WAIT_SLOW_COMMAND': '999',
'THEFUCK_SLOW_COMMANDS': 'lein:react-native:./gradlew',
- 'THEFUCK_NUM_CLOSE_MATCHES': '359'})
+ 'THEFUCK_NUM_CLOSE_MATCHES': '359',
+ 'THEFUCK_EXCLUDED_SEARCH_PATH_PREFIXES': '/media/:/mnt/'})
settings.init()
assert settings.rules == ['bash', 'lisp']
assert settings.exclude_rules == ['git', 'vim']
@@ -65,6 +66,7 @@ def test_from_env(self, os_environ, settings):
assert settings.wait_slow_command == 999
assert settings.slow_commands == ['lein', 'react-native', './gradlew']
assert settings.num_close_matches == 359
+ assert settings.excluded_search_path_prefixes == ['/media/', '/mnt/']
def test_from_env_with_DEFAULT(self, os_environ, settings):
os_environ.update({'THEFUCK_RULES': 'DEFAULT_RULES:bash:lisp'})
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 5c3542a74..b32f695ad 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -94,6 +94,20 @@ def test_get_all_executables_pathsep(path, pathsep):
Path_mock.assert_has_calls([call(p) for p in path.split(pathsep)], True)
+@pytest.mark.usefixtures('no_memoize', 'os_environ_pathsep')
+@pytest.mark.parametrize('path, pathsep, excluded', [
+ ('/foo:/bar:/baz:/foo/bar:/mnt/foo', ':', '/mnt/foo'),
+ (r'C:\\foo;C:\\bar;C:\\baz;C:\\foo\\bar;Z:\\foo', ';', r'Z:\\foo')])
+def test_get_all_executables_exclude_paths(path, pathsep, excluded, settings):
+ settings.init()
+ settings.excluded_search_path_prefixes = [excluded]
+ with patch('thefuck.utils.Path') as Path_mock:
+ get_all_executables()
+ path_list = path.split(pathsep)
+ assert call(path_list[-1]) not in Path_mock.mock_calls
+ assert all(call(p) in Path_mock.mock_calls for p in path_list[:-1])
+
+
@pytest.mark.parametrize('args, result', [
(('apt-get instol vim', 'instol', 'install'), 'apt-get install vim'),
(('git brnch', 'brnch', 'branch'), 'git branch')])
diff --git a/thefuck/conf.py b/thefuck/conf.py
index b55196340..27876ef47 100644
--- a/thefuck/conf.py
+++ b/thefuck/conf.py
@@ -101,7 +101,7 @@ def _val_from_env(self, env, attr):
elif attr in ('require_confirmation', 'no_colors', 'debug',
'alter_history', 'instant_mode'):
return val.lower() == 'true'
- elif attr == 'slow_commands':
+ elif attr in ('slow_commands', 'excluded_search_path_prefixes'):
return val.split(':')
else:
return val
diff --git a/thefuck/const.py b/thefuck/const.py
index d272f1b20..8d339264d 100644
--- a/thefuck/const.py
+++ b/thefuck/const.py
@@ -43,7 +43,8 @@ def __repr__(self):
'repeat': False,
'instant_mode': False,
'num_close_matches': 3,
- 'env': {'LC_ALL': 'C', 'LANG': 'C', 'GIT_TRACE': '1'}}
+ 'env': {'LC_ALL': 'C', 'LANG': 'C', 'GIT_TRACE': '1'},
+ 'excluded_search_path_prefixes': []}
ENV_TO_ATTR = {'THEFUCK_RULES': 'rules',
'THEFUCK_EXCLUDE_RULES': 'exclude_rules',
@@ -58,7 +59,8 @@ def __repr__(self):
'THEFUCK_SLOW_COMMANDS': 'slow_commands',
'THEFUCK_REPEAT': 'repeat',
'THEFUCK_INSTANT_MODE': 'instant_mode',
- 'THEFUCK_NUM_CLOSE_MATCHES': 'num_close_matches'}
+ 'THEFUCK_NUM_CLOSE_MATCHES': 'num_close_matches',
+ 'THEFUCK_EXCLUDED_SEARCH_PATH_PREFIXES': 'excluded_search_path_prefixes'}
SETTINGS_HEADER = u"""# The Fuck settings file
#
diff --git a/thefuck/utils.py b/thefuck/utils.py
index 8d55f3725..1df113420 100644
--- a/thefuck/utils.py
+++ b/thefuck/utils.py
@@ -104,6 +104,10 @@ def get_close_matches(word, possibilities, n=None, cutoff=0.6):
return difflib_get_close_matches(word, possibilities, n, cutoff)
+def include_path_in_search(path):
+ return not any(path.startswith(x) for x in settings.excluded_search_path_prefixes)
+
+
@memoize
def get_all_executables():
from thefuck.shells import shell
@@ -119,6 +123,7 @@ def _safe(fn, fallback):
bins = [exe.name.decode('utf8') if six.PY2 else exe.name
for path in os.environ.get('PATH', '').split(os.pathsep)
+ if include_path_in_search(path)
for exe in _safe(lambda: list(Path(path).iterdir()), [])
if not _safe(exe.is_dir, True)
and exe.name not in tf_entry_points]
| Allows filtering the paths used to search for commands
Can be useful to filter out `/mnt/` in WSL for performance
Running in WSL with default config:
```
DEBUG: Total took: 0:00:10.213128
```
Running in WSL with `excluded_search_path_prefixes = ['/mnt/']` in `settings.py`:
```
DEBUG: Total took: 0:00:00.617300
```
This provides an approach to solving #1036 without having to remove Windows paths from `PATH` (which breaks various WSL use-cases) | https://api.github.com/repos/nvbn/thefuck/pulls/1165 | 2021-02-04T21:17:32Z | 2021-04-21T17:43:21Z | 2021-04-21T17:43:21Z | 2021-04-21T17:44:32Z | 1,656 | nvbn/thefuck | 30,754 |
[bug] fix det_res18_db_v2.0.yml | diff --git a/configs/det/det_res18_db_v2.0.yml b/configs/det/det_res18_db_v2.0.yml
index e983c221e2..85c574debe 100644
--- a/configs/det/det_res18_db_v2.0.yml
+++ b/configs/det/det_res18_db_v2.0.yml
@@ -22,7 +22,6 @@ Architecture:
Backbone:
name: ResNet_vd
layers: 18
- disable_se: True
Neck:
name: DBFPN
out_channels: 256
| https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/8579 | 2022-12-08T05:59:57Z | 2022-12-08T06:00:18Z | 2022-12-08T06:00:18Z | 2022-12-08T06:00:18Z | 139 | PaddlePaddle/PaddleOCR | 42,392 | |
Improve docstring re-indentation handling | diff --git a/src/black/__init__.py b/src/black/__init__.py
index e37caa98a2c..c3c8c207cd4 100644
--- a/src/black/__init__.py
+++ b/src/black/__init__.py
@@ -2037,13 +2037,20 @@ def visit_factor(self, node: Node) -> Iterator[Line]:
yield from self.visit_default(node)
def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
- # Check if it's a docstring
- if prev_siblings_are(
- leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt]
- ) and is_multiline_string(leaf):
- prefix = " " * self.current_line.depth
- docstring = fix_docstring(leaf.value[3:-3], prefix)
- leaf.value = leaf.value[0:3] + docstring + leaf.value[-3:]
+ if is_docstring(leaf) and "\\\n" not in leaf.value:
+ # We're ignoring docstrings with backslash newline escapes because changing
+ # indentation of those changes the AST representation of the code.
+ prefix = get_string_prefix(leaf.value)
+ lead_len = len(prefix) + 3
+ tail_len = -3
+ indent = " " * 4 * self.current_line.depth
+ docstring = fix_docstring(leaf.value[lead_len:tail_len], indent)
+ if docstring:
+ if leaf.value[lead_len - 1] == docstring[0]:
+ docstring = " " + docstring
+ if leaf.value[tail_len + 1] == docstring[-1]:
+ docstring = docstring + " "
+ leaf.value = leaf.value[0:lead_len] + docstring + leaf.value[tail_len:]
normalize_string_quotes(leaf)
yield from self.visit_default(leaf)
@@ -6608,6 +6615,26 @@ def patched_main() -> None:
main()
+def is_docstring(leaf: Leaf) -> bool:
+ if not is_multiline_string(leaf):
+ # For the purposes of docstring re-indentation, we don't need to do anything
+ # with single-line docstrings.
+ return False
+
+ if prev_siblings_are(
+ leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt]
+ ):
+ return True
+
+ # Multiline docstring on the same line as the `def`.
+ if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]):
+ # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python
+ # grammar. We're safe to return True without further checks.
+ return True
+
+ return False
+
+
def fix_docstring(docstring: str, prefix: str) -> str:
# https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
if not docstring:
@@ -6631,7 +6658,6 @@ def fix_docstring(docstring: str, prefix: str) -> str:
trimmed.append(prefix + stripped_line)
else:
trimmed.append("")
- # Return a single string:
return "\n".join(trimmed)
diff --git a/tests/data/docstring.py b/tests/data/docstring.py
index fcb8eb12a78..2d3d73a101c 100644
--- a/tests/data/docstring.py
+++ b/tests/data/docstring.py
@@ -81,6 +81,35 @@ def single_line():
"""
pass
+
+def this():
+ r"""
+ 'hey ho'
+ """
+
+
+def that():
+ """ "hey yah" """
+
+
+def and_that():
+ """
+ "hey yah" """
+
+
+def and_this():
+ '''
+ "hey yah"'''
+
+
+def believe_it_or_not_this_is_in_the_py_stdlib(): '''
+"hey yah"'''
+
+
+def ignored_docstring():
+ """a => \
+b"""
+
# output
class MyClass:
@@ -164,3 +193,33 @@ def over_indent():
def single_line():
"""But with a newline after it!"""
pass
+
+
+def this():
+ r"""
+ 'hey ho'
+ """
+
+
+def that():
+ """ "hey yah" """
+
+
+def and_that():
+ """
+ "hey yah" """
+
+
+def and_this():
+ '''
+ "hey yah"'''
+
+
+def believe_it_or_not_this_is_in_the_py_stdlib():
+ '''
+ "hey yah"'''
+
+
+def ignored_docstring():
+ """a => \
+b"""
\ No newline at end of file
diff --git a/tests/test_black.py b/tests/test_black.py
index 6705490ea13..cf311f52e14 100644
--- a/tests/test_black.py
+++ b/tests/test_black.py
@@ -496,6 +496,11 @@ def test_docstring(self) -> None:
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
+ mode = replace(DEFAULT_MODE, string_normalization=False)
+ not_normalized = fs(source, mode=mode)
+ self.assertFormatEqual(expected, not_normalized)
+ black.assert_equivalent(source, not_normalized)
+ black.assert_stable(source, not_normalized, mode=mode)
def test_long_strings(self) -> None:
"""Tests for splitting long strings."""
| This addresses a few crashers, namely:
* producing non-equivalent code due to mangling escaped newlines,
* invalid hugging quote characters in the docstring body to the docstring outer triple quotes (causing a quadruple quote which is a syntax error),
* lack of handling for docstrings that start on the same line as the `def`, and
* invalid stripping of outer triple quotes when the docstring contained a string prefix.
As a bonus, tests now also run when string normalization is disabled. | https://api.github.com/repos/psf/black/pulls/1623 | 2020-08-25T20:32:22Z | 2020-08-25T21:14:40Z | 2020-08-25T21:14:40Z | 2021-01-11T00:09:50Z | 1,268 | psf/black | 24,185 |
Add test checking ZHA light restores with `None` attributes | diff --git a/tests/components/zha/conftest.py b/tests/components/zha/conftest.py
index 9d9d74e72dfeab..a4ff5a3b20549e 100644
--- a/tests/components/zha/conftest.py
+++ b/tests/components/zha/conftest.py
@@ -26,7 +26,9 @@
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.components.zha.core.helpers import get_zha_gateway
+from homeassistant.helpers import restore_state
from homeassistant.setup import async_setup_component
+import homeassistant.util.dt as dt_util
from .common import patch_cluster as common_patch_cluster
@@ -498,3 +500,35 @@ def network_backup() -> zigpy.backups.NetworkBackup:
},
}
)
+
+
+@pytest.fixture
+def core_rs(hass_storage):
+ """Core.restore_state fixture."""
+
+ def _storage(entity_id, state, attributes={}):
+ now = dt_util.utcnow().isoformat()
+
+ hass_storage[restore_state.STORAGE_KEY] = {
+ "version": restore_state.STORAGE_VERSION,
+ "key": restore_state.STORAGE_KEY,
+ "data": [
+ {
+ "state": {
+ "entity_id": entity_id,
+ "state": str(state),
+ "attributes": attributes,
+ "last_changed": now,
+ "last_updated": now,
+ "context": {
+ "id": "3c2243ff5f30447eb12e7348cfd5b8ff",
+ "user_id": None,
+ },
+ },
+ "last_seen": now,
+ }
+ ],
+ }
+ return
+
+ return _storage
diff --git a/tests/components/zha/test_binary_sensor.py b/tests/components/zha/test_binary_sensor.py
index b41499dada7789..5dd7a5653ecd79 100644
--- a/tests/components/zha/test_binary_sensor.py
+++ b/tests/components/zha/test_binary_sensor.py
@@ -9,8 +9,6 @@
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE, Platform
from homeassistant.core import HomeAssistant
-from homeassistant.helpers import restore_state
-from homeassistant.util import dt as dt_util
from .common import (
async_enable_traffic,
@@ -152,38 +150,6 @@ async def test_binary_sensor(
assert hass.states.get(entity_id).state == STATE_OFF
-@pytest.fixture
-def core_rs(hass_storage):
- """Core.restore_state fixture."""
-
- def _storage(entity_id, attributes, state):
- now = dt_util.utcnow().isoformat()
-
- hass_storage[restore_state.STORAGE_KEY] = {
- "version": restore_state.STORAGE_VERSION,
- "key": restore_state.STORAGE_KEY,
- "data": [
- {
- "state": {
- "entity_id": entity_id,
- "state": str(state),
- "attributes": attributes,
- "last_changed": now,
- "last_updated": now,
- "context": {
- "id": "3c2243ff5f30447eb12e7348cfd5b8ff",
- "user_id": None,
- },
- },
- "last_seen": now,
- }
- ],
- }
- return
-
- return _storage
-
-
@pytest.mark.parametrize(
"restored_state",
[
diff --git a/tests/components/zha/test_light.py b/tests/components/zha/test_light.py
index 1ec70b74735389..bd799187a19728 100644
--- a/tests/components/zha/test_light.py
+++ b/tests/components/zha/test_light.py
@@ -40,7 +40,10 @@
)
from .conftest import SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_PROFILE, SIG_EP_TYPE
-from tests.common import async_fire_time_changed
+from tests.common import (
+ async_fire_time_changed,
+ async_mock_load_restore_state_from_storage,
+)
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e9"
@@ -1921,3 +1924,76 @@ async def test_group_member_assume_state(
await zha_gateway.async_remove_zigpy_group(zha_group.group_id)
assert hass.states.get(group_entity_id) is None
assert entity_registry.async_get(group_entity_id) is None
+
+
+@pytest.mark.parametrize(
+ ("restored_state", "expected_state"),
+ [
+ (
+ STATE_ON,
+ {
+ "brightness": None,
+ "off_with_transition": None,
+ "off_brightness": None,
+ "color_mode": ColorMode.XY, # color_mode defaults to what the light supports when restored with ON state
+ "color_temp": None,
+ "xy_color": None,
+ "hs_color": None,
+ "effect": None,
+ },
+ ),
+ (
+ STATE_OFF,
+ {
+ "brightness": None,
+ "off_with_transition": None,
+ "off_brightness": None,
+ "color_mode": None,
+ "color_temp": None,
+ "xy_color": None,
+ "hs_color": None,
+ "effect": None,
+ },
+ ),
+ ],
+)
+async def test_restore_light_state(
+ hass: HomeAssistant,
+ zigpy_device_mock,
+ core_rs,
+ zha_device_restored,
+ restored_state,
+ expected_state,
+) -> None:
+ """Test ZHA light restores without throwing an error when attributes are None."""
+
+ # restore state with None values
+ attributes = {
+ "brightness": None,
+ "off_with_transition": None,
+ "off_brightness": None,
+ "color_mode": None,
+ "color_temp": None,
+ "xy_color": None,
+ "hs_color": None,
+ "effect": None,
+ }
+
+ entity_id = "light.fakemanufacturer_fakemodel_light"
+ core_rs(
+ entity_id,
+ state=restored_state,
+ attributes=attributes,
+ )
+ await async_mock_load_restore_state_from_storage(hass)
+
+ zigpy_device = zigpy_device_mock(LIGHT_COLOR)
+ zha_device = await zha_device_restored(zigpy_device)
+ entity_id = find_entity_id(Platform.LIGHT, zha_device, hass)
+
+ assert entity_id is not None
+ assert hass.states.get(entity_id).state == restored_state
+
+ # compare actual restored state to expected state
+ for attribute, expected_value in expected_state.items():
+ assert hass.states.get(entity_id).attributes.get(attribute) == expected_value
| ## Proposed change
This adds a regression test to make sure ZHA doesn't throw an exception when `None` attributes are restored for lights.
- The issue was originally discovered in: https://github.com/home-assistant/core/issues/102599
- A patch was made here: https://github.com/home-assistant/core/pull/102774
- The PR that caused this issue to appear in ZHA: https://github.com/home-assistant/core/pull/101946
I've moved the `core_rs` fixture from `test_binary_sensor.py` to `conftest.py` to also re-use it for the added test in `test_light.py`.
## Type of change
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Deprecation (breaking change to happen in the future)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Additional information
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
- [ ] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] I have followed the [perfect PR recommendations][perfect-pr]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
[dev-checklist]: https://developers.home-assistant.io/docs/development_checklist/
[manifest-docs]: https://developers.home-assistant.io/docs/creating_integration_manifest/
[quality-scale]: https://developers.home-assistant.io/docs/integration_quality_scale_index/
[docs-repository]: https://github.com/home-assistant/home-assistant.io
[perfect-pr]: https://developers.home-assistant.io/docs/review-process/#creating-the-perfect-pr
| https://api.github.com/repos/home-assistant/core/pulls/102806 | 2023-10-25T18:28:10Z | 2023-11-15T09:45:35Z | 2023-11-15T09:45:35Z | 2023-11-16T10:01:47Z | 1,556 | home-assistant/core | 39,103 |
Added smartcore and linfa Rust meta-crates | diff --git a/README.md b/README.md
index 956b2173..bda9fd94 100644
--- a/README.md
+++ b/README.md
@@ -1416,6 +1416,8 @@ be
<a name="rust-general-purpose-machine-learning"></a>
#### General-Purpose Machine Learning
+* [smartcore](https://github.com/smartcorelib/smartcore) - "The Most Advanced Machine Learning Library In Rust."
+* [linfa](https://github.com/rust-ml/linfa) - a comprehensive toolkit to build Machine Learning applications with Rust
* [deeplearn-rs](https://github.com/tedsta/deeplearn-rs) - deeplearn-rs provides simple networks that use matrix multiplication, addition, and ReLU under the MIT license.
* [rustlearn](https://github.com/maciejkula/rustlearn) - a machine learning framework featuring logistic regression, support vector machines, decision trees and random forests.
* [rusty-machine](https://github.com/AtheMathmo/rusty-machine) - a pure-rust machine learning library.
| SmartCore is a comprehensive library for machine learning and numerical computing. The library provides a set of tools for linear algebra, numerical computing, optimization, and enables a generic, powerful yet still efficient approach to machine learning.
Linfa is a comprehensive toolkit for statistical learning, providing algorithms for optimal model and density estimation.
It contains common learning algorithms such as Elastic Net, Support Vector Machines, Kernel Methods, Linear Decision Trees, etc. and bundles them together. | https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/821 | 2021-10-19T20:27:56Z | 2021-10-30T19:36:03Z | 2021-10-30T19:36:03Z | 2021-10-30T19:36:03Z | 242 | josephmisiti/awesome-machine-learning | 51,757 |
Move Model collation args tests to correct folder | diff --git a/tests/models/test_base.py b/tests/models/test_base.py
new file mode 100644
index 0000000000000..fa253130a21d2
--- /dev/null
+++ b/tests/models/test_base.py
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import pytest
+from pytest import param
+
+from airflow.models.base import get_id_collation_args
+from tests.test_utils.config import conf_vars
+
+
+@pytest.mark.parametrize(
+ ("dsn", "expected", "extra"),
+ [
+ param("postgres://host/the_database", {}, {}, id="postgres"),
+ param("mysql://host/the_database", {"collation": "utf8mb3_general_ci"}, {}, id="mysql"),
+ param("mysql+pymsql://host/the_database", {"collation": "utf8mb3_general_ci"}, {}, id="mysql+pymsql"),
+ param(
+ "mysql://host/the_database",
+ {"collation": "ascii"},
+ {('core', 'sql_engine_collation_for_ids'): 'ascii'},
+ id="mysql with explicit config",
+ ),
+ param(
+ "postgres://host/the_database",
+ {"collation": "ascii"},
+ {('core', 'sql_engine_collation_for_ids'): 'ascii'},
+ id="postgres with explicit config",
+ ),
+ ],
+)
+def test_collation(dsn, expected, extra):
+ with conf_vars({('core', 'sql_alchemy_conn'): dsn, **extra}):
+ assert expected == get_id_collation_args()
diff --git a/tests/sensors/test_base.py b/tests/sensors/test_base.py
index a26bc94963f2e..dd3bf29a0ad49 100644
--- a/tests/sensors/test_base.py
+++ b/tests/sensors/test_base.py
@@ -19,7 +19,6 @@
import unittest
from datetime import timedelta
-from unittest import mock
from unittest.mock import Mock, patch
import pytest
@@ -27,7 +26,6 @@
from airflow.exceptions import AirflowException, AirflowRescheduleException, AirflowSensorTimeout
from airflow.models import DagBag, TaskInstance, TaskReschedule
-from airflow.models.base import get_id_collation_args
from airflow.models.dag import DAG
from airflow.operators.dummy import DummyOperator
from airflow.sensors.base import BaseSensorOperator, poke_mode_only
@@ -657,42 +655,3 @@ def test_poke_mode_only_bad_poke(self):
sensor = DummyPokeOnlySensor(task_id='foo', mode='poke', poke_changes_mode=True, dag=self.dag)
with pytest.raises(ValueError):
sensor.poke({})
-
-
-class TestCollation(unittest.TestCase):
- @mock.patch.dict(
- 'os.environ',
- AIRFLOW__CORE__SQL_ALCHEMY_CONN='postgres://host/the_database',
- )
- def test_collation_empty_on_non_mysql(self):
- assert {} == get_id_collation_args()
-
- @mock.patch.dict(
- 'os.environ',
- AIRFLOW__CORE__SQL_ALCHEMY_CONN='mysql://host/the_database',
- )
- def test_collation_set_on_mysql(self):
- assert {"collation": "utf8mb3_general_ci"} == get_id_collation_args()
-
- @mock.patch.dict(
- 'os.environ',
- AIRFLOW__CORE__SQL_ALCHEMY_CONN='mysql+pymsql://host/the_database',
- )
- def test_collation_set_on_mysql_with_pymsql(self):
- assert {"collation": "utf8mb3_general_ci"} == get_id_collation_args()
-
- @mock.patch.dict(
- 'os.environ',
- AIRFLOW__CORE__SQL_ALCHEMY_CONN='mysql://host/the_database',
- AIRFLOW__CORE__SQL_ENGINE_COLLATION_FOR_IDS='ascii',
- )
- def test_collation_override_on_non_mysql(self):
- assert {"collation": "ascii"} == get_id_collation_args()
-
- @mock.patch.dict(
- 'os.environ',
- AIRFLOW__CORE__SQL_ALCHEMY_CONN='postgres://host/the_database',
- AIRFLOW__CORE__SQL_ENGINE_COLLATION_FOR_IDS='ascii',
- )
- def test_collation_override_on_mysql(self):
- assert {"collation": "ascii"} == get_id_collation_args()
| The tests for this got added to test_base.py in #17729, which is the right file name, but inside tests/sensors/, which isn't right :)
Created a new tests/models/test_base.py for this.
And cos I'm on a "removing lines of code" kick I've parameterized it too so it a "data driven" test.
| https://api.github.com/repos/apache/airflow/pulls/17791 | 2021-08-23T19:32:34Z | 2021-08-23T21:43:35Z | 2021-08-23T21:43:35Z | 2021-08-24T06:09:15Z | 1,133 | apache/airflow | 14,859 |
[MRG+1] Update _monkeypatches.py | diff --git a/scrapy/_monkeypatches.py b/scrapy/_monkeypatches.py
index 935c4bfa353..b68099cade8 100644
--- a/scrapy/_monkeypatches.py
+++ b/scrapy/_monkeypatches.py
@@ -4,12 +4,7 @@
if six.PY2:
from urlparse import urlparse
-
- # workaround for https://bugs.python.org/issue7904 - Python < 2.7
- if urlparse('s3://bucket/key').netloc != 'bucket':
- from urlparse import uses_netloc
- uses_netloc.append('s3')
-
+
# workaround for https://bugs.python.org/issue9374 - Python < 2.7.4
if urlparse('s3://bucket/key?key=value').query != 'key=value':
from urlparse import uses_query
| The workarounds are not required assuming the bugs regarding urlparse are absent in Python versions >2.7. We already exit the program if Python version<2.7 in the __init__.py(line 17).The monkeypatches are deployed after this check at line 27 in the __init__.py . | https://api.github.com/repos/scrapy/scrapy/pulls/3907 | 2019-07-27T07:59:24Z | 2019-08-01T08:41:27Z | 2019-08-01T08:41:27Z | 2019-08-01T08:41:42Z | 188 | scrapy/scrapy | 35,063 |
Added awesome pysolr library to Search section. | diff --git a/README.md b/README.md
index 213de8659..c6f2a7f00 100644
--- a/README.md
+++ b/README.md
@@ -957,6 +957,7 @@ Inspired by [awesome-php](https://github.com/ziadoz/awesome-php).
* [elasticsearch-dsl-py](https://github.com/elastic/elasticsearch-dsl-py) - The official high-level Python client for Elasticsearch.
* [elasticsearch-py](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/index.html) - The official low-level Python client for [Elasticsearch](https://www.elastic.co/products/elasticsearch).
* [esengine](https://github.com/catholabs/esengine) - ElasticSearch ODM (Object Document Mapper) for Python.
+* [pysolr](https://github.com/django-haystack/pysolr) - A lightweight Python wrapper for Apache Solr (incl. SolrCloud awareness).
* [solrpy](https://github.com/edsu/solrpy) - A Python client for [solr](http://lucene.apache.org/solr/).
* [Whoosh](http://whoosh.readthedocs.io/) - A fast, pure Python search engine library.
| ## What is this Python project?
[pysolr](https://github.com/django-haystack/pysolr/) is a lightweight Python wrapper for Apache Solr. It provides an interface that queries the server and returns results based on the query.
Latest release on Python package index: https://pypi.python.org/pypi/pysolr/3.6.0
## What's the difference between this Python project and similar ones?
* Active project (see https://wiki.apache.org/solr/SolPython for other Python solr libs)
* Python 3.x support
* "More Like This" support
* Support for Spelling correction
* SolrCloud awareness
--
Anyone who agrees with this pull request could vote for it by adding a :+1: to it, and usually, the maintainer will merge it when votes reach **20**. | https://api.github.com/repos/vinta/awesome-python/pulls/881 | 2017-05-02T09:14:02Z | 2017-05-03T13:46:07Z | 2017-05-03T13:46:07Z | 2017-05-03T13:46:07Z | 283 | vinta/awesome-python | 26,937 |
Default environ | diff --git a/CHANGES b/CHANGES
index 5a1f5a3334..13ce156c22 100644
--- a/CHANGES
+++ b/CHANGES
@@ -19,6 +19,8 @@ Version 0.12
well as error handlers.
- Disable logger propagation by default for the app logger.
- Add support for range requests in ``send_file``.
+- ``app.test_client`` includes preset default environment, which can now be
+ directly set, instead of per ``client.get``.
Version 0.11.2
--------------
diff --git a/flask/testing.py b/flask/testing.py
index 8eacf58b40..3160024565 100644
--- a/flask/testing.py
+++ b/flask/testing.py
@@ -10,6 +10,7 @@
:license: BSD, see LICENSE for more details.
"""
+import werkzeug
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
@@ -43,11 +44,23 @@ class FlaskClient(Client):
information about how to use this class refer to
:class:`werkzeug.test.Client`.
+ .. versionchanged:: 0.12
+ `app.test_client()` includes preset default environment, which can be
+ set after instantiation of the `app.test_client()` object in
+ `client.environ_base`.
+
Basic usage is outlined in the :ref:`testing` chapter.
"""
preserve_context = False
+ def __init__(self, *args, **kwargs):
+ super(FlaskClient, self).__init__(*args, **kwargs)
+ self.environ_base = {
+ "REMOTE_ADDR": "127.0.0.1",
+ "HTTP_USER_AGENT": "werkzeug/" + werkzeug.__version__
+ }
+
@contextmanager
def session_transaction(self, *args, **kwargs):
"""When used in combination with a ``with`` statement this opens a
@@ -101,6 +114,7 @@ def session_transaction(self, *args, **kwargs):
def open(self, *args, **kwargs):
kwargs.setdefault('environ_overrides', {}) \
['flask._preserve_context'] = self.preserve_context
+ kwargs.setdefault('environ_base', self.environ_base)
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
diff --git a/tests/test_testing.py b/tests/test_testing.py
index 7bb99e7971..9d3539049d 100644
--- a/tests/test_testing.py
+++ b/tests/test_testing.py
@@ -11,6 +11,7 @@
import pytest
import flask
+import werkzeug
from flask._compat import text_type
@@ -43,6 +44,40 @@ def index():
rv = c.get('/')
assert rv.data == b'http://localhost/'
+def test_environ_base_default():
+ app = flask.Flask(__name__)
+ app.testing = True
+ @app.route('/')
+ def index():
+ flask.g.user_agent = flask.request.headers["User-Agent"]
+ return flask.request.remote_addr
+
+ with app.test_client() as c:
+ rv = c.get('/')
+ assert rv.data == b'127.0.0.1'
+ assert flask.g.user_agent == 'werkzeug/' + werkzeug.__version__
+
+def test_environ_base_modified():
+ app = flask.Flask(__name__)
+ app.testing = True
+ @app.route('/')
+ def index():
+ flask.g.user_agent = flask.request.headers["User-Agent"]
+ return flask.request.remote_addr
+
+ with app.test_client() as c:
+ c.environ_base['REMOTE_ADDR'] = '0.0.0.0'
+ c.environ_base['HTTP_USER_AGENT'] = 'Foo'
+ rv = c.get('/')
+ assert rv.data == b'0.0.0.0'
+ assert flask.g.user_agent == 'Foo'
+
+ c.environ_base['REMOTE_ADDR'] = '0.0.0.1'
+ c.environ_base['HTTP_USER_AGENT'] = 'Bar'
+ rv = c.get('/')
+ assert rv.data == b'0.0.0.1'
+ assert flask.g.user_agent == 'Bar'
+
def test_redirect_keep_session():
app = flask.Flask(__name__)
app.secret_key = 'testing'
| Addresses #1467. Now you can pass `environ_base` dictionary into `app.test_client`. This also allows overriding per `open` call (or `make_test_environ_builder` call).
Perhaps a better solution would be to edit werkzeug directly and add it to that init (which, in turn, would allow us to remove the subclass init). I don't believe that would cause any unintended behavior, but I'm open to further conversation on it.
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/pallets/flask/2047)
<!-- Reviewable:end -->
| https://api.github.com/repos/pallets/flask/pulls/2047 | 2016-10-01T16:54:53Z | 2016-10-12T06:54:25Z | 2016-10-12T06:54:25Z | 2020-11-14T02:52:56Z | 984 | pallets/flask | 20,846 |
nvidia-ai-endpoints[patch]: release 0.0.2 | diff --git a/libs/partners/nvidia-ai-endpoints/pyproject.toml b/libs/partners/nvidia-ai-endpoints/pyproject.toml
index fe13960fd753e3..e75e70c8c5435d 100644
--- a/libs/partners/nvidia-ai-endpoints/pyproject.toml
+++ b/libs/partners/nvidia-ai-endpoints/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-nvidia-ai-endpoints"
-version = "0.0.1.post2"
+version = "0.0.2"
description = "An integration package connecting NVIDIA AI Endpoints and LangChain"
authors = []
readme = "README.md"
| https://api.github.com/repos/langchain-ai/langchain/pulls/17125 | 2024-02-06T20:43:57Z | 2024-02-06T20:48:25Z | 2024-02-06T20:48:25Z | 2024-02-06T20:48:26Z | 160 | langchain-ai/langchain | 42,935 | |
Add jet-bridge | diff --git a/README.md b/README.md
index ded142745..c706e66ad 100644
--- a/README.md
+++ b/README.md
@@ -111,6 +111,7 @@ Inspired by [awesome-php](https://github.com/ziadoz/awesome-php).
* [django-jet](https://github.com/geex-arts/django-jet) - Modern responsive template for the Django admin interface with improved functionality.
* [django-suit](https://djangosuit.com/) - Alternative Django Admin-Interface (free only for Non-commercial use).
* [django-xadmin](https://github.com/sshwsfc/xadmin) - Drop-in replacement of Django admin comes with lots of goodies.
+* [jet-bridge](https://github.com/jet-admin/jet-bridge) - Admin panel framework for any application with nice UI (ex Jet Django)
* [flask-admin](https://github.com/flask-admin/flask-admin) - Simple and extensible administrative interface framework for Flask.
* [flower](https://github.com/mher/flower) - Real-time monitor and web admin for Celery.
* [wooey](https://github.com/wooey/wooey) - A Django app which creates automatic web UIs for Python scripts.
|
## What is this Python project?
Jet Admin is a SaaS service that automatically generates extendable back office for your application. Jet Bridge is a standalone app which generates REST API thought which your SQL database is connected to Jet Admin. All you need is to install Jet Bridge adapter and connect it to your database.
More here https://github.com/jet-admin/jet-bridge
## What's the difference between this Python project and similar ones?
- Powerful dashboard out-of-the-box
- Nice and responsive UI
- With WYSIWYG interface customization your can change almost every part of interface
- Flex Features allows you to create your custom Actions, Views, Fields and other
- Works with any technology: the interface is generated automatically based on an analysis of the data and data structure of your database
--
Anyone who agrees with this pull request could vote for it by adding a :+1: to it, and usually, the maintainer will merge it when votes reach **20**.
| https://api.github.com/repos/vinta/awesome-python/pulls/1385 | 2019-10-09T15:28:20Z | 2019-10-17T14:50:50Z | 2019-10-17T14:50:50Z | 2019-10-17T14:50:50Z | 273 | vinta/awesome-python | 27,145 |
Refs #29898 -- Refactored out ProjectState.resolve_model_relations()/resolve_model_field_relations() hooks. | diff --git a/django/db/migrations/state.py b/django/db/migrations/state.py
index 6e023415bdadc..392708134d19e 100644
--- a/django/db/migrations/state.py
+++ b/django/db/migrations/state.py
@@ -346,6 +346,40 @@ def _reload(self, related_models):
# Render all models
self.apps.render_multiple(states_to_be_rendered)
+ def update_model_field_relation(
+ self, model, model_key, field_name, field, concretes,
+ ):
+ remote_model_key = resolve_relation(model, *model_key)
+ if remote_model_key[0] not in self.real_apps and remote_model_key in concretes:
+ remote_model_key = concretes[remote_model_key]
+ self.relations[remote_model_key][model_key].append((field_name, field))
+
+ def resolve_model_field_relations(
+ self, model_key, field_name, field, concretes=None,
+ ):
+ remote_field = field.remote_field
+ if not remote_field:
+ return
+ if concretes is None:
+ concretes, _ = self._get_concrete_models_mapping_and_proxy_models()
+
+ self.update_model_field_relation(
+ remote_field.model, model_key, field_name, field, concretes,
+ )
+
+ through = getattr(remote_field, 'through', None)
+ if not through:
+ return
+ self.update_model_field_relation(through, model_key, field_name, field, concretes)
+
+ def resolve_model_relations(self, model_key, concretes=None):
+ if concretes is None:
+ concretes, _ = self._get_concrete_models_mapping_and_proxy_models()
+
+ model_state = self.models[model_key]
+ for field_name, field in model_state.fields.items():
+ self.resolve_model_field_relations(model_key, field_name, field, concretes)
+
def resolve_fields_and_relations(self):
# Resolve fields.
for model_state in self.models.values():
@@ -357,23 +391,8 @@ def resolve_fields_and_relations(self):
concretes, proxies = self._get_concrete_models_mapping_and_proxy_models()
for model_key in concretes:
- model_state = self.models[model_key]
- for field_name, field in model_state.fields.items():
- remote_field = field.remote_field
- if not remote_field:
- continue
- remote_model_key = resolve_relation(remote_field.model, *model_key)
- if remote_model_key[0] not in self.real_apps and remote_model_key in concretes:
- remote_model_key = concretes[remote_model_key]
- self.relations[remote_model_key][model_key].append((field_name, field))
+ self.resolve_model_relations(model_key, concretes)
- through = getattr(remote_field, 'through', None)
- if not through:
- continue
- through_model_key = resolve_relation(through, *model_key)
- if through_model_key[0] not in self.real_apps and through_model_key in concretes:
- through_model_key = concretes[through_model_key]
- self.relations[through_model_key][model_key].append((field_name, field))
for model_key in proxies:
self.relations[model_key] = self.relations[concretes[model_key]]
| https://api.github.com/repos/django/django/pulls/14781 | 2021-08-20T06:58:21Z | 2021-08-20T18:24:09Z | 2021-08-20T18:24:09Z | 2021-08-20T18:25:05Z | 756 | django/django | 51,396 | |
[ie/Axs] new extractor | diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py
index b788737a2da..b836fe8a3d5 100644
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -165,6 +165,7 @@
AWAANLiveIE,
AWAANSeasonIE,
)
+from .axs import AxsIE
from .azmedien import AZMedienIE
from .baidu import BaiduVideoIE
from .banbye import (
diff --git a/yt_dlp/extractor/axs.py b/yt_dlp/extractor/axs.py
new file mode 100644
index 00000000000..4b263725f12
--- /dev/null
+++ b/yt_dlp/extractor/axs.py
@@ -0,0 +1,87 @@
+from .common import InfoExtractor
+from ..utils import (
+ float_or_none,
+ js_to_json,
+ parse_iso8601,
+ traverse_obj,
+ url_or_none,
+)
+
+
+class AxsIE(InfoExtractor):
+ IE_NAME = 'axs.tv'
+ _VALID_URL = r'https?://(?:www\.)?axs\.tv/(?:channel/(?:[^/?#]+/)+)?video/(?P<id>[^/?#]+)'
+
+ _TESTS = [{
+ 'url': 'https://www.axs.tv/video/5f4dc776b70e4f1c194f22ef/',
+ 'md5': '8d97736ae8e50c64df528e5e676778cf',
+ 'info_dict': {
+ 'id': '5f4dc776b70e4f1c194f22ef',
+ 'title': 'Small Town',
+ 'ext': 'mp4',
+ 'description': 'md5:e314d28bfaa227a4d7ec965fae19997f',
+ 'upload_date': '20230602',
+ 'timestamp': 1685729564,
+ 'duration': 1284.216,
+ 'series': 'Rock & Roll Road Trip with Sammy Hagar',
+ 'season': 2,
+ 'episode': '3',
+ 'thumbnail': 'https://images.dotstudiopro.com/5f4e9d330a0c3b295a7e8394',
+ },
+ }, {
+ 'url': 'https://www.axs.tv/channel/rock-star-interview/video/daryl-hall',
+ 'md5': '300ae795cd8f9984652c0949734ffbdc',
+ 'info_dict': {
+ 'id': '5f488148b70e4f392572977c',
+ 'display_id': 'daryl-hall',
+ 'title': 'Daryl Hall',
+ 'ext': 'mp4',
+ 'description': 'md5:e54ecaa0f4b5683fc9259e9e4b196628',
+ 'upload_date': '20230214',
+ 'timestamp': 1676403615,
+ 'duration': 2570.668,
+ 'series': 'The Big Interview with Dan Rather',
+ 'season': 3,
+ 'episode': '5',
+ 'thumbnail': 'https://images.dotstudiopro.com/5f4d1901f340b50d937cec32',
+ },
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ webpage_json_data = self._search_json(
+ r'mountObj\s*=', webpage, 'video ID data', display_id,
+ transform_source=js_to_json)
+ video_id = webpage_json_data['video_id']
+ company_id = webpage_json_data['company_id']
+
+ meta = self._download_json(
+ f'https://api.myspotlight.tv/dotplayer/video/{company_id}/{video_id}',
+ video_id, query={'device_type': 'desktop_web'})['video']
+
+ formats = self._extract_m3u8_formats(
+ meta['video_m3u8'], video_id, 'mp4', m3u8_id='hls')
+
+ subtitles = {}
+ for cc in traverse_obj(meta, ('closeCaption', lambda _, v: url_or_none(v['srtPath']))):
+ subtitles.setdefault(cc.get('srtShortLang') or 'en', []).append(
+ {'ext': cc.get('srtExt'), 'url': cc['srtPath']})
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'formats': formats,
+ **traverse_obj(meta, {
+ 'title': ('title', {str}),
+ 'description': ('description', {str}),
+ 'series': ('seriestitle', {str}),
+ 'season': ('season', {int}),
+ 'episode': ('episode', {str}),
+ 'duration': ('duration', {float_or_none}),
+ 'timestamp': ('updated_at', {parse_iso8601}),
+ 'thumbnail': ('thumb', {url_or_none}),
+ }),
+ 'subtitles': subtitles,
+ }
| Fixes https://github.com/yt-dlp/yt-dlp/issues/7451
**IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
Add support for axs.tv.
Fixes #7451
Note: The URLs in the tests may or may not expire. I originally had a one URL, but it seems to have disappeared after a few weeks.
<details open><summary>Template</summary> <!-- OPEN is intentional -->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Fix or improvement to an extractor (Make sure to add/update tests)
- [x] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
<!-- Do NOT edit/remove anything below this! -->
</details><details><summary>Copilot Summary</summary>
<!--
copilot:all
-->
### <samp>🤖 Generated by Copilot at 7c1fa05</samp>
### Summary
🎥🎸🌐
<!--
1. 🎥 - This emoji represents videos, which are the main content that the extractor deals with. It also suggests entertainment and media, which are the themes of the AXS TV website.
2. 🎸 - This emoji represents music, which is one of the genres that the AXS TV website features. It also suggests rock and roll, which is a common style of music on the network.
3. 🌐 - This emoji represents the web, which is the source of the videos and the API that the extractor uses. It also suggests global and diverse, which are some of the values of the AXS TV website.
-->
Add support for extracting videos from the AXS TV website. Create a new module `axs.py` that defines the `AxsIE` extractor class and import it in `_extractors.py`.
> _`AxsIE` extracts_
> _videos from AXS TV site_
> _metadata and `HLS`_
### Walkthrough
* Add `AxsIE` extractor class for AXS TV videos ([link](https://github.com/yt-dlp/yt-dlp/pull/8094/files?diff=unified&w=0#diff-128ed0ce30846ea4f00e43f659d68be76e3236c9844ab4c102e296482d19f3feR1-R83), [link](https://github.com/yt-dlp/yt-dlp/pull/8094/files?diff=unified&w=0#diff-780b22dc7eb280f5a7b2bbf79aff17826de88ddcbf2fc1116ba19901827aa4e3R168))
</details>
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/8094 | 2023-09-12T13:06:14Z | 2023-09-16T10:04:09Z | 2023-09-16T10:04:08Z | 2023-09-16T10:04:09Z | 1,218 | yt-dlp/yt-dlp | 8,156 |
Fix typos, comments, unused imports | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a05e6811..2cd0594bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Change the render prefix to correspond to the decimal units in progress
+### Fixed
+
+- Fixed typo in `Style.transparent_background` method name.
+
## [8.0.0] - 2020-10-03
### Added
diff --git a/rich/console.py b/rich/console.py
index 108736375..545a85cf7 100644
--- a/rich/console.py
+++ b/rich/console.py
@@ -620,7 +620,7 @@ def size(self) -> ConsoleDimensions:
return ConsoleDimensions(80, 25)
width, height = shutil.get_terminal_size()
- # get_terminal_size can report 0, 0 if run from psuedo-terminal
+ # get_terminal_size can report 0, 0 if run from pseudo-terminal
width = width or 80
height = height or 25
return ConsoleDimensions(
@@ -749,7 +749,7 @@ def render_lines(
is required, such as the Panel class which draws a border around any renderable object.
Args:
- renderables (Iterable[RenderableType]): Any object or objects renderable in the console.
+ renderable (RenderableType): Any object renderable in the console.
options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``.
style (Style, optional): Optional style to apply to renderables. Defaults to ``None``.
pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``.
@@ -858,7 +858,7 @@ def _collect_renderables(
"""Combined a number of renderables and text in to one renderable.
Args:
- renderables (Iterable[Union[str, ConsoleRenderable]]): Anything that Rich can render.
+ objects (Iterable[Any]): Anything that Rich can render.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\\n".
justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
@@ -1366,7 +1366,7 @@ def save_html(
clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
code_format (str, optional): Format string to render HTML, should contain {foreground}
{background} and {code}.
- inline_styes (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
+ inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
diff --git a/rich/pretty.py b/rich/pretty.py
index d9289006f..2e3343976 100644
--- a/rich/pretty.py
+++ b/rich/pretty.py
@@ -359,7 +359,7 @@ def pretty_repr(
Args:
_object (Any): Object to repr.
- max_width (int, optional): Diresired maximum width of repr string. Defaults to 80.
+ max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
diff --git a/rich/style.py b/rich/style.py
index b51393b58..a67e2c67f 100644
--- a/rich/style.py
+++ b/rich/style.py
@@ -345,7 +345,7 @@ def link(self) -> Optional[str]:
return self._link
@property
- def transaprent_background(self) -> bool:
+ def transparent_background(self) -> bool:
"""Check if the style specified a transparent background."""
return self.bgcolor is None or self.bgcolor.is_default
diff --git a/rich/syntax.py b/rich/syntax.py
index 8055730c1..670ddba7f 100644
--- a/rich/syntax.py
+++ b/rich/syntax.py
@@ -353,7 +353,7 @@ def highlight(self, code: str) -> Text:
base_style = self._get_base_style()
justify: JustifyMethod = (
- "default" if base_style.transaprent_background else "left"
+ "default" if base_style.transparent_background else "left"
)
text = Text(
@@ -401,7 +401,7 @@ def _numbers_column_width(self) -> int:
def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]:
"""Get background, number, and highlight styles for line numbers."""
background_style = self._get_base_style()
- if background_style.transaprent_background:
+ if background_style.transparent_background:
return Style.null(), Style(dim=True), Style.null()
if console.color_system in ("256", "truecolor"):
number_style = Style.chain(
@@ -428,7 +428,7 @@ def __rich_measure__(self, console: "Console", max_width: int) -> "Measurement":
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
- transparent_background = self._get_base_style().transaprent_background
+ transparent_background = self._get_base_style().transparent_background
code_width = (
(options.max_width - self._numbers_column_width - 1)
if self.code_width is None
diff --git a/rich/traceback.py b/rich/traceback.py
index 37d4015c6..71ce631b2 100644
--- a/rich/traceback.py
+++ b/rich/traceback.py
@@ -9,8 +9,6 @@
from pygments.lexers import guess_lexer_for_filename
from pygments.token import (
- Comment,
- Generic,
Keyword,
Name,
Number,
@@ -212,7 +210,7 @@ def extract(
traceback: Optional[TracebackType],
show_locals: bool = False,
) -> Trace:
- """Extrace traceback information.
+ """Extract traceback information.
Args:
exc_type (Type[BaseException]): Exception type.
@@ -342,7 +340,7 @@ def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult:
if syntax_error.filename != "<stdin>":
text = Text.assemble(
(f" {syntax_error.filename}", "pygments.string"),
- (":", "pgments.text"),
+ (":", "pygments.text"),
(str(syntax_error.lineno), "pygments.number"),
style="pygments.text",
)
| ## Type of changes
- [ ] Bug fix
- [ ] New feature
- [x] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [ ] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [ ] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
## Description
Just some semi-automatically found typos.
| https://api.github.com/repos/Textualize/rich/pulls/362 | 2020-10-08T04:01:06Z | 2020-10-08T13:17:33Z | 2020-10-08T13:17:33Z | 2020-10-08T13:17:33Z | 1,604 | Textualize/rich | 47,936 |
[Bilibili] Add 8k support | diff --git a/yt_dlp/extractor/bilibili.py b/yt_dlp/extractor/bilibili.py
index 4223a80ff39..652115898ab 100644
--- a/yt_dlp/extractor/bilibili.py
+++ b/yt_dlp/extractor/bilibili.py
@@ -17,9 +17,9 @@
ExtractorError,
int_or_none,
float_or_none,
+ mimetype2ext,
parse_iso8601,
traverse_obj,
- try_get,
parse_count,
smuggle_url,
srt_subtitles_timecode,
@@ -53,15 +53,13 @@ class BiliBiliIE(InfoExtractor):
'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
'info_dict': {
'id': '1074402',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': '【金坷垃】金泡沫',
+ 'uploader_id': '156160',
+ 'uploader': '菊子桑',
+ 'upload_date': '20140420',
'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
- 'duration': 308.067,
'timestamp': 1398012678,
- 'upload_date': '20140420',
- 'thumbnail': r're:^https?://.+\.jpg',
- 'uploader': '菊子桑',
- 'uploader_id': '156160',
},
}, {
# Tested in BiliBiliBangumiIE
@@ -82,42 +80,20 @@ class BiliBiliIE(InfoExtractor):
},
'skip': 'Geo-restricted to China',
}, {
- # Title with double quotes
'url': 'http://www.bilibili.com/video/av8903802/',
'info_dict': {
'id': '8903802',
+ 'ext': 'mp4',
'title': '阿滴英文|英文歌分享#6 "Closer',
+ 'upload_date': '20170301',
'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
+ 'timestamp': 1488382634,
+ 'uploader_id': '65880958',
+ 'uploader': '阿滴英文',
+ },
+ 'params': {
+ 'skip_download': True,
},
- 'playlist': [{
- 'info_dict': {
- 'id': '8903802_part1',
- 'ext': 'flv',
- 'title': '阿滴英文|英文歌分享#6 "Closer',
- 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
- 'uploader': '阿滴英文',
- 'uploader_id': '65880958',
- 'timestamp': 1488382634,
- 'upload_date': '20170301',
- },
- 'params': {
- 'skip_download': True,
- },
- }, {
- 'info_dict': {
- 'id': '8903802_part2',
- 'ext': 'flv',
- 'title': '阿滴英文|英文歌分享#6 "Closer',
- 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
- 'uploader': '阿滴英文',
- 'uploader_id': '65880958',
- 'timestamp': 1488382634,
- 'upload_date': '20170301',
- },
- 'params': {
- 'skip_download': True,
- },
- }]
}, {
# new BV video id format
'url': 'https://www.bilibili.com/video/BV1JE411F741',
@@ -152,6 +128,7 @@ def _real_extract(self, url):
av_id, bv_id = self._get_video_id_set(video_id, mobj.group('id_bv') is not None)
video_id = av_id
+ info = {}
anime_id = mobj.group('anime_id')
page_id = mobj.group('page')
webpage = self._download_webpage(url, video_id)
@@ -203,35 +180,48 @@ def _real_extract(self, url):
}
headers.update(self.geo_verification_headers())
+ video_info = self._parse_json(
+ self._search_regex(r'window.__playinfo__\s*=\s*({.+?})</script>', webpage, 'video info', default=None),
+ video_id, fatal=False) or {}
+ video_info = video_info.get('data') or {}
+
+ durl = traverse_obj(video_info, ('dash', 'video'))
+ audios = traverse_obj(video_info, ('dash', 'audio')) or []
entries = []
RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
for num, rendition in enumerate(RENDITIONS, start=1):
payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
-
- video_info = self._download_json(
- 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
- video_id, note='Downloading video info page',
- headers=headers, fatal=num == len(RENDITIONS))
-
if not video_info:
- continue
+ video_info = self._download_json(
+ 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
+ video_id, note='Downloading video info page',
+ headers=headers, fatal=num == len(RENDITIONS))
+ if not video_info:
+ continue
- if 'durl' not in video_info:
+ if not durl and 'durl' not in video_info:
if num < len(RENDITIONS):
continue
self._report_error(video_info)
- for idx, durl in enumerate(video_info['durl']):
- formats = [{
- 'url': durl['url'],
- 'filesize': int_or_none(durl['size']),
- }]
- for backup_url in durl.get('backup_url', []):
+ formats = []
+ for idx, durl in enumerate(durl or video_info['durl']):
+ formats.append({
+ 'url': durl.get('baseUrl') or durl.get('base_url') or durl.get('url'),
+ 'ext': mimetype2ext(durl.get('mimeType') or durl.get('mime_type')),
+ 'fps': int_or_none(durl.get('frameRate') or durl.get('frame_rate')),
+ 'width': int_or_none(durl.get('width')),
+ 'height': int_or_none(durl.get('height')),
+ 'vcodec': durl.get('codecs'),
+ 'acodec': 'none' if audios else None,
+ 'tbr': float_or_none(durl.get('bandwidth'), scale=1000),
+ 'filesize': int_or_none(durl.get('size')),
+ })
+ for backup_url in traverse_obj(durl, 'backup_url', expected_type=list) or []:
formats.append({
'url': backup_url,
- # backup URLs have lower priorities
'quality': -2 if 'hd.mp4' in backup_url else -3,
})
@@ -239,30 +229,46 @@ def _real_extract(self, url):
a_format.setdefault('http_headers', {}).update({
'Referer': url,
})
-
- self._sort_formats(formats)
-
- entries.append({
- 'id': '%s_part%s' % (video_id, idx),
- 'duration': float_or_none(durl.get('length'), 1000),
- 'formats': formats,
+ for audio in audios:
+ formats.append({
+ 'url': audio.get('baseUrl') or audio.get('base_url') or audio.get('url'),
+ 'ext': mimetype2ext(audio.get('mimeType') or audio.get('mime_type')),
+ 'fps': int_or_none(audio.get('frameRate') or audio.get('frame_rate')),
+ 'width': int_or_none(audio.get('width')),
+ 'height': int_or_none(audio.get('height')),
+ 'acodec': audio.get('codecs'),
+ 'vcodec': 'none',
+ 'tbr': float_or_none(audio.get('bandwidth'), scale=1000),
+ 'filesize': int_or_none(audio.get('size'))
})
+ for backup_url in traverse_obj(audio, 'backup_url', expected_type=list) or []:
+ formats.append({
+ 'url': backup_url,
+ # backup URLs have lower priorities
+ 'quality': -3,
+ })
+
+ info.update({
+ 'id': video_id,
+ 'duration': float_or_none(durl.get('length'), 1000),
+ 'formats': formats,
+ })
break
+ self._sort_formats(formats)
+
title = self._html_search_regex(
- (r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
+ (r'<h1[^>]+title=(["\'])(?P<title>[^"\']+)',
r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
group='title')
# Get part title for anthologies
if page_id is not None:
- # TODO: The json is already downloaded by _extract_anthology_entries. Don't redownload for each video
- part_title = try_get(
- self._download_json(
- f'https://api.bilibili.com/x/player/pagelist?bvid={bv_id}&jsonp=jsonp',
- video_id, note='Extracting videos in anthology'),
- lambda x: x['data'][int(page_id) - 1]['part'])
- title = part_title or title
+ # TODO: The json is already downloaded by _extract_anthology_entries. Don't redownload for each video.
+ part_info = traverse_obj(self._download_json(
+ f'https://api.bilibili.com/x/player/pagelist?bvid={bv_id}&jsonp=jsonp',
+ video_id, note='Extracting videos in anthology'), 'data', expected_type=list)
+ title = title if len(part_info) == 1 else traverse_obj(part_info, (int(page_id) - 1, 'part')) or title
description = self._html_search_meta('description', webpage)
timestamp = unified_timestamp(self._html_search_regex(
@@ -272,7 +278,7 @@ def _real_extract(self, url):
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
# TODO 'view_count' requires deobfuscating Javascript
- info = {
+ info.update({
'id': str(video_id) if page_id is None else '%s_part%s' % (video_id, page_id),
'cid': cid,
'title': title,
@@ -280,7 +286,7 @@ def _real_extract(self, url):
'timestamp': timestamp,
'thumbnail': thumbnail,
'duration': float_or_none(video_info.get('timelength'), scale=1000),
- }
+ })
uploader_mobj = re.search(
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>\s*(?P<name>[^<]+?)\s*<',
@@ -301,7 +307,7 @@ def _real_extract(self, url):
video_id, fatal=False, note='Downloading tags'), ('data', ..., 'tag_name')),
}
- entries[0]['subtitles'] = {
+ info['subtitles'] = {
'danmaku': [{
'ext': 'xml',
'url': f'https://comment.bilibili.com/{cid}.xml',
@@ -336,12 +342,10 @@ def _real_extract(self, url):
entry['id'] = '%s_part%d' % (video_id, (idx + 1))
return {
- '_type': 'multi_video',
'id': str(video_id),
'bv_id': bv_id,
'title': title,
'description': description,
- 'entries': entries,
**info, **top_level_info
}
@@ -482,9 +486,9 @@ def _entries(self, list_id):
data = self._download_json(
self._API_URL % (list_id, page_num), list_id, note=f'Downloading page {page_num}')['data']
- max_count = max_count or try_get(data, lambda x: x['page']['count'])
+ max_count = max_count or traverse_obj(data, ('page', 'count'))
- entries = try_get(data, lambda x: x['list']['vlist'])
+ entries = traverse_obj(data, ('list', 'vlist'))
if not entries:
return
for entry in entries:
@@ -522,7 +526,7 @@ def _fetch_page(self, api_url, num_pages, query, page_num):
api_url, query, query={'Search_key': query, 'pn': page_num},
note='Extracting results from page %s of %s' % (page_num, num_pages))
- video_list = try_get(parsed_json, lambda x: x['data']['archives'], list)
+ video_list = traverse_obj(parsed_json, ('data', 'archives'), expected_type=list)
if not video_list:
raise ExtractorError('Failed to retrieve video list for page %d' % page_num)
@@ -552,7 +556,7 @@ def _entries(self, category, subcategory, query):
api_url = 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
page_json = self._download_json(api_url, query, query={'Search_key': query, 'pn': '1'})
- page_data = try_get(page_json, lambda x: x['data']['page'], dict)
+ page_data = traverse_obj(page_json, ('data', 'page'), expected_type=dict)
count, size = int_or_none(page_data.get('count')), int_or_none(page_data.get('size'))
if count is None or not size:
raise ExtractorError('Failed to calculate either page count or size')
| ## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [x] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Closes #1898 Closes #1819 (hopefully)
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/1964 | 2021-12-11T18:04:22Z | 2022-01-30T18:51:23Z | 2022-01-30T18:51:22Z | 2022-01-30T18:51:23Z | 3,372 | yt-dlp/yt-dlp | 7,649 |
[ie/abematv] allow login on AbemaTVTitle | diff --git a/yt_dlp/extractor/abematv.py b/yt_dlp/extractor/abematv.py
index 0a610e3151f..6453dde9732 100644
--- a/yt_dlp/extractor/abematv.py
+++ b/yt_dlp/extractor/abematv.py
@@ -92,6 +92,8 @@ def abematv_license_open(self, url):
class AbemaTVBaseIE(InfoExtractor):
+ _NETRC_MACHINE = 'abematv'
+
_USERTOKEN = None
_DEVICE_ID = None
_MEDIATOKEN = None
@@ -184,6 +186,37 @@ def _get_media_token(self, invalidate=False, to_show=True):
return self._MEDIATOKEN
+ def _perform_login(self, username, password):
+ self._get_device_token()
+ if self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19') and self._get_media_token():
+ self.write_debug('Skipping logging in')
+ return
+
+ if '@' in username: # don't strictly check if it's email address or not
+ ep, method = 'user/email', 'email'
+ else:
+ ep, method = 'oneTimePassword', 'userId'
+
+ login_response = self._download_json(
+ f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
+ data=json.dumps({
+ method: username,
+ 'password': password
+ }).encode('utf-8'), headers={
+ 'Authorization': f'bearer {self._get_device_token()}',
+ 'Origin': 'https://abema.tv',
+ 'Referer': 'https://abema.tv/',
+ 'Content-Type': 'application/json',
+ })
+
+ AbemaTVBaseIE._USERTOKEN = login_response['token']
+ self._get_media_token(True)
+ auth_cache = {
+ 'device_id': AbemaTVBaseIE._DEVICE_ID,
+ 'usertoken': AbemaTVBaseIE._USERTOKEN,
+ }
+ self.cache.store(self._NETRC_MACHINE, username, auth_cache)
+
def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'):
return self._download_json(
f'https://api.abema.io/{endpoint}', video_id, query=query or {},
@@ -207,7 +240,6 @@ def _extract_breadcrumb_list(self, webpage, video_id):
class AbemaTVIE(AbemaTVBaseIE):
_VALID_URL = r'https?://abema\.tv/(?P<type>now-on-air|video/episode|channels/.+?/slots)/(?P<id>[^?/]+)'
- _NETRC_MACHINE = 'abematv'
_TESTS = [{
'url': 'https://abema.tv/video/episode/194-25_s2_p1',
'info_dict': {
@@ -256,37 +288,6 @@ class AbemaTVIE(AbemaTVBaseIE):
}]
_TIMETABLE = None
- def _perform_login(self, username, password):
- self._get_device_token()
- if self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19') and self._get_media_token():
- self.write_debug('Skipping logging in')
- return
-
- if '@' in username: # don't strictly check if it's email address or not
- ep, method = 'user/email', 'email'
- else:
- ep, method = 'oneTimePassword', 'userId'
-
- login_response = self._download_json(
- f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
- data=json.dumps({
- method: username,
- 'password': password
- }).encode('utf-8'), headers={
- 'Authorization': f'bearer {self._get_device_token()}',
- 'Origin': 'https://abema.tv',
- 'Referer': 'https://abema.tv/',
- 'Content-Type': 'application/json',
- })
-
- AbemaTVBaseIE._USERTOKEN = login_response['token']
- self._get_media_token(True)
- auth_cache = {
- 'device_id': AbemaTVBaseIE._DEVICE_ID,
- 'usertoken': AbemaTVBaseIE._USERTOKEN,
- }
- self.cache.store(self._NETRC_MACHINE, username, auth_cache)
-
def _real_extract(self, url):
# starting download using infojson from this extractor is undefined behavior,
# and never be fixed in the future; you must trigger downloads by directly specifying URL.
| **IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
<!--
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-->
This commit moves the `_NETRC_MACHINE` and `_perform_login` function to AbemaTVBaseIE which allows AbemaTVTitle to authenticate with the server to download premium-exclusive VODs in the whole series (title).
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
</details>
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/8901 | 2024-01-01T06:10:39Z | 2024-01-19T09:50:17Z | 2024-01-19T09:50:17Z | 2024-01-19T09:50:17Z | 1,069 | yt-dlp/yt-dlp | 7,555 |
[requires.io] dependency update on master branch | diff --git a/setup.py b/setup.py
index ad792881e0..4f824ddf95 100644
--- a/setup.py
+++ b/setup.py
@@ -88,7 +88,7 @@
'dev': [
"flake8>=3.5, <3.6",
"Flask>=0.10.1, <0.13",
- "mypy>=0.550,<0.551",
+ "mypy>=0.560,<0.561",
"pytest-cov>=2.2.1, <3",
"pytest-faulthandler>=1.3.0, <2",
"pytest-timeout>=1.0.0, <2",
| https://api.github.com/repos/mitmproxy/mitmproxy/pulls/2682 | 2017-12-15T22:20:15Z | 2017-12-16T07:50:43Z | 2017-12-16T07:50:43Z | 2017-12-16T07:50:46Z | 157 | mitmproxy/mitmproxy | 28,391 | |
New extractor: live.philharmoniedeparis.fr | diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index ab80fd5e0e5..641c45f4345 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -377,6 +377,7 @@
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
+from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
diff --git a/youtube_dl/extractor/philharmoniedeparis.py b/youtube_dl/extractor/philharmoniedeparis.py
new file mode 100644
index 00000000000..7fc2f32ab8c
--- /dev/null
+++ b/youtube_dl/extractor/philharmoniedeparis.py
@@ -0,0 +1,77 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ int_or_none,
+ parse_iso8601,
+ unified_strdate,
+)
+
+class PhilharmonieDeParisIE(InfoExtractor):
+ _VALID_URL = r'http://live\.philharmoniedeparis\.fr/concert/(?P<id>\d+)(?:/|\.html)'
+ _TESTS = [{
+ 'url': 'http://live.philharmoniedeparis.fr/concert/1032066.html',
+ 'info_dict': {
+ 'id': '1032066',
+ 'ext': 'mp4',
+ 'title': "Week-end Bach. Passion selon saint Jean. Akademie für alte Musik Berlin, Rias Kammerchor, René Jacobs",
+ 'upload_date': '20150404',
+ }
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+ fichier_nom = self._html_search_regex(r'\sflashvars\s*:\s*\{\s*fichier_nom\s*:\s*\'(.*?)\'\s*,', webpage, 'fichier_nom')
+
+ playlist = self._download_xml('http://live.philharmoniedeparis.fr' + fichier_nom, video_id)
+
+ concert = playlist.find('.//concert')
+
+ formats = []
+ info_dict = {
+ 'id': video_id,
+ 'title': concert.find('./titre').text,
+ 'formats': formats,
+ }
+
+ if concert.attrib.get('heure'):
+ info_dict['timestamp'] = parse_iso8601(('%s-%s-%s%s') % (
+ concert.attrib['date'][0:4],
+ concert.attrib['date'][4:6],
+ concert.attrib['date'][6:8],
+ concert.attrib['heure']
+ ))
+ else:
+ info_dict['upload_date'] = concert.attrib['date']
+
+ fichiers = concert.find('./fichiers')
+ for fichier in fichiers.findall('./fichier'):
+ # Sometimes <ficher>s have no attributes at all. Skip them.
+ if 'url' not in fichier.attrib:
+ continue
+
+ formats.append({
+ 'format_id': 'lq',
+ 'url': fichiers.attrib['serveurstream'],
+ 'ext': determine_ext(fichier.attrib['url']),
+ 'play_path': fichier.attrib['url'],
+ 'width': int_or_none(concert.attrib['largeur']),
+ 'height': int_or_none(concert.attrib['hauteur']),
+ 'quality': 1,
+ })
+
+ formats.append({
+ 'format_id': 'hq',
+ 'url': fichiers.attrib['serveurstream'],
+ 'ext': determine_ext(fichier.attrib['url_hd']),
+ 'play_path': fichier.attrib['url_hd'],
+ 'width': int_or_none(concert.attrib['largeur_hd']),
+ 'height': int_or_none(concert.attrib['hauteur_hd']),
+ 'quality': 2,
+ })
+
+ return info_dict
| This should resolve #5465.
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/5533 | 2015-04-26T12:24:16Z | 2015-04-26T21:37:09Z | 2015-04-26T21:37:08Z | 2015-04-27T11:25:22Z | 973 | ytdl-org/youtube-dl | 50,023 |
Some spelling corrections | diff --git a/README.md b/README.md
index dd0f87834..dee3dad80 100644
--- a/README.md
+++ b/README.md
@@ -284,13 +284,13 @@ long, literate-programming-style documentation generator.
## Database Drivers
-*Libraties for connecting and operating databases.*
+*Libraries for connecting and operating databases.*
* Relational Databases
* [mysql-python](http://sourceforge.net/projects/mysql-python/) - The MySQL database connector for Python.
* [mysql-connector-python](https://pypi.python.org/pypi/mysql-connector-python) - A pure Python MySQL driver from Oracle (in case you don't want or can't install system MySQL library)
* [oursql](https://pythonhosted.org/oursql/) - A better MySQL connector for Python with support for native prepared statements and BLOBs.
- * [psycopg2](http://initd.org/psycopg/) - The most popular PostgreSQL adapter for the Python.
+ * [psycopg2](http://initd.org/psycopg/) - The most popular PostgreSQL adapter for Python.
* NoSQL Databases
* [cassandra-python-driver](https://github.com/datastax/python-driver) - Python driver for Cassandra by Datastax.
* [pycassa](https://github.com/pycassa/pycassa) - Python Thrift driver for Cassandra.
@@ -340,7 +340,7 @@ long, literate-programming-style documentation generator.
* [Mezzanine](http://mezzanine.jupo.org/) - A powerful, consistent, and flexible content management platform.
* [Wagtail](http://wagtail.io/) - A Django content management system.
-* [django-oscar](http://oscarcommerce.com/) - An open-source ecommerce framework for Django.
+* [django-oscar](http://oscarcommerce.com/) - An open-source e-commerce framework for Django.
* [Quokka CMS](http://quokkaproject.org) - Flexible, extensible, small CMS powered by Flask and MongoDB.
* [Opps CMS](http://oppsproject.org/) - A Django-based CMS for magazines, newspapers websites and portals with high-traffic.
* [Plone](http://plone.org/) - Content Management System built on top of the open source application server Zope and the accompanying Content Management Framework.
| Just some small spelling and grammatical improvements that I found while reading through.
| https://api.github.com/repos/vinta/awesome-python/pulls/130 | 2014-07-10T18:42:48Z | 2014-07-10T18:57:51Z | 2014-07-10T18:57:51Z | 2014-07-10T18:57:51Z | 513 | vinta/awesome-python | 27,242 |
rename bool to str2bool | diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index c8e8d5b334..2f1803c089 100755
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -51,7 +51,7 @@ def init_args():
parser.add_argument("--det_db_box_thresh", type=float, default=0.6)
parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5)
parser.add_argument("--max_batch_size", type=int, default=10)
- parser.add_argument("--use_dilation", type=bool, default=False)
+ parser.add_argument("--use_dilation", type=str2bool, default=False)
parser.add_argument("--det_db_score_mode", type=str, default="fast")
# EAST parmas
parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
@@ -61,7 +61,7 @@ def init_args():
# SAST parmas
parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
- parser.add_argument("--det_sast_polygon", type=bool, default=False)
+ parser.add_argument("--det_sast_polygon", type=str2bool, default=False)
# params for text recognizer
parser.add_argument("--rec_algorithm", type=str, default='CRNN')
@@ -90,7 +90,7 @@ def init_args():
parser.add_argument(
"--e2e_char_dict_path", type=str, default="./ppocr/utils/ic15_dict.txt")
parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
- parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True)
+ parser.add_argument("--e2e_pgnet_polygon", type=str2bool, default=True)
parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
# params for text classifier
@@ -111,7 +111,7 @@ def init_args():
parser.add_argument("--total_process_num", type=int, default=1)
parser.add_argument("--process_id", type=int, default=0)
- parser.add_argument("--benchmark", type=bool, default=False)
+ parser.add_argument("--benchmark", type=str2bool, default=False)
parser.add_argument("--save_log_path", type=str, default="./log_output/")
parser.add_argument("--show_log", type=str2bool, default=True)
| https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/3585 | 2021-08-07T07:48:17Z | 2021-08-09T04:05:08Z | 2021-08-09T04:05:08Z | 2021-11-13T03:30:26Z | 560 | PaddlePaddle/PaddleOCR | 42,453 | |
Update Lexicon requirements to stabilize certbot-dns-ovh behavior | diff --git a/certbot-ci/certbot_integration_tests/utils/misc.py b/certbot-ci/certbot_integration_tests/utils/misc.py
index 8260ccf5e58..dfeda4cda5b 100644
--- a/certbot-ci/certbot_integration_tests/utils/misc.py
+++ b/certbot-ci/certbot_integration_tests/utils/misc.py
@@ -52,10 +52,10 @@ def _suppress_x509_verification_warnings() -> None:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
# Handle old versions of request with vendorized urllib3
- # pylint: disable=no-member
- from requests.packages.urllib3.exceptions import InsecureRequestWarning
- requests.packages.urllib3.disable_warnings( # type: ignore[attr-defined]
- InsecureRequestWarning)
+ # pylint: disable=no-member,line-too-long
+ from requests.packages.urllib3.exceptions import InsecureRequestWarning # type: ignore[import-untyped]
+ requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # type: ignore[attr-defined]
+ # pylint: enable=no-member,line-too-long
def check_until_timeout(url: str, attempts: int = 30) -> None:
diff --git a/certbot-ci/setup.py b/certbot-ci/setup.py
index 2cb8308c115..81feeb7cfef 100644
--- a/certbot-ci/setup.py
+++ b/certbot-ci/setup.py
@@ -21,7 +21,7 @@
'pytz>=2019.3',
'requests',
'setuptools',
- 'types-python-dateutil'
+ 'types-python-dateutil',
]
setup(
diff --git a/certbot-dns-ovh/setup.py b/certbot-dns-ovh/setup.py
index e4c615f3f0e..e3fe41d8287 100644
--- a/certbot-dns-ovh/setup.py
+++ b/certbot-dns-ovh/setup.py
@@ -7,7 +7,7 @@
version = '2.8.0.dev0'
install_requires = [
- 'dns-lexicon>=3.14.1',
+ 'dns-lexicon>=3.15.1',
'setuptools>=41.6.0',
]
diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md
index 92ceba3fb3b..900948fc635 100644
--- a/certbot/CHANGELOG.md
+++ b/certbot/CHANGELOG.md
@@ -14,6 +14,7 @@ Certbot adheres to [Semantic Versioning](https://semver.org/).
### Fixed
+* `certbot-dns-ovh` plugin now requires `lexicon>=3.15.1` to ensure a consistent behavior with OVH APIs.
* Fixed a bug where argument sources weren't correctly detected in abbreviated
arguments, short arguments, and some other circumstances
diff --git a/tools/oldest_constraints.txt b/tools/oldest_constraints.txt
index d667873d48c..2f09901a159 100644
--- a/tools/oldest_constraints.txt
+++ b/tools/oldest_constraints.txt
@@ -20,7 +20,7 @@ cython==0.29.36 ; python_version >= "3.8" and python_version < "3.9"
dill==0.3.7 ; python_version >= "3.8" and python_version < "3.9"
distlib==0.3.7 ; python_version >= "3.8" and python_version < "3.9"
distro==1.0.1 ; python_version >= "3.8" and python_version < "3.9"
-dns-lexicon==3.14.1 ; python_version >= "3.8" and python_version < "3.9"
+dns-lexicon==3.15.1 ; python_version >= "3.8" and python_version < "3.9"
dnspython==1.15.0 ; python_version >= "3.8" and python_version < "3.9"
exceptiongroup==1.1.3 ; python_version >= "3.8" and python_version < "3.9"
execnet==2.0.2 ; python_version >= "3.8" and python_version < "3.9"
@@ -41,7 +41,7 @@ josepy==1.13.0 ; python_version >= "3.8" and python_version < "3.9"
logger==1.4 ; python_version >= "3.8" and python_version < "3.9"
mccabe==0.7.0 ; python_version >= "3.8" and python_version < "3.9"
mypy-extensions==1.0.0 ; python_version >= "3.8" and python_version < "3.9"
-mypy==1.5.1 ; python_version >= "3.8" and python_version < "3.9"
+mypy==1.6.0 ; python_version >= "3.8" and python_version < "3.9"
ndg-httpsclient==0.3.2 ; python_version >= "3.8" and python_version < "3.9"
oauth2client==4.1.3 ; python_version >= "3.8" and python_version < "3.9"
packaging==23.2 ; python_version >= "3.8" and python_version < "3.9"
@@ -57,6 +57,7 @@ pyasn1==0.4.8 ; python_version >= "3.8" and python_version < "3.9"
pycparser==2.14 ; python_version >= "3.8" and python_version < "3.9"
pylint==3.0.1 ; python_version >= "3.8" and python_version < "3.9"
pyopenssl==17.5.0 ; python_version >= "3.8" and python_version < "3.9"
+pyotp==2.9.0 ; python_version >= "3.8" and python_version < "3.9"
pyparsing==2.2.1 ; python_version >= "3.8" and python_version < "3.9"
pyrfc3339==1.0 ; python_version >= "3.8" and python_version < "3.9"
pytest-cov==4.1.0 ; python_version >= "3.8" and python_version < "3.9"
@@ -75,7 +76,7 @@ s3transfer==0.3.7 ; python_version >= "3.8" and python_version < "3.9"
setuptools==41.6.0 ; python_version >= "3.8" and python_version < "3.9"
six==1.11.0 ; python_version >= "3.8" and python_version < "3.9"
soupsieve==2.5 ; python_version >= "3.8" and python_version < "3.9"
-tldextract==3.6.0 ; python_version >= "3.8" and python_version < "3.9"
+tldextract==5.0.0 ; python_version >= "3.8" and python_version < "3.9"
tomli==2.0.1 ; python_version >= "3.8" and python_version < "3.9"
tomlkit==0.12.1 ; python_version >= "3.8" and python_version < "3.9"
tox==1.9.2 ; python_version >= "3.8" and python_version < "3.9"
diff --git a/tools/pinning/oldest/pyproject.toml b/tools/pinning/oldest/pyproject.toml
index 302014ae595..1a4d7d71f38 100644
--- a/tools/pinning/oldest/pyproject.toml
+++ b/tools/pinning/oldest/pyproject.toml
@@ -54,7 +54,7 @@ cloudflare = "1.5.1"
configobj = "5.0.6"
cryptography = "3.2.1"
distro = "1.0.1"
-dns-lexicon = "3.14.1"
+dns-lexicon = "3.15.1"
dnspython = "1.15.0"
funcsigs = "0.4"
google-api-python-client = "1.6.5"
diff --git a/tools/requirements.txt b/tools/requirements.txt
index af906a4ba21..1ed0ca0ee78 100644
--- a/tools/requirements.txt
+++ b/tools/requirements.txt
@@ -17,8 +17,8 @@ babel==2.13.0 ; python_version >= "3.8" and python_version < "4.0"
backcall==0.2.0 ; python_version >= "3.8" and python_version < "4.0"
bcrypt==4.0.1 ; python_version >= "3.8" and python_version < "4.0"
beautifulsoup4==4.12.2 ; python_version >= "3.8" and python_version < "4.0"
-boto3==1.28.62 ; python_version >= "3.8" and python_version < "4.0"
-botocore==1.31.62 ; python_version >= "3.8" and python_version < "4.0"
+boto3==1.28.63 ; python_version >= "3.8" and python_version < "4.0"
+botocore==1.31.63 ; python_version >= "3.8" and python_version < "4.0"
cachecontrol==0.12.14 ; python_version >= "3.8" and python_version < "4.0"
cachetools==5.3.1 ; python_version >= "3.8" and python_version < "4.0"
cachy==0.3.0 ; python_version >= "3.8" and python_version < "4.0"
@@ -39,7 +39,7 @@ deprecated==1.2.14 ; python_version >= "3.8" and python_version < "4.0"
dill==0.3.7 ; python_version >= "3.8" and python_version < "4.0"
distlib==0.3.7 ; python_version >= "3.8" and python_version < "4.0"
distro==1.8.0 ; python_version >= "3.8" and python_version < "4.0"
-dns-lexicon==3.15.0 ; python_version >= "3.8" and python_version < "4.0"
+dns-lexicon==3.15.1 ; python_version >= "3.8" and python_version < "4.0"
dnspython==2.4.2 ; python_version >= "3.8" and python_version < "4.0"
docutils==0.18.1 ; python_version >= "3.8" and python_version < "4.0"
dulwich==0.20.50 ; python_version >= "3.8" and python_version < "4.0"
@@ -49,10 +49,10 @@ executing==2.0.0 ; python_version >= "3.8" and python_version < "4.0"
fabric==3.2.2 ; python_version >= "3.8" and python_version < "4.0"
filelock==3.12.4 ; python_version >= "3.8" and python_version < "4.0"
google-api-core==2.12.0 ; python_version >= "3.8" and python_version < "4.0"
-google-api-python-client==2.102.0 ; python_version >= "3.8" and python_version < "4.0"
+google-api-python-client==2.103.0 ; python_version >= "3.8" and python_version < "4.0"
google-auth-httplib2==0.1.1 ; python_version >= "3.8" and python_version < "4.0"
google-auth==2.23.3 ; python_version >= "3.8" and python_version < "4.0"
-googleapis-common-protos==1.60.0 ; python_version >= "3.8" and python_version < "4.0"
+googleapis-common-protos==1.61.0 ; python_version >= "3.8" and python_version < "4.0"
html5lib==1.1 ; python_version >= "3.8" and python_version < "4.0"
httplib2==0.22.0 ; python_version >= "3.8" and python_version < "4.0"
idna==3.4 ; python_version >= "3.8" and python_version < "4.0"
@@ -87,7 +87,7 @@ more-itertools==10.1.0 ; python_version >= "3.8" and python_version < "4.0"
msgpack==1.0.7 ; python_version >= "3.8" and python_version < "4.0"
msrest==0.7.1 ; python_version >= "3.8" and python_version < "4.0"
mypy-extensions==1.0.0 ; python_version >= "3.8" and python_version < "4.0"
-mypy==1.5.1 ; python_version >= "3.8" and python_version < "4.0"
+mypy==1.6.0 ; python_version >= "3.8" and python_version < "4.0"
nh3==0.2.14 ; python_version >= "3.8" and python_version < "4.0"
oauthlib==3.2.2 ; python_version >= "3.8" and python_version < "4.0"
packaging==23.2 ; python_version >= "3.8" and python_version < "4.0"
@@ -141,7 +141,7 @@ requests-toolbelt==0.9.1 ; python_version >= "3.8" and python_version < "4.0"
requests==2.31.0 ; python_version >= "3.8" and python_version < "4.0"
rfc3986==2.0.0 ; python_version >= "3.8" and python_version < "4.0"
rich==13.6.0 ; python_version >= "3.8" and python_version < "4.0"
-rpds-py==0.10.4 ; python_version >= "3.8" and python_version < "4.0"
+rpds-py==0.10.6 ; python_version >= "3.8" and python_version < "4.0"
rsa==4.9 ; python_version >= "3.8" and python_version < "4"
s3transfer==0.7.0 ; python_version >= "3.8" and python_version < "4.0"
secretstorage==3.3.3 ; python_version >= "3.8" and python_version < "4.0" and sys_platform == "linux"
@@ -162,7 +162,7 @@ sphinxcontrib-jsmath==1.0.1 ; python_version >= "3.8" and python_version < "4.0"
sphinxcontrib-qthelp==1.0.3 ; python_version >= "3.8" and python_version < "4.0"
sphinxcontrib-serializinghtml==1.1.5 ; python_version >= "3.8" and python_version < "4.0"
stack-data==0.6.3 ; python_version >= "3.8" and python_version < "4.0"
-tldextract==3.6.0 ; python_version >= "3.8" and python_version < "4.0"
+tldextract==5.0.0 ; python_version >= "3.8" and python_version < "4.0"
tomli==2.0.1 ; python_version >= "3.8" and python_full_version <= "3.11.0a6"
tomlkit==0.12.1 ; python_version >= "3.8" and python_version < "4.0"
tox==3.28.0 ; python_version >= "3.8" and python_version < "4.0"
| Fixes #9799
You can see in the issue the reasoning to raise the minimum version of Lexicon to 3.15.1 for certbot-dns-ovh.
| https://api.github.com/repos/certbot/certbot/pulls/9802 | 2023-10-13T16:06:06Z | 2023-10-18T20:19:26Z | 2023-10-18T20:19:26Z | 2023-10-19T19:00:06Z | 3,583 | certbot/certbot | 3,584 |
add heap_sort.py | diff --git a/heap_sort.py b/heap_sort.py
new file mode 100644
index 0000000000..e92e5aafaa
--- /dev/null
+++ b/heap_sort.py
@@ -0,0 +1,47 @@
+# This program is a comparison based sorting technique.
+# It is similar to selection sort in the sense that it first identifies the maximum element,
+# and places it at the end. We repeat the process until the list is sorted.
+# The sort algorithm has a time complexity of O(nlogn)
+
+def refineHeap(arr, n, i):
+ # Initialize the largest entry as the root of the heap
+ largest = i
+ left = 2 * i + 1
+ right = 2 * i + 2
+
+ # If the left child exists and it is larger than largest, replace it
+ if left < n and arr[largest] < arr[left]:
+ largest = left
+
+ # Perform the same operation for the right hand side of the heap
+ if right < n and arr[largest] < arr[right]:
+ largest = right
+
+ # Change root if the largest value changed
+ if largest != i:
+ arr[i], arr[largest] = arr[largest], arr[i]
+
+ # Repeat the process until the heap is fully defined
+ refineHeap(arr, n, largest)
+
+
+# Main function
+def heapSort(arr):
+ n = len(arr)
+
+ # Make a heap
+ for i in range(n//2 - 1, -1, -1):
+ refineHeap(arr, n, i)
+
+ # Extract elements individually
+ for i in range(n - 1, 0, -1):
+ # Fancy notation for swapping two values in an array
+ arr[i], arr[0] = arr[0], arr[i]
+ refineHeap(arr, i, 0)
+
+# Code that will run on start
+arr = [15, 29, 9, 3, 16, 7, 66, 4]
+print("Unsorted Array: ", arr)
+heapSort(arr)
+n = len(arr)
+print("Sorted array: ", arr)
\ No newline at end of file
| https://api.github.com/repos/geekcomputers/Python/pulls/1306 | 2021-03-29T19:18:15Z | 2021-04-02T13:09:05Z | 2021-04-02T13:09:05Z | 2021-04-02T13:09:05Z | 507 | geekcomputers/Python | 31,372 | |
fix typo | diff --git a/README.md b/README.md
index e69822eb3..52363d79f 100644
--- a/README.md
+++ b/README.md
@@ -154,7 +154,7 @@ Part-of-Speech tagging).
These models are all released under the same license as the source code (Apache
2.0).
-The links to the models are here (right-cick, 'Save link as...' on the name):
+The links to the models are here (right-click, 'Save link as...' on the name):
* **[`BERT-Base, Uncased`](https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip)**:
12-layer, 768-hidden, 12-heads, 110M parameters
| https://api.github.com/repos/google-research/bert/pulls/31 | 2018-11-02T04:23:09Z | 2018-11-02T04:27:00Z | 2018-11-02T04:27:00Z | 2018-11-02T04:27:00Z | 182 | google-research/bert | 38,439 | |
rm dyg shape for trt | diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index 55ec0a238a..2a932df7c9 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -229,89 +229,10 @@ def create_predictor(args, mode, logger):
)
config.enable_tuned_tensorrt_dynamic_shape(
args.shape_info_filename, True)
-
- use_dynamic_shape = True
- if mode == "det":
- min_input_shape = {
- "x": [1, 3, 50, 50],
- "conv2d_92.tmp_0": [1, 120, 20, 20],
- "conv2d_91.tmp_0": [1, 24, 10, 10],
- "conv2d_59.tmp_0": [1, 96, 20, 20],
- "nearest_interp_v2_1.tmp_0": [1, 256, 10, 10],
- "nearest_interp_v2_2.tmp_0": [1, 256, 20, 20],
- "conv2d_124.tmp_0": [1, 256, 20, 20],
- "nearest_interp_v2_3.tmp_0": [1, 64, 20, 20],
- "nearest_interp_v2_4.tmp_0": [1, 64, 20, 20],
- "nearest_interp_v2_5.tmp_0": [1, 64, 20, 20],
- "elementwise_add_7": [1, 56, 2, 2],
- "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2]
- }
- max_input_shape = {
- "x": [1, 3, 1536, 1536],
- "conv2d_92.tmp_0": [1, 120, 400, 400],
- "conv2d_91.tmp_0": [1, 24, 200, 200],
- "conv2d_59.tmp_0": [1, 96, 400, 400],
- "nearest_interp_v2_1.tmp_0": [1, 256, 200, 200],
- "conv2d_124.tmp_0": [1, 256, 400, 400],
- "nearest_interp_v2_2.tmp_0": [1, 256, 400, 400],
- "nearest_interp_v2_3.tmp_0": [1, 64, 400, 400],
- "nearest_interp_v2_4.tmp_0": [1, 64, 400, 400],
- "nearest_interp_v2_5.tmp_0": [1, 64, 400, 400],
- "elementwise_add_7": [1, 56, 400, 400],
- "nearest_interp_v2_0.tmp_0": [1, 256, 400, 400]
- }
- opt_input_shape = {
- "x": [1, 3, 640, 640],
- "conv2d_92.tmp_0": [1, 120, 160, 160],
- "conv2d_91.tmp_0": [1, 24, 80, 80],
- "conv2d_59.tmp_0": [1, 96, 160, 160],
- "nearest_interp_v2_1.tmp_0": [1, 256, 80, 80],
- "nearest_interp_v2_2.tmp_0": [1, 256, 160, 160],
- "conv2d_124.tmp_0": [1, 256, 160, 160],
- "nearest_interp_v2_3.tmp_0": [1, 64, 160, 160],
- "nearest_interp_v2_4.tmp_0": [1, 64, 160, 160],
- "nearest_interp_v2_5.tmp_0": [1, 64, 160, 160],
- "elementwise_add_7": [1, 56, 40, 40],
- "nearest_interp_v2_0.tmp_0": [1, 256, 40, 40]
- }
- min_pact_shape = {
- "nearest_interp_v2_26.tmp_0": [1, 256, 20, 20],
- "nearest_interp_v2_27.tmp_0": [1, 64, 20, 20],
- "nearest_interp_v2_28.tmp_0": [1, 64, 20, 20],
- "nearest_interp_v2_29.tmp_0": [1, 64, 20, 20]
- }
- max_pact_shape = {
- "nearest_interp_v2_26.tmp_0": [1, 256, 400, 400],
- "nearest_interp_v2_27.tmp_0": [1, 64, 400, 400],
- "nearest_interp_v2_28.tmp_0": [1, 64, 400, 400],
- "nearest_interp_v2_29.tmp_0": [1, 64, 400, 400]
- }
- opt_pact_shape = {
- "nearest_interp_v2_26.tmp_0": [1, 256, 160, 160],
- "nearest_interp_v2_27.tmp_0": [1, 64, 160, 160],
- "nearest_interp_v2_28.tmp_0": [1, 64, 160, 160],
- "nearest_interp_v2_29.tmp_0": [1, 64, 160, 160]
- }
- min_input_shape.update(min_pact_shape)
- max_input_shape.update(max_pact_shape)
- opt_input_shape.update(opt_pact_shape)
- elif mode == "rec":
- if args.rec_algorithm not in ["CRNN", "SVTR_LCNet"]:
- use_dynamic_shape = False
- imgH = int(args.rec_image_shape.split(',')[-2])
- min_input_shape = {"x": [1, 3, imgH, 10]}
- max_input_shape = {"x": [args.rec_batch_num, 3, imgH, 2304]}
- opt_input_shape = {"x": [args.rec_batch_num, 3, imgH, 320]}
- config.exp_disable_tensorrt_ops(["transpose2"])
- elif mode == "cls":
- min_input_shape = {"x": [1, 3, 48, 10]}
- max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]}
- opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]}
else:
- use_dynamic_shape = False
- if use_dynamic_shape:
- config.set_trt_dynamic_shape_info(
- min_input_shape, max_input_shape, opt_input_shape)
+ logger.info(
+ f"when using tensorrt, dynamic shape is a suggested option, you can use '--shape_info_filename=shape.txt' for offline dygnamic shape tuning"
+ )
elif args.use_xpu:
config.enable_xpu(10 * 1024 * 1024)
| att
Almost no diff using collecting shape
* GPU
* [2022/08/17 15:19:19] ppocr INFO: Predicts of ./train_data/test_eval/200_crop/310_crop_160_16.jpg:('出发地陶克苏', 0.798431932926178)
* GPU+trt + delete pass
* [2022/08/17 15:20:21] ppocr INFO: Predicts of ./train_data/test_eval/200_crop/310_crop_160_16.jpg:('出发地陶克苏', 0.7984303832054138)
* GPU + trt + collect shape
* [2022/08/17 15:21:48] ppocr INFO: Predicts of ./train_data/test_eval/200_crop/310_crop_160_16.jpg:('出发地陶克苏', 0.7984321713447571) | https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/7221 | 2022-08-17T03:06:25Z | 2022-08-17T15:24:47Z | 2022-08-17T15:24:47Z | 2022-08-17T15:24:47Z | 1,679 | PaddlePaddle/PaddleOCR | 42,759 |
Grids saving to subdirectory when setting unchecked | diff --git a/modules/processing.py b/modules/processing.py
index 0246e094814..3abf31813fa 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -406,7 +406,7 @@ def infotext(iteration=0, position_in_batch=0):
index_of_first_image = 1
if opts.grid_save:
- images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p)
+ images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
devices.torch_gc()
return Processed(p, output_images, all_seeds[0], infotext(), subseed=all_subseeds[0], all_prompts=all_prompts, all_seeds=all_seeds, all_subseeds=all_subseeds, index_of_first_image=index_of_first_image)
| #1086
Grids currently save to a subdirectory when the "Save images to subdirectory" setting is checked but the "Save grids to subdirectory" setting isn't. This change passes along the existing grid flag to resolve the issue. | https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/1090 | 2022-09-26T09:40:17Z | 2022-09-27T05:58:47Z | 2022-09-27T05:58:47Z | 2022-09-27T05:58:47Z | 258 | AUTOMATIC1111/stable-diffusion-webui | 40,142 |
[stable-2.13] ansible-test - Specify config path in plugin error | diff --git a/changelogs/fragments/ansible-test-test-plugin-error-message.yml b/changelogs/fragments/ansible-test-test-plugin-error-message.yml
new file mode 100644
index 00000000000000..f510425a13d89e
--- /dev/null
+++ b/changelogs/fragments/ansible-test-test-plugin-error-message.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - "ansible-test - Specify the configuration file location required by test plugins when the config file is not found.
+ This resolves issue: https://github.com/ansible/ansible/issues/79411"
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
index 5afde048b31ebd..3ca81719475664 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
@@ -288,14 +288,14 @@ def filter(self, targets, exclude): # type: (t.Tuple[IntegrationTarget, ...], t
exclude.append(skip)
if not self.uses_docker and self.uses_config:
- display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
- % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ display.warning('Excluding tests marked "%s" which require a "%s" config file (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
elif self.uses_docker and not self.uses_config:
display.warning('Excluding tests marked "%s" which requires container support: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
elif self.uses_docker and self.uses_config:
- display.warning('Excluding tests marked "%s" which requires container support or config (see "%s"): %s'
- % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ display.warning('Excluding tests marked "%s" which requires container support or a "%s" config file (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
def setup(self): # type: () -> None
"""Setup the cloud resource before delegation and register a cleanup callback."""
| ##### SUMMARY
Backport of https://github.com/ansible/ansible/pull/79881
(cherry picked from commit d48d1c23df171074e799717e824a8c5ace470643)
##### ISSUE TYPE
Feature Pull Request
##### COMPONENT NAME
ansible-test
| https://api.github.com/repos/ansible/ansible/pulls/79978 | 2023-02-10T21:26:17Z | 2023-02-10T22:56:54Z | 2023-02-10T22:56:54Z | 2023-02-17T14:00:12Z | 570 | ansible/ansible | 49,455 |
ANSWERED Network Related | diff --git a/README.md b/README.md
index 30dd8ba08..c74f575c6 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@
<tr>
<td align="center"><a href="#coding"><img src="images/coding.png" width="75px;" height="75px;" alt="coding"/><br /><b>Coding</b></a><br /><sub><a href="#coding-beginner">Beginner :baby:</a></sub><br><sub><a href="#coding-advanced">Advanced :star:</a></sub></td>
<td align="center"><a href="#python"><img src="images/python.png" width="80px;" height="75px;" alt="Python"/><br /><b>Python</b></a><br /><sub><a href="#python-beginner">Beginner :baby:</a></sub><br><sub><a href="#python-advanced">Advanced :star:</a></sub></td>
- <td align="center"><a href="#go"><b>Go</b></a><br /><sub><a href="#go-beginner">Beginner :baby:</a></sub><br><sub></td>
+ <td align="center"><a href="#go"><img src="images/Go.png" width="75px;" height="75px;" alt="go"/><br /><b>Go</b></a><br /><sub><a href="#go-beginner">Beginner :baby:</a></sub><br><sub></td>
<td align="center"><a href="#shell-scripting"><img src="images/bash.png" width="70px;" height="75px;" alt="Bash"/><br /><b>Shell Scripting</b></a><br /><sub><a href="#shell-scripting-beginner">Beginner :baby:</a></sub><br><sub><a href="#shell-scripting-advanced">Advanced :star:</a></sub></td>
<td align="center"><a href="#kubernetes"><img src="images/kubernetes.png" width="75px;" height="75px;" alt="kubernetes"/><br /><b>Kubernetes</b></a><br /><sub><a href="#kubernetes-beginner">Beginner :baby:</a></sub><br><sub></td>
<td align="center"><a href="#prometheus"><img src="images/prometheus.png" width="75px;" height="75px;" alt="Prometheus"/><br /><b>Prometheus</b></a><br /><sub><a href="#prometheus-beginner">Beginner :baby:</a></sub><br><sub><a href="#prometheus-advanced">Advanced :star:</a></sub></td>
@@ -39,8 +39,8 @@
</tr>
<tr>
<td align="center"><a href="#cloud"><img src="images/cloud.png" width="110px;" height="75px;" alt="Cloud"/><br /><b>Cloud</b></a><br /><sub><a href="#cloud-beginner">Beginner :baby:</a></sub><br><sub></td>
- <td align="center"><a href="#aws"><b>AWS</b></a><br /><sub><a href="#aws-beginner">Beginner :baby:</a></sub><br><sub></td>
- <td align="center"><a href="#azure"><b>Azure</b></a><br /><sub><a href="#azure-beginner">Beginner :baby:</a></sub><br><sub></td>
+ <td align="center"><a href="#aws"><img src="images/aws.jpg" width="75px;" height="75px;" alt="aws"/><br /><b>AWS</b></a><br /><sub><a href="#aws-beginner">Beginner :baby:</a></sub><br><sub></td>
+ <td align="center"><a href="#azure"><img src="images/azure.png" width="75px;" height="75px;" alt="azure"/><br /><b>Azure</b></a><br /><sub><a href="#azure-beginner">Beginner :baby:</a></sub><br><sub></td>
<td align="center"><a href="#gcp"><b>Google Cloud Platform</b></a><br /><sub><a href="#gcp-beginner">Beginner :baby:</a></sub><br><sub></td>
<td align="center"><a href="#openstack"><img src="images/openstack.png" width="75px;" height="75px;" alt="openstack"/><br /><b>OpenStack</b></a><br /><sub><a href="#openstack-beginner">Beginner :baby:</a></sub><br><sub><a href="#openstack-advanced">Advanced :star:</a></sub></td>
<td align="center"><a href="#security"><img src="images/security.png" width="75px;" height="75px;" alt="security"/><br /><b>Security</b></a><br /><sub><a href="#security-beginner">Beginner :baby:</a></sub><br><sub><a href="#security-advanced">Advanced :star:</a></sub></td>
@@ -818,10 +818,19 @@ AWS CloudTrail
<details>
<summary>What is Ethernet?</summary><br><b>
+
+Ethernet simply refers to the most common type of Local Area Network (LAN) used today. A LAN—in contrast to a WAN (Wide Area Network), which spans a larger geographical area—is a connected network of computers in a small area, like your office, college campus, or even home.
</b></details>
<details>
<summary>What is a MAC address? What is it used for?</summary><br><b>
+
+A MAC address is a unique identification number or code used to identify individual devices on the network.
+
+Packets that are sent on the ethernet are always coming from a MAC address and sent to a MAC address. If a network adapter is receiving a packet, it is comparing the packet’s destination MAC address to the adapter’s own MAC address.
+
+
+
</b></details>
<details>
@@ -830,10 +839,41 @@ AWS CloudTrail
<details>
<summary>What is an IP address?</summary><br><b>
+
+An Internet Protocol address (IP address) is a numerical label assigned to each device connected to a computer network that uses the Internet Protocol for communication.An IP address serves two main functions: host or network interface identification and location addressing.
+
+
</b></details>
<details>
<summary>Explain subnet mask and given an example</summary><br><b>
+
+A Subnet mask is a 32-bit number that masks an IP address, and divides the IP address into network address and host address. Subnet Mask is made by setting network bits to all "1"s and setting host bits to all "0"s. Within a given network, two host addresses are reserved for special purpose, and cannot be assigned to hosts. The "0" address is assigned a network address and "255" is assigned to a broadcast address, and they cannot be assigned to hosts.
+
+**For Example**
+
+```
+| Address Class | No of Network Bits | No of Host Bits | Subnet mask | CIDR notation |
+| ------------- | ------------------ | --------------- | --------------- | ------------- |
+| A | 8 | 24 | 255.0.0.0 | /8 |
+| A | 9 | 23 | 255.128.0.0 | /9 |
+| A | 12 | 20 | 255.240.0.0 | /12 |
+| A | 14 | 18 | 255.252.0.0 | /14 |
+| B | 16 | 16 | 255.255.0.0 | /16 |
+| B | 17 | 15 | 255.255.128.0 | /17 |
+| B | 20 | 12 | 255.255.240.0 | /20 |
+| B | 22 | 10 | 255.255.252.0 | /22 |
+| C | 24 | 8 | 255.255.255.0 | /24 |
+| C | 25 | 7 | 255.255.255.128 | /25 |
+| C | 28 | 4 | 255.255.255.240 | /28 |
+| C | 30 | 2 | 255.255.255.252 | /30 |
+
+```
+
+###
+
+
+
</b></details>
<details>
@@ -843,13 +883,14 @@ AWS CloudTrail
<details>
<summary>Explain the OSI model. What layers there are? What each layer is responsible for?</summary><br><b>
-Application: user end (HTTP is here)
-Presentation: establishes context between application-layer entities (Encryption is here)
-Session: establishes, manages and terminates the connections
-Transport: transfers variable-length data sequences from a source to a destination host (TCP & UDP are here)
-Network: transfers datagrams from one network to another (IP is here)
-Data link: provides a link between two directly connected nodes (MAC is here)
-Physical: the electrical and physical spec the data connection (Bits are here)
+- Application: user end (HTTP is here)
+- Presentation: establishes context between application-layer entities (Encryption is here)
+- Session: establishes, manages and terminates the connections
+- Transport: transfers variable-length data sequences from a source to a destination host (TCP & UDP are here)
+- Network: transfers datagrams from one network to another (IP is here)
+- Data link: provides a link between two directly connected nodes (MAC is here)
+- Physical: the electrical and physical spec the data connection (Bits are here)
+
</b></details>
<details>
@@ -901,18 +942,44 @@ CSMA/CD algorithm:
<details>
<summary>How does a router works?</summary><br><b>
+
+A router is a physical or virtual appliance that passes information between two or more packet-switched computer networks. A router inspects a given data packet's destination Internet Protocol address (IP address), calculates the best way for it to reach its destination and then forwards it accordingly.
+
+
</b></details>
<details>
<summary>What is NAT?</summary><br><b>
+
+ Network Address Translation (NAT) is a process in which one or more local IP address is translated into one or more Global IP address and vice versa in order to provide Internet access to the local hosts.
+
+
</b></details>
<details>
<summary>What is a proxy? How it works? What do we need it for?</summary><br><b>
+
+A proxy server acts as a gateway between you and the internet. It’s an intermediary server separating end users from the websites they browse.
+
+If you’re using a proxy server, internet traffic flows through the proxy server on its way to the address you requested. The request then comes back through that same proxy server (there are exceptions to this rule), and then the proxy server forwards the data received from the website to you.
+
+roxy servers provide varying levels of functionality, security, and privacy depending on your use case, needs, or company policy.
+
+
</b></details>
<details>
<summary>What is TCP? How it works? What is the 3 way handshake?</summary><br><b>
+
+TCP 3-way handshake or three-way handshake is a process which is used in a TCP/IP network to make a connection between server and client.
+
+A three-way handshake is primarily used to create a TCP socket connection. It works when:
+
+- A client node sends a SYN data packet over an IP network to a server on the same or an external network. The objective of this packet is to ask/infer if the server is open for new connections.
+- The target server must have open ports that can accept and initiate new connections. When the server receives the SYN packet from the client node, it responds and returns a confirmation receipt – the ACK packet or SYN/ACK packet.
+- The client node receives the SYN/ACK from the server and responds with an ACK packet.
+
+
</b></details>
<details>
@@ -927,6 +994,10 @@ TCP establishes a connection between the client and the server to guarantee the
<details>
<summary>True or False? TCP is better than UDP</summary><br><b>
+
+False.
+UDP is faster, simpler and more efficient than TCP
+
</b></details>
<details>
@@ -935,10 +1006,20 @@ TCP establishes a connection between the client and the server to guarantee the
<details>
<summary>Explain "default gateway"</summary><br><b>
+
+A default gateway serves as an access point or IP router that a networked computer uses to send information to a computer in another network or the internet.
+
+
</b></details>
<details>
<summary>What is ARP? How it works?</summary><br><b>
+
+ARP stands for Address Resolution Protocol. When you try to ping an IP address on your local network, say 192.168.1.1, your system has to turn the IP address 192.168.1.1 into a MAC address. This involves using ARP to resolve the address, hence its name.
+
+Systems keep an ARP look-up table where they store information about what IP addresses are associated with what MAC addresses. When trying to send a packet to an IP address, the system will first consult this table to see if it already knows the MAC address. If there is a value cached, ARP is not used.
+
+
</b></details>
<details>
diff --git a/images/Go.png b/images/Go.png
new file mode 100644
index 000000000..7af77468c
Binary files /dev/null and b/images/Go.png differ
diff --git a/images/aws.jpg b/images/aws.jpg
new file mode 100644
index 000000000..aab2baa9a
Binary files /dev/null and b/images/aws.jpg differ
diff --git a/images/azure.png b/images/azure.png
new file mode 100644
index 000000000..ae2dc54e2
Binary files /dev/null and b/images/azure.png differ
| https://api.github.com/repos/bregman-arie/devops-exercises/pulls/72 | 2020-01-26T01:01:47Z | 2020-01-31T19:18:41Z | 2020-01-31T19:18:41Z | 2020-01-31T19:18:41Z | 3,181 | bregman-arie/devops-exercises | 17,662 | |
Put table back after we are done with it | diff --git a/blns.json b/blns.json
index a2ac16b..af5c192 100644
--- a/blns.json
+++ b/blns.json
@@ -11,6 +11,8 @@
"false",
"True",
"False",
+ "TRUE",
+ "FALSE",
"None",
"hasOwnProperty",
"\\",
@@ -99,6 +101,7 @@
"⁰⁴⁵",
"₀₁₂",
"⁰⁴⁵₀₁₂",
+ "ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็",
"'",
"\"",
"''",
@@ -128,6 +131,7 @@
",。・:*:・゜’( ☻ ω ☻ )。・:*:・゜’",
"(╯°□°)╯︵ ┻━┻) ",
"(ノಥ益ಥ)ノ ┻━┻",
+ "┬─┬ノ( º _ ºノ)",
"( ͡° ͜ʖ ͡°)",
"😍",
"👩🏽",
@@ -147,7 +151,7 @@
"הָיְתָהtestالصفحات التّحول",
"﷽",
"ﷺ",
- "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ ",
+ "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ، ",
"",
" ",
"",
@@ -407,8 +411,8 @@
"<IMG SRC=\"javascript:alert('XSS')\"",
"<iframe src=http://ha.ckers.org/scriptlet.html <",
"\\\";alert('XSS');//",
- "<u oncopy=alert()> Copy me</u>",
- "<i onwheel=alert(1)> Scroll over me </i>",
+ "<u oncopy=alert()> Copy me</u>",
+ "<i onwheel=alert(1)> Scroll over me </i>",
"<plaintext>",
"http://a/%%30%30",
"1;DROP TABLE users",
@@ -444,6 +448,7 @@
"../../../../../../../../../../../etc/hosts",
"() { 0; }; touch /tmp/blns.shellshock1.fail;",
"() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }",
+ "<<< %s(un='%s') = %u",
"CON",
"PRN",
"AUX",
@@ -458,6 +463,7 @@
"COM2",
"COM3",
"COM4",
+ "DCC SEND STARTKEYLOGGER 0 0 0",
"Scunthorpe General Hospital",
"Penistone Community Church",
"Lightwater Country Park",
@@ -484,4 +490,4 @@
"But now...\u001b[20Cfor my greatest trick...\u001b[8m",
"The quic\b\b\b\b\b\bk brown fo\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007x... [Beeeep]",
"Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗"
-]
+]
\ No newline at end of file
diff --git a/blns.txt b/blns.txt
index 29a53f6..43d6768 100644
--- a/blns.txt
+++ b/blns.txt
@@ -168,6 +168,7 @@ __ロ(,_,*)
,。・:*:・゜’( ☻ ω ☻ )。・:*:・゜’
(╯°□°)╯︵ ┻━┻)
(ノಥ益ಥ)ノ ┻━┻
+┬─┬ノ( º _ ºノ)
( ͡° ͜ʖ ͡°)
# Emoji
| Apparently also update blns.json | https://api.github.com/repos/minimaxir/big-list-of-naughty-strings/pulls/108 | 2017-01-16T09:40:58Z | 2017-01-16T15:36:36Z | 2017-01-16T15:36:36Z | 2017-01-16T15:36:36Z | 1,229 | minimaxir/big-list-of-naughty-strings | 4,793 |
pygame | diff --git a/pygame.py b/pygame.py
new file mode 100644
index 0000000000..d0b620bbea
--- /dev/null
+++ b/pygame.py
@@ -0,0 +1,77 @@
+author-slayking1965
+"""
+This is a game very similar to stone paper scissor
+In this game :
+if computer chooses snake and user chooses water, the snake will drink water and computer wins.
+If computer chooses gun and user chooses water, the gun gets drown into water and user wins.
+And so on for other cases
+"""
+
+import random
+import time
+
+choices = {'S':'Snake','W':'Water','G':'Gun'}
+
+x = 0
+com_win = 0
+user_win = 0
+match_draw = 0
+
+print('Welcome to the Snake-Water-Gun Game\n')
+print('I am Mr. Computer, We will play this game 10 times')
+print('Whoever wins more matches will be the winner\n')
+
+while x < 10:
+ print(f'Game No. {x+1}')
+ for key, value in choices.items():
+ print(f'Choose {key} for {value}')
+
+ com_choice = random.choice(list(choices.keys())).lower()
+ user_choice = input('\n----->').lower()
+
+ if user_choice == 's' and com_choice == 'w':
+ com_win += 1
+
+ elif user_choice == 's' and com_choice == 'g':
+ com_win += 1
+
+ elif user_choice == 'w' and com_choice == 's':
+ user_win += 1
+
+ elif user_choice == 'g' and com_choice == 's':
+ user_win += 1
+
+ elif user_choice == 'g' and com_choice == 'w':
+ com_win += 1
+
+ elif user_choice == 'w' and com_choice == 'g':
+ user_win += 1
+
+ elif user_choice == com_choice:
+ match_draw += 1
+
+ else:
+ print('\n\nYou entered wrong !!!!!!')
+ x = 0
+ print('Restarting the game')
+ print('')
+ time.sleep(1)
+ continue
+
+ x += 1
+ print('\n')
+
+
+print('Here are final stats of the 10 matches : ')
+print(f'Mr. Computer won : {com_win} matches')
+print(f'You won : {user_win} matches')
+print(f'Matches Drawn : {match_draw}')
+
+if com_win > user_win:
+ print('\n-------Mr. Computer won-------')
+
+elif com_win < user_win:
+ print('\n-----------You won-----------')
+
+else:
+ print('\n----------Match Draw----------')
| plz merge it | https://api.github.com/repos/geekcomputers/Python/pulls/1079 | 2020-10-07T02:29:23Z | 2020-10-10T20:41:57Z | 2020-10-10T20:41:57Z | 2020-10-10T20:41:57Z | 643 | geekcomputers/Python | 31,518 |
Added netius as a WSGI server | diff --git a/README.md b/README.md
index dd0f87834..47691b50e 100644
--- a/README.md
+++ b/README.md
@@ -591,6 +591,7 @@ long, literate-programming-style documentation generator.
* [paste](http://pythonpaste.org/) - Multi-threaded, stable, tried and tested.
* [rocket](http://pypi.python.org/pypi/rocket) - Multi-threaded.
* [waitress](https://waitress.readthedocs.org/en/latest/) - Multi-threaded, poweres Pyramid.
+* [netius](https://github.com/hivesolutions/netius) - Asynchronous, very fast.
* [gunicorn](http://pypi.python.org/pypi/gunicorn) - Pre-forked, partly written in C.
* [fapws3](http://www.fapws.org/) - Asynchronous (network side only), written in C.
* [meinheld](http://pypi.python.org/pypi/meinheld) - Asynchronous, partly written in C.
| It's a stable non blocking async server in pure python. Works with both python 2 and 3.
| https://api.github.com/repos/vinta/awesome-python/pulls/123 | 2014-07-10T09:25:15Z | 2014-07-12T12:53:04Z | 2014-07-12T12:53:04Z | 2014-07-12T12:53:04Z | 231 | vinta/awesome-python | 27,199 |
Docker env setup related changes | diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 85227dcc5c9..17b6f699a33 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -24,4 +24,6 @@ RUN echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.
RUN apt update && apt install bazel -y
USER $USERNAME
-ENV PATH="/home/$USERNAME/.local/bin:${PATH}"
\ No newline at end of file
+ENV PATH="/home/$USERNAME/.local/bin:${PATH}"
+
+CMD ["/bin/bash"]
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f8e3e54b3ca..e25f897bc65 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -90,7 +90,7 @@ In the container shell, you need to install the latest dependencies with the
following command.
```shell
-pip install -r /home/keras/requirements.txt
+pip install -r /home/keras/requirements.txt && pip uninstall keras-nightly -y
```
Now, the environment setup is complete. You are ready to run the tests.
| For those of us opting for the docker way of setting up the development environment but not using the `devcontainer.json` file, it would be useful to have this patch in place that includes the following changes:
1) Changes to the `Dockerfile` -- As this file doesn't have a `CMD` instruction defined, the image tends to inherit the instruction of the parent python image and hence takes us to the python interpreter after the launch of the container. It would be convenient to have the `CMD` instruction defined here itself to facilitate installing the packages and proceed from thereon rather than having to append it to the run command every time.
2) Changes to `CONTRIBUTING.md` -- Added uninstalling the keras-nightly package. | https://api.github.com/repos/keras-team/keras/pulls/16040 | 2022-02-10T10:45:32Z | 2022-02-27T00:07:40Z | 2022-02-27T00:07:40Z | 2022-03-09T11:23:22Z | 299 | keras-team/keras | 47,261 |
[doc] add tutorial for booster checkpoint | diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py
index c14e602deaf5..6f2adaf03074 100644
--- a/colossalai/booster/booster.py
+++ b/colossalai/booster/booster.py
@@ -151,6 +151,16 @@ def no_sync(self, model: nn.Module) -> contextmanager:
return self.plugin.no_sync(model)
def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):
+ """Load model from checkpoint.
+
+ Args:
+ model (nn.Module): A model boosted by Booster.
+ checkpoint (str): Path to the checkpoint. It must be a local path.
+ It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
+ strict (bool, optional): whether to strictly enforce that the keys
+ in :attr:`state_dict` match the keys returned by this module's
+ :meth:`~torch.nn.Module.state_dict` function. Defaults to True.
+ """
self.checkpoint_io.load_model(model, checkpoint, strict)
def save_model(self,
@@ -159,16 +169,58 @@ def save_model(self,
prefix: str = None,
shard: bool = False,
size_per_shard: int = 1024):
+ """Save model to checkpoint.
+
+ Args:
+ model (nn.Module): A model boosted by Booster.
+ checkpoint (str): Path to the checkpoint. It must be a local path.
+ It is a file path if ``shard=False``. Otherwise, it is a directory path.
+ prefix (str, optional): A prefix added to parameter and buffer
+ names to compose the keys in state_dict. Defaults to None.
+ shard (bool, optional): Whether to save checkpoint a sharded way.
+ If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
+ size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
+ """
self.checkpoint_io.save_model(model, checkpoint, prefix, shard, size_per_shard)
def load_optimizer(self, optimizer: Optimizer, checkpoint: str):
+ """Load optimizer from checkpoint.
+
+ Args:
+ optimizer (Optimizer): An optimizer boosted by Booster.
+ checkpoint (str): Path to the checkpoint. It must be a local path.
+ It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
+ """
self.checkpoint_io.load_optimizer(optimizer, checkpoint)
def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):
+ """Save optimizer to checkpoint.
+ Warning: Saving sharded optimizer checkpoint is not supported yet.
+
+ Args:
+ optimizer (Optimizer): An optimizer boosted by Booster.
+ checkpoint (str): Path to the checkpoint. It must be a local path.
+ It is a file path if ``shard=False``. Otherwise, it is a directory path.
+ shard (bool, optional): Whether to save checkpoint a sharded way.
+ If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
+ size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
+ """
self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)
def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
+ """Save lr scheduler to checkpoint.
+
+ Args:
+ lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
+ checkpoint (str): Path to the checkpoint. It must be a local file path.
+ """
self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)
def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
+ """Load lr scheduler from checkpoint.
+
+ Args:
+ lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
+ checkpoint (str): Path to the checkpoint. It must be a local file path.
+ """
self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)
diff --git a/docs/sidebars.json b/docs/sidebars.json
index ed0ba52782ad..94f79dcd3509 100644
--- a/docs/sidebars.json
+++ b/docs/sidebars.json
@@ -29,6 +29,7 @@
"basics/launch_colossalai",
"basics/booster_api",
"basics/booster_plugins",
+ "basics/booster_checkpoint",
"basics/define_your_config",
"basics/initialize_features",
"basics/engine_trainer",
diff --git a/docs/source/en/basics/booster_checkpoint.md b/docs/source/en/basics/booster_checkpoint.md
new file mode 100644
index 000000000000..adc0af60b7de
--- /dev/null
+++ b/docs/source/en/basics/booster_checkpoint.md
@@ -0,0 +1,48 @@
+# Booster Checkpoint
+
+Author: [Hongxin Liu](https://github.com/ver217)
+
+**Prerequisite:**
+- [Booster API](./booster_api.md)
+
+## Introduction
+
+We've introduced the [Booster API](./booster_api.md) in the previous tutorial. In this tutorial, we will introduce how to save and load checkpoints using booster.
+
+## Model Checkpoint
+
+{{ autodoc:colossalai.booster.Booster.save_model }}
+
+Model must be boosted by `colossalai.booster.Booster` before saving. `checkpoint` is the path to saved checkpoint. It can be a file, if `shard=False`. Otherwise, it should be a directory. If `shard=True`, the checkpoint will be saved in a sharded way. This is useful when the checkpoint is too large to be saved in a single file. Our sharded checkpoint format is compatible with [huggingface/transformers](https://github.com/huggingface/transformers).
+
+{{ autodoc:colossalai.booster.Booster.load_model }}
+
+Model must be boosted by `colossalai.booster.Booster` before loading. It will detect the checkpoint format automatically, and load in corresponding way.
+
+## Optimizer Checkpoint
+
+> ⚠ Saving optimizer checkpoint in a sharded way is not supported yet.
+
+{{ autodoc:colossalai.booster.Booster.save_optimizer }}
+
+Optimizer must be boosted by `colossalai.booster.Booster` before saving.
+
+{{ autodoc:colossalai.booster.Booster.load_optimizer }}
+
+Optimizer must be boosted by `colossalai.booster.Booster` before loading.
+
+## LR Scheduler Checkpoint
+
+{{ autodoc:colossalai.booster.Booster.save_lr_scheduler }}
+
+LR scheduler must be boosted by `colossalai.booster.Booster` before saving. `checkpoint` is the local path to checkpoint file.
+
+{{ autodoc:colossalai.booster.Booster.load_lr_scheduler }}
+
+LR scheduler must be boosted by `colossalai.booster.Booster` before loading. `checkpoint` is the local path to checkpoint file.
+
+## Checkpoint design
+
+More details about checkpoint design can be found in our discussion [A Unified Checkpoint System Design](https://github.com/hpcaitech/ColossalAI/discussions/3339).
+
+<!-- doc-test-command: echo -->
diff --git a/docs/source/en/basics/booster_plugins.md b/docs/source/en/basics/booster_plugins.md
index c15c30c8450c..0362f095af2b 100644
--- a/docs/source/en/basics/booster_plugins.md
+++ b/docs/source/en/basics/booster_plugins.md
@@ -43,12 +43,16 @@ We've tested compatibility on some famous models, following models may not be su
Compatibility problems will be fixed in the future.
+> ⚠ This plugin can only load optimizer checkpoint saved by itself with the same number of processes now. This will be fixed in the future.
+
### Gemini Plugin
This plugin implements Zero-3 with chunk-based and heterogeneous memory management. It can train large models without much loss in speed. It also does not support local gradient accumulation. More details can be found in [Gemini Doc](../features/zero_with_chunk.md).
{{ autodoc:colossalai.booster.plugin.GeminiPlugin }}
+> ⚠ This plugin can only load optimizer checkpoint saved by itself with the same number of processes now. This will be fixed in the future.
+
### Torch DDP Plugin
More details can be found in [Pytorch Docs](https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel).
@@ -62,3 +66,5 @@ More details can be found in [Pytorch Docs](https://pytorch.org/docs/main/genera
More details can be found in [Pytorch Docs](https://pytorch.org/docs/main/fsdp.html).
{{ autodoc:colossalai.booster.plugin.TorchFSDPPlugin }}
+
+<!-- doc-test-command: echo -->
diff --git a/docs/source/zh-Hans/basics/booster_checkpoint.md b/docs/source/zh-Hans/basics/booster_checkpoint.md
new file mode 100644
index 000000000000..d75f18c908ba
--- /dev/null
+++ b/docs/source/zh-Hans/basics/booster_checkpoint.md
@@ -0,0 +1,48 @@
+# Booster Checkpoint
+
+作者: [Hongxin Liu](https://github.com/ver217)
+
+**前置教程:**
+- [Booster API](./booster_api.md)
+
+## 引言
+
+我们在之前的教程中介绍了 [Booster API](./booster_api.md)。在本教程中,我们将介绍如何使用 booster 保存和加载 checkpoint。
+
+## 模型 Checkpoint
+
+{{ autodoc:colossalai.booster.Booster.save_model }}
+
+模型在保存前必须被 `colossalai.booster.Booster` 加速。 `checkpoint` 是要保存的 checkpoint 的路径。 如果 `shard=False`,它就是文件。 否则, 它就是文件夹。如果 `shard=True`,checkpoint 将以分片方式保存。当 checkpoint 太大而无法保存在单个文件中时,这很有用。我们的分片 checkpoint 格式与 [huggingface/transformers](https://github.com/huggingface/transformers) 兼容。
+
+{{ autodoc:colossalai.booster.Booster.load_model }}
+
+模型在加载前必须被 `colossalai.booster.Booster` 加速。它会自动检测 checkpoint 格式,并以相应的方式加载。
+
+## 优化器 Checkpoint
+
+> ⚠ 尚不支持以分片方式保存优化器 Checkpoint。
+
+{{ autodoc:colossalai.booster.Booster.save_optimizer }}
+
+优化器在保存前必须被 `colossalai.booster.Booster` 加速。
+
+{{ autodoc:colossalai.booster.Booster.load_optimizer }}
+
+优化器在加载前必须被 `colossalai.booster.Booster` 加速。
+
+## 学习率调度器 Checkpoint
+
+{{ autodoc:colossalai.booster.Booster.save_lr_scheduler }}
+
+学习率调度器在保存前必须被 `colossalai.booster.Booster` 加速。 `checkpoint` 是 checkpoint 文件的本地路径.
+
+{{ autodoc:colossalai.booster.Booster.load_lr_scheduler }}
+
+学习率调度器在加载前必须被 `colossalai.booster.Booster` 加速。 `checkpoint` 是 checkpoint 文件的本地路径.
+
+## Checkpoint 设计
+
+有关 Checkpoint 设计的更多详细信息,请参见我们的讨论 [A Unified Checkpoint System Design](https://github.com/hpcaitech/ColossalAI/discussions/3339).
+
+<!-- doc-test-command: echo -->
diff --git a/docs/source/zh-Hans/basics/booster_plugins.md b/docs/source/zh-Hans/basics/booster_plugins.md
index e0258eb37932..b15ceb1e3ad5 100644
--- a/docs/source/zh-Hans/basics/booster_plugins.md
+++ b/docs/source/zh-Hans/basics/booster_plugins.md
@@ -43,12 +43,16 @@ Zero-2 不支持局部梯度累积。如果您坚持使用,虽然可以积累
兼容性问题将在未来修复。
+> ⚠ 该插件现在只能加载自己保存的且具有相同进程数的优化器 Checkpoint。这将在未来得到解决。
+
### Gemini 插件
这个插件实现了基于Chunk内存管理和异构内存管理的 Zero-3。它可以训练大型模型而不会损失太多速度。它也不支持局部梯度累积。更多详细信息,请参阅 [Gemini 文档](../features/zero_with_chunk.md).
{{ autodoc:colossalai.booster.plugin.GeminiPlugin }}
+> ⚠ 该插件现在只能加载自己保存的且具有相同进程数的优化器 Checkpoint。这将在未来得到解决。
+
### Torch DDP 插件
更多详细信息,请参阅 [Pytorch 文档](https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel).
@@ -62,3 +66,5 @@ Zero-2 不支持局部梯度累积。如果您坚持使用,虽然可以积累
更多详细信息,请参阅 [Pytorch 文档](https://pytorch.org/docs/main/fsdp.html).
{{ autodoc:colossalai.booster.plugin.TorchFSDPPlugin }}
+
+<!-- doc-test-command: echo -->
| ## 📌 Checklist before creating the PR
- [x] I have created an issue for this PR for traceability
- [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [x] I have added relevant tags if possible for us to better distinguish different PRs
## 🚨 Issue number
> Link this PR to your issue with words like fixed to automatically close the linked issue upon merge
>
> e.g. `fixed #1234`, `closed #1234`, `resolved #1234`
Closes #3784
## 📝 What does this PR do?
> Summarize your work here.
> if you have any plots/diagrams/screenshots/tables, please attach them here.
1. Add docstring of Booster about checkpoint
2. Add a tutorial for booster checkpoint
3. Add cuation about ckpt for plugins
## 💥 Checklist before requesting a review
- [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [x] I have performed a self-review of my code
- [ ] I have added thorough tests.
- [x] I have added docstrings for all the functions/methods I implemented
## ⭐️ Do you enjoy contributing to Colossal-AI?
- [x] 🌝 Yes, I do.
- [ ] 🌚 No, I don't.
Tell us more if you don't enjoy contributing to Colossal-AI.
| https://api.github.com/repos/hpcaitech/ColossalAI/pulls/3785 | 2023-05-19T09:19:31Z | 2023-05-19T10:05:09Z | 2023-05-19T10:05:09Z | 2023-05-19T10:05:12Z | 3,197 | hpcaitech/ColossalAI | 11,769 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.