Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +39 -0
- platform/dataops/dto/.github/workflows/build-images.yml +87 -0
- platform/dataops/dto/.github/workflows/dto-ci.yml +190 -0
- platform/dataops/dto/.venv/bin/Activate.ps1 +247 -0
- platform/dataops/dto/.venv/bin/activate +70 -0
- platform/dataops/dto/.venv/bin/activate.csh +27 -0
- platform/dataops/dto/.venv/bin/activate.fish +69 -0
- platform/dataops/dto/.venv/bin/coverage +7 -0
- platform/dataops/dto/.venv/bin/coverage-3.12 +7 -0
- platform/dataops/dto/.venv/bin/coverage3 +7 -0
- platform/dataops/dto/.venv/bin/f2py +7 -0
- platform/dataops/dto/.venv/bin/geomet +7 -0
- platform/dataops/dto/.venv/bin/ghp-import +7 -0
- platform/dataops/dto/.venv/bin/huggingface-cli +7 -0
- platform/dataops/dto/.venv/bin/jsonschema +7 -0
- platform/dataops/dto/.venv/bin/markdown-it +7 -0
- platform/dataops/dto/.venv/bin/markdown_py +7 -0
- platform/dataops/dto/.venv/bin/mkdocs +7 -0
- platform/dataops/dto/.venv/bin/normalizer +7 -0
- platform/dataops/dto/.venv/bin/pip +8 -0
- platform/dataops/dto/.venv/bin/pip3 +8 -0
- platform/dataops/dto/.venv/bin/pip3.12 +8 -0
- platform/dataops/dto/.venv/bin/py.test +7 -0
- platform/dataops/dto/.venv/bin/pybabel +7 -0
- platform/dataops/dto/.venv/bin/pygmentize +7 -0
- platform/dataops/dto/.venv/bin/pytest +7 -0
- platform/dataops/dto/.venv/bin/python +3 -0
- platform/dataops/dto/.venv/bin/python3 +3 -0
- platform/dataops/dto/.venv/bin/python3.12 +3 -0
- platform/dataops/dto/.venv/bin/tabulate +7 -0
- platform/dataops/dto/.venv/bin/tqdm +7 -0
- platform/dataops/dto/.venv/bin/watchmedo +7 -0
- platform/dataops/dto/.venv/bin/wheel +8 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/ghp_import.cpython-312.pyc +0 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/graphyte.cpython-312.pyc +0 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/py.cpython-312.pyc +0 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/six.cpython-312.pyc +0 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/typing_extensions.cpython-312.pyc +3 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/yaml_env_tag.cpython-312.pyc +0 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/_cffi_backend.cpython-312-x86_64-linux-gnu.so +3 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/DESCRIPTION.rst +46 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/INSTALLER +1 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/METADATA +59 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/RECORD +53 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/REQUESTED +0 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/WHEEL +5 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/metadata.json +1 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/top_level.txt +1 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio/__init__.py +50 -0
- platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio/base_events.py +1179 -0
.gitattributes
CHANGED
|
@@ -3221,3 +3221,42 @@ platform/dbops/b/1e8ba735f644/data/postgres/base/5/2838 filter=lfs diff=lfs merg
|
|
| 3221 |
platform/dbops/b/1e8ba735f644/data/postgres/base/5/2691 filter=lfs diff=lfs merge=lfs -text
|
| 3222 |
platform/dbops/b/1e8ba735f644/data/postgres/base/5/2658 filter=lfs diff=lfs merge=lfs -text
|
| 3223 |
platform/dbops/b/1e8ba735f644/data/postgres/base/5/2675 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3221 |
platform/dbops/b/1e8ba735f644/data/postgres/base/5/2691 filter=lfs diff=lfs merge=lfs -text
|
| 3222 |
platform/dbops/b/1e8ba735f644/data/postgres/base/5/2658 filter=lfs diff=lfs merge=lfs -text
|
| 3223 |
platform/dbops/b/1e8ba735f644/data/postgres/base/5/2675 filter=lfs diff=lfs merge=lfs -text
|
| 3224 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/5/2673 filter=lfs diff=lfs merge=lfs -text
|
| 3225 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/5/pg_internal.init filter=lfs diff=lfs merge=lfs -text
|
| 3226 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2619 filter=lfs diff=lfs merge=lfs -text
|
| 3227 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/1247 filter=lfs diff=lfs merge=lfs -text
|
| 3228 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2838 filter=lfs diff=lfs merge=lfs -text
|
| 3229 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2691 filter=lfs diff=lfs merge=lfs -text
|
| 3230 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2658 filter=lfs diff=lfs merge=lfs -text
|
| 3231 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2675 filter=lfs diff=lfs merge=lfs -text
|
| 3232 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2673 filter=lfs diff=lfs merge=lfs -text
|
| 3233 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/1249 filter=lfs diff=lfs merge=lfs -text
|
| 3234 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/1255 filter=lfs diff=lfs merge=lfs -text
|
| 3235 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/1259 filter=lfs diff=lfs merge=lfs -text
|
| 3236 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2617 filter=lfs diff=lfs merge=lfs -text
|
| 3237 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2618 filter=lfs diff=lfs merge=lfs -text
|
| 3238 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2609 filter=lfs diff=lfs merge=lfs -text
|
| 3239 |
+
platform/dbops/b/1e8ba735f644/data/postgres/base/16709/2608 filter=lfs diff=lfs merge=lfs -text
|
| 3240 |
+
platform/dbops/projects/dto/logs/jira-automation.log.1 filter=lfs diff=lfs merge=lfs -text
|
| 3241 |
+
platform/dbops/projects/dto/logs/confluence-automation.log.5 filter=lfs diff=lfs merge=lfs -text
|
| 3242 |
+
platform/dbops/projects/dto/logs/confluence-automation.log.4 filter=lfs diff=lfs merge=lfs -text
|
| 3243 |
+
platform/dbops/projects/dto/logs/confluence-automation.log.3 filter=lfs diff=lfs merge=lfs -text
|
| 3244 |
+
platform/dbops/projects/dto/logs/confluence-automation.log.2 filter=lfs diff=lfs merge=lfs -text
|
| 3245 |
+
platform/dbops/projects/dto/logs/confluence-automation.log.1 filter=lfs diff=lfs merge=lfs -text
|
| 3246 |
+
platform/dbops/projects/dto/logs/health-monitor.log.5 filter=lfs diff=lfs merge=lfs -text
|
| 3247 |
+
platform/dbops/projects/dto/logs/health-monitor.log.4 filter=lfs diff=lfs merge=lfs -text
|
| 3248 |
+
platform/dbops/projects/dto/logs/health-monitor.log.3 filter=lfs diff=lfs merge=lfs -text
|
| 3249 |
+
platform/dbops/projects/dto/logs/health-monitor.log.2 filter=lfs diff=lfs merge=lfs -text
|
| 3250 |
+
platform/dbops/projects/dto/logs/health-monitor.log.1 filter=lfs diff=lfs merge=lfs -text
|
| 3251 |
+
platform/dbops/projects/dto/logs/jira-automation.log.5 filter=lfs diff=lfs merge=lfs -text
|
| 3252 |
+
platform/dbops/projects/dto/logs/jira-automation.log.4 filter=lfs diff=lfs merge=lfs -text
|
| 3253 |
+
platform/dbops/projects/dto/logs/jira-automation.log.3 filter=lfs diff=lfs merge=lfs -text
|
| 3254 |
+
platform/dbops/projects/dto/logs/jira-automation.log.2 filter=lfs diff=lfs merge=lfs -text
|
| 3255 |
+
platform/dataops/dto/.venv/bin/python3 filter=lfs diff=lfs merge=lfs -text
|
| 3256 |
+
platform/dataops/dto/.venv/bin/python filter=lfs diff=lfs merge=lfs -text
|
| 3257 |
+
platform/dataops/dto/.venv/bin/python3.12 filter=lfs diff=lfs merge=lfs -text
|
| 3258 |
+
platform/dataops/dto/.venv/lib/python3.12/site-packages/_cffi_backend.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 3259 |
+
platform/dataops/dto/.venv/lib/python3.12/site-packages/nacl/_sodium.abi3.so filter=lfs diff=lfs merge=lfs -text
|
| 3260 |
+
platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/typing_extensions.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
|
| 3261 |
+
platform/dataops/dto/.venv/lib/python3.12/site-packages/rpds/rpds.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 3262 |
+
platform/dataops/dto/.venv/lib/python3.12/site-packages/regex/_regex.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
platform/dataops/dto/.github/workflows/build-images.yml
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Build and Publish Images
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches: [ main ]
|
| 6 |
+
paths:
|
| 7 |
+
- 'docker/**'
|
| 8 |
+
- '.github/workflows/build-images.yml'
|
| 9 |
+
- 'services/**'
|
| 10 |
+
- 'requirements.txt'
|
| 11 |
+
pull_request:
|
| 12 |
+
branches: [ main ]
|
| 13 |
+
workflow_dispatch:
|
| 14 |
+
|
| 15 |
+
jobs:
|
| 16 |
+
build-and-push:
|
| 17 |
+
runs-on: ubuntu-latest
|
| 18 |
+
permissions:
|
| 19 |
+
contents: read
|
| 20 |
+
packages: write
|
| 21 |
+
strategy:
|
| 22 |
+
matrix:
|
| 23 |
+
image:
|
| 24 |
+
- name: dto-health-monitor
|
| 25 |
+
context: docker/health-monitor
|
| 26 |
+
dockerfile: docker/health-monitor/Dockerfile
|
| 27 |
+
steps:
|
| 28 |
+
- name: Checkout
|
| 29 |
+
uses: actions/checkout@v4
|
| 30 |
+
|
| 31 |
+
- name: Set up Docker Buildx
|
| 32 |
+
uses: docker/setup-buildx-action@v3
|
| 33 |
+
|
| 34 |
+
- name: Log in to GHCR
|
| 35 |
+
uses: docker/login-action@v3
|
| 36 |
+
with:
|
| 37 |
+
registry: ghcr.io
|
| 38 |
+
username: ${{ github.actor }}
|
| 39 |
+
password: ${{ secrets.GITHUB_TOKEN }}
|
| 40 |
+
|
| 41 |
+
- name: Log in to Docker Hub
|
| 42 |
+
if: secrets.DOCKERHUB_USERNAME && secrets.DOCKERHUB_TOKEN
|
| 43 |
+
uses: docker/login-action@v3
|
| 44 |
+
with:
|
| 45 |
+
registry: docker.io
|
| 46 |
+
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
| 47 |
+
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
| 48 |
+
|
| 49 |
+
- name: Extract metadata (tags, labels)
|
| 50 |
+
id: meta
|
| 51 |
+
uses: docker/metadata-action@v5
|
| 52 |
+
with:
|
| 53 |
+
images: |
|
| 54 |
+
ghcr.io/${{ github.repository_owner }}/${{ matrix.image.name }}
|
| 55 |
+
docker.io/${{ secrets.DOCKERHUB_ORG || secrets.DOCKERHUB_USERNAME }}/${{ matrix.image.name }}
|
| 56 |
+
tags: |
|
| 57 |
+
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
|
| 58 |
+
type=sha,format=short
|
| 59 |
+
type=ref,event=tag
|
| 60 |
+
|
| 61 |
+
- name: Build and push
|
| 62 |
+
uses: docker/build-push-action@v6
|
| 63 |
+
with:
|
| 64 |
+
context: ${{ matrix.image.context }}
|
| 65 |
+
file: ${{ matrix.image.dockerfile }}
|
| 66 |
+
push: ${{ github.event_name != 'pull_request' }}
|
| 67 |
+
tags: ${{ steps.meta.outputs.tags }}
|
| 68 |
+
labels: ${{ steps.meta.outputs.labels }}
|
| 69 |
+
cache-from: type=gha
|
| 70 |
+
cache-to: type=gha,mode=max
|
| 71 |
+
|
| 72 |
+
- name: Trivy scan
|
| 73 |
+
uses: aquasecurity/trivy-action@0.28.0
|
| 74 |
+
with:
|
| 75 |
+
image-ref: ${{ fromJSON(steps.meta.outputs.json).tags[0] }}
|
| 76 |
+
format: 'sarif'
|
| 77 |
+
output: 'trivy-${{ matrix.image.name }}.sarif'
|
| 78 |
+
exit-code: '1'
|
| 79 |
+
ignore-unfixed: true
|
| 80 |
+
vuln-type: 'os,library'
|
| 81 |
+
continue-on-error: true
|
| 82 |
+
|
| 83 |
+
- name: Upload scan results
|
| 84 |
+
uses: actions/upload-artifact@v4
|
| 85 |
+
with:
|
| 86 |
+
name: trivy-${{ matrix.image.name }}-sarif
|
| 87 |
+
path: trivy-${{ matrix.image.name }}.sarif
|
platform/dataops/dto/.github/workflows/dto-ci.yml
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: DTO CI/CD Pipeline
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches: [ main, develop ]
|
| 6 |
+
paths:
|
| 7 |
+
- '**.py'
|
| 8 |
+
- '**.yaml'
|
| 9 |
+
- '**.yml'
|
| 10 |
+
- '**.md'
|
| 11 |
+
- '**.sh'
|
| 12 |
+
pull_request:
|
| 13 |
+
branches: [ main ]
|
| 14 |
+
workflow_dispatch:
|
| 15 |
+
|
| 16 |
+
jobs:
|
| 17 |
+
validate:
|
| 18 |
+
name: Validate DTO Configuration
|
| 19 |
+
runs-on: ubuntu-latest
|
| 20 |
+
|
| 21 |
+
steps:
|
| 22 |
+
- name: Checkout code
|
| 23 |
+
uses: actions/checkout@v4
|
| 24 |
+
|
| 25 |
+
- name: Set up Python
|
| 26 |
+
uses: actions/setup-python@v4
|
| 27 |
+
with:
|
| 28 |
+
python-version: '3.10'
|
| 29 |
+
|
| 30 |
+
- name: Install dependencies
|
| 31 |
+
run: |
|
| 32 |
+
python -m pip install --upgrade pip
|
| 33 |
+
pip install pyyaml jsonschema
|
| 34 |
+
|
| 35 |
+
- name: Validate manifest schema
|
| 36 |
+
run: |
|
| 37 |
+
python validate-ci.py
|
| 38 |
+
|
| 39 |
+
- name: Run syntax checks
|
| 40 |
+
run: |
|
| 41 |
+
# Validate YAML files
|
| 42 |
+
find . -name "*.yaml" -o -name "*.yml" | xargs -I {} sh -c 'echo "Validating {}" && python -c "import yaml; yaml.safe_load(open(\"{}\"))" && echo "✓ {}"'
|
| 43 |
+
|
| 44 |
+
# Validate Python syntax
|
| 45 |
+
find . -name "*.py" | xargs -I {} sh -c 'echo "Validating {}" && python -m py_compile {} && echo "✓ {}"'
|
| 46 |
+
|
| 47 |
+
test:
|
| 48 |
+
name: Run DTO Tests
|
| 49 |
+
runs-on: ubuntu-latest
|
| 50 |
+
needs: validate
|
| 51 |
+
|
| 52 |
+
steps:
|
| 53 |
+
- name: Checkout code
|
| 54 |
+
uses: actions/checkout@v4
|
| 55 |
+
|
| 56 |
+
- name: Set up Python
|
| 57 |
+
uses: actions/setup-python@v4
|
| 58 |
+
with:
|
| 59 |
+
python-version: '3.10'
|
| 60 |
+
|
| 61 |
+
- name: Install test dependencies
|
| 62 |
+
run: |
|
| 63 |
+
pip install pytest pytest-cov
|
| 64 |
+
|
| 65 |
+
- name: Run unit tests
|
| 66 |
+
run: |
|
| 67 |
+
# Create simple test runner
|
| 68 |
+
cat > test_runner.py << 'EOF'
|
| 69 |
+
import unittest
|
| 70 |
+
import os
|
| 71 |
+
import sys
|
| 72 |
+
|
| 73 |
+
# Add current directory to path
|
| 74 |
+
sys.path.insert(0, os.path.dirname(__file__))
|
| 75 |
+
|
| 76 |
+
# Basic test suite
|
| 77 |
+
class TestDTOFramework(unittest.TestCase):
|
| 78 |
+
|
| 79 |
+
def test_manifest_exists(self):
|
| 80 |
+
"""Test that manifest file exists"""
|
| 81 |
+
self.assertTrue(os.path.exists('dto_manifest.yaml'))
|
| 82 |
+
|
| 83 |
+
def test_scripts_executable(self):
|
| 84 |
+
"""Test that scripts are executable"""
|
| 85 |
+
for script in ['generate.py', 'validate-ci.py']:
|
| 86 |
+
if os.path.exists(script):
|
| 87 |
+
self.assertTrue(os.access(script, os.X_OK))
|
| 88 |
+
|
| 89 |
+
if __name__ == '__main__':
|
| 90 |
+
unittest.main()
|
| 91 |
+
EOF
|
| 92 |
+
|
| 93 |
+
python test_runner.py -v
|
| 94 |
+
|
| 95 |
+
security:
|
| 96 |
+
name: Security Scan
|
| 97 |
+
runs-on: ubuntu-latest
|
| 98 |
+
|
| 99 |
+
steps:
|
| 100 |
+
- name: Checkout code
|
| 101 |
+
uses: actions/checkout@v4
|
| 102 |
+
|
| 103 |
+
- name: Run security scan
|
| 104 |
+
uses: actions/checkov@v3
|
| 105 |
+
with:
|
| 106 |
+
directory: .
|
| 107 |
+
check: ["CKV_GHA_1", "CKV_GHA_2", "CKV_GHA_3"]
|
| 108 |
+
|
| 109 |
+
- name: Secret scanning
|
| 110 |
+
uses: actions/check-secrets@v1
|
| 111 |
+
|
| 112 |
+
documentation:
|
| 113 |
+
name: Documentation Build
|
| 114 |
+
runs-on: ubuntu-latest
|
| 115 |
+
|
| 116 |
+
steps:
|
| 117 |
+
- name: Checkout code
|
| 118 |
+
uses: actions/checkout@v4
|
| 119 |
+
|
| 120 |
+
- name: Build documentation
|
| 121 |
+
run: |
|
| 122 |
+
# Validate markdown files
|
| 123 |
+
find docs/ -name "*.md" | xargs -I {} sh -c 'echo "Validating {}" && markdownlint {} || true'
|
| 124 |
+
|
| 125 |
+
# Generate documentation index
|
| 126 |
+
echo "# DTO Framework Documentation" > DOCUMENTATION.md
|
| 127 |
+
echo "\n## Available Documents" >> DOCUMENTATION.md
|
| 128 |
+
find docs/ -name "*.md" | sed 's|^|- |' >> DOCUMENTATION.md
|
| 129 |
+
|
| 130 |
+
deploy:
|
| 131 |
+
name: Deploy to Staging
|
| 132 |
+
runs-on: ubuntu-latest
|
| 133 |
+
needs: [validate, test, security]
|
| 134 |
+
if: github.ref == 'refs/heads/main'
|
| 135 |
+
|
| 136 |
+
steps:
|
| 137 |
+
- name: Checkout code
|
| 138 |
+
uses: actions/checkout@v4
|
| 139 |
+
|
| 140 |
+
- name: Deploy to staging
|
| 141 |
+
run: |
|
| 142 |
+
echo "Deploying DTO framework to staging environment"
|
| 143 |
+
# Add deployment logic here
|
| 144 |
+
# This would typically use ansible, terraform, or custom deployment scripts
|
| 145 |
+
|
| 146 |
+
- name: Notify deployment
|
| 147 |
+
run: |
|
| 148 |
+
echo "DTO framework deployed successfully to staging"
|
| 149 |
+
# Add notification logic (Slack, Teams, etc.)
|
| 150 |
+
|
| 151 |
+
release:
|
| 152 |
+
name: Create Release
|
| 153 |
+
runs-on: ubuntu-latest
|
| 154 |
+
needs: deploy
|
| 155 |
+
if: github.ref == 'refs/heads/main'
|
| 156 |
+
|
| 157 |
+
steps:
|
| 158 |
+
- name: Checkout code
|
| 159 |
+
uses: actions/checkout@v4
|
| 160 |
+
|
| 161 |
+
- name: Create GitHub Release
|
| 162 |
+
uses: softprops/action-gh-release@v1
|
| 163 |
+
with:
|
| 164 |
+
tag_name: v$(date +%Y%m%d.%H%M%S)
|
| 165 |
+
name: DTO Framework Release $(date +%Y-%m-%d)
|
| 166 |
+
body: |
|
| 167 |
+
Automated release of Data Transfer Operations Framework
|
| 168 |
+
|
| 169 |
+
Changes:
|
| 170 |
+
- CI/CD pipeline enhancements
|
| 171 |
+
- Xet integration updates
|
| 172 |
+
- Documentation improvements
|
| 173 |
+
draft: false
|
| 174 |
+
prerelease: false
|
| 175 |
+
|
| 176 |
+
- name: Upload release artifacts
|
| 177 |
+
uses: actions/upload-artifact@v3
|
| 178 |
+
with:
|
| 179 |
+
name: dto-framework
|
| 180 |
+
path: |
|
| 181 |
+
*.py
|
| 182 |
+
*.yaml
|
| 183 |
+
*.md
|
| 184 |
+
scripts/
|
| 185 |
+
integrations/
|
| 186 |
+
|
| 187 |
+
# Environment configurations
|
| 188 |
+
env:
|
| 189 |
+
PYTHONPATH: .
|
| 190 |
+
DTO_ENV: ci
|
platform/dataops/dto/.venv/bin/Activate.ps1
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<#
|
| 2 |
+
.Synopsis
|
| 3 |
+
Activate a Python virtual environment for the current PowerShell session.
|
| 4 |
+
|
| 5 |
+
.Description
|
| 6 |
+
Pushes the python executable for a virtual environment to the front of the
|
| 7 |
+
$Env:PATH environment variable and sets the prompt to signify that you are
|
| 8 |
+
in a Python virtual environment. Makes use of the command line switches as
|
| 9 |
+
well as the `pyvenv.cfg` file values present in the virtual environment.
|
| 10 |
+
|
| 11 |
+
.Parameter VenvDir
|
| 12 |
+
Path to the directory that contains the virtual environment to activate. The
|
| 13 |
+
default value for this is the parent of the directory that the Activate.ps1
|
| 14 |
+
script is located within.
|
| 15 |
+
|
| 16 |
+
.Parameter Prompt
|
| 17 |
+
The prompt prefix to display when this virtual environment is activated. By
|
| 18 |
+
default, this prompt is the name of the virtual environment folder (VenvDir)
|
| 19 |
+
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
|
| 20 |
+
|
| 21 |
+
.Example
|
| 22 |
+
Activate.ps1
|
| 23 |
+
Activates the Python virtual environment that contains the Activate.ps1 script.
|
| 24 |
+
|
| 25 |
+
.Example
|
| 26 |
+
Activate.ps1 -Verbose
|
| 27 |
+
Activates the Python virtual environment that contains the Activate.ps1 script,
|
| 28 |
+
and shows extra information about the activation as it executes.
|
| 29 |
+
|
| 30 |
+
.Example
|
| 31 |
+
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
|
| 32 |
+
Activates the Python virtual environment located in the specified location.
|
| 33 |
+
|
| 34 |
+
.Example
|
| 35 |
+
Activate.ps1 -Prompt "MyPython"
|
| 36 |
+
Activates the Python virtual environment that contains the Activate.ps1 script,
|
| 37 |
+
and prefixes the current prompt with the specified string (surrounded in
|
| 38 |
+
parentheses) while the virtual environment is active.
|
| 39 |
+
|
| 40 |
+
.Notes
|
| 41 |
+
On Windows, it may be required to enable this Activate.ps1 script by setting the
|
| 42 |
+
execution policy for the user. You can do this by issuing the following PowerShell
|
| 43 |
+
command:
|
| 44 |
+
|
| 45 |
+
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
| 46 |
+
|
| 47 |
+
For more information on Execution Policies:
|
| 48 |
+
https://go.microsoft.com/fwlink/?LinkID=135170
|
| 49 |
+
|
| 50 |
+
#>
|
| 51 |
+
Param(
|
| 52 |
+
[Parameter(Mandatory = $false)]
|
| 53 |
+
[String]
|
| 54 |
+
$VenvDir,
|
| 55 |
+
[Parameter(Mandatory = $false)]
|
| 56 |
+
[String]
|
| 57 |
+
$Prompt
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
<# Function declarations --------------------------------------------------- #>
|
| 61 |
+
|
| 62 |
+
<#
|
| 63 |
+
.Synopsis
|
| 64 |
+
Remove all shell session elements added by the Activate script, including the
|
| 65 |
+
addition of the virtual environment's Python executable from the beginning of
|
| 66 |
+
the PATH variable.
|
| 67 |
+
|
| 68 |
+
.Parameter NonDestructive
|
| 69 |
+
If present, do not remove this function from the global namespace for the
|
| 70 |
+
session.
|
| 71 |
+
|
| 72 |
+
#>
|
| 73 |
+
function global:deactivate ([switch]$NonDestructive) {
|
| 74 |
+
# Revert to original values
|
| 75 |
+
|
| 76 |
+
# The prior prompt:
|
| 77 |
+
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
|
| 78 |
+
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
|
| 79 |
+
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
# The prior PYTHONHOME:
|
| 83 |
+
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
|
| 84 |
+
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
|
| 85 |
+
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
# The prior PATH:
|
| 89 |
+
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
|
| 90 |
+
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
|
| 91 |
+
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
# Just remove the VIRTUAL_ENV altogether:
|
| 95 |
+
if (Test-Path -Path Env:VIRTUAL_ENV) {
|
| 96 |
+
Remove-Item -Path env:VIRTUAL_ENV
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
# Just remove VIRTUAL_ENV_PROMPT altogether.
|
| 100 |
+
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
|
| 101 |
+
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
|
| 105 |
+
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
|
| 106 |
+
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
# Leave deactivate function in the global namespace if requested:
|
| 110 |
+
if (-not $NonDestructive) {
|
| 111 |
+
Remove-Item -Path function:deactivate
|
| 112 |
+
}
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
<#
|
| 116 |
+
.Description
|
| 117 |
+
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
|
| 118 |
+
given folder, and returns them in a map.
|
| 119 |
+
|
| 120 |
+
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
|
| 121 |
+
two strings separated by `=` (with any amount of whitespace surrounding the =)
|
| 122 |
+
then it is considered a `key = value` line. The left hand string is the key,
|
| 123 |
+
the right hand is the value.
|
| 124 |
+
|
| 125 |
+
If the value starts with a `'` or a `"` then the first and last character is
|
| 126 |
+
stripped from the value before being captured.
|
| 127 |
+
|
| 128 |
+
.Parameter ConfigDir
|
| 129 |
+
Path to the directory that contains the `pyvenv.cfg` file.
|
| 130 |
+
#>
|
| 131 |
+
function Get-PyVenvConfig(
|
| 132 |
+
[String]
|
| 133 |
+
$ConfigDir
|
| 134 |
+
) {
|
| 135 |
+
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
|
| 136 |
+
|
| 137 |
+
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
|
| 138 |
+
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
|
| 139 |
+
|
| 140 |
+
# An empty map will be returned if no config file is found.
|
| 141 |
+
$pyvenvConfig = @{ }
|
| 142 |
+
|
| 143 |
+
if ($pyvenvConfigPath) {
|
| 144 |
+
|
| 145 |
+
Write-Verbose "File exists, parse `key = value` lines"
|
| 146 |
+
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
|
| 147 |
+
|
| 148 |
+
$pyvenvConfigContent | ForEach-Object {
|
| 149 |
+
$keyval = $PSItem -split "\s*=\s*", 2
|
| 150 |
+
if ($keyval[0] -and $keyval[1]) {
|
| 151 |
+
$val = $keyval[1]
|
| 152 |
+
|
| 153 |
+
# Remove extraneous quotations around a string value.
|
| 154 |
+
if ("'""".Contains($val.Substring(0, 1))) {
|
| 155 |
+
$val = $val.Substring(1, $val.Length - 2)
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
$pyvenvConfig[$keyval[0]] = $val
|
| 159 |
+
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
|
| 160 |
+
}
|
| 161 |
+
}
|
| 162 |
+
}
|
| 163 |
+
return $pyvenvConfig
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
<# Begin Activate script --------------------------------------------------- #>
|
| 168 |
+
|
| 169 |
+
# Determine the containing directory of this script
|
| 170 |
+
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
|
| 171 |
+
$VenvExecDir = Get-Item -Path $VenvExecPath
|
| 172 |
+
|
| 173 |
+
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
|
| 174 |
+
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
|
| 175 |
+
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
|
| 176 |
+
|
| 177 |
+
# Set values required in priority: CmdLine, ConfigFile, Default
|
| 178 |
+
# First, get the location of the virtual environment, it might not be
|
| 179 |
+
# VenvExecDir if specified on the command line.
|
| 180 |
+
if ($VenvDir) {
|
| 181 |
+
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
|
| 182 |
+
}
|
| 183 |
+
else {
|
| 184 |
+
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
|
| 185 |
+
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
|
| 186 |
+
Write-Verbose "VenvDir=$VenvDir"
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
# Next, read the `pyvenv.cfg` file to determine any required value such
|
| 190 |
+
# as `prompt`.
|
| 191 |
+
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
|
| 192 |
+
|
| 193 |
+
# Next, set the prompt from the command line, or the config file, or
|
| 194 |
+
# just use the name of the virtual environment folder.
|
| 195 |
+
if ($Prompt) {
|
| 196 |
+
Write-Verbose "Prompt specified as argument, using '$Prompt'"
|
| 197 |
+
}
|
| 198 |
+
else {
|
| 199 |
+
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
|
| 200 |
+
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
|
| 201 |
+
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
|
| 202 |
+
$Prompt = $pyvenvCfg['prompt'];
|
| 203 |
+
}
|
| 204 |
+
else {
|
| 205 |
+
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
|
| 206 |
+
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
|
| 207 |
+
$Prompt = Split-Path -Path $venvDir -Leaf
|
| 208 |
+
}
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
Write-Verbose "Prompt = '$Prompt'"
|
| 212 |
+
Write-Verbose "VenvDir='$VenvDir'"
|
| 213 |
+
|
| 214 |
+
# Deactivate any currently active virtual environment, but leave the
|
| 215 |
+
# deactivate function in place.
|
| 216 |
+
deactivate -nondestructive
|
| 217 |
+
|
| 218 |
+
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
|
| 219 |
+
# that there is an activated venv.
|
| 220 |
+
$env:VIRTUAL_ENV = $VenvDir
|
| 221 |
+
|
| 222 |
+
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
|
| 223 |
+
|
| 224 |
+
Write-Verbose "Setting prompt to '$Prompt'"
|
| 225 |
+
|
| 226 |
+
# Set the prompt to include the env name
|
| 227 |
+
# Make sure _OLD_VIRTUAL_PROMPT is global
|
| 228 |
+
function global:_OLD_VIRTUAL_PROMPT { "" }
|
| 229 |
+
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
|
| 230 |
+
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
|
| 231 |
+
|
| 232 |
+
function global:prompt {
|
| 233 |
+
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
|
| 234 |
+
_OLD_VIRTUAL_PROMPT
|
| 235 |
+
}
|
| 236 |
+
$env:VIRTUAL_ENV_PROMPT = $Prompt
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
# Clear PYTHONHOME
|
| 240 |
+
if (Test-Path -Path Env:PYTHONHOME) {
|
| 241 |
+
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
|
| 242 |
+
Remove-Item -Path Env:PYTHONHOME
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
# Add the venv to the PATH
|
| 246 |
+
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
|
| 247 |
+
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
|
platform/dataops/dto/.venv/bin/activate
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file must be used with "source bin/activate" *from bash*
|
| 2 |
+
# You cannot run it directly
|
| 3 |
+
|
| 4 |
+
deactivate () {
|
| 5 |
+
# reset old environment variables
|
| 6 |
+
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
|
| 7 |
+
PATH="${_OLD_VIRTUAL_PATH:-}"
|
| 8 |
+
export PATH
|
| 9 |
+
unset _OLD_VIRTUAL_PATH
|
| 10 |
+
fi
|
| 11 |
+
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
|
| 12 |
+
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
|
| 13 |
+
export PYTHONHOME
|
| 14 |
+
unset _OLD_VIRTUAL_PYTHONHOME
|
| 15 |
+
fi
|
| 16 |
+
|
| 17 |
+
# Call hash to forget past commands. Without forgetting
|
| 18 |
+
# past commands the $PATH changes we made may not be respected
|
| 19 |
+
hash -r 2> /dev/null
|
| 20 |
+
|
| 21 |
+
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
|
| 22 |
+
PS1="${_OLD_VIRTUAL_PS1:-}"
|
| 23 |
+
export PS1
|
| 24 |
+
unset _OLD_VIRTUAL_PS1
|
| 25 |
+
fi
|
| 26 |
+
|
| 27 |
+
unset VIRTUAL_ENV
|
| 28 |
+
unset VIRTUAL_ENV_PROMPT
|
| 29 |
+
if [ ! "${1:-}" = "nondestructive" ] ; then
|
| 30 |
+
# Self destruct!
|
| 31 |
+
unset -f deactivate
|
| 32 |
+
fi
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
# unset irrelevant variables
|
| 36 |
+
deactivate nondestructive
|
| 37 |
+
|
| 38 |
+
# on Windows, a path can contain colons and backslashes and has to be converted:
|
| 39 |
+
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
|
| 40 |
+
# transform D:\path\to\venv to /d/path/to/venv on MSYS
|
| 41 |
+
# and to /cygdrive/d/path/to/venv on Cygwin
|
| 42 |
+
export VIRTUAL_ENV=$(cygpath /data/adaptai/platform/dataops/dto/.venv)
|
| 43 |
+
else
|
| 44 |
+
# use the path as-is
|
| 45 |
+
export VIRTUAL_ENV=/data/adaptai/platform/dataops/dto/.venv
|
| 46 |
+
fi
|
| 47 |
+
|
| 48 |
+
_OLD_VIRTUAL_PATH="$PATH"
|
| 49 |
+
PATH="$VIRTUAL_ENV/"bin":$PATH"
|
| 50 |
+
export PATH
|
| 51 |
+
|
| 52 |
+
# unset PYTHONHOME if set
|
| 53 |
+
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
|
| 54 |
+
# could use `if (set -u; : $PYTHONHOME) ;` in bash
|
| 55 |
+
if [ -n "${PYTHONHOME:-}" ] ; then
|
| 56 |
+
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
|
| 57 |
+
unset PYTHONHOME
|
| 58 |
+
fi
|
| 59 |
+
|
| 60 |
+
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
|
| 61 |
+
_OLD_VIRTUAL_PS1="${PS1:-}"
|
| 62 |
+
PS1='(.venv) '"${PS1:-}"
|
| 63 |
+
export PS1
|
| 64 |
+
VIRTUAL_ENV_PROMPT='(.venv) '
|
| 65 |
+
export VIRTUAL_ENV_PROMPT
|
| 66 |
+
fi
|
| 67 |
+
|
| 68 |
+
# Call hash to forget past commands. Without forgetting
|
| 69 |
+
# past commands the $PATH changes we made may not be respected
|
| 70 |
+
hash -r 2> /dev/null
|
platform/dataops/dto/.venv/bin/activate.csh
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file must be used with "source bin/activate.csh" *from csh*.
|
| 2 |
+
# You cannot run it directly.
|
| 3 |
+
|
| 4 |
+
# Created by Davide Di Blasi <davidedb@gmail.com>.
|
| 5 |
+
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
|
| 6 |
+
|
| 7 |
+
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
|
| 8 |
+
|
| 9 |
+
# Unset irrelevant variables.
|
| 10 |
+
deactivate nondestructive
|
| 11 |
+
|
| 12 |
+
setenv VIRTUAL_ENV /data/adaptai/platform/dataops/dto/.venv
|
| 13 |
+
|
| 14 |
+
set _OLD_VIRTUAL_PATH="$PATH"
|
| 15 |
+
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
set _OLD_VIRTUAL_PROMPT="$prompt"
|
| 19 |
+
|
| 20 |
+
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
|
| 21 |
+
set prompt = '(.venv) '"$prompt"
|
| 22 |
+
setenv VIRTUAL_ENV_PROMPT '(.venv) '
|
| 23 |
+
endif
|
| 24 |
+
|
| 25 |
+
alias pydoc python -m pydoc
|
| 26 |
+
|
| 27 |
+
rehash
|
platform/dataops/dto/.venv/bin/activate.fish
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
|
| 2 |
+
# (https://fishshell.com/). You cannot run it directly.
|
| 3 |
+
|
| 4 |
+
function deactivate -d "Exit virtual environment and return to normal shell environment"
|
| 5 |
+
# reset old environment variables
|
| 6 |
+
if test -n "$_OLD_VIRTUAL_PATH"
|
| 7 |
+
set -gx PATH $_OLD_VIRTUAL_PATH
|
| 8 |
+
set -e _OLD_VIRTUAL_PATH
|
| 9 |
+
end
|
| 10 |
+
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
|
| 11 |
+
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
|
| 12 |
+
set -e _OLD_VIRTUAL_PYTHONHOME
|
| 13 |
+
end
|
| 14 |
+
|
| 15 |
+
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
|
| 16 |
+
set -e _OLD_FISH_PROMPT_OVERRIDE
|
| 17 |
+
# prevents error when using nested fish instances (Issue #93858)
|
| 18 |
+
if functions -q _old_fish_prompt
|
| 19 |
+
functions -e fish_prompt
|
| 20 |
+
functions -c _old_fish_prompt fish_prompt
|
| 21 |
+
functions -e _old_fish_prompt
|
| 22 |
+
end
|
| 23 |
+
end
|
| 24 |
+
|
| 25 |
+
set -e VIRTUAL_ENV
|
| 26 |
+
set -e VIRTUAL_ENV_PROMPT
|
| 27 |
+
if test "$argv[1]" != "nondestructive"
|
| 28 |
+
# Self-destruct!
|
| 29 |
+
functions -e deactivate
|
| 30 |
+
end
|
| 31 |
+
end
|
| 32 |
+
|
| 33 |
+
# Unset irrelevant variables.
|
| 34 |
+
deactivate nondestructive
|
| 35 |
+
|
| 36 |
+
set -gx VIRTUAL_ENV /data/adaptai/platform/dataops/dto/.venv
|
| 37 |
+
|
| 38 |
+
set -gx _OLD_VIRTUAL_PATH $PATH
|
| 39 |
+
set -gx PATH "$VIRTUAL_ENV/"bin $PATH
|
| 40 |
+
|
| 41 |
+
# Unset PYTHONHOME if set.
|
| 42 |
+
if set -q PYTHONHOME
|
| 43 |
+
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
|
| 44 |
+
set -e PYTHONHOME
|
| 45 |
+
end
|
| 46 |
+
|
| 47 |
+
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
|
| 48 |
+
# fish uses a function instead of an env var to generate the prompt.
|
| 49 |
+
|
| 50 |
+
# Save the current fish_prompt function as the function _old_fish_prompt.
|
| 51 |
+
functions -c fish_prompt _old_fish_prompt
|
| 52 |
+
|
| 53 |
+
# With the original prompt function renamed, we can override with our own.
|
| 54 |
+
function fish_prompt
|
| 55 |
+
# Save the return status of the last command.
|
| 56 |
+
set -l old_status $status
|
| 57 |
+
|
| 58 |
+
# Output the venv prompt; color taken from the blue of the Python logo.
|
| 59 |
+
printf "%s%s%s" (set_color 4B8BBE) '(.venv) ' (set_color normal)
|
| 60 |
+
|
| 61 |
+
# Restore the return status of the previous command.
|
| 62 |
+
echo "exit $old_status" | .
|
| 63 |
+
# Output the original/"old" prompt.
|
| 64 |
+
_old_fish_prompt
|
| 65 |
+
end
|
| 66 |
+
|
| 67 |
+
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
|
| 68 |
+
set -gx VIRTUAL_ENV_PROMPT '(.venv) '
|
| 69 |
+
end
|
platform/dataops/dto/.venv/bin/coverage
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from coverage.cmdline import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/coverage-3.12
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from coverage.cmdline import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/coverage3
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from coverage.cmdline import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/f2py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from numpy.f2py.f2py2e import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/geomet
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from geomet.tool import cli
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(cli())
|
platform/dataops/dto/.venv/bin/ghp-import
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from ghp_import import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/huggingface-cli
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from huggingface_hub.commands.huggingface_cli import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/jsonschema
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from jsonschema.cli import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/markdown-it
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from markdown_it.cli.parse import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/markdown_py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from markdown.__main__ import run
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(run())
|
platform/dataops/dto/.venv/bin/mkdocs
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from mkdocs.__main__ import cli
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(cli())
|
platform/dataops/dto/.venv/bin/normalizer
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from charset_normalizer.cli import cli_detect
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(cli_detect())
|
platform/dataops/dto/.venv/bin/pip
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
from pip._internal.cli.main import main
|
| 6 |
+
if __name__ == '__main__':
|
| 7 |
+
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
| 8 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/pip3
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
from pip._internal.cli.main import main
|
| 6 |
+
if __name__ == '__main__':
|
| 7 |
+
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
| 8 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/pip3.12
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
from pip._internal.cli.main import main
|
| 6 |
+
if __name__ == '__main__':
|
| 7 |
+
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
| 8 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/py.test
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from pytest import console_main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(console_main())
|
platform/dataops/dto/.venv/bin/pybabel
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from babel.messages.frontend import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/pygmentize
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from pygments.cmdline import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/pytest
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from pytest import console_main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(console_main())
|
platform/dataops/dto/.venv/bin/python
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2845674b3fae665f5f6c3f01674fefa4e495f15f5d755ee8f7f1a40575abdc8a
|
| 3 |
+
size 8021824
|
platform/dataops/dto/.venv/bin/python3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2845674b3fae665f5f6c3f01674fefa4e495f15f5d755ee8f7f1a40575abdc8a
|
| 3 |
+
size 8021824
|
platform/dataops/dto/.venv/bin/python3.12
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2845674b3fae665f5f6c3f01674fefa4e495f15f5d755ee8f7f1a40575abdc8a
|
| 3 |
+
size 8021824
|
platform/dataops/dto/.venv/bin/tabulate
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from tabulate import _main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(_main())
|
platform/dataops/dto/.venv/bin/tqdm
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from tqdm.cli import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/watchmedo
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
import sys
|
| 3 |
+
from watchdog.watchmedo import main
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
if sys.argv[0].endswith('.exe'):
|
| 6 |
+
sys.argv[0] = sys.argv[0][:-4]
|
| 7 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/bin/wheel
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/data/adaptai/platform/dataops/dto/.venv/bin/python
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
from wheel.cli import main
|
| 6 |
+
if __name__ == '__main__':
|
| 7 |
+
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
| 8 |
+
sys.exit(main())
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/ghp_import.cpython-312.pyc
ADDED
|
Binary file (14.9 kB). View file
|
|
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/graphyte.cpython-312.pyc
ADDED
|
Binary file (12.5 kB). View file
|
|
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/py.cpython-312.pyc
ADDED
|
Binary file (411 Bytes). View file
|
|
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/six.cpython-312.pyc
ADDED
|
Binary file (41.4 kB). View file
|
|
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/typing_extensions.cpython-312.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4d7772347fc6748849d8217d2725591dc5b423d1ccf8accacfe5a9151fab7f83
|
| 3 |
+
size 163754
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/__pycache__/yaml_env_tag.cpython-312.pyc
ADDED
|
Binary file (2.33 kB). View file
|
|
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/_cffi_backend.cpython-312-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f9f2bad1b902b9dafab63007b78740df1fc21da396815b3f2dbd89d0918edcfa
|
| 3 |
+
size 1114632
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/DESCRIPTION.rst
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Tulip is the codename for my reference implementation of PEP 3156.
|
| 2 |
+
|
| 3 |
+
PEP 3156: http://www.python.org/dev/peps/pep-3156/
|
| 4 |
+
|
| 5 |
+
*** This requires Python 3.3 or later! ***
|
| 6 |
+
|
| 7 |
+
Copyright/license: Open source, Apache 2.0. Enjoy.
|
| 8 |
+
|
| 9 |
+
Master Mercurial repo: http://code.google.com/p/tulip/
|
| 10 |
+
|
| 11 |
+
The actual code lives in the 'asyncio' subdirectory.
|
| 12 |
+
Tests are in the 'tests' subdirectory.
|
| 13 |
+
|
| 14 |
+
To run tests:
|
| 15 |
+
- make test
|
| 16 |
+
|
| 17 |
+
To run coverage (coverage package is required):
|
| 18 |
+
- make coverage
|
| 19 |
+
|
| 20 |
+
On Windows, things are a little more complicated. Assume 'P' is your
|
| 21 |
+
Python binary (for example C:\Python33\python.exe).
|
| 22 |
+
|
| 23 |
+
You must first build the _overlapped.pyd extension and have it placed
|
| 24 |
+
in the asyncio directory, as follows:
|
| 25 |
+
|
| 26 |
+
C> P setup.py build_ext --inplace
|
| 27 |
+
|
| 28 |
+
If this complains about vcvars.bat, you probably don't have the
|
| 29 |
+
required version of Visual Studio installed. Compiling extensions for
|
| 30 |
+
Python 3.3 requires Microsoft Visual C++ 2010 (MSVC 10.0) of any
|
| 31 |
+
edition; you can download Visual Studio Express 2010 for free from
|
| 32 |
+
http://www.visualstudio.com/downloads (scroll down to Visual C++ 2010
|
| 33 |
+
Express).
|
| 34 |
+
|
| 35 |
+
Once you have built the _overlapped.pyd extension successfully you can
|
| 36 |
+
run the tests as follows:
|
| 37 |
+
|
| 38 |
+
C> P runtests.py
|
| 39 |
+
|
| 40 |
+
And coverage as follows:
|
| 41 |
+
|
| 42 |
+
C> P runtests.py --coverage
|
| 43 |
+
|
| 44 |
+
--Guido van Rossum <guido@python.org>
|
| 45 |
+
|
| 46 |
+
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/METADATA
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.0
|
| 2 |
+
Name: asyncio
|
| 3 |
+
Version: 3.4.3
|
| 4 |
+
Summary: reference implementation of PEP 3156
|
| 5 |
+
Home-page: http://www.python.org/dev/peps/pep-3156/
|
| 6 |
+
Author: UNKNOWN
|
| 7 |
+
Author-email: UNKNOWN
|
| 8 |
+
License: UNKNOWN
|
| 9 |
+
Platform: UNKNOWN
|
| 10 |
+
Classifier: Programming Language :: Python
|
| 11 |
+
Classifier: Programming Language :: Python :: 3
|
| 12 |
+
Classifier: Programming Language :: Python :: 3.3
|
| 13 |
+
|
| 14 |
+
Tulip is the codename for my reference implementation of PEP 3156.
|
| 15 |
+
|
| 16 |
+
PEP 3156: http://www.python.org/dev/peps/pep-3156/
|
| 17 |
+
|
| 18 |
+
*** This requires Python 3.3 or later! ***
|
| 19 |
+
|
| 20 |
+
Copyright/license: Open source, Apache 2.0. Enjoy.
|
| 21 |
+
|
| 22 |
+
Master Mercurial repo: http://code.google.com/p/tulip/
|
| 23 |
+
|
| 24 |
+
The actual code lives in the 'asyncio' subdirectory.
|
| 25 |
+
Tests are in the 'tests' subdirectory.
|
| 26 |
+
|
| 27 |
+
To run tests:
|
| 28 |
+
- make test
|
| 29 |
+
|
| 30 |
+
To run coverage (coverage package is required):
|
| 31 |
+
- make coverage
|
| 32 |
+
|
| 33 |
+
On Windows, things are a little more complicated. Assume 'P' is your
|
| 34 |
+
Python binary (for example C:\Python33\python.exe).
|
| 35 |
+
|
| 36 |
+
You must first build the _overlapped.pyd extension and have it placed
|
| 37 |
+
in the asyncio directory, as follows:
|
| 38 |
+
|
| 39 |
+
C> P setup.py build_ext --inplace
|
| 40 |
+
|
| 41 |
+
If this complains about vcvars.bat, you probably don't have the
|
| 42 |
+
required version of Visual Studio installed. Compiling extensions for
|
| 43 |
+
Python 3.3 requires Microsoft Visual C++ 2010 (MSVC 10.0) of any
|
| 44 |
+
edition; you can download Visual Studio Express 2010 for free from
|
| 45 |
+
http://www.visualstudio.com/downloads (scroll down to Visual C++ 2010
|
| 46 |
+
Express).
|
| 47 |
+
|
| 48 |
+
Once you have built the _overlapped.pyd extension successfully you can
|
| 49 |
+
run the tests as follows:
|
| 50 |
+
|
| 51 |
+
C> P runtests.py
|
| 52 |
+
|
| 53 |
+
And coverage as follows:
|
| 54 |
+
|
| 55 |
+
C> P runtests.py --coverage
|
| 56 |
+
|
| 57 |
+
--Guido van Rossum <guido@python.org>
|
| 58 |
+
|
| 59 |
+
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/RECORD
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
asyncio-3.4.3.dist-info/DESCRIPTION.rst,sha256=bAZjof00nusb_zrWWkv9SvQx3MTvI2hct4h5kztkF0E,1299
|
| 2 |
+
asyncio-3.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 3 |
+
asyncio-3.4.3.dist-info/METADATA,sha256=ugM5pmv1pGpcH0_Ank3SlGH2KWhm5hBjfbo9sku6QT4,1663
|
| 4 |
+
asyncio-3.4.3.dist-info/RECORD,,
|
| 5 |
+
asyncio-3.4.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
asyncio-3.4.3.dist-info/WHEEL,sha256=-aSo8rHuuPDEFzkcqqQ55pDyCjy25bYMLxSiHWKAOTc,92
|
| 7 |
+
asyncio-3.4.3.dist-info/metadata.json,sha256=ZEfpR-htrGXt614v324AATAhUocWXJ20fLm37COMCII,445
|
| 8 |
+
asyncio-3.4.3.dist-info/top_level.txt,sha256=WprmKZDC2yB-f6r9Mj2UujoXsgkJEZ8TjyElgaL16T8,8
|
| 9 |
+
asyncio/__init__.py,sha256=KJCsXiIYG2d4fOB8PT6xHAu-0XrixyIx51KFn0gY9WY,1436
|
| 10 |
+
asyncio/__pycache__/__init__.cpython-312.pyc,,
|
| 11 |
+
asyncio/__pycache__/base_subprocess.cpython-312.pyc,,
|
| 12 |
+
asyncio/__pycache__/constants.cpython-312.pyc,,
|
| 13 |
+
asyncio/__pycache__/coroutines.cpython-312.pyc,,
|
| 14 |
+
asyncio/__pycache__/events.cpython-312.pyc,,
|
| 15 |
+
asyncio/__pycache__/futures.cpython-312.pyc,,
|
| 16 |
+
asyncio/__pycache__/locks.cpython-312.pyc,,
|
| 17 |
+
asyncio/__pycache__/log.cpython-312.pyc,,
|
| 18 |
+
asyncio/__pycache__/proactor_events.cpython-312.pyc,,
|
| 19 |
+
asyncio/__pycache__/protocols.cpython-312.pyc,,
|
| 20 |
+
asyncio/__pycache__/queues.cpython-312.pyc,,
|
| 21 |
+
asyncio/__pycache__/selector_events.cpython-312.pyc,,
|
| 22 |
+
asyncio/__pycache__/selectors.cpython-312.pyc,,
|
| 23 |
+
asyncio/__pycache__/sslproto.cpython-312.pyc,,
|
| 24 |
+
asyncio/__pycache__/streams.cpython-312.pyc,,
|
| 25 |
+
asyncio/__pycache__/subprocess.cpython-312.pyc,,
|
| 26 |
+
asyncio/__pycache__/test_support.cpython-312.pyc,,
|
| 27 |
+
asyncio/__pycache__/test_utils.cpython-312.pyc,,
|
| 28 |
+
asyncio/__pycache__/transports.cpython-312.pyc,,
|
| 29 |
+
asyncio/__pycache__/unix_events.cpython-312.pyc,,
|
| 30 |
+
asyncio/__pycache__/windows_utils.cpython-312.pyc,,
|
| 31 |
+
asyncio/base_events.py,sha256=MLq0JllHep1jnrK22Z0cDyZ1jUoI79Pclp5vdxIpYN8,44946
|
| 32 |
+
asyncio/base_subprocess.py,sha256=X2P3bKLmODZRVc5PO5xe3LDbiOE8rcbS07J5N_O92cg,8399
|
| 33 |
+
asyncio/constants.py,sha256=I8qh6SMz71N8m8gnzhSAsFQAnnZcDFMi9ZGEjapFAPQ,195
|
| 34 |
+
asyncio/coroutines.py,sha256=ueF6INxKWXIzLclt8d3s2NqVzvIAtPTtP5HM5fXxcCA,6239
|
| 35 |
+
asyncio/events.py,sha256=Gu43DTec0XH_0MNJufn-P3eDZqF0RReuhOg565z6_j8,19232
|
| 36 |
+
asyncio/futures.py,sha256=idhT72s5Hd7-EntrEaisqCFXrvGJp3alcm_eFazSWh8,14625
|
| 37 |
+
asyncio/locks.py,sha256=p4WipPNrbH9_sbO2jW-0kXNhVzxVJI-9y_vVKiwZcbA,14408
|
| 38 |
+
asyncio/log.py,sha256=gOTMPe1LE4urpIZRnnREgBoj1qw18inTNqQHqWr36NI,124
|
| 39 |
+
asyncio/proactor_events.py,sha256=vxaoliRdAiiZcvoFSdAGfjHgXS_Cqpvcg0xJFVFZROA,20124
|
| 40 |
+
asyncio/protocols.py,sha256=_5E_og4fDaGDxDHhfv8YbTi3j58oKzOCHbPauM9xyxA,4512
|
| 41 |
+
asyncio/queues.py,sha256=phBUIoLwYntH1Q-FbkF8WkpL5deXW0AyV5EvgzTHxPQ,9429
|
| 42 |
+
asyncio/selector_events.py,sha256=E6K6_zHmyDsxcTDnYU4x9fFvl2md9pKxt1zqTDS02bQ,39120
|
| 43 |
+
asyncio/selectors.py,sha256=DX7fB18eAkOULdk64sI5tDuQxJMMbzrX3Fp-luquwAw,18697
|
| 44 |
+
asyncio/sslproto.py,sha256=uf4oVigVSARAzvuYnH0isAA2pkL9-sGCUQu36XneBTc,24997
|
| 45 |
+
asyncio/streams.py,sha256=N5UKnRFXfrlDMghprdpUIkrz-EGy_N-5QOm5uxZnYfM,16167
|
| 46 |
+
asyncio/subprocess.py,sha256=4duwmg4q5nXGyXuQoW3fMXTg6J-WGzRgUr58pS5t6cQ,7223
|
| 47 |
+
asyncio/tasks.py,sha256=XMd-WRyzbzfAVitZhCmoll0enCuzG8ox6kgTmKgvsKc,23027
|
| 48 |
+
asyncio/test_support.py,sha256=RXgLvJKTIUZaEX27CsV0OJYpj1F5wwijc1SHHcZfYfQ,12675
|
| 49 |
+
asyncio/test_utils.py,sha256=8YuV10aAc8Y6lc-nJ6-1qfybxAwatNCW0Rpfzp1IR9M,12554
|
| 50 |
+
asyncio/transports.py,sha256=LKBC2n4LDmIwx-6_Mpnfcv7hJAWHUIxeblSb3-IUOrs,9941
|
| 51 |
+
asyncio/unix_events.py,sha256=XIfdOtdVtqLnnLM-yFmjXSuebdCmThY2BrCZ2IAocnI,34333
|
| 52 |
+
asyncio/windows_events.py,sha256=exfmTU03t2_cgcmplHmZQVDIYkKHvUGGWdsaKFZQ-00,27697
|
| 53 |
+
asyncio/windows_utils.py,sha256=ytNJq6mfUiVvBZqryHHmb4UrtZg7jbPNTkO0Myyua7c,6844
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/REQUESTED
ADDED
|
File without changes
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.24.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/metadata.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"version": "3.4.3", "name": "asyncio", "metadata_version": "2.0", "summary": "reference implementation of PEP 3156", "generator": "bdist_wheel (0.24.0)", "classifiers": ["Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3"], "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://www.python.org/dev/peps/pep-3156/"}}}}
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio-3.4.3.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
asyncio
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio/__init__.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""The asyncio package, tracking PEP 3156."""
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
|
| 6 |
+
# Do this first, so the other submodules can use "from . import selectors".
|
| 7 |
+
# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
|
| 8 |
+
try:
|
| 9 |
+
from . import selectors
|
| 10 |
+
except ImportError:
|
| 11 |
+
import selectors # Will also be exported.
|
| 12 |
+
|
| 13 |
+
if sys.platform == 'win32':
|
| 14 |
+
# Similar thing for _overlapped.
|
| 15 |
+
try:
|
| 16 |
+
from . import _overlapped
|
| 17 |
+
except ImportError:
|
| 18 |
+
import _overlapped # Will also be exported.
|
| 19 |
+
|
| 20 |
+
# This relies on each of the submodules having an __all__ variable.
|
| 21 |
+
from .base_events import *
|
| 22 |
+
from .coroutines import *
|
| 23 |
+
from .events import *
|
| 24 |
+
from .futures import *
|
| 25 |
+
from .locks import *
|
| 26 |
+
from .protocols import *
|
| 27 |
+
from .queues import *
|
| 28 |
+
from .streams import *
|
| 29 |
+
from .subprocess import *
|
| 30 |
+
from .tasks import *
|
| 31 |
+
from .transports import *
|
| 32 |
+
|
| 33 |
+
__all__ = (base_events.__all__ +
|
| 34 |
+
coroutines.__all__ +
|
| 35 |
+
events.__all__ +
|
| 36 |
+
futures.__all__ +
|
| 37 |
+
locks.__all__ +
|
| 38 |
+
protocols.__all__ +
|
| 39 |
+
queues.__all__ +
|
| 40 |
+
streams.__all__ +
|
| 41 |
+
subprocess.__all__ +
|
| 42 |
+
tasks.__all__ +
|
| 43 |
+
transports.__all__)
|
| 44 |
+
|
| 45 |
+
if sys.platform == 'win32': # pragma: no cover
|
| 46 |
+
from .windows_events import *
|
| 47 |
+
__all__ += windows_events.__all__
|
| 48 |
+
else:
|
| 49 |
+
from .unix_events import * # pragma: no cover
|
| 50 |
+
__all__ += unix_events.__all__
|
platform/dataops/dto/.venv/lib/python3.12/site-packages/asyncio/base_events.py
ADDED
|
@@ -0,0 +1,1179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Base implementation of event loop.
|
| 2 |
+
|
| 3 |
+
The event loop can be broken up into a multiplexer (the part
|
| 4 |
+
responsible for notifying us of I/O events) and the event loop proper,
|
| 5 |
+
which wraps a multiplexer with functionality for scheduling callbacks,
|
| 6 |
+
immediately or at a given time in the future.
|
| 7 |
+
|
| 8 |
+
Whenever a public API takes a callback, subsequent positional
|
| 9 |
+
arguments will be passed to the callback if/when it is called. This
|
| 10 |
+
avoids the proliferation of trivial lambdas implementing closures.
|
| 11 |
+
Keyword arguments for the callback are not supported; this is a
|
| 12 |
+
conscious design decision, leaving the door open for keyword arguments
|
| 13 |
+
to modify the meaning of the API call itself.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
import collections
|
| 18 |
+
import concurrent.futures
|
| 19 |
+
import heapq
|
| 20 |
+
import inspect
|
| 21 |
+
import logging
|
| 22 |
+
import os
|
| 23 |
+
import socket
|
| 24 |
+
import subprocess
|
| 25 |
+
import threading
|
| 26 |
+
import time
|
| 27 |
+
import traceback
|
| 28 |
+
import sys
|
| 29 |
+
import warnings
|
| 30 |
+
|
| 31 |
+
from . import coroutines
|
| 32 |
+
from . import events
|
| 33 |
+
from . import futures
|
| 34 |
+
from . import tasks
|
| 35 |
+
from .coroutines import coroutine
|
| 36 |
+
from .log import logger
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Only the loop base class is public; helpers below are module-internal.
__all__ = ['BaseEventLoop']


# Argument for default thread pool executor creation.
_MAX_WORKERS = 5

# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100

# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
|
| 52 |
+
|
| 53 |
+
def _format_handle(handle):
    """Render *handle* for log output.

    When the handle's callback is a bound method of a Task, show the
    Task's repr instead of the generic handle text.
    """
    callback = handle._callback
    if inspect.ismethod(callback) and isinstance(callback.__self__, tasks.Task):
        # The task repr is far more informative than the handle text.
        return repr(callback.__self__)
    return str(handle)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _format_pipe(fd):
    """Return a short human-readable label for a subprocess pipe value."""
    for marker, label in ((subprocess.PIPE, '<pipe>'),
                          (subprocess.STDOUT, '<stdout>')):
        if fd == marker:
            return label
    return repr(fd)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class _StopError(BaseException):
    """Raised to stop the event loop."""
    # Derives from BaseException (not Exception) so handlers that catch
    # plain Exception do not accidentally swallow the stop signal.
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _check_resolved_address(sock, address):
    """Validate that *address* holds a numeric (already resolved) IP host.

    Raise ValueError when the host part would require a DNS lookup, which
    would otherwise hang the whole event loop.  Only AF_INET/AF_INET6
    sockets are checked; other families are ignored.  This check is
    comparatively slow and is intended for debug mode only.
    """
    family = sock.family
    if family == socket.AF_INET:
        host, port = address
    elif family == socket.AF_INET6:
        host, port = address[:2]
    else:
        return

    if hasattr(socket, 'inet_pton'):
        # inet_pton() is much cheaper than getaddrinfo() and avoids its
        # known issues, so prefer it when the platform provides it.
        # (On Windows it only exists since Python 3.4.)
        try:
            socket.inet_pton(family, host)
        except OSError as exc:
            raise ValueError("address must be resolved (IP address), "
                             "got host %r: %s"
                             % (host, exc))
        return

    # Fall back to getaddrinfo(flags=AI_NUMERICHOST), which fails fast
    # when the host is not already a numeric address.
    type_mask = 0
    for flag_name in ('SOCK_NONBLOCK', 'SOCK_CLOEXEC'):
        type_mask |= getattr(socket, flag_name, 0)
    try:
        socket.getaddrinfo(host, port,
                           family=family,
                           type=(sock.type & ~type_mask),
                           proto=sock.proto,
                           flags=socket.AI_NUMERICHOST)
    except socket.gaierror as err:
        raise ValueError("address must be resolved (IP address), "
                         "got host %r: %s"
                         % (host, err))
|
| 118 |
+
|
| 119 |
+
def _raise_stop_error(*args):
    # Callback helper scheduled by stop(): ignore any positional
    # arguments (so it can be used directly as a loop callback) and
    # raise _StopError to make run_forever() unwind.
    raise _StopError
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _run_until_complete_cb(fut):
    # Done-callback installed by run_until_complete() to stop the loop
    # once the awaited future finishes.
    exc = fut._exception
    if (isinstance(exc, BaseException)
            and not isinstance(exc, Exception)):
        # Issue #22429: run_forever() already finished, no need to
        # stop it.
        return
    _raise_stop_error()
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class Server(events.AbstractServer):
    """Server object returned by create_server().

    Tracks the listening sockets plus the number of still-active
    accepted transports, so wait_closed() can block until both the
    listeners and the connections are gone.
    """

    def __init__(self, loop, sockets):
        self._loop = loop
        # Listening sockets; set to None once close() has been called.
        self.sockets = sockets
        # Number of transports currently attached (accepted connections).
        self._active_count = 0
        # Futures created by wait_closed(); None once _wakeup() ran.
        self._waiters = []

    def __repr__(self):
        return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets)

    def _attach(self):
        # Called by a transport when an accepted connection starts.
        assert self.sockets is not None
        self._active_count += 1

    def _detach(self):
        # Called by a transport when an accepted connection ends; wake up
        # wait_closed() callers once the server is closed and idle.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self.sockets is None:
            self._wakeup()

    def close(self):
        """Stop serving: close all listening sockets (idempotent)."""
        sockets = self.sockets
        if sockets is None:
            return
        self.sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        if self._active_count == 0:
            self._wakeup()

    def _wakeup(self):
        # Resolve every pending wait_closed() future exactly once.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    @coroutine
    def wait_closed(self):
        """Coroutine that blocks until the server is fully closed."""
        if self.sockets is None or self._waiters is None:
            # Already closed and drained: nothing to wait for.
            return
        waiter = futures.Future(loop=self._loop)
        self._waiters.append(waiter)
        yield from waiter
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
class BaseEventLoop(events.AbstractEventLoop):
|
| 181 |
+
|
| 182 |
+
    def __init__(self):
        # Count of cancelled TimerHandles still in _scheduled; used to
        # decide when compacting the timer heap is worthwhile.
        self._timer_cancelled_count = 0
        self._closed = False
        # FIFO of Handles ready to run on the next loop iteration.
        self._ready = collections.deque()
        # Min-heap of TimerHandles ordered by scheduled time.
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        # Debug mode follows PYTHONASYNCIODEBUG unless -E suppressed the env.
        self._debug = (not sys.flags.ignore_environment
                       and bool(os.environ.get('PYTHONASYNCIODEBUG')))
        # In debug mode, if the execution of a callback or a step of a task
        # exceed this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
|
| 200 |
+
|
| 201 |
+
def __repr__(self):
|
| 202 |
+
return ('<%s running=%s closed=%s debug=%s>'
|
| 203 |
+
% (self.__class__.__name__, self.is_running(),
|
| 204 |
+
self.is_closed(), self.get_debug()))
|
| 205 |
+
|
| 206 |
+
    def create_task(self, coro):
        """Schedule a coroutine object.

        Return a task object.
        """
        self._check_closed()
        task = tasks.Task(coro, loop=self)
        if task._source_traceback:
            # Drop the frame for this create_task() call so the recorded
            # source traceback points at the user's code.
            del task._source_traceback[-1]
        return task
|
| 216 |
+
|
| 217 |
+
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport.

        Overridden by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
|
| 221 |
+
|
| 222 |
+
    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
                            *, server_side=False, server_hostname=None,
                            extra=None, server=None):
        """Create SSL transport.

        Overridden by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
|
| 227 |
+
|
| 228 |
+
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create datagram transport.

        Overridden by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
|
| 232 |
+
|
| 233 |
+
    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport.

        Overridden by platform-specific event-loop subclasses.
        """
        raise NotImplementedError
|
| 237 |
+
|
| 238 |
+
    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport.

        Overridden by platform-specific event-loop subclasses.
        """
        raise NotImplementedError
|
| 242 |
+
|
| 243 |
+
    @coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        """Create subprocess transport.

        Overridden by platform-specific event-loop subclasses.
        """
        raise NotImplementedError
|
| 249 |
+
|
| 250 |
+
    def _write_to_self(self):
        """Write a byte to self-pipe, to wake up the event loop.

        This may be called from a different thread.

        The subclass is responsible for implementing the self-pipe;
        see call_soon_threadsafe() for the caller.
        """
        raise NotImplementedError
|
| 258 |
+
|
| 259 |
+
    def _process_events(self, event_list):
        """Process selector events.

        Overridden by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
|
| 262 |
+
|
| 263 |
+
    def _check_closed(self):
        # Guard used by the scheduling methods: a closed loop must not
        # accept new callbacks or tasks.
        if self._closed:
            raise RuntimeError('Event loop is closed')
|
| 266 |
+
|
| 267 |
+
    def run_forever(self):
        """Run until stop() is called."""
        self._check_closed()
        if self.is_running():
            raise RuntimeError('Event loop is running.')
        # Record the owning thread so _check_thread() can detect misuse
        # from other threads while the loop runs.
        self._thread_id = threading.get_ident()
        try:
            while True:
                try:
                    self._run_once()
                except _StopError:
                    # Raised via stop(); exit the loop cleanly.
                    break
        finally:
            self._thread_id = None
|
| 281 |
+
|
| 282 |
+
def run_until_complete(self, future):
|
| 283 |
+
"""Run until the Future is done.
|
| 284 |
+
|
| 285 |
+
If the argument is a coroutine, it is wrapped in a Task.
|
| 286 |
+
|
| 287 |
+
WARNING: It would be disastrous to call run_until_complete()
|
| 288 |
+
with the same coroutine twice -- it would wrap it in two
|
| 289 |
+
different Tasks and that can't be good.
|
| 290 |
+
|
| 291 |
+
Return the Future's result, or raise its exception.
|
| 292 |
+
"""
|
| 293 |
+
self._check_closed()
|
| 294 |
+
|
| 295 |
+
new_task = not isinstance(future, futures.Future)
|
| 296 |
+
future = tasks.async(future, loop=self)
|
| 297 |
+
if new_task:
|
| 298 |
+
# An exception is raised if the future didn't complete, so there
|
| 299 |
+
# is no need to log the "destroy pending task" message
|
| 300 |
+
future._log_destroy_pending = False
|
| 301 |
+
|
| 302 |
+
future.add_done_callback(_run_until_complete_cb)
|
| 303 |
+
try:
|
| 304 |
+
self.run_forever()
|
| 305 |
+
except:
|
| 306 |
+
if new_task and future.done() and not future.cancelled():
|
| 307 |
+
# The coroutine raised a BaseException. Consume the exception
|
| 308 |
+
# to not log a warning, the caller doesn't have access to the
|
| 309 |
+
# local task.
|
| 310 |
+
future.exception()
|
| 311 |
+
raise
|
| 312 |
+
future.remove_done_callback(_run_until_complete_cb)
|
| 313 |
+
if not future.done():
|
| 314 |
+
raise RuntimeError('Event loop stopped before Future completed.')
|
| 315 |
+
|
| 316 |
+
return future.result()
|
| 317 |
+
|
| 318 |
+
    def stop(self):
        """Stop running the event loop.

        Every callback scheduled before stop() is called will run. Callbacks
        scheduled after stop() is called will not run. However, those callbacks
        will run if run_forever is called again later.
        """
        # Scheduling the raiser (rather than raising here) lets callbacks
        # already queued before stop() run first.
        self.call_soon(_raise_stop_error)
|
| 326 |
+
|
| 327 |
+
    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the executor,
        but does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            # Idempotent: closing twice is a no-op.
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        executor = self._default_executor
        if executor is not None:
            # Detach before shutting down so no new work is submitted.
            self._default_executor = None
            executor.shutdown(wait=False)
|
| 348 |
+
|
| 349 |
+
    def is_closed(self):
        """Returns True if the event loop was closed."""
        # Set (permanently) by close().
        return self._closed
|
| 352 |
+
|
| 353 |
+
    # On Python 3.3 and older, objects with a destructor part of a reference
    # cycle are never destroyed. It's not more the case on Python 3.4 thanks
    # to the PEP 442.
    if sys.version_info >= (3, 4):
        def __del__(self):
            # Warn about (and best-effort close) loops that are garbage
            # collected while still open; skip closing a running loop.
            if not self.is_closed():
                warnings.warn("unclosed event loop %r" % self, ResourceWarning)
                if not self.is_running():
                    self.close()
|
| 362 |
+
|
| 363 |
+
    def is_running(self):
        """Returns True if the event loop is running."""
        # _thread_id is set by run_forever() and cleared when it exits.
        return (self._thread_id is not None)
|
| 366 |
+
|
| 367 |
+
    def time(self):
        """Return the time according to the event loop's clock.

        This is a float expressed in seconds since an epoch, but the
        epoch, precision, accuracy and drift are unspecified and may
        differ per event loop.
        """
        # Monotonic, so scheduled timeouts are immune to wall-clock jumps.
        return time.monotonic()
|
| 375 |
+
|
| 376 |
+
    def call_later(self, delay, callback, *args):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds. It is
        always relative to the current time.

        Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it undefined which
        will be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        timer = self.call_at(self.time() + delay, callback, *args)
        if timer._source_traceback:
            # Drop this wrapper's frame so the recorded traceback points
            # at the caller, not at call_later() itself.
            del timer._source_traceback[-1]
        return timer
|
| 396 |
+
|
| 397 |
+
    def call_at(self, when, callback, *args):
        """Like call_later(), but uses an absolute time.

        Absolute time corresponds to the event loop's time() method.
        """
        if (coroutines.iscoroutine(callback)
                or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used with call_at()")
        self._check_closed()
        if self._debug:
            self._check_thread()
        timer = events.TimerHandle(when, callback, args, self)
        if timer._source_traceback:
            # Drop this frame so the traceback points at the caller.
            del timer._source_traceback[-1]
        # Timers live in a min-heap keyed by their scheduled time.
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
|
| 414 |
+
|
| 415 |
+
    def call_soon(self, callback, *args):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue: callbacks are called in the
        order in which they are registered. Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        if self._debug:
            self._check_thread()
        handle = self._call_soon(callback, args)
        if handle._source_traceback:
            # Drop this wrapper's frame so the traceback points at the caller.
            del handle._source_traceback[-1]
        return handle
|
| 431 |
+
|
| 432 |
+
    def _call_soon(self, callback, args):
        # Shared implementation behind call_soon() and
        # call_soon_threadsafe(); takes args as a tuple, not *args.
        if (coroutines.iscoroutine(callback)
                or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used with call_soon()")
        self._check_closed()
        handle = events.Handle(callback, args, self)
        if handle._source_traceback:
            # Drop this frame; callers drop their own frame too.
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle
|
| 442 |
+
|
| 443 |
+
    def _check_thread(self):
        """Check that the current thread is the thread running the event loop.

        Non-thread-safe methods of this class make this assumption and will
        likely behave incorrectly when the assumption is violated.

        Should only be called when (self._debug == True). The caller is
        responsible for checking this condition for performance reasons.
        """
        if self._thread_id is None:
            # Loop is not running: any thread may schedule work.
            return
        thread_id = threading.get_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")
|
| 459 |
+
|
| 460 |
+
    def call_soon_threadsafe(self, callback, *args):
        """Like call_soon(), but thread-safe."""
        handle = self._call_soon(callback, args)
        if handle._source_traceback:
            # Drop this wrapper's frame so the traceback points at the caller.
            del handle._source_traceback[-1]
        # Wake the loop's thread, which may be blocked in the selector.
        self._write_to_self()
        return handle
|
| 467 |
+
|
| 468 |
+
    def run_in_executor(self, executor, callback, *args):
        """Run callback(*args) in an executor; return a Future for the result.

        When *executor* is None, a default ThreadPoolExecutor is created
        lazily and reused.
        """
        if (coroutines.iscoroutine(callback)
                or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used with run_in_executor()")
        self._check_closed()
        if isinstance(callback, events.Handle):
            # Unwrap a Handle into its callback/args pair; a cancelled
            # handle short-circuits to an already-resolved Future.
            assert not args
            assert not isinstance(callback, events.TimerHandle)
            if callback._cancelled:
                f = futures.Future(loop=self)
                f.set_result(None)
                return f
            callback, args = callback._callback, callback._args
        if executor is None:
            executor = self._default_executor
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
                self._default_executor = executor
        return futures.wrap_future(executor.submit(callback, *args), loop=self)
|
| 487 |
+
|
| 488 |
+
    def set_default_executor(self, executor):
        """Set the executor used by run_in_executor() when none is given."""
        self._default_executor = executor
|
| 490 |
+
|
| 491 |
+
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        """socket.getaddrinfo() wrapper that logs arguments and timing.

        Used instead of the plain call when the loop is in debug mode;
        slow resolutions (>= slow_callback_duration) are logged at INFO.
        """
        msg = ["%s:%r" % (host, port)]
        if family:
            msg.append('family=%r' % family)
        if type:
            msg.append('type=%r' % type)
        if proto:
            msg.append('proto=%r' % proto)
        if flags:
            msg.append('flags=%r' % flags)
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)

        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0

        msg = ('Getting address info %s took %.3f ms: %r'
               % (msg, dt * 1e3, addrinfo))
        if dt >= self.slow_callback_duration:
            # Slow enough to be worth surfacing above debug level.
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo
|
| 515 |
+
|
| 516 |
+
    def getaddrinfo(self, host, port, *,
                    family=0, type=0, proto=0, flags=0):
        """Asynchronous socket.getaddrinfo(), run in the default executor.

        Returns a Future; in debug mode the instrumented wrapper is used.
        """
        if self._debug:
            return self.run_in_executor(None, self._getaddrinfo_debug,
                                        host, port, family, type, proto, flags)
        else:
            return self.run_in_executor(None, socket.getaddrinfo,
                                        host, port, family, type, proto, flags)
|
| 524 |
+
|
| 525 |
+
    def getnameinfo(self, sockaddr, flags=0):
        """Asynchronous socket.getnameinfo(), run in the default executor."""
        return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
|
| 527 |
+
|
| 528 |
+
    @coroutine
    def create_connection(self, protocol_factory, host=None, port=None, *,
                          ssl=None, family=0, proto=0, flags=0, sock=None,
                          local_addr=None, server_hostname=None):
        """Connect to a TCP server.

        Create a streaming transport connection to a given Internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM. protocol_factory must be
        a callable returning a protocol instance.

        This method is a coroutine which will try to establish the connection
        in the background. When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')

        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host

        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            # Resolve the remote (and optional local) addresses concurrently.
            f1 = self.getaddrinfo(
                host, port, family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags)
            fs = [f1]
            if local_addr is not None:
                f2 = self.getaddrinfo(
                    *local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto, flags=flags)
                fs.append(f2)
            else:
                f2 = None

            yield from tasks.wait(fs, loop=self)

            infos = f1.result()
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if f2 is not None:
                laddr_infos = f2.result()
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')

            # Try each resolved address in turn until one connect succeeds;
            # collect per-address errors to report them all on failure.
            exceptions = []
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    if f2 is not None:
                        for _, _, _, _, laddr in laddr_infos:
                            try:
                                sock.bind(laddr)
                                break
                            except OSError as exc:
                                exc = OSError(
                                    exc.errno, 'error while '
                                    'attempting to bind on address '
                                    '{!r}: {}'.format(
                                        laddr, exc.strerror.lower()))
                                exceptions.append(exc)
                        else:
                            # No local address could be bound: try next info.
                            sock.close()
                            sock = None
                            continue
                    if self._debug:
                        logger.debug("connect %r to %r", sock, address)
                    yield from self.sock_connect(sock, address)
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))

        elif sock is None:
            raise ValueError(
                'host and port was not specified and no sock specified')

        sock.setblocking(False)

        transport, protocol = yield from self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
|
| 651 |
+
|
| 652 |
+
@coroutine
|
| 653 |
+
def _create_connection_transport(self, sock, protocol_factory, ssl,
|
| 654 |
+
server_hostname):
|
| 655 |
+
protocol = protocol_factory()
|
| 656 |
+
waiter = futures.Future(loop=self)
|
| 657 |
+
if ssl:
|
| 658 |
+
sslcontext = None if isinstance(ssl, bool) else ssl
|
| 659 |
+
transport = self._make_ssl_transport(
|
| 660 |
+
sock, protocol, sslcontext, waiter,
|
| 661 |
+
server_side=False, server_hostname=server_hostname)
|
| 662 |
+
else:
|
| 663 |
+
transport = self._make_socket_transport(sock, protocol, waiter)
|
| 664 |
+
|
| 665 |
+
try:
|
| 666 |
+
yield from waiter
|
| 667 |
+
except:
|
| 668 |
+
transport.close()
|
| 669 |
+
raise
|
| 670 |
+
|
| 671 |
+
return transport, protocol
|
| 672 |
+
|
| 673 |
+
@coroutine
|
| 674 |
+
def create_datagram_endpoint(self, protocol_factory,
|
| 675 |
+
local_addr=None, remote_addr=None, *,
|
| 676 |
+
family=0, proto=0, flags=0):
|
| 677 |
+
"""Create datagram connection."""
|
| 678 |
+
if not (local_addr or remote_addr):
|
| 679 |
+
if family == 0:
|
| 680 |
+
raise ValueError('unexpected address family')
|
| 681 |
+
addr_pairs_info = (((family, proto), (None, None)),)
|
| 682 |
+
else:
|
| 683 |
+
# join address by (family, protocol)
|
| 684 |
+
addr_infos = collections.OrderedDict()
|
| 685 |
+
for idx, addr in ((0, local_addr), (1, remote_addr)):
|
| 686 |
+
if addr is not None:
|
| 687 |
+
assert isinstance(addr, tuple) and len(addr) == 2, (
|
| 688 |
+
'2-tuple is expected')
|
| 689 |
+
|
| 690 |
+
infos = yield from self.getaddrinfo(
|
| 691 |
+
*addr, family=family, type=socket.SOCK_DGRAM,
|
| 692 |
+
proto=proto, flags=flags)
|
| 693 |
+
if not infos:
|
| 694 |
+
raise OSError('getaddrinfo() returned empty list')
|
| 695 |
+
|
| 696 |
+
for fam, _, pro, _, address in infos:
|
| 697 |
+
key = (fam, pro)
|
| 698 |
+
if key not in addr_infos:
|
| 699 |
+
addr_infos[key] = [None, None]
|
| 700 |
+
addr_infos[key][idx] = address
|
| 701 |
+
|
| 702 |
+
# each addr has to have info for each (family, proto) pair
|
| 703 |
+
addr_pairs_info = [
|
| 704 |
+
(key, addr_pair) for key, addr_pair in addr_infos.items()
|
| 705 |
+
if not ((local_addr and addr_pair[0] is None) or
|
| 706 |
+
(remote_addr and addr_pair[1] is None))]
|
| 707 |
+
|
| 708 |
+
if not addr_pairs_info:
|
| 709 |
+
raise ValueError('can not get address information')
|
| 710 |
+
|
| 711 |
+
exceptions = []
|
| 712 |
+
|
| 713 |
+
for ((family, proto),
|
| 714 |
+
(local_address, remote_address)) in addr_pairs_info:
|
| 715 |
+
sock = None
|
| 716 |
+
r_addr = None
|
| 717 |
+
try:
|
| 718 |
+
sock = socket.socket(
|
| 719 |
+
family=family, type=socket.SOCK_DGRAM, proto=proto)
|
| 720 |
+
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
| 721 |
+
sock.setblocking(False)
|
| 722 |
+
|
| 723 |
+
if local_addr:
|
| 724 |
+
sock.bind(local_address)
|
| 725 |
+
if remote_addr:
|
| 726 |
+
yield from self.sock_connect(sock, remote_address)
|
| 727 |
+
r_addr = remote_address
|
| 728 |
+
except OSError as exc:
|
| 729 |
+
if sock is not None:
|
| 730 |
+
sock.close()
|
| 731 |
+
exceptions.append(exc)
|
| 732 |
+
except:
|
| 733 |
+
if sock is not None:
|
| 734 |
+
sock.close()
|
| 735 |
+
raise
|
| 736 |
+
else:
|
| 737 |
+
break
|
| 738 |
+
else:
|
| 739 |
+
raise exceptions[0]
|
| 740 |
+
|
| 741 |
+
protocol = protocol_factory()
|
| 742 |
+
waiter = futures.Future(loop=self)
|
| 743 |
+
transport = self._make_datagram_transport(sock, protocol, r_addr,
|
| 744 |
+
waiter)
|
| 745 |
+
if self._debug:
|
| 746 |
+
if local_addr:
|
| 747 |
+
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
|
| 748 |
+
"created: (%r, %r)",
|
| 749 |
+
local_addr, remote_addr, transport, protocol)
|
| 750 |
+
else:
|
| 751 |
+
logger.debug("Datagram endpoint remote_addr=%r created: "
|
| 752 |
+
"(%r, %r)",
|
| 753 |
+
remote_addr, transport, protocol)
|
| 754 |
+
|
| 755 |
+
try:
|
| 756 |
+
yield from waiter
|
| 757 |
+
except:
|
| 758 |
+
transport.close()
|
| 759 |
+
raise
|
| 760 |
+
|
| 761 |
+
return transport, protocol
|
| 762 |
+
|
| 763 |
+
@coroutine
|
| 764 |
+
def create_server(self, protocol_factory, host=None, port=None,
|
| 765 |
+
*,
|
| 766 |
+
family=socket.AF_UNSPEC,
|
| 767 |
+
flags=socket.AI_PASSIVE,
|
| 768 |
+
sock=None,
|
| 769 |
+
backlog=100,
|
| 770 |
+
ssl=None,
|
| 771 |
+
reuse_address=None):
|
| 772 |
+
"""Create a TCP server bound to host and port.
|
| 773 |
+
|
| 774 |
+
Return a Server object which can be used to stop the service.
|
| 775 |
+
|
| 776 |
+
This method is a coroutine.
|
| 777 |
+
"""
|
| 778 |
+
if isinstance(ssl, bool):
|
| 779 |
+
raise TypeError('ssl argument must be an SSLContext or None')
|
| 780 |
+
if host is not None or port is not None:
|
| 781 |
+
if sock is not None:
|
| 782 |
+
raise ValueError(
|
| 783 |
+
'host/port and sock can not be specified at the same time')
|
| 784 |
+
|
| 785 |
+
AF_INET6 = getattr(socket, 'AF_INET6', 0)
|
| 786 |
+
if reuse_address is None:
|
| 787 |
+
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
|
| 788 |
+
sockets = []
|
| 789 |
+
if host == '':
|
| 790 |
+
host = None
|
| 791 |
+
|
| 792 |
+
infos = yield from self.getaddrinfo(
|
| 793 |
+
host, port, family=family,
|
| 794 |
+
type=socket.SOCK_STREAM, proto=0, flags=flags)
|
| 795 |
+
if not infos:
|
| 796 |
+
raise OSError('getaddrinfo() returned empty list')
|
| 797 |
+
|
| 798 |
+
completed = False
|
| 799 |
+
try:
|
| 800 |
+
for res in infos:
|
| 801 |
+
af, socktype, proto, canonname, sa = res
|
| 802 |
+
try:
|
| 803 |
+
sock = socket.socket(af, socktype, proto)
|
| 804 |
+
except socket.error:
|
| 805 |
+
# Assume it's a bad family/type/protocol combination.
|
| 806 |
+
if self._debug:
|
| 807 |
+
logger.warning('create_server() failed to create '
|
| 808 |
+
'socket.socket(%r, %r, %r)',
|
| 809 |
+
af, socktype, proto, exc_info=True)
|
| 810 |
+
continue
|
| 811 |
+
sockets.append(sock)
|
| 812 |
+
if reuse_address:
|
| 813 |
+
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
|
| 814 |
+
True)
|
| 815 |
+
# Disable IPv4/IPv6 dual stack support (enabled by
|
| 816 |
+
# default on Linux) which makes a single socket
|
| 817 |
+
# listen on both address families.
|
| 818 |
+
if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
|
| 819 |
+
sock.setsockopt(socket.IPPROTO_IPV6,
|
| 820 |
+
socket.IPV6_V6ONLY,
|
| 821 |
+
True)
|
| 822 |
+
try:
|
| 823 |
+
sock.bind(sa)
|
| 824 |
+
except OSError as err:
|
| 825 |
+
raise OSError(err.errno, 'error while attempting '
|
| 826 |
+
'to bind on address %r: %s'
|
| 827 |
+
% (sa, err.strerror.lower()))
|
| 828 |
+
completed = True
|
| 829 |
+
finally:
|
| 830 |
+
if not completed:
|
| 831 |
+
for sock in sockets:
|
| 832 |
+
sock.close()
|
| 833 |
+
else:
|
| 834 |
+
if sock is None:
|
| 835 |
+
raise ValueError('Neither host/port nor sock were specified')
|
| 836 |
+
sockets = [sock]
|
| 837 |
+
|
| 838 |
+
server = Server(self, sockets)
|
| 839 |
+
for sock in sockets:
|
| 840 |
+
sock.listen(backlog)
|
| 841 |
+
sock.setblocking(False)
|
| 842 |
+
self._start_serving(protocol_factory, sock, ssl, server)
|
| 843 |
+
if self._debug:
|
| 844 |
+
logger.info("%r is serving", server)
|
| 845 |
+
return server
|
| 846 |
+
|
| 847 |
+
@coroutine
|
| 848 |
+
def connect_read_pipe(self, protocol_factory, pipe):
|
| 849 |
+
protocol = protocol_factory()
|
| 850 |
+
waiter = futures.Future(loop=self)
|
| 851 |
+
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
|
| 852 |
+
|
| 853 |
+
try:
|
| 854 |
+
yield from waiter
|
| 855 |
+
except:
|
| 856 |
+
transport.close()
|
| 857 |
+
raise
|
| 858 |
+
|
| 859 |
+
if self._debug:
|
| 860 |
+
logger.debug('Read pipe %r connected: (%r, %r)',
|
| 861 |
+
pipe.fileno(), transport, protocol)
|
| 862 |
+
return transport, protocol
|
| 863 |
+
|
| 864 |
+
@coroutine
|
| 865 |
+
def connect_write_pipe(self, protocol_factory, pipe):
|
| 866 |
+
protocol = protocol_factory()
|
| 867 |
+
waiter = futures.Future(loop=self)
|
| 868 |
+
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
|
| 869 |
+
|
| 870 |
+
try:
|
| 871 |
+
yield from waiter
|
| 872 |
+
except:
|
| 873 |
+
transport.close()
|
| 874 |
+
raise
|
| 875 |
+
|
| 876 |
+
if self._debug:
|
| 877 |
+
logger.debug('Write pipe %r connected: (%r, %r)',
|
| 878 |
+
pipe.fileno(), transport, protocol)
|
| 879 |
+
return transport, protocol
|
| 880 |
+
|
| 881 |
+
def _log_subprocess(self, msg, stdin, stdout, stderr):
|
| 882 |
+
info = [msg]
|
| 883 |
+
if stdin is not None:
|
| 884 |
+
info.append('stdin=%s' % _format_pipe(stdin))
|
| 885 |
+
if stdout is not None and stderr == subprocess.STDOUT:
|
| 886 |
+
info.append('stdout=stderr=%s' % _format_pipe(stdout))
|
| 887 |
+
else:
|
| 888 |
+
if stdout is not None:
|
| 889 |
+
info.append('stdout=%s' % _format_pipe(stdout))
|
| 890 |
+
if stderr is not None:
|
| 891 |
+
info.append('stderr=%s' % _format_pipe(stderr))
|
| 892 |
+
logger.debug(' '.join(info))
|
| 893 |
+
|
| 894 |
+
@coroutine
|
| 895 |
+
def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
|
| 896 |
+
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
| 897 |
+
universal_newlines=False, shell=True, bufsize=0,
|
| 898 |
+
**kwargs):
|
| 899 |
+
if not isinstance(cmd, (bytes, str)):
|
| 900 |
+
raise ValueError("cmd must be a string")
|
| 901 |
+
if universal_newlines:
|
| 902 |
+
raise ValueError("universal_newlines must be False")
|
| 903 |
+
if not shell:
|
| 904 |
+
raise ValueError("shell must be True")
|
| 905 |
+
if bufsize != 0:
|
| 906 |
+
raise ValueError("bufsize must be 0")
|
| 907 |
+
protocol = protocol_factory()
|
| 908 |
+
if self._debug:
|
| 909 |
+
# don't log parameters: they may contain sensitive information
|
| 910 |
+
# (password) and may be too long
|
| 911 |
+
debug_log = 'run shell command %r' % cmd
|
| 912 |
+
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
| 913 |
+
transport = yield from self._make_subprocess_transport(
|
| 914 |
+
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
|
| 915 |
+
if self._debug:
|
| 916 |
+
logger.info('%s: %r' % (debug_log, transport))
|
| 917 |
+
return transport, protocol
|
| 918 |
+
|
| 919 |
+
@coroutine
|
| 920 |
+
def subprocess_exec(self, protocol_factory, program, *args,
|
| 921 |
+
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
| 922 |
+
stderr=subprocess.PIPE, universal_newlines=False,
|
| 923 |
+
shell=False, bufsize=0, **kwargs):
|
| 924 |
+
if universal_newlines:
|
| 925 |
+
raise ValueError("universal_newlines must be False")
|
| 926 |
+
if shell:
|
| 927 |
+
raise ValueError("shell must be False")
|
| 928 |
+
if bufsize != 0:
|
| 929 |
+
raise ValueError("bufsize must be 0")
|
| 930 |
+
popen_args = (program,) + args
|
| 931 |
+
for arg in popen_args:
|
| 932 |
+
if not isinstance(arg, (str, bytes)):
|
| 933 |
+
raise TypeError("program arguments must be "
|
| 934 |
+
"a bytes or text string, not %s"
|
| 935 |
+
% type(arg).__name__)
|
| 936 |
+
protocol = protocol_factory()
|
| 937 |
+
if self._debug:
|
| 938 |
+
# don't log parameters: they may contain sensitive information
|
| 939 |
+
# (password) and may be too long
|
| 940 |
+
debug_log = 'execute program %r' % program
|
| 941 |
+
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
| 942 |
+
transport = yield from self._make_subprocess_transport(
|
| 943 |
+
protocol, popen_args, False, stdin, stdout, stderr,
|
| 944 |
+
bufsize, **kwargs)
|
| 945 |
+
if self._debug:
|
| 946 |
+
logger.info('%s: %r' % (debug_log, transport))
|
| 947 |
+
return transport, protocol
|
| 948 |
+
|
| 949 |
+
def set_exception_handler(self, handler):
|
| 950 |
+
"""Set handler as the new event loop exception handler.
|
| 951 |
+
|
| 952 |
+
If handler is None, the default exception handler will
|
| 953 |
+
be set.
|
| 954 |
+
|
| 955 |
+
If handler is a callable object, it should have a
|
| 956 |
+
signature matching '(loop, context)', where 'loop'
|
| 957 |
+
will be a reference to the active event loop, 'context'
|
| 958 |
+
will be a dict object (see `call_exception_handler()`
|
| 959 |
+
documentation for details about context).
|
| 960 |
+
"""
|
| 961 |
+
if handler is not None and not callable(handler):
|
| 962 |
+
raise TypeError('A callable object or None is expected, '
|
| 963 |
+
'got {!r}'.format(handler))
|
| 964 |
+
self._exception_handler = handler
|
| 965 |
+
|
| 966 |
+
def default_exception_handler(self, context):
|
| 967 |
+
"""Default exception handler.
|
| 968 |
+
|
| 969 |
+
This is called when an exception occurs and no exception
|
| 970 |
+
handler is set, and can be called by a custom exception
|
| 971 |
+
handler that wants to defer to the default behavior.
|
| 972 |
+
|
| 973 |
+
The context parameter has the same meaning as in
|
| 974 |
+
`call_exception_handler()`.
|
| 975 |
+
"""
|
| 976 |
+
message = context.get('message')
|
| 977 |
+
if not message:
|
| 978 |
+
message = 'Unhandled exception in event loop'
|
| 979 |
+
|
| 980 |
+
exception = context.get('exception')
|
| 981 |
+
if exception is not None:
|
| 982 |
+
exc_info = (type(exception), exception, exception.__traceback__)
|
| 983 |
+
else:
|
| 984 |
+
exc_info = False
|
| 985 |
+
|
| 986 |
+
if ('source_traceback' not in context
|
| 987 |
+
and self._current_handle is not None
|
| 988 |
+
and self._current_handle._source_traceback):
|
| 989 |
+
context['handle_traceback'] = self._current_handle._source_traceback
|
| 990 |
+
|
| 991 |
+
log_lines = [message]
|
| 992 |
+
for key in sorted(context):
|
| 993 |
+
if key in {'message', 'exception'}:
|
| 994 |
+
continue
|
| 995 |
+
value = context[key]
|
| 996 |
+
if key == 'source_traceback':
|
| 997 |
+
tb = ''.join(traceback.format_list(value))
|
| 998 |
+
value = 'Object created at (most recent call last):\n'
|
| 999 |
+
value += tb.rstrip()
|
| 1000 |
+
elif key == 'handle_traceback':
|
| 1001 |
+
tb = ''.join(traceback.format_list(value))
|
| 1002 |
+
value = 'Handle created at (most recent call last):\n'
|
| 1003 |
+
value += tb.rstrip()
|
| 1004 |
+
else:
|
| 1005 |
+
value = repr(value)
|
| 1006 |
+
log_lines.append('{}: {}'.format(key, value))
|
| 1007 |
+
|
| 1008 |
+
logger.error('\n'.join(log_lines), exc_info=exc_info)
|
| 1009 |
+
|
| 1010 |
+
def call_exception_handler(self, context):
|
| 1011 |
+
"""Call the current event loop's exception handler.
|
| 1012 |
+
|
| 1013 |
+
The context argument is a dict containing the following keys:
|
| 1014 |
+
|
| 1015 |
+
- 'message': Error message;
|
| 1016 |
+
- 'exception' (optional): Exception object;
|
| 1017 |
+
- 'future' (optional): Future instance;
|
| 1018 |
+
- 'handle' (optional): Handle instance;
|
| 1019 |
+
- 'protocol' (optional): Protocol instance;
|
| 1020 |
+
- 'transport' (optional): Transport instance;
|
| 1021 |
+
- 'socket' (optional): Socket instance.
|
| 1022 |
+
|
| 1023 |
+
New keys maybe introduced in the future.
|
| 1024 |
+
|
| 1025 |
+
Note: do not overload this method in an event loop subclass.
|
| 1026 |
+
For custom exception handling, use the
|
| 1027 |
+
`set_exception_handler()` method.
|
| 1028 |
+
"""
|
| 1029 |
+
if self._exception_handler is None:
|
| 1030 |
+
try:
|
| 1031 |
+
self.default_exception_handler(context)
|
| 1032 |
+
except Exception:
|
| 1033 |
+
# Second protection layer for unexpected errors
|
| 1034 |
+
# in the default implementation, as well as for subclassed
|
| 1035 |
+
# event loops with overloaded "default_exception_handler".
|
| 1036 |
+
logger.error('Exception in default exception handler',
|
| 1037 |
+
exc_info=True)
|
| 1038 |
+
else:
|
| 1039 |
+
try:
|
| 1040 |
+
self._exception_handler(self, context)
|
| 1041 |
+
except Exception as exc:
|
| 1042 |
+
# Exception in the user set custom exception handler.
|
| 1043 |
+
try:
|
| 1044 |
+
# Let's try default handler.
|
| 1045 |
+
self.default_exception_handler({
|
| 1046 |
+
'message': 'Unhandled error in exception handler',
|
| 1047 |
+
'exception': exc,
|
| 1048 |
+
'context': context,
|
| 1049 |
+
})
|
| 1050 |
+
except Exception:
|
| 1051 |
+
# Guard 'default_exception_handler' in case it is
|
| 1052 |
+
# overloaded.
|
| 1053 |
+
logger.error('Exception in default exception handler '
|
| 1054 |
+
'while handling an unexpected error '
|
| 1055 |
+
'in custom exception handler',
|
| 1056 |
+
exc_info=True)
|
| 1057 |
+
|
| 1058 |
+
def _add_callback(self, handle):
|
| 1059 |
+
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
|
| 1060 |
+
assert isinstance(handle, events.Handle), 'A Handle is required here'
|
| 1061 |
+
if handle._cancelled:
|
| 1062 |
+
return
|
| 1063 |
+
assert not isinstance(handle, events.TimerHandle)
|
| 1064 |
+
self._ready.append(handle)
|
| 1065 |
+
|
| 1066 |
+
def _add_callback_signalsafe(self, handle):
|
| 1067 |
+
"""Like _add_callback() but called from a signal handler."""
|
| 1068 |
+
self._add_callback(handle)
|
| 1069 |
+
self._write_to_self()
|
| 1070 |
+
|
| 1071 |
+
def _timer_handle_cancelled(self, handle):
|
| 1072 |
+
"""Notification that a TimerHandle has been cancelled."""
|
| 1073 |
+
if handle._scheduled:
|
| 1074 |
+
self._timer_cancelled_count += 1
|
| 1075 |
+
|
| 1076 |
+
def _run_once(self):
|
| 1077 |
+
"""Run one full iteration of the event loop.
|
| 1078 |
+
|
| 1079 |
+
This calls all currently ready callbacks, polls for I/O,
|
| 1080 |
+
schedules the resulting callbacks, and finally schedules
|
| 1081 |
+
'call_later' callbacks.
|
| 1082 |
+
"""
|
| 1083 |
+
|
| 1084 |
+
sched_count = len(self._scheduled)
|
| 1085 |
+
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
|
| 1086 |
+
self._timer_cancelled_count / sched_count >
|
| 1087 |
+
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
|
| 1088 |
+
# Remove delayed calls that were cancelled if their number
|
| 1089 |
+
# is too high
|
| 1090 |
+
new_scheduled = []
|
| 1091 |
+
for handle in self._scheduled:
|
| 1092 |
+
if handle._cancelled:
|
| 1093 |
+
handle._scheduled = False
|
| 1094 |
+
else:
|
| 1095 |
+
new_scheduled.append(handle)
|
| 1096 |
+
|
| 1097 |
+
heapq.heapify(new_scheduled)
|
| 1098 |
+
self._scheduled = new_scheduled
|
| 1099 |
+
self._timer_cancelled_count = 0
|
| 1100 |
+
else:
|
| 1101 |
+
# Remove delayed calls that were cancelled from head of queue.
|
| 1102 |
+
while self._scheduled and self._scheduled[0]._cancelled:
|
| 1103 |
+
self._timer_cancelled_count -= 1
|
| 1104 |
+
handle = heapq.heappop(self._scheduled)
|
| 1105 |
+
handle._scheduled = False
|
| 1106 |
+
|
| 1107 |
+
timeout = None
|
| 1108 |
+
if self._ready:
|
| 1109 |
+
timeout = 0
|
| 1110 |
+
elif self._scheduled:
|
| 1111 |
+
# Compute the desired timeout.
|
| 1112 |
+
when = self._scheduled[0]._when
|
| 1113 |
+
timeout = max(0, when - self.time())
|
| 1114 |
+
|
| 1115 |
+
if self._debug and timeout != 0:
|
| 1116 |
+
t0 = self.time()
|
| 1117 |
+
event_list = self._selector.select(timeout)
|
| 1118 |
+
dt = self.time() - t0
|
| 1119 |
+
if dt >= 1.0:
|
| 1120 |
+
level = logging.INFO
|
| 1121 |
+
else:
|
| 1122 |
+
level = logging.DEBUG
|
| 1123 |
+
nevent = len(event_list)
|
| 1124 |
+
if timeout is None:
|
| 1125 |
+
logger.log(level, 'poll took %.3f ms: %s events',
|
| 1126 |
+
dt * 1e3, nevent)
|
| 1127 |
+
elif nevent:
|
| 1128 |
+
logger.log(level,
|
| 1129 |
+
'poll %.3f ms took %.3f ms: %s events',
|
| 1130 |
+
timeout * 1e3, dt * 1e3, nevent)
|
| 1131 |
+
elif dt >= 1.0:
|
| 1132 |
+
logger.log(level,
|
| 1133 |
+
'poll %.3f ms took %.3f ms: timeout',
|
| 1134 |
+
timeout * 1e3, dt * 1e3)
|
| 1135 |
+
else:
|
| 1136 |
+
event_list = self._selector.select(timeout)
|
| 1137 |
+
self._process_events(event_list)
|
| 1138 |
+
|
| 1139 |
+
# Handle 'later' callbacks that are ready.
|
| 1140 |
+
end_time = self.time() + self._clock_resolution
|
| 1141 |
+
while self._scheduled:
|
| 1142 |
+
handle = self._scheduled[0]
|
| 1143 |
+
if handle._when >= end_time:
|
| 1144 |
+
break
|
| 1145 |
+
handle = heapq.heappop(self._scheduled)
|
| 1146 |
+
handle._scheduled = False
|
| 1147 |
+
self._ready.append(handle)
|
| 1148 |
+
|
| 1149 |
+
# This is the only place where callbacks are actually *called*.
|
| 1150 |
+
# All other places just add them to ready.
|
| 1151 |
+
# Note: We run all currently scheduled callbacks, but not any
|
| 1152 |
+
# callbacks scheduled by callbacks run this time around --
|
| 1153 |
+
# they will be run the next time (after another I/O poll).
|
| 1154 |
+
# Use an idiom that is thread-safe without using locks.
|
| 1155 |
+
ntodo = len(self._ready)
|
| 1156 |
+
for i in range(ntodo):
|
| 1157 |
+
handle = self._ready.popleft()
|
| 1158 |
+
if handle._cancelled:
|
| 1159 |
+
continue
|
| 1160 |
+
if self._debug:
|
| 1161 |
+
try:
|
| 1162 |
+
self._current_handle = handle
|
| 1163 |
+
t0 = self.time()
|
| 1164 |
+
handle._run()
|
| 1165 |
+
dt = self.time() - t0
|
| 1166 |
+
if dt >= self.slow_callback_duration:
|
| 1167 |
+
logger.warning('Executing %s took %.3f seconds',
|
| 1168 |
+
_format_handle(handle), dt)
|
| 1169 |
+
finally:
|
| 1170 |
+
self._current_handle = None
|
| 1171 |
+
else:
|
| 1172 |
+
handle._run()
|
| 1173 |
+
handle = None # Needed to break cycles when an exception occurs.
|
| 1174 |
+
|
| 1175 |
+
def get_debug(self):
|
| 1176 |
+
return self._debug
|
| 1177 |
+
|
| 1178 |
+
def set_debug(self, enabled):
|
| 1179 |
+
self._debug = enabled
|