Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +7 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/__init__.py +18 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/coca_model.py +498 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/hf_model.py +193 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/openai.py +90 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/pos_embed.py +96 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/transformer.py +912 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/utils.py +89 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/version.py +1 -0
- evalkit_internvl/lib/python3.10/site-packages/open_clip/zero_shot_metadata.py +266 -0
- evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/__pycache__/fine_tuning_job_integration.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/jobs/__pycache__/fine_tuning_job_checkpoint.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +47 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libXau-b2e5323c.so.6.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavdevice-bf61e037.so.58.5.100 +3 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 +3 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavformat-8b46ea57.so.58.20.100 +3 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavutil-2b26904a.so.56.22.100 +3 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libbz2-13e8c345.so.1.0.4 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libpostproc-88b722f8.so.55.3.100 +3 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libswscale-8e37dcfd.so.5.3.100 +3 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-77222338.so.1.1.0 +3 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-shape-893f3868.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-shm-7ffb2544.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-xfixes-6523fc53.so.0.0.0 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/decord.libs/libz-eb09ad1d.so.1.2.3 +0 -0
- evalkit_tf437/lib/python3.10/site-packages/einops_exts/__init__.py +3 -0
- evalkit_tf437/lib/python3.10/site-packages/einops_exts/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/einops_exts/__pycache__/einops_exts.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/einops_exts/__pycache__/torch.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/einops_exts/einops_exts.py +63 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/__init__.py +58 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/_compatibility.py +3 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/cache.py +275 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/file_io.py +38 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/grammar.py +264 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/normalizer.py +198 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/parser.py +210 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/py.typed +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/__init__.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/diff.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/parser.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/prefix.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/token.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/tokenize.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/tree.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/diff.py +886 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/errors.py +1326 -0
- evalkit_tf437/lib/python3.10/site-packages/parso/python/grammar311.txt +169 -0
.gitattributes
CHANGED
|
@@ -586,3 +586,10 @@ evalkit_internvl/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so
|
|
| 586 |
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 587 |
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 588 |
evalkit_internvl/lib/python3.10/site-packages/xlsxwriter/__pycache__/worksheet.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 586 |
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 587 |
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 588 |
evalkit_internvl/lib/python3.10/site-packages/xlsxwriter/__pycache__/worksheet.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 589 |
+
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavutil-2b26904a.so.56.22.100 filter=lfs diff=lfs merge=lfs -text
|
| 590 |
+
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavformat-8b46ea57.so.58.20.100 filter=lfs diff=lfs merge=lfs -text
|
| 591 |
+
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libpostproc-88b722f8.so.55.3.100 filter=lfs diff=lfs merge=lfs -text
|
| 592 |
+
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libswscale-8e37dcfd.so.5.3.100 filter=lfs diff=lfs merge=lfs -text
|
| 593 |
+
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-77222338.so.1.1.0 filter=lfs diff=lfs merge=lfs -text
|
| 594 |
+
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 filter=lfs diff=lfs merge=lfs -text
|
| 595 |
+
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavdevice-bf61e037.so.58.5.100 filter=lfs diff=lfs merge=lfs -text
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .version import __version__
|
| 2 |
+
|
| 3 |
+
from .coca_model import CoCa
|
| 4 |
+
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 5 |
+
from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_loss
|
| 6 |
+
from .factory import list_models, add_model_config, get_model_config, load_checkpoint
|
| 7 |
+
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
|
| 8 |
+
from .model import CLIP, CustomTextCLIP, CLIPTextCfg, CLIPVisionCfg, \
|
| 9 |
+
convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype, get_input_dtype, \
|
| 10 |
+
get_model_tokenize_cfg, get_model_preprocess_cfg, set_model_preprocess_cfg
|
| 11 |
+
from .openai import load_openai_model, list_openai_models
|
| 12 |
+
from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model, \
|
| 13 |
+
get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
|
| 14 |
+
from .push_to_hf_hub import push_pretrained_to_hf_hub, push_to_hf_hub
|
| 15 |
+
from .tokenizer import SimpleTokenizer, tokenize, decode
|
| 16 |
+
from .transform import image_transform, AugmentationCfg
|
| 17 |
+
from .zero_shot_classifier import build_zero_shot_classifier, build_zero_shot_classifier_legacy
|
| 18 |
+
from .zero_shot_metadata import OPENAI_IMAGENET_TEMPLATES, SIMPLE_IMAGENET_TEMPLATES, IMAGENET_CLASSNAMES
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/coca_model.py
ADDED
|
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
import numpy as np
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
|
| 9 |
+
from .transformer import (
|
| 10 |
+
LayerNormFp32,
|
| 11 |
+
LayerNorm,
|
| 12 |
+
QuickGELU,
|
| 13 |
+
MultimodalTransformer,
|
| 14 |
+
)
|
| 15 |
+
from .model import CLIPTextCfg, CLIPVisionCfg, _build_vision_tower, _build_text_tower
|
| 16 |
+
|
| 17 |
+
try:
|
| 18 |
+
from transformers import (
|
| 19 |
+
BeamSearchScorer,
|
| 20 |
+
LogitsProcessorList,
|
| 21 |
+
TopPLogitsWarper,
|
| 22 |
+
TopKLogitsWarper,
|
| 23 |
+
RepetitionPenaltyLogitsProcessor,
|
| 24 |
+
MinLengthLogitsProcessor,
|
| 25 |
+
MaxLengthCriteria,
|
| 26 |
+
StopStringCriteria,
|
| 27 |
+
EosTokenCriteria,
|
| 28 |
+
StoppingCriteriaList
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
GENERATION_TYPES = {
|
| 32 |
+
"top_k": TopKLogitsWarper,
|
| 33 |
+
"top_p": TopPLogitsWarper,
|
| 34 |
+
"beam_search": "beam_search"
|
| 35 |
+
}
|
| 36 |
+
_has_transformers = True
|
| 37 |
+
except ImportError as e:
|
| 38 |
+
GENERATION_TYPES = {
|
| 39 |
+
"top_k": None,
|
| 40 |
+
"top_p": None,
|
| 41 |
+
"beam_search": "beam_search"
|
| 42 |
+
}
|
| 43 |
+
_has_transformers = False
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@dataclass
|
| 47 |
+
class MultimodalCfg(CLIPTextCfg):
|
| 48 |
+
mlp_ratio: int = 4
|
| 49 |
+
dim_head: int = 64
|
| 50 |
+
heads: int = 8
|
| 51 |
+
n_queries: int = 256
|
| 52 |
+
attn_pooler_heads: int = 8
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _build_text_decoder_tower(
|
| 56 |
+
embed_dim,
|
| 57 |
+
multimodal_cfg,
|
| 58 |
+
quick_gelu: bool = False,
|
| 59 |
+
cast_dtype: Optional[torch.dtype] = None,
|
| 60 |
+
):
|
| 61 |
+
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
|
| 62 |
+
act_layer = QuickGELU if quick_gelu else nn.GELU
|
| 63 |
+
norm_layer = (
|
| 64 |
+
LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
decoder = MultimodalTransformer(
|
| 68 |
+
context_length=multimodal_cfg.context_length,
|
| 69 |
+
width=multimodal_cfg.width,
|
| 70 |
+
heads=multimodal_cfg.heads,
|
| 71 |
+
layers=multimodal_cfg.layers,
|
| 72 |
+
ls_init_value=multimodal_cfg.ls_init_value,
|
| 73 |
+
output_dim=embed_dim,
|
| 74 |
+
act_layer=act_layer,
|
| 75 |
+
norm_layer=norm_layer,
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
return decoder
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _token_to_tensor(token_id, device: str = "cpu") -> torch.Tensor:
|
| 82 |
+
if not isinstance(token_id, torch.Tensor):
|
| 83 |
+
if isinstance(token_id, int):
|
| 84 |
+
token_id = [token_id]
|
| 85 |
+
token_id = torch.tensor(token_id, device=device)
|
| 86 |
+
return token_id
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class CoCa(nn.Module):
|
| 90 |
+
def __init__(
|
| 91 |
+
self,
|
| 92 |
+
embed_dim,
|
| 93 |
+
multimodal_cfg: MultimodalCfg,
|
| 94 |
+
text_cfg: CLIPTextCfg,
|
| 95 |
+
vision_cfg: CLIPVisionCfg,
|
| 96 |
+
quick_gelu: bool = False,
|
| 97 |
+
init_logit_scale: float = np.log(1 / 0.07),
|
| 98 |
+
init_logit_bias: Optional[float] = None,
|
| 99 |
+
cast_dtype: Optional[torch.dtype] = None,
|
| 100 |
+
pad_id: int = 0,
|
| 101 |
+
):
|
| 102 |
+
super().__init__()
|
| 103 |
+
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
|
| 104 |
+
text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg
|
| 105 |
+
vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg
|
| 106 |
+
|
| 107 |
+
self.text = _build_text_tower(
|
| 108 |
+
embed_dim=embed_dim,
|
| 109 |
+
text_cfg=text_cfg,
|
| 110 |
+
quick_gelu=quick_gelu,
|
| 111 |
+
cast_dtype=cast_dtype,
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
vocab_size = (
|
| 115 |
+
text_cfg.vocab_size # for hf models
|
| 116 |
+
if hasattr(text_cfg, "hf_model_name") and text_cfg.hf_model_name is not None
|
| 117 |
+
else text_cfg.vocab_size
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
self.visual = _build_vision_tower(
|
| 121 |
+
embed_dim=embed_dim,
|
| 122 |
+
vision_cfg=vision_cfg,
|
| 123 |
+
quick_gelu=quick_gelu,
|
| 124 |
+
cast_dtype=cast_dtype,
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
self.text_decoder = _build_text_decoder_tower(
|
| 128 |
+
vocab_size,
|
| 129 |
+
multimodal_cfg=multimodal_cfg,
|
| 130 |
+
quick_gelu=quick_gelu,
|
| 131 |
+
cast_dtype=cast_dtype,
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
self.logit_scale = nn.Parameter(torch.ones([]) * init_logit_scale)
|
| 135 |
+
if init_logit_bias is not None:
|
| 136 |
+
self.logit_bias = nn.Parameter(torch.ones([]) * init_logit_bias)
|
| 137 |
+
else:
|
| 138 |
+
self.logit_bias = None
|
| 139 |
+
self.pad_id = pad_id
|
| 140 |
+
|
| 141 |
+
self.context_length = multimodal_cfg.context_length
|
| 142 |
+
|
| 143 |
+
@torch.jit.ignore
|
| 144 |
+
def set_grad_checkpointing(self, enable: bool = True):
|
| 145 |
+
self.visual.set_grad_checkpointing(enable)
|
| 146 |
+
self.text.set_grad_checkpointing(enable)
|
| 147 |
+
self.text_decoder.set_grad_checkpointing(enable)
|
| 148 |
+
|
| 149 |
+
def _encode_image(self, images, normalize: bool = True):
|
| 150 |
+
image_latent, tokens_embs = self.visual(images)
|
| 151 |
+
image_latent = F.normalize(image_latent, dim=-1) if normalize else image_latent
|
| 152 |
+
return image_latent, tokens_embs
|
| 153 |
+
|
| 154 |
+
def _encode_text(self, text, normalize: bool = True):
|
| 155 |
+
text_latent, token_emb = self.text(text)
|
| 156 |
+
text_latent = F.normalize(text_latent, dim=-1) if normalize else text_latent
|
| 157 |
+
return text_latent, token_emb
|
| 158 |
+
|
| 159 |
+
def encode_image(self, images, normalize: bool = True):
|
| 160 |
+
image_latent, _ = self._encode_image(images, normalize=normalize)
|
| 161 |
+
return image_latent
|
| 162 |
+
|
| 163 |
+
def encode_text(self, text, normalize: bool = True):
|
| 164 |
+
text_latent, _ = self._encode_text(text, normalize=normalize)
|
| 165 |
+
return text_latent
|
| 166 |
+
|
| 167 |
+
def forward(
|
| 168 |
+
self,
|
| 169 |
+
image,
|
| 170 |
+
text: Optional[torch.Tensor] = None,
|
| 171 |
+
image_latent: Optional[torch.Tensor] = None,
|
| 172 |
+
image_embs: Optional[torch.Tensor] = None,
|
| 173 |
+
output_labels: bool = True,
|
| 174 |
+
):
|
| 175 |
+
if image_latent is None or image_embs is None:
|
| 176 |
+
image_latent, image_embs = self._encode_image(image)
|
| 177 |
+
|
| 178 |
+
if text is None:
|
| 179 |
+
return {"image_features": image_latent, "image_embs": image_embs}
|
| 180 |
+
|
| 181 |
+
text_latent, token_embs = self._encode_text(text)
|
| 182 |
+
|
| 183 |
+
# FIXME this isn't an ideal solution, would like to improve -RW
|
| 184 |
+
labels: Optional[torch.Tensor] = text[:, 1:] if output_labels else None
|
| 185 |
+
if output_labels:
|
| 186 |
+
# align text_embs and thus logits with labels for teacher-forcing caption loss
|
| 187 |
+
token_embs = token_embs[:, :-1]
|
| 188 |
+
|
| 189 |
+
logits = self.text_decoder(image_embs, token_embs)
|
| 190 |
+
out_dict = {
|
| 191 |
+
"image_features": image_latent,
|
| 192 |
+
"text_features": text_latent,
|
| 193 |
+
"logits": logits,
|
| 194 |
+
"logit_scale": self.logit_scale.exp()
|
| 195 |
+
}
|
| 196 |
+
if labels is not None:
|
| 197 |
+
out_dict["labels"] = labels
|
| 198 |
+
if self.logit_bias is not None:
|
| 199 |
+
out_dict["logit_bias"] = self.logit_bias
|
| 200 |
+
return out_dict
|
| 201 |
+
|
| 202 |
+
def generate(
|
| 203 |
+
self,
|
| 204 |
+
image,
|
| 205 |
+
text=None,
|
| 206 |
+
seq_len=30,
|
| 207 |
+
max_seq_len=77,
|
| 208 |
+
temperature=1.,
|
| 209 |
+
generation_type="beam_search",
|
| 210 |
+
top_p=0.1, # keep tokens in the 1 - top_p quantile
|
| 211 |
+
top_k=1, # keeps the top_k most probable tokens
|
| 212 |
+
pad_token_id=None,
|
| 213 |
+
eos_token_id=None,
|
| 214 |
+
sot_token_id=None,
|
| 215 |
+
num_beams=6,
|
| 216 |
+
num_beam_groups=3,
|
| 217 |
+
min_seq_len=5,
|
| 218 |
+
stopping_criteria=None,
|
| 219 |
+
repetition_penalty=1.0,
|
| 220 |
+
fixed_output_length=False # if True output.shape == (batch_size, seq_len)
|
| 221 |
+
):
|
| 222 |
+
# taking many ideas and components from HuggingFace GenerationMixin
|
| 223 |
+
# https://huggingface.co/docs/transformers/main/en/main_classes/text_generation
|
| 224 |
+
assert _has_transformers, "Please install transformers for generate functionality. `pip install transformers`."
|
| 225 |
+
assert seq_len > min_seq_len, "seq_len must be larger than min_seq_len"
|
| 226 |
+
device = image.device
|
| 227 |
+
|
| 228 |
+
with torch.no_grad():
|
| 229 |
+
sot_token_id = _token_to_tensor(49406 if sot_token_id is None else sot_token_id, device=device)
|
| 230 |
+
eos_token_id = _token_to_tensor(49407 if eos_token_id is None else eos_token_id, device=device)
|
| 231 |
+
pad_token_id = self.pad_id if pad_token_id is None else pad_token_id
|
| 232 |
+
logit_processor = LogitsProcessorList(
|
| 233 |
+
[
|
| 234 |
+
MinLengthLogitsProcessor(min_seq_len, eos_token_id),
|
| 235 |
+
RepetitionPenaltyLogitsProcessor(repetition_penalty),
|
| 236 |
+
]
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
if stopping_criteria is None:
|
| 240 |
+
stopping_criteria = [MaxLengthCriteria(max_length=seq_len)]
|
| 241 |
+
stopping_criteria = StoppingCriteriaList(stopping_criteria)
|
| 242 |
+
|
| 243 |
+
if generation_type == "beam_search":
|
| 244 |
+
output = self._generate_beamsearch(
|
| 245 |
+
image_inputs=image,
|
| 246 |
+
pad_token_id=pad_token_id,
|
| 247 |
+
eos_token_id=eos_token_id,
|
| 248 |
+
sot_token_id=sot_token_id,
|
| 249 |
+
num_beams=num_beams,
|
| 250 |
+
num_beam_groups=num_beam_groups,
|
| 251 |
+
min_seq_len=min_seq_len,
|
| 252 |
+
stopping_criteria=stopping_criteria,
|
| 253 |
+
logit_processor=logit_processor,
|
| 254 |
+
)
|
| 255 |
+
if fixed_output_length and output.shape[1] < seq_len:
|
| 256 |
+
pad_len = seq_len - output.shape[1]
|
| 257 |
+
return torch.cat((
|
| 258 |
+
output,
|
| 259 |
+
torch.ones(output.shape[0], pad_len, device=device, dtype=output.dtype) * pad_token_id
|
| 260 |
+
),
|
| 261 |
+
dim=1
|
| 262 |
+
)
|
| 263 |
+
return output
|
| 264 |
+
|
| 265 |
+
elif generation_type == "top_p":
|
| 266 |
+
logit_warper = GENERATION_TYPES[generation_type](top_p)
|
| 267 |
+
elif generation_type == "top_k":
|
| 268 |
+
logit_warper = GENERATION_TYPES[generation_type](top_k)
|
| 269 |
+
else:
|
| 270 |
+
raise ValueError(
|
| 271 |
+
f"generation_type has to be one of "
|
| 272 |
+
f"{'| ' + ' | '.join(list(GENERATION_TYPES.keys())) + ' |'}."
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
image_latent, image_embs = self._encode_image(image)
|
| 276 |
+
|
| 277 |
+
if text is None:
|
| 278 |
+
text = torch.ones((image.shape[0], 1), device=device, dtype=torch.long) * sot_token_id
|
| 279 |
+
|
| 280 |
+
was_training = self.training
|
| 281 |
+
num_dims = len(text.shape)
|
| 282 |
+
|
| 283 |
+
if num_dims == 1:
|
| 284 |
+
text = text[None, :]
|
| 285 |
+
|
| 286 |
+
self.eval()
|
| 287 |
+
out = text
|
| 288 |
+
|
| 289 |
+
while True:
|
| 290 |
+
x = out[:, -max_seq_len:]
|
| 291 |
+
cur_len = x.shape[1]
|
| 292 |
+
logits = self(
|
| 293 |
+
image,
|
| 294 |
+
x,
|
| 295 |
+
image_latent=image_latent,
|
| 296 |
+
image_embs=image_embs,
|
| 297 |
+
output_labels=False,
|
| 298 |
+
)["logits"][:, -1]
|
| 299 |
+
mask = (out[:, -1] == eos_token_id) | (out[:, -1] == pad_token_id)
|
| 300 |
+
sample = torch.ones((out.shape[0], 1), device=device, dtype=torch.long) * pad_token_id
|
| 301 |
+
|
| 302 |
+
if mask.all():
|
| 303 |
+
if not fixed_output_length:
|
| 304 |
+
break
|
| 305 |
+
else:
|
| 306 |
+
logits = logits[~mask, :]
|
| 307 |
+
filtered_logits = logit_processor(x[~mask, :], logits)
|
| 308 |
+
filtered_logits = logit_warper(x[~mask, :], filtered_logits)
|
| 309 |
+
probs = F.softmax(filtered_logits / temperature, dim=-1)
|
| 310 |
+
|
| 311 |
+
if (cur_len + 1 == seq_len):
|
| 312 |
+
sample[~mask, :] = torch.ones((sum(~mask), 1), device=device, dtype=torch.long) * eos_token_id
|
| 313 |
+
else:
|
| 314 |
+
sample[~mask, :] = torch.multinomial(probs, 1)
|
| 315 |
+
|
| 316 |
+
out = torch.cat((out, sample), dim=-1)
|
| 317 |
+
|
| 318 |
+
cur_len += 1
|
| 319 |
+
|
| 320 |
+
if all(stopping_criteria(out, None)):
|
| 321 |
+
break
|
| 322 |
+
|
| 323 |
+
if num_dims == 1:
|
| 324 |
+
out = out.squeeze(0)
|
| 325 |
+
|
| 326 |
+
self.train(was_training)
|
| 327 |
+
return out
|
| 328 |
+
|
| 329 |
+
def _generate_beamsearch(
|
| 330 |
+
self,
|
| 331 |
+
image_inputs,
|
| 332 |
+
pad_token_id=None,
|
| 333 |
+
eos_token_id=None,
|
| 334 |
+
sot_token_id=None,
|
| 335 |
+
num_beams=6,
|
| 336 |
+
num_beam_groups=3,
|
| 337 |
+
min_seq_len=5,
|
| 338 |
+
stopping_criteria=None,
|
| 339 |
+
logit_processor=None,
|
| 340 |
+
logit_warper=None,
|
| 341 |
+
):
|
| 342 |
+
device = image_inputs.device
|
| 343 |
+
batch_size = image_inputs.shape[0]
|
| 344 |
+
image_inputs = torch.repeat_interleave(image_inputs, num_beams, dim=0)
|
| 345 |
+
image_latent, image_embs = self._encode_image(image_inputs)
|
| 346 |
+
|
| 347 |
+
input_ids = torch.ones((batch_size * num_beams, 1), device=device, dtype=torch.long)
|
| 348 |
+
input_ids = input_ids * sot_token_id
|
| 349 |
+
beam_scorer = BeamSearchScorer(
|
| 350 |
+
batch_size=batch_size,
|
| 351 |
+
num_beams=num_beams,
|
| 352 |
+
device=device,
|
| 353 |
+
num_beam_groups=num_beam_groups,
|
| 354 |
+
)
|
| 355 |
+
# instantiate logits processors
|
| 356 |
+
logits_processor = (
|
| 357 |
+
LogitsProcessorList([MinLengthLogitsProcessor(min_seq_len, eos_token_id=eos_token_id)])
|
| 358 |
+
if logit_processor is None
|
| 359 |
+
else logit_processor
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
num_beams = beam_scorer.num_beams
|
| 363 |
+
num_beam_groups = beam_scorer.num_beam_groups
|
| 364 |
+
num_sub_beams = num_beams // num_beam_groups
|
| 365 |
+
batch_size = len(beam_scorer._beam_hyps) // num_beam_groups
|
| 366 |
+
batch_beam_size, cur_len = input_ids.shape
|
| 367 |
+
beam_indices = None
|
| 368 |
+
|
| 369 |
+
if num_beams * batch_size != batch_beam_size:
|
| 370 |
+
raise ValueError(
|
| 371 |
+
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
|
| 372 |
+
)
|
| 373 |
+
|
| 374 |
+
beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
|
| 375 |
+
# initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in
|
| 376 |
+
# the same group don't produce same tokens everytime.
|
| 377 |
+
beam_scores[:, ::num_sub_beams] = 0
|
| 378 |
+
beam_scores = beam_scores.view((batch_size * num_beams,))
|
| 379 |
+
|
| 380 |
+
while True:
|
| 381 |
+
|
| 382 |
+
# predicted tokens in cur_len step
|
| 383 |
+
current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
|
| 384 |
+
|
| 385 |
+
# indices which will form the beams in the next time step
|
| 386 |
+
reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
|
| 387 |
+
|
| 388 |
+
# do one decoder step on all beams of all sentences in batch
|
| 389 |
+
model_inputs = prepare_inputs_for_generation(input_ids=input_ids, image_inputs=image_inputs)
|
| 390 |
+
outputs = self(
|
| 391 |
+
model_inputs['images'],
|
| 392 |
+
model_inputs['text'],
|
| 393 |
+
image_latent=image_latent,
|
| 394 |
+
image_embs=image_embs,
|
| 395 |
+
output_labels=False,
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
for beam_group_idx in range(num_beam_groups):
|
| 399 |
+
group_start_idx = beam_group_idx * num_sub_beams
|
| 400 |
+
group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
|
| 401 |
+
group_size = group_end_idx - group_start_idx
|
| 402 |
+
|
| 403 |
+
# indices of beams of current group among all sentences in batch
|
| 404 |
+
batch_group_indices = []
|
| 405 |
+
|
| 406 |
+
for batch_idx in range(batch_size):
|
| 407 |
+
batch_group_indices.extend(
|
| 408 |
+
[batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
|
| 409 |
+
)
|
| 410 |
+
group_input_ids = input_ids[batch_group_indices]
|
| 411 |
+
|
| 412 |
+
# select outputs of beams of currentg group only
|
| 413 |
+
next_token_logits = outputs['logits'][batch_group_indices, -1, :]
|
| 414 |
+
vocab_size = next_token_logits.shape[-1]
|
| 415 |
+
|
| 416 |
+
next_token_scores_processed = logits_processor(
|
| 417 |
+
group_input_ids, next_token_logits, current_tokens=current_tokens, beam_group_idx=beam_group_idx
|
| 418 |
+
)
|
| 419 |
+
next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)
|
| 420 |
+
next_token_scores = next_token_scores.expand_as(next_token_scores_processed)
|
| 421 |
+
|
| 422 |
+
# reshape for beam search
|
| 423 |
+
next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)
|
| 424 |
+
|
| 425 |
+
next_token_scores, next_tokens = torch.topk(
|
| 426 |
+
next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True
|
| 427 |
+
)
|
| 428 |
+
|
| 429 |
+
next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
|
| 430 |
+
next_tokens = next_tokens % vocab_size
|
| 431 |
+
|
| 432 |
+
# stateless
|
| 433 |
+
process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
|
| 434 |
+
beam_outputs = beam_scorer.process(
|
| 435 |
+
group_input_ids,
|
| 436 |
+
next_token_scores,
|
| 437 |
+
next_tokens,
|
| 438 |
+
next_indices,
|
| 439 |
+
pad_token_id=pad_token_id,
|
| 440 |
+
eos_token_id=eos_token_id,
|
| 441 |
+
beam_indices=process_beam_indices,
|
| 442 |
+
group_index=beam_group_idx,
|
| 443 |
+
)
|
| 444 |
+
beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
|
| 445 |
+
beam_next_tokens = beam_outputs["next_beam_tokens"]
|
| 446 |
+
beam_idx = beam_outputs["next_beam_indices"]
|
| 447 |
+
|
| 448 |
+
input_ids[batch_group_indices] = group_input_ids[beam_idx]
|
| 449 |
+
group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
|
| 450 |
+
current_tokens[batch_group_indices] = group_input_ids[:, -1]
|
| 451 |
+
|
| 452 |
+
# (beam_idx // group_size) -> batch_idx
|
| 453 |
+
# (beam_idx % group_size) -> offset of idx inside the group
|
| 454 |
+
reordering_indices[batch_group_indices] = (
|
| 455 |
+
num_beams * torch.div(beam_idx, group_size, rounding_mode="floor") + group_start_idx + (beam_idx % group_size)
|
| 456 |
+
)
|
| 457 |
+
|
| 458 |
+
input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)
|
| 459 |
+
|
| 460 |
+
# increase cur_len
|
| 461 |
+
cur_len = cur_len + 1
|
| 462 |
+
if beam_scorer.is_done or all(stopping_criteria(input_ids, None)):
|
| 463 |
+
break
|
| 464 |
+
|
| 465 |
+
final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
|
| 466 |
+
sequence_outputs = beam_scorer.finalize(
|
| 467 |
+
input_ids,
|
| 468 |
+
beam_scores,
|
| 469 |
+
next_tokens,
|
| 470 |
+
next_indices,
|
| 471 |
+
pad_token_id=pad_token_id,
|
| 472 |
+
eos_token_id=eos_token_id,
|
| 473 |
+
max_length=stopping_criteria.max_length,
|
| 474 |
+
beam_indices=final_beam_indices,
|
| 475 |
+
)
|
| 476 |
+
return sequence_outputs['sequences']
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def prepare_inputs_for_generation(input_ids, image_inputs, past=None, **kwargs):
|
| 480 |
+
if past:
|
| 481 |
+
input_ids = input_ids[:, -1].unsqueeze(-1)
|
| 482 |
+
|
| 483 |
+
attention_mask = kwargs.get("attention_mask", None)
|
| 484 |
+
position_ids = kwargs.get("position_ids", None)
|
| 485 |
+
|
| 486 |
+
if attention_mask is not None and position_ids is None:
|
| 487 |
+
# create position_ids on the fly for batch generation
|
| 488 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
| 489 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
| 490 |
+
else:
|
| 491 |
+
position_ids = None
|
| 492 |
+
return {
|
| 493 |
+
"text": input_ids,
|
| 494 |
+
"images": image_inputs,
|
| 495 |
+
"past_key_values": past,
|
| 496 |
+
"position_ids": position_ids,
|
| 497 |
+
"attention_mask": attention_mask,
|
| 498 |
+
}
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/hf_model.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" huggingface model adapter
|
| 2 |
+
|
| 3 |
+
Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model.
|
| 4 |
+
"""
|
| 5 |
+
import re
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch import TensorType
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
import transformers
|
| 13 |
+
from transformers import AutoModel, AutoTokenizer, AutoConfig, PretrainedConfig
|
| 14 |
+
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \
|
| 15 |
+
BaseModelOutputWithPoolingAndCrossAttentions
|
| 16 |
+
except ImportError as e:
|
| 17 |
+
transformers = None
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class BaseModelOutput:
|
| 21 |
+
pass
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class PretrainedConfig:
|
| 25 |
+
pass
|
| 26 |
+
|
| 27 |
+
from .hf_configs import arch_dict
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# utils
|
| 31 |
+
def _camel2snake(s):
|
| 32 |
+
return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# TODO: ?last - for gpt-like models
|
| 36 |
+
_POOLERS = {}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def register_pooler(cls):
|
| 40 |
+
"""Decorator registering pooler class"""
|
| 41 |
+
_POOLERS[_camel2snake(cls.__name__)] = cls
|
| 42 |
+
return cls
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@register_pooler
|
| 46 |
+
class MeanPooler(nn.Module):
|
| 47 |
+
"""Mean pooling"""
|
| 48 |
+
|
| 49 |
+
def forward(self, x: BaseModelOutput, attention_mask: TensorType):
|
| 50 |
+
masked_output = x.last_hidden_state * attention_mask.unsqueeze(-1)
|
| 51 |
+
return masked_output.sum(dim=1) / attention_mask.sum(-1, keepdim=True)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@register_pooler
|
| 55 |
+
class MaxPooler(nn.Module):
|
| 56 |
+
"""Max pooling"""
|
| 57 |
+
|
| 58 |
+
def forward(self, x: BaseModelOutput, attention_mask: TensorType):
|
| 59 |
+
masked_output = x.last_hidden_state.masked_fill(attention_mask.unsqueeze(-1), -torch.inf)
|
| 60 |
+
return masked_output.max(1).values
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@register_pooler
|
| 64 |
+
class ClsPooler(nn.Module):
|
| 65 |
+
"""CLS token pooling"""
|
| 66 |
+
|
| 67 |
+
def __init__(self, use_pooler_output=True):
|
| 68 |
+
super().__init__()
|
| 69 |
+
self.cls_token_position = 0
|
| 70 |
+
self.use_pooler_output = use_pooler_output
|
| 71 |
+
|
| 72 |
+
def forward(self, x: BaseModelOutput, attention_mask: TensorType):
|
| 73 |
+
if (self.use_pooler_output and
|
| 74 |
+
isinstance(x, (BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions)) and
|
| 75 |
+
(x.pooler_output is not None)
|
| 76 |
+
):
|
| 77 |
+
return x.pooler_output
|
| 78 |
+
|
| 79 |
+
return x.last_hidden_state[:, self.cls_token_position, :]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@register_pooler
|
| 83 |
+
class ClsLastHiddenStatePooler(nn.Module):
|
| 84 |
+
"""CLS token pooling
|
| 85 |
+
NOTE: this is equivalent to ClsPooler above with use_pooler_output=False
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
def __init__(self):
|
| 89 |
+
super().__init__()
|
| 90 |
+
self.cls_token_position = 0
|
| 91 |
+
|
| 92 |
+
def forward(self, x: BaseModelOutput, attention_mask: TensorType):
|
| 93 |
+
return x.last_hidden_state[:, self.cls_token_position, :]
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class HFTextEncoder(nn.Module):
|
| 97 |
+
"""HuggingFace model adapter"""
|
| 98 |
+
output_tokens: torch.jit.Final[bool]
|
| 99 |
+
|
| 100 |
+
def __init__(
|
| 101 |
+
self,
|
| 102 |
+
model_name_or_path: str,
|
| 103 |
+
output_dim: int,
|
| 104 |
+
config: PretrainedConfig = None,
|
| 105 |
+
pooler_type: str = None,
|
| 106 |
+
proj_type: str = None,
|
| 107 |
+
pretrained: bool = True,
|
| 108 |
+
output_tokens: bool = False,
|
| 109 |
+
):
|
| 110 |
+
super().__init__()
|
| 111 |
+
self.output_tokens = output_tokens
|
| 112 |
+
self.output_dim = output_dim
|
| 113 |
+
|
| 114 |
+
# TODO: find better way to get this information
|
| 115 |
+
uses_transformer_pooler = (pooler_type == "cls_pooler")
|
| 116 |
+
|
| 117 |
+
if transformers is None:
|
| 118 |
+
raise RuntimeError("Please `pip install transformers` to use pre-trained HuggingFace models")
|
| 119 |
+
if config is None:
|
| 120 |
+
self.config = AutoConfig.from_pretrained(model_name_or_path)
|
| 121 |
+
create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (
|
| 122 |
+
AutoModel.from_config, self.config)
|
| 123 |
+
# TODO: do all model configs have this attribute? PretrainedConfig does so yes??
|
| 124 |
+
if hasattr(self.config, "is_encoder_decoder") and self.config.is_encoder_decoder:
|
| 125 |
+
self.transformer = create_func(model_args)
|
| 126 |
+
self.transformer = self.transformer.encoder
|
| 127 |
+
else:
|
| 128 |
+
self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)
|
| 129 |
+
else:
|
| 130 |
+
self.config = config
|
| 131 |
+
self.transformer = AutoModel.from_config(config)
|
| 132 |
+
if pooler_type is None: # get default arch pooler
|
| 133 |
+
pooler_type = (arch_dict[self.config.model_type]["pooler"])
|
| 134 |
+
|
| 135 |
+
# FIXME downstream users of OpenCLIP models use these attr, need to verify valid across all models
|
| 136 |
+
self.vocab_size = getattr(self.config, 'vocab_size', 0)
|
| 137 |
+
self.context_length = getattr(self.config, 'max_position_embeddings', 0)
|
| 138 |
+
|
| 139 |
+
self.pooler = _POOLERS[pooler_type]()
|
| 140 |
+
|
| 141 |
+
d_model = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["width"])
|
| 142 |
+
if (d_model == output_dim) and (proj_type is None): # do we always need a proj?
|
| 143 |
+
self.proj = nn.Identity()
|
| 144 |
+
elif proj_type == 'linear':
|
| 145 |
+
self.proj = nn.Linear(d_model, output_dim, bias=False)
|
| 146 |
+
elif proj_type == 'mlp':
|
| 147 |
+
hidden_size = (d_model + output_dim) // 2
|
| 148 |
+
self.proj = nn.Sequential(
|
| 149 |
+
nn.Linear(d_model, hidden_size, bias=False),
|
| 150 |
+
nn.GELU(),
|
| 151 |
+
nn.Linear(hidden_size, output_dim, bias=False),
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
def forward(self, x: TensorType):
|
| 155 |
+
attn_mask = (x != self.config.pad_token_id).long()
|
| 156 |
+
out = self.transformer(input_ids=x, attention_mask=attn_mask)
|
| 157 |
+
pooled_out = self.pooler(out, attn_mask)
|
| 158 |
+
projected = self.proj(pooled_out)
|
| 159 |
+
|
| 160 |
+
seq_len = out.last_hidden_state.shape[1]
|
| 161 |
+
tokens = (
|
| 162 |
+
out.last_hidden_state[:, torch.arange(seq_len) != self.pooler.cls_token_position, :]
|
| 163 |
+
if type(self.pooler) == ClsPooler
|
| 164 |
+
else out.last_hidden_state
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
if self.output_tokens:
|
| 168 |
+
return projected, tokens
|
| 169 |
+
return projected
|
| 170 |
+
|
| 171 |
+
def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
|
| 172 |
+
if not unlocked_layers: # full freezing
|
| 173 |
+
for n, p in self.transformer.named_parameters():
|
| 174 |
+
p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
|
| 175 |
+
return
|
| 176 |
+
|
| 177 |
+
encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
|
| 178 |
+
layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
|
| 179 |
+
print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
|
| 180 |
+
embeddings = getattr(
|
| 181 |
+
self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
|
| 182 |
+
modules = [embeddings, *layer_list][:-unlocked_layers]
|
| 183 |
+
# freeze layers
|
| 184 |
+
for module in modules:
|
| 185 |
+
for n, p in module.named_parameters():
|
| 186 |
+
p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
|
| 187 |
+
|
| 188 |
+
@torch.jit.ignore
|
| 189 |
+
def set_grad_checkpointing(self, enable=True):
|
| 190 |
+
self.transformer.gradient_checkpointing_enable()
|
| 191 |
+
|
| 192 |
+
def init_parameters(self):
|
| 193 |
+
pass
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/openai.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" OpenAI pretrained model functions
|
| 2 |
+
|
| 3 |
+
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import warnings
|
| 8 |
+
from typing import List, Optional, Union
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 13 |
+
from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
|
| 14 |
+
from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
|
| 15 |
+
|
| 16 |
+
__all__ = ["list_openai_models", "load_openai_model"]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def list_openai_models() -> List[str]:
|
| 20 |
+
"""Returns the names of available CLIP models"""
|
| 21 |
+
return list_pretrained_models_by_tag('openai')
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def load_openai_model(
|
| 25 |
+
name: str,
|
| 26 |
+
precision: Optional[str] = None,
|
| 27 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 28 |
+
cache_dir: Optional[str] = None,
|
| 29 |
+
):
|
| 30 |
+
"""Load a CLIP model
|
| 31 |
+
|
| 32 |
+
Parameters
|
| 33 |
+
----------
|
| 34 |
+
name : str
|
| 35 |
+
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
|
| 36 |
+
precision: str
|
| 37 |
+
Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
|
| 38 |
+
device : Union[str, torch.device]
|
| 39 |
+
The device to put the loaded model
|
| 40 |
+
cache_dir : Optional[str]
|
| 41 |
+
The directory to cache the downloaded model weights
|
| 42 |
+
|
| 43 |
+
Returns
|
| 44 |
+
-------
|
| 45 |
+
model : torch.nn.Module
|
| 46 |
+
The CLIP model
|
| 47 |
+
preprocess : Callable[[PIL.Image], torch.Tensor]
|
| 48 |
+
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
|
| 49 |
+
"""
|
| 50 |
+
if device is None:
|
| 51 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 52 |
+
if precision is None:
|
| 53 |
+
precision = 'fp32' if device == 'cpu' else 'fp16'
|
| 54 |
+
|
| 55 |
+
if get_pretrained_url(name, 'openai'):
|
| 56 |
+
model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
|
| 57 |
+
elif os.path.isfile(name):
|
| 58 |
+
model_path = name
|
| 59 |
+
else:
|
| 60 |
+
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
|
| 61 |
+
|
| 62 |
+
try:
|
| 63 |
+
# loading JIT archive
|
| 64 |
+
model = torch.jit.load(model_path, map_location="cpu").eval()
|
| 65 |
+
state_dict = None
|
| 66 |
+
except RuntimeError:
|
| 67 |
+
# loading saved state dict
|
| 68 |
+
state_dict = torch.load(model_path, map_location="cpu")
|
| 69 |
+
|
| 70 |
+
# Build a non-jit model from the OpenAI jitted model state dict
|
| 71 |
+
cast_dtype = get_cast_dtype(precision)
|
| 72 |
+
try:
|
| 73 |
+
model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
|
| 74 |
+
except KeyError:
|
| 75 |
+
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
|
| 76 |
+
model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
|
| 77 |
+
|
| 78 |
+
# model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
|
| 79 |
+
model = model.to(device)
|
| 80 |
+
# FIXME support pure fp16/bf16 precision modes
|
| 81 |
+
if precision != 'fp16':
|
| 82 |
+
model.float()
|
| 83 |
+
if precision == 'bf16':
|
| 84 |
+
# for bf16, convert back to low-precision
|
| 85 |
+
convert_weights_to_lp(model, dtype=torch.bfloat16)
|
| 86 |
+
|
| 87 |
+
# add mean / std attributes for consistency with OpenCLIP models
|
| 88 |
+
model.visual.image_mean = OPENAI_DATASET_MEAN
|
| 89 |
+
model.visual.image_std = OPENAI_DATASET_STD
|
| 90 |
+
return model
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/pos_embed.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
# --------------------------------------------------------
|
| 7 |
+
# Position embedding utils
|
| 8 |
+
# --------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
# --------------------------------------------------------
|
| 15 |
+
# 2D sine-cosine position embedding
|
| 16 |
+
# References:
|
| 17 |
+
# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
|
| 18 |
+
# MoCo v3: https://github.com/facebookresearch/moco-v3
|
| 19 |
+
# --------------------------------------------------------
|
| 20 |
+
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
|
| 21 |
+
"""
|
| 22 |
+
grid_size: int of the grid height and width
|
| 23 |
+
return:
|
| 24 |
+
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
|
| 25 |
+
"""
|
| 26 |
+
grid_h = np.arange(grid_size, dtype=np.float32)
|
| 27 |
+
grid_w = np.arange(grid_size, dtype=np.float32)
|
| 28 |
+
grid = np.meshgrid(grid_w, grid_h) # here w goes first
|
| 29 |
+
grid = np.stack(grid, axis=0)
|
| 30 |
+
|
| 31 |
+
grid = grid.reshape([2, 1, grid_size, grid_size])
|
| 32 |
+
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
|
| 33 |
+
if cls_token:
|
| 34 |
+
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
|
| 35 |
+
return pos_embed
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
|
| 39 |
+
assert embed_dim % 2 == 0
|
| 40 |
+
|
| 41 |
+
# use half of dimensions to encode grid_h
|
| 42 |
+
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
|
| 43 |
+
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
|
| 44 |
+
|
| 45 |
+
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
|
| 46 |
+
return emb
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
|
| 50 |
+
"""
|
| 51 |
+
embed_dim: output dimension for each position
|
| 52 |
+
pos: a list of positions to be encoded: size (M,)
|
| 53 |
+
out: (M, D)
|
| 54 |
+
"""
|
| 55 |
+
assert embed_dim % 2 == 0
|
| 56 |
+
omega = np.arange(embed_dim // 2, dtype=float)
|
| 57 |
+
omega /= embed_dim / 2.
|
| 58 |
+
omega = 1. / 10000**omega # (D/2,)
|
| 59 |
+
|
| 60 |
+
pos = pos.reshape(-1) # (M,)
|
| 61 |
+
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
|
| 62 |
+
|
| 63 |
+
emb_sin = np.sin(out) # (M, D/2)
|
| 64 |
+
emb_cos = np.cos(out) # (M, D/2)
|
| 65 |
+
|
| 66 |
+
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
|
| 67 |
+
return emb
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
# --------------------------------------------------------
|
| 71 |
+
# Interpolate position embeddings for high-resolution
|
| 72 |
+
# References:
|
| 73 |
+
# DeiT: https://github.com/facebookresearch/deit
|
| 74 |
+
# --------------------------------------------------------
|
| 75 |
+
def interpolate_pos_embed(model, checkpoint_model):
|
| 76 |
+
if 'pos_embed' in checkpoint_model:
|
| 77 |
+
pos_embed_checkpoint = checkpoint_model['pos_embed']
|
| 78 |
+
embedding_size = pos_embed_checkpoint.shape[-1]
|
| 79 |
+
num_patches = model.patch_embed.num_patches
|
| 80 |
+
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
|
| 81 |
+
# height (== width) for the checkpoint position embedding
|
| 82 |
+
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
|
| 83 |
+
# height (== width) for the new position embedding
|
| 84 |
+
new_size = int(num_patches ** 0.5)
|
| 85 |
+
# class_token and dist_token are kept unchanged
|
| 86 |
+
if orig_size != new_size:
|
| 87 |
+
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
|
| 88 |
+
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
|
| 89 |
+
# only the position tokens are interpolated
|
| 90 |
+
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
|
| 91 |
+
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
|
| 92 |
+
pos_tokens = torch.nn.functional.interpolate(
|
| 93 |
+
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
|
| 94 |
+
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
|
| 95 |
+
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
|
| 96 |
+
checkpoint_model['pos_embed'] = new_pos_embed
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/transformer.py
ADDED
|
@@ -0,0 +1,912 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
import math
|
| 3 |
+
from typing import Callable, List, Optional, Sequence, Tuple, Union
|
| 4 |
+
from functools import partial
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
from torch.nn import functional as F
|
| 9 |
+
from torch.utils.checkpoint import checkpoint
|
| 10 |
+
|
| 11 |
+
from .utils import to_2tuple
|
| 12 |
+
from .pos_embed import get_2d_sincos_pos_embed
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class LayerNormFp32(nn.LayerNorm):
|
| 16 |
+
"""Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
|
| 17 |
+
|
| 18 |
+
def forward(self, x: torch.Tensor):
|
| 19 |
+
orig_type = x.dtype
|
| 20 |
+
x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)
|
| 21 |
+
return x.to(orig_type)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class LayerNorm(nn.LayerNorm):
|
| 25 |
+
"""Subclass torch's LayerNorm (with cast back to input dtype)."""
|
| 26 |
+
|
| 27 |
+
def forward(self, x: torch.Tensor):
|
| 28 |
+
orig_type = x.dtype
|
| 29 |
+
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
|
| 30 |
+
return x.to(orig_type)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class QuickGELU(nn.Module):
|
| 34 |
+
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
|
| 35 |
+
def forward(self, x: torch.Tensor):
|
| 36 |
+
return x * torch.sigmoid(1.702 * x)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class LayerScale(nn.Module):
|
| 40 |
+
def __init__(self, dim, init_values=1e-5, inplace=False):
|
| 41 |
+
super().__init__()
|
| 42 |
+
self.inplace = inplace
|
| 43 |
+
self.gamma = nn.Parameter(init_values * torch.ones(dim))
|
| 44 |
+
|
| 45 |
+
def forward(self, x):
|
| 46 |
+
return x.mul_(self.gamma) if self.inplace else x * self.gamma
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class PatchDropout(nn.Module):
|
| 50 |
+
"""
|
| 51 |
+
https://arxiv.org/abs/2212.00794
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
def __init__(self, prob, exclude_first_token=True):
|
| 55 |
+
super().__init__()
|
| 56 |
+
assert 0 <= prob < 1.
|
| 57 |
+
self.prob = prob
|
| 58 |
+
self.exclude_first_token = exclude_first_token # exclude CLS token
|
| 59 |
+
|
| 60 |
+
def forward(self, x):
|
| 61 |
+
if not self.training or self.prob == 0.:
|
| 62 |
+
return x
|
| 63 |
+
|
| 64 |
+
if self.exclude_first_token:
|
| 65 |
+
cls_tokens, x = x[:, :1], x[:, 1:]
|
| 66 |
+
else:
|
| 67 |
+
cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
|
| 68 |
+
|
| 69 |
+
batch = x.size()[0]
|
| 70 |
+
num_tokens = x.size()[1]
|
| 71 |
+
|
| 72 |
+
batch_indices = torch.arange(batch)
|
| 73 |
+
batch_indices = batch_indices[..., None]
|
| 74 |
+
|
| 75 |
+
keep_prob = 1 - self.prob
|
| 76 |
+
num_patches_keep = max(1, int(num_tokens * keep_prob))
|
| 77 |
+
|
| 78 |
+
rand = torch.randn(batch, num_tokens)
|
| 79 |
+
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
|
| 80 |
+
|
| 81 |
+
x = x[batch_indices, patch_indices_keep]
|
| 82 |
+
|
| 83 |
+
if self.exclude_first_token:
|
| 84 |
+
x = torch.cat((cls_tokens, x), dim=1)
|
| 85 |
+
|
| 86 |
+
return x
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class Attention(nn.Module):
|
| 90 |
+
def __init__(
|
| 91 |
+
self,
|
| 92 |
+
dim: int,
|
| 93 |
+
num_heads: int = 8,
|
| 94 |
+
qkv_bias: bool = True,
|
| 95 |
+
scaled_cosine: bool = False,
|
| 96 |
+
scale_heads: bool = False,
|
| 97 |
+
logit_scale_max: float = math.log(1. / 0.01),
|
| 98 |
+
batch_first: bool = True,
|
| 99 |
+
attn_drop: float = 0.,
|
| 100 |
+
proj_drop: float = 0.
|
| 101 |
+
):
|
| 102 |
+
super().__init__()
|
| 103 |
+
self.scaled_cosine = scaled_cosine
|
| 104 |
+
self.scale_heads = scale_heads
|
| 105 |
+
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
|
| 106 |
+
self.num_heads = num_heads
|
| 107 |
+
self.head_dim = dim // num_heads
|
| 108 |
+
self.scale = self.head_dim ** -0.5
|
| 109 |
+
self.logit_scale_max = logit_scale_max
|
| 110 |
+
self.batch_first = batch_first
|
| 111 |
+
self.use_fsdpa = hasattr(nn.functional, 'scaled_dot_product_attention')
|
| 112 |
+
|
| 113 |
+
# keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
|
| 114 |
+
self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
|
| 115 |
+
if qkv_bias:
|
| 116 |
+
self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
|
| 117 |
+
else:
|
| 118 |
+
self.in_proj_bias = None
|
| 119 |
+
|
| 120 |
+
if self.scaled_cosine:
|
| 121 |
+
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
|
| 122 |
+
else:
|
| 123 |
+
self.logit_scale = None
|
| 124 |
+
self.attn_drop = nn.Dropout(attn_drop)
|
| 125 |
+
if self.scale_heads:
|
| 126 |
+
self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
|
| 127 |
+
else:
|
| 128 |
+
self.head_scale = None
|
| 129 |
+
self.out_proj = nn.Linear(dim, dim)
|
| 130 |
+
self.out_drop = nn.Dropout(proj_drop)
|
| 131 |
+
|
| 132 |
+
def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
|
| 133 |
+
if self.batch_first:
|
| 134 |
+
x = x.transpose(0, 1)
|
| 135 |
+
|
| 136 |
+
L, N, C = x.shape
|
| 137 |
+
q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
|
| 138 |
+
q = q.reshape(L, N * self.num_heads, -1).transpose(0, 1)
|
| 139 |
+
k = k.reshape(L, N * self.num_heads, -1).transpose(0, 1)
|
| 140 |
+
v = v.reshape(L, N * self.num_heads, -1).transpose(0, 1)
|
| 141 |
+
|
| 142 |
+
if attn_mask is not None and attn_mask.dtype == torch.bool:
|
| 143 |
+
new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
|
| 144 |
+
new_attn_mask.masked_fill_(attn_mask, float("-inf"))
|
| 145 |
+
attn_mask = new_attn_mask
|
| 146 |
+
|
| 147 |
+
if self.logit_scale is not None:
|
| 148 |
+
attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
|
| 149 |
+
logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
|
| 150 |
+
attn = attn.view(N, self.num_heads, L, L) * logit_scale
|
| 151 |
+
attn = attn.view(-1, L, L)
|
| 152 |
+
if attn_mask is not None:
|
| 153 |
+
attn = attn + attn_mask
|
| 154 |
+
attn = attn.softmax(dim=-1)
|
| 155 |
+
attn = self.attn_drop(attn)
|
| 156 |
+
x = torch.bmm(attn, v)
|
| 157 |
+
else:
|
| 158 |
+
if self.use_fsdpa:
|
| 159 |
+
x = F.scaled_dot_product_attention(
|
| 160 |
+
q, k, v,
|
| 161 |
+
attn_mask=attn_mask,
|
| 162 |
+
dropout_p=self.attn_drop.p if self.training else 0.,
|
| 163 |
+
)
|
| 164 |
+
else:
|
| 165 |
+
q = q * self.scale
|
| 166 |
+
attn = torch.bmm(q, k.transpose(-1, -2))
|
| 167 |
+
if attn_mask is not None:
|
| 168 |
+
attn += attn_mask
|
| 169 |
+
attn = attn.softmax(dim=-1)
|
| 170 |
+
attn = self.attn_drop(attn)
|
| 171 |
+
x = torch.bmm(attn, v)
|
| 172 |
+
|
| 173 |
+
if self.head_scale is not None:
|
| 174 |
+
x = x.view(N, self.num_heads, L, C) * self.head_scale
|
| 175 |
+
x = x.view(-1, L, C)
|
| 176 |
+
|
| 177 |
+
x = x.transpose(0, 1).reshape(L, N, C)
|
| 178 |
+
|
| 179 |
+
if self.batch_first:
|
| 180 |
+
x = x.transpose(0, 1)
|
| 181 |
+
|
| 182 |
+
x = self.out_proj(x)
|
| 183 |
+
x = self.out_drop(x)
|
| 184 |
+
return x
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
class AttentionalPooler(nn.Module):
    """Pool a variable-length token sequence with a fixed set of learned queries.

    Cross-attends `n_queries` learned query vectors over the (normalized) input
    tokens, producing a fixed-size `(batch, n_queries, d_model)` output.
    """

    def __init__(
            self,
            d_model: int,
            context_dim: int,
            n_head: int = 8,
            n_queries: int = 256,
            norm_layer: Callable = LayerNorm,
    ):
        super().__init__()
        # Learned query embeddings, shared across the batch.
        self.query = nn.Parameter(torch.randn(n_queries, d_model))
        # kdim/vdim let keys/values live in a different width than the queries.
        self.attn = nn.MultiheadAttention(d_model, n_head, kdim=context_dim, vdim=context_dim, batch_first=True)
        self.ln_q = norm_layer(d_model)
        self.ln_k = norm_layer(context_dim)

    def forward(self, x: torch.Tensor):
        # x: (batch, seq, context_dim) — batch_first attention throughout.
        batch = x.shape[0]
        keys = self.ln_k(x)
        queries = self.ln_q(self.query).unsqueeze(0).expand(batch, -1, -1)
        pooled = self.attn(queries, keys, keys, need_weights=False)[0]
        return pooled
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self- (or cross-) attention followed by an MLP,
    each wrapped in a residual connection with optional LayerScale."""

    def __init__(
            self,
            d_model: int,
            n_head: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            is_cross_attention: bool = False,
            batch_first: bool = True,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        self.attn = nn.MultiheadAttention(d_model, n_head, batch_first=batch_first)
        # LayerScale is enabled only when an init value is supplied.
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
        if is_cross_attention:
            # Separate norm for keys/values coming from another modality.
            self.ln_1_kv = norm_layer(d_model)

        self.ln_2 = norm_layer(d_model)
        hidden = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, hidden)),
            ("gelu", act_layer()),
            ("c_proj", nn.Linear(hidden, d_model))
        ]))
        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

    def attention(
            self,
            q_x: torch.Tensor,
            k_x: Optional[torch.Tensor] = None,
            v_x: Optional[torch.Tensor] = None,
            attn_mask: Optional[torch.Tensor] = None,
    ):
        # Default to self-attention when keys/values are not provided.
        if k_x is None:
            k_x = q_x
        if v_x is None:
            v_x = q_x

        if attn_mask is not None:
            # Additive masks must match the activation dtype.
            attn_mask = attn_mask.to(q_x.dtype)
        return self.attn(
            q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask
        )[0]

    def forward(
            self,
            q_x: torch.Tensor,
            k_x: Optional[torch.Tensor] = None,
            v_x: Optional[torch.Tensor] = None,
            attn_mask: Optional[torch.Tensor] = None,
    ):
        # ln_1_kv only exists for cross-attention blocks; normalize external k/v there.
        has_kv_norm = hasattr(self, "ln_1_kv")
        k_x = self.ln_1_kv(k_x) if has_kv_norm and k_x is not None else None
        v_x = self.ln_1_kv(v_x) if has_kv_norm and v_x is not None else None
        x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
        x = x + self.ls_2(self.mlp(self.ln_2(x)))
        return x
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
class CustomResidualAttentionBlock(nn.Module):
    """Transformer block built on the custom `Attention` module, supporting
    scaled-cosine attention, per-head scaling, and optional extra norms."""

    def __init__(
            self,
            d_model: int,
            n_head: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            scale_cosine_attn: bool = False,
            scale_heads: bool = False,
            scale_attn: bool = False,
            scale_fc: bool = False,
            batch_first: bool = True,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        self.attn = Attention(
            d_model,
            n_head,
            scaled_cosine=scale_cosine_attn,
            scale_heads=scale_heads,
            batch_first=batch_first,
        )
        # Optional post-attention norm; identity when disabled.
        self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

        self.ln_2 = norm_layer(d_model)
        hidden = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, hidden)),
            ("gelu", act_layer()),
            # Optional norm inside the MLP (between activation and projection).
            ('ln', norm_layer(hidden) if scale_fc else nn.Identity()),
            ("c_proj", nn.Linear(hidden, d_model))
        ]))
        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

    def get_reference_weight(self):
        # Representative weight used by callers to infer the cast dtype.
        return self.mlp.c_fc.weight

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        attn_out = self.attn(self.ln_1(x), attn_mask=attn_mask)
        x = x + self.ls_1(self.ln_attn(attn_out))
        x = x + self.ls_2(self.mlp(self.ln_2(x)))
        return x
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def _expand_token(token, batch_size: int):
|
| 316 |
+
return token.view(1, 1, -1).expand(batch_size, -1, -1)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
class Transformer(nn.Module):
    """Stack of `ResidualAttentionBlock`s with optional gradient checkpointing."""

    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            batch_first: bool = True,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.batch_first = batch_first
        # Toggled externally via set_grad_checkpointing on the owning model.
        self.grad_checkpointing = False

        self.resblocks = nn.ModuleList([
            ResidualAttentionBlock(
                width,
                heads,
                mlp_ratio,
                ls_init_value=ls_init_value,
                act_layer=act_layer,
                norm_layer=norm_layer,
                batch_first=batch_first,
            )
            for _ in range(layers)
        ])

    def get_cast_dtype(self) -> torch.dtype:
        # int8-quantized layers record their pre-quantization dtype; prefer it.
        first_fc = self.resblocks[0].mlp.c_fc
        if hasattr(first_fc, 'int8_original_dtype'):
            return first_fc.int8_original_dtype
        return first_fc.weight.dtype

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        if not self.batch_first:
            x = x.transpose(0, 1).contiguous()  # NLD -> LND
        for block in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
                x = checkpoint(block, x, None, None, attn_mask)
            else:
                x = block(x, attn_mask=attn_mask)
        if not self.batch_first:
            x = x.transpose(0, 1)  # LND -> NLD
        return x
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
class CustomTransformer(nn.Module):
    """ A custom transformer that can use different block types.

    Args:
        width: model (embedding) dimension.
        layers: number of blocks in the stack.
        heads: attention heads per block.
        mlp_ratio: hidden/width ratio for each block's MLP.
        ls_init_value: LayerScale init value (None disables LayerScale).
        act_layer / norm_layer: activation and normalization constructors.
        batch_first: run the stack in (N, L, D) order when True.
        block_types: a block-type name, or one name per layer.
    """
    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            batch_first: bool = True,
            block_types: Union[str, List[str]] = 'CustomResidualAttentionBlock',
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.batch_first = batch_first  # run transformer stack in batch first (N, L, D)
        self.grad_checkpointing = False

        if isinstance(block_types, str):
            block_types = [block_types] * layers
        assert len(block_types) == layers

        def _create_block(bt: str):
            # Factory for per-layer block construction; extend for new block types.
            if bt == 'CustomResidualAttentionBlock':
                return CustomResidualAttentionBlock(
                    width,
                    heads,
                    mlp_ratio=mlp_ratio,
                    ls_init_value=ls_init_value,
                    act_layer=act_layer,
                    norm_layer=norm_layer,
                    batch_first=batch_first,
                )
            else:
                # Was `assert False`, which is stripped under `python -O` and
                # would silently return None; raise an explicit error instead.
                raise ValueError(f'Unknown block type: {bt!r}')

        self.resblocks = nn.ModuleList([
            _create_block(bt)
            for bt in block_types
        ])

    def get_cast_dtype(self) -> torch.dtype:
        # int8-quantized layers record their pre-quantization dtype; prefer it.
        weight = self.resblocks[0].get_reference_weight()
        if hasattr(weight, 'int8_original_dtype'):
            return weight.int8_original_dtype
        return weight.dtype

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        if not self.batch_first:
            x = x.transpose(0, 1)  # NLD -> LND

        for r in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
                x = checkpoint(r, x, None, None, attn_mask)
            else:
                x = r(x, attn_mask=attn_mask)

        if not self.batch_first:
            x = x.transpose(0, 1)  # LND -> NLD  (comment fixed: this is the inverse transpose)
        return x
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
class VisionTransformer(nn.Module):
    """ViT image tower: patchify -> [CLS]+pos embed -> transformer -> pool -> project."""
    output_tokens: torch.jit.Final[bool]

    def __init__(
            self,
            image_size: int,
            patch_size: int,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float,
            ls_init_value: float = None,
            attentional_pool: bool = False,
            attn_pooler_queries: int = 256,
            attn_pooler_heads: int = 8,
            output_dim: int = 512,
            patch_dropout: float = 0.,
            no_ln_pre: bool = False,
            pos_embed_type: str = 'learnable',
            pool_type: str = 'tok',
            final_ln_after_pool: bool = False,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            output_tokens: bool = False,
    ):
        super().__init__()
        assert pool_type in ('tok', 'avg', 'none')
        self.output_tokens = output_tokens
        image_height, image_width = self.image_size = to_2tuple(image_size)
        patch_height, patch_width = self.patch_size = to_2tuple(patch_size)
        self.grid_size = (image_height // patch_height, image_width // patch_width)
        self.final_ln_after_pool = final_ln_after_pool  # currently ignored w/ attn pool enabled
        self.output_dim = output_dim

        # Patch embedding as a strided conv (no bias; LN follows).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        # Class embedding and positional embeddings.
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        num_tokens = self.grid_size[0] * self.grid_size[1] + 1  # patches + CLS
        if pos_embed_type == 'learnable':
            self.positional_embedding = nn.Parameter(scale * torch.randn(num_tokens, width))
        elif pos_embed_type == 'sin_cos_2d':
            # Fixed (non-trainable) 2D sin-cos embedding.
            assert self.grid_size[0] == self.grid_size[1],\
                'currently sin cos 2d pos embedding only supports square input'
            self.positional_embedding = nn.Parameter(
                torch.zeros(num_tokens, width), requires_grad=False)
            sincos = get_2d_sincos_pos_embed(width, self.grid_size[0], cls_token=True)
            self.positional_embedding.data.copy_(torch.from_numpy(sincos).float())
        else:
            raise ValueError

        # patch_dropout == 0. disables dropout entirely (identity).
        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()

        self.ln_pre = nn.Identity() if no_ln_pre else norm_layer(width)
        self.transformer = Transformer(
            width,
            layers,
            heads,
            mlp_ratio,
            ls_init_value=ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
        )

        if attentional_pool:
            if isinstance(attentional_pool, str):
                # String selects a dual-pooler mode ('parallel' or 'cascade').
                self.attn_pool_type = attentional_pool
                self.pool_type = 'none'
                if attentional_pool in ('parallel', 'cascade'):
                    self.attn_pool = AttentionalPooler(
                        output_dim,
                        width,
                        n_head=attn_pooler_heads,
                        n_queries=attn_pooler_queries,
                    )
                    # Single-query pooler for the contrastive embedding.
                    self.attn_pool_contrastive = AttentionalPooler(
                        output_dim,
                        width,
                        n_head=attn_pooler_heads,
                        n_queries=1,
                    )
                else:
                    assert False
            else:
                # Legacy boolean mode: one pooler, followed by global pooling.
                self.attn_pool_type = ''
                self.pool_type = pool_type
                self.attn_pool = AttentionalPooler(
                    output_dim,
                    width,
                    n_head=attn_pooler_heads,
                    n_queries=attn_pooler_queries,
                )
                self.attn_pool_contrastive = None
            pool_dim = output_dim
        else:
            self.attn_pool = None
            pool_dim = width
            self.pool_type = pool_type

        self.ln_post = norm_layer(pool_dim)
        self.proj = nn.Parameter(scale * torch.randn(pool_dim, output_dim))

        self.init_parameters()

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze all parameters, then re-enable grads for the last `unlocked_groups`
        groups (stem, each resblock, final block + ln_post, projection)."""
        for param in self.parameters():
            param.requires_grad = False

        if unlocked_groups != 0:
            groups = [
                [
                    self.conv1,
                    self.class_embedding,
                    self.positional_embedding,
                    self.ln_pre,
                ],
                *self.transformer.resblocks[:-1],
                [
                    self.transformer.resblocks[-1],
                    self.ln_post,
                ],
                self.proj,
            ]

            def _unlock(item):
                # Recursively enable grads on parameters, modules, or nested lists.
                if isinstance(item, Sequence):
                    for sub in item:
                        _unlock(sub)
                elif isinstance(item, torch.nn.Parameter):
                    item.requires_grad = True
                else:
                    for p in item.parameters():
                        p.requires_grad = True

            _unlock(groups[-unlocked_groups:])

    def init_parameters(self):
        # FIXME OpenAI CLIP did not define an init for the VisualTransformer
        # TODO experiment if default PyTorch init, below, or alternate init is best.

        # nn.init.normal_(self.class_embedding, std=self.scale)
        # nn.init.normal_(self.positional_embedding, std=self.scale)
        #
        # proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        # attn_std = self.transformer.width ** -0.5
        # fc_std = (2 * self.transformer.width) ** -0.5
        # for block in self.transformer.resblocks:
        #     nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
        #     nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
        #     nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
        #     nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        #
        # if self.text_projection is not None:
        #     nn.init.normal_(self.text_projection, std=self.scale)
        pass

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.grad_checkpointing = enable

    def _global_pool(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Pool over the sequence dim; 'tok' takes the CLS token, 'avg' means the rest.
        if self.pool_type == 'avg':
            pooled, tokens = x[:, 1:].mean(dim=1), x[:, 1:]
        elif self.pool_type == 'tok':
            pooled, tokens = x[:, 0], x[:, 1:]
        else:
            pooled = tokens = x

        return pooled, tokens

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]

        # Prepend CLS token, then add positional embeddings.
        x = torch.cat([_expand_token(self.class_embedding, x.shape[0]).to(x.dtype), x], dim=1)
        # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)

        x = self.patch_dropout(x)
        x = self.ln_pre(x)
        x = self.transformer(x)

        if self.attn_pool is not None:
            if self.attn_pool_contrastive is not None:
                # This is untested, WIP pooling that should match paper
                x = self.ln_post(x)  # TBD LN first or separate one after each pool?
                tokens = self.attn_pool(x)
                if self.attn_pool_type == 'parallel':
                    pooled = self.attn_pool_contrastive(x)
                else:
                    assert self.attn_pool_type == 'cascade'
                    pooled = self.attn_pool_contrastive(tokens)
            else:
                # this is the original OpenCLIP CoCa setup, does not match paper
                x = self.attn_pool(x)
                x = self.ln_post(x)
                pooled, tokens = self._global_pool(x)
        elif self.final_ln_after_pool:
            pooled, tokens = self._global_pool(x)
            pooled = self.ln_post(pooled)
        else:
            x = self.ln_post(x)
            pooled, tokens = self._global_pool(x)

        if self.proj is not None:
            pooled = pooled @ self.proj

        if self.output_tokens:
            return pooled, tokens

        return pooled
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def text_global_pool(x, text: Optional[torch.Tensor] = None, pool_type: str = 'argmax'):
    """Pool a (batch, seq, dim) text feature tensor.

    'first'/'last' take the first/last position (tokens exclude it);
    'argmax' takes the position of each row's max token id (EOT convention;
    requires `text`), with tokens left as the full sequence; anything else
    is a no-op (pooled is tokens is x).
    """
    if pool_type == 'first':
        return x[:, 0], x[:, 1:]
    if pool_type == 'last':
        return x[:, -1], x[:, :-1]
    if pool_type == 'argmax':
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        assert text is not None
        eot_idx = text.argmax(dim=-1)
        return x[torch.arange(x.shape[0]), eot_idx], x
    return x, x
|
| 666 |
+
|
| 667 |
+
|
| 668 |
+
class TextTransformer(nn.Module):
    """CLIP text tower: token + positional embeddings -> transformer -> pool -> project."""
    output_tokens: torch.jit.Final[bool]

    def __init__(
            self,
            context_length: int = 77,
            vocab_size: int = 49408,
            width: int = 512,
            heads: int = 8,
            layers: int = 12,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            output_dim: Optional[int] = 512,
            embed_cls: bool = False,
            no_causal_mask: bool = False,
            pad_id: int = 0,
            pool_type: str = 'argmax',
            proj_type: str = 'linear',
            proj_bias: bool = False,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            output_tokens: bool = False,
    ):
        super().__init__()
        assert pool_type in ('first', 'last', 'argmax', 'none')
        self.output_tokens = output_tokens
        self.num_pos = self.context_length = context_length
        self.vocab_size = vocab_size
        self.width = width
        self.output_dim = output_dim
        self.heads = heads
        self.pad_id = pad_id
        self.pool_type = pool_type

        self.token_embedding = nn.Embedding(vocab_size, width)
        if embed_cls:
            # CoCa-style: a learned CLS token is appended after the text tokens.
            self.cls_emb = nn.Parameter(torch.empty(width))
            self.num_pos += 1
        else:
            self.cls_emb = None
        self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))
        self.transformer = Transformer(
            width=width,
            layers=layers,
            heads=heads,
            mlp_ratio=mlp_ratio,
            ls_init_value=ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
        )
        self.ln_final = norm_layer(width)

        if no_causal_mask:
            self.attn_mask = None
        else:
            # Non-persistent: rebuilt on load rather than stored in checkpoints.
            self.register_buffer('attn_mask', self.build_causal_mask(), persistent=False)

        if proj_type == 'none' or not output_dim:
            self.text_projection = None
        elif proj_bias:
            self.text_projection = nn.Linear(width, output_dim)
        else:
            self.text_projection = nn.Parameter(torch.empty(width, output_dim))

        self.init_parameters()

    def init_parameters(self):
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if self.cls_emb is not None:
            nn.init.normal_(self.cls_emb, std=0.01)

        # Scale attn/mlp init by depth per the original CLIP recipe.
        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            if isinstance(self.text_projection, nn.Linear):
                nn.init.normal_(self.text_projection.weight, std=self.transformer.width ** -0.5)
                if self.text_projection.bias is not None:
                    nn.init.zeros_(self.text_projection.bias)
            else:
                nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.grad_checkpointing = enable

    def build_causal_mask(self):
        # lazily create causal attention mask, with full attention between the tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.full((self.num_pos, self.num_pos), float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def build_cls_mask(self, text, cast_dtype: torch.dtype):
        # Additive mask letting every position attend to non-pad tokens (and the CLS slot).
        cls_mask = (text != self.pad_id).unsqueeze(1)
        cls_mask = F.pad(cls_mask, (1, 0, cls_mask.shape[2], 0), value=True)
        additive_mask = torch.zeros(cls_mask.shape, dtype=cast_dtype, device=cls_mask.device)
        additive_mask.masked_fill_(~cls_mask, float("-inf"))
        # One mask copy per attention head.
        additive_mask = torch.repeat_interleave(additive_mask, self.heads, 0)
        return additive_mask

    def forward(self, text):
        cast_dtype = self.transformer.get_cast_dtype()
        seq_len = text.shape[1]

        x = self.token_embedding(text).to(cast_dtype)  # [batch_size, n_ctx, d_model]
        attn_mask = self.attn_mask
        if self.cls_emb is not None:
            # Append the learned CLS token and fold pad masking into the attn mask.
            seq_len += 1
            x = torch.cat([x, _expand_token(self.cls_emb, x.shape[0])], dim=1)
            cls_mask = self.build_cls_mask(text, cast_dtype)
            if attn_mask is not None:
                attn_mask = attn_mask[None, :seq_len, :seq_len] + cls_mask[:, :seq_len, :seq_len]

        x = x + self.positional_embedding[:seq_len].to(cast_dtype)
        x = self.transformer(x, attn_mask=attn_mask)

        # x.shape = [batch_size, n_ctx, transformer.width]
        if self.cls_emb is not None:
            # presence of appended cls embed (CoCa) overrides pool_type, always take last token
            pooled, tokens = text_global_pool(x, pool_type='last')
            pooled = self.ln_final(pooled)  # final LN applied after pooling in this case
        else:
            x = self.ln_final(x)
            pooled, tokens = text_global_pool(x, text, pool_type=self.pool_type)

        if self.text_projection is not None:
            if isinstance(self.text_projection, nn.Linear):
                pooled = self.text_projection(pooled)
            else:
                pooled = pooled @ self.text_projection

        if self.output_tokens:
            return pooled, tokens

        return pooled
|
| 814 |
+
|
| 815 |
+
|
| 816 |
+
class MultimodalTransformer(Transformer):
    """CoCa-style multimodal decoder: interleaves causal self-attention blocks
    (inherited `resblocks`) with cross-attention blocks over image embeddings."""

    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            context_length: int = 77,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            output_dim: int = 512,
            batch_first: bool = True,
    ):
        super().__init__(
            width=width,
            layers=layers,
            heads=heads,
            mlp_ratio=mlp_ratio,
            ls_init_value=ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
            batch_first=batch_first,
        )
        self.context_length = context_length
        self.cross_attn = nn.ModuleList([
            ResidualAttentionBlock(
                width,
                heads,
                mlp_ratio,
                ls_init_value=ls_init_value,
                act_layer=act_layer,
                norm_layer=norm_layer,
                is_cross_attention=True,
                batch_first=batch_first,
            )
            for _ in range(layers)
        ])

        self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)

        self.ln_final = norm_layer(width)
        self.text_projection = nn.Parameter(torch.empty(width, output_dim))

    def init_parameters(self):
        # BUGFIX: this class *is* the transformer (it subclasses Transformer), so
        # width/resblocks/cross_attn live directly on `self`. The previous code
        # referenced `self.transformer.*`, which raised AttributeError when called.
        proj_std = (self.width ** -0.5) * ((2 * self.layers) ** -0.5)
        attn_std = self.width ** -0.5
        fc_std = (2 * self.width) ** -0.5
        for block in self.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        for block in self.cross_attn:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.width ** -0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def forward(self, image_embs, text_embs):
        seq_len = text_embs.shape[1]
        if not self.batch_first:
            image_embs = image_embs.permute(1, 0, 2)  # NLD -> LND
            text_embs = text_embs.permute(1, 0, 2)  # NLD -> LND

        for resblock, cross_attn in zip(self.resblocks, self.cross_attn):
            if self.grad_checkpointing and not torch.jit.is_scripting():
                # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
                text_embs = checkpoint(resblock, text_embs, None, None, self.attn_mask[:seq_len, :seq_len])
                text_embs = checkpoint(cross_attn, text_embs, image_embs, image_embs, None)
            else:
                text_embs = resblock(text_embs, attn_mask=self.attn_mask[:seq_len, :seq_len])
                text_embs = cross_attn(text_embs, k_x=image_embs, v_x=image_embs)

        if not self.batch_first:
            text_embs = text_embs.permute(1, 0, 2)  # LND -> NLD

        out = self.ln_final(text_embs)
        if self.text_projection is not None:
            out = out @ self.text_projection

        return out

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/utils.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import repeat
|
| 2 |
+
import collections.abc
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import nn as nn
|
| 6 |
+
from torchvision.ops.misc import FrozenBatchNorm2d
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def freeze_batch_norm_2d(module, module_match={}, name=''):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
    returned. Otherwise, the module is walked recursively and submodules are converted in place.

    Args:
        module (torch.nn.Module): Any PyTorch module.
        module_match (dict): Dictionary of full module names to freeze (all if empty)
        name (str): Full module name (prefix)

    Returns:
        torch.nn.Module: Resulting module

    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    # An empty module_match means "freeze everything"; otherwise only exact-name matches.
    selected = (not module_match) or (name in module_match)
    bn_types = (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)

    if selected and isinstance(module, bn_types):
        frozen = FrozenBatchNorm2d(module.num_features)
        frozen.num_features = module.num_features
        frozen.affine = module.affine
        if module.affine:
            # Detached clones: frozen copies should not share grads with the source.
            frozen.weight.data = module.weight.data.clone().detach()
            frozen.bias.data = module.bias.data.clone().detach()
        # Running stats are shared by reference, matching the original behavior.
        frozen.running_mean.data = module.running_mean.data
        frozen.running_var.data = module.running_var.data
        frozen.eps = module.eps
        return frozen

    # Not a (selected) BN layer: recurse and swap converted children in place.
    for child_name, child in module.named_children():
        full_child_name = '.'.join([name, child_name]) if name else child_name
        new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
        if new_child is not child:
            module.add_module(child_name, new_child)
    return module
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# From PyTorch internals
|
| 49 |
+
def _ntuple(n):
|
| 50 |
+
def parse(x):
|
| 51 |
+
if isinstance(x, collections.abc.Iterable):
|
| 52 |
+
return x
|
| 53 |
+
return tuple(repeat(x, n))
|
| 54 |
+
return parse
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Pre-built broadcasters for the most common arities.
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)


def to_ntuple(n, x):
    """Broadcast ``x`` to an ``n``-tuple (iterable inputs pass through unchanged)."""
    return _ntuple(n)(x)
|
| 62 |
+
|
| 63 |
+
# Replaces all linear layers with linear_replacement
|
| 64 |
+
# TODO: add int8 support for other linear layers including attn and convnets
|
| 65 |
+
def replace_linear(model, linear_replacement, include_modules=('c_fc', 'c_proj'), copy_weights=True):
    """Recursively replace selected ``torch.nn.Linear`` layers in ``model``.

    Every direct or nested child whose attribute name is in ``include_modules``
    and is an ``nn.Linear`` is rebuilt via ``linear_replacement(in_features,
    out_features, has_bias)``; weights/bias are copied when ``copy_weights``.

    Args:
        model (torch.nn.Module): Module tree to rewrite in place.
        linear_replacement (callable): Factory with an ``nn.Linear``-like
            ``(in_features, out_features, bias)`` constructor signature.
        include_modules (sequence of str): Child attribute names eligible for
            replacement. (Immutable tuple default avoids the shared mutable
            default-argument pitfall.)
        copy_weights (bool): Copy the old layer's weight/bias into the new one.

    Returns:
        torch.nn.Module: The same ``model`` object, modified in place.
    """
    for name, module in model.named_children():
        # Depth-first: rewrite grandchildren before inspecting this child.
        if len(list(module.children())) > 0:
            replace_linear(module, linear_replacement, include_modules, copy_weights)

        if isinstance(module, torch.nn.Linear) and name in include_modules:
            old_module = model._modules[name]
            model._modules[name] = linear_replacement(
                module.in_features,
                module.out_features,
                module.bias is not None,
            )
            if copy_weights:
                model._modules[name].weight.data.copy_(old_module.weight.data)
                if model._modules[name].bias is not None:
                    model._modules[name].bias.data.copy_(old_module.bias)

    return model
|
| 83 |
+
|
| 84 |
+
def convert_int8_model_to_inference_mode(model):
    """Switch any int8-capable submodules of ``model`` into inference form.

    Walks every submodule; those exposing a ``prepare_for_eval`` hook have it
    invoked, and the weight dtype observed beforehand is stashed on the module
    as ``int8_original_dtype`` so it can be recovered later.
    """
    for submodule in model.modules():
        if hasattr(submodule, 'prepare_for_eval'):
            # Record the dtype before the hook may alter the weights.
            original_dtype = submodule.weight.dtype
            submodule.prepare_for_eval()
            submodule.int8_original_dtype = original_dtype
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/version.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Single source of truth for the open_clip package version string.
__version__ = '2.29.0'
|
evalkit_internvl/lib/python3.10/site-packages/open_clip/zero_shot_metadata.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# The 80 prompt templates from OpenAI's CLIP prompt-engineering notebook.
# Each entry is a callable mapping a class-name string to a full caption;
# captions are embedded and averaged to build zero-shot classifier weights.
OPENAI_IMAGENET_TEMPLATES = (
    lambda c: f'a bad photo of a {c}.',
    lambda c: f'a photo of many {c}.',
    lambda c: f'a sculpture of a {c}.',
    lambda c: f'a photo of the hard to see {c}.',
    lambda c: f'a low resolution photo of the {c}.',
    lambda c: f'a rendering of a {c}.',
    lambda c: f'graffiti of a {c}.',
    lambda c: f'a bad photo of the {c}.',
    lambda c: f'a cropped photo of the {c}.',
    lambda c: f'a tattoo of a {c}.',
    lambda c: f'the embroidered {c}.',
    lambda c: f'a photo of a hard to see {c}.',
    lambda c: f'a bright photo of a {c}.',
    lambda c: f'a photo of a clean {c}.',
    lambda c: f'a photo of a dirty {c}.',
    lambda c: f'a dark photo of the {c}.',
    lambda c: f'a drawing of a {c}.',
    lambda c: f'a photo of my {c}.',
    lambda c: f'the plastic {c}.',
    lambda c: f'a photo of the cool {c}.',
    lambda c: f'a close-up photo of a {c}.',
    lambda c: f'a black and white photo of the {c}.',
    lambda c: f'a painting of the {c}.',
    lambda c: f'a painting of a {c}.',
    lambda c: f'a pixelated photo of the {c}.',
    lambda c: f'a sculpture of the {c}.',
    lambda c: f'a bright photo of the {c}.',
    lambda c: f'a cropped photo of a {c}.',
    lambda c: f'a plastic {c}.',
    lambda c: f'a photo of the dirty {c}.',
    lambda c: f'a jpeg corrupted photo of a {c}.',
    lambda c: f'a blurry photo of the {c}.',
    lambda c: f'a photo of the {c}.',
    lambda c: f'a good photo of the {c}.',
    lambda c: f'a rendering of the {c}.',
    lambda c: f'a {c} in a video game.',
    lambda c: f'a photo of one {c}.',
    lambda c: f'a doodle of a {c}.',
    lambda c: f'a close-up photo of the {c}.',
    lambda c: f'a photo of a {c}.',
    lambda c: f'the origami {c}.',
    lambda c: f'the {c} in a video game.',
    lambda c: f'a sketch of a {c}.',
    lambda c: f'a doodle of the {c}.',
    lambda c: f'a origami {c}.',
    lambda c: f'a low resolution photo of a {c}.',
    lambda c: f'the toy {c}.',
    lambda c: f'a rendition of the {c}.',
    lambda c: f'a photo of the clean {c}.',
    lambda c: f'a photo of a large {c}.',
    lambda c: f'a rendition of a {c}.',
    lambda c: f'a photo of a nice {c}.',
    lambda c: f'a photo of a weird {c}.',
    lambda c: f'a blurry photo of a {c}.',
    lambda c: f'a cartoon {c}.',
    lambda c: f'art of a {c}.',
    lambda c: f'a sketch of the {c}.',
    lambda c: f'a embroidered {c}.',
    lambda c: f'a pixelated photo of a {c}.',
    lambda c: f'itap of the {c}.',
    lambda c: f'a jpeg corrupted photo of the {c}.',
    lambda c: f'a good photo of a {c}.',
    lambda c: f'a plushie {c}.',
    lambda c: f'a photo of the nice {c}.',
    lambda c: f'a photo of the small {c}.',
    lambda c: f'a photo of the weird {c}.',
    lambda c: f'the cartoon {c}.',
    lambda c: f'art of the {c}.',
    lambda c: f'a drawing of the {c}.',
    lambda c: f'a photo of the large {c}.',
    lambda c: f'a black and white photo of a {c}.',
    lambda c: f'the plushie {c}.',
    lambda c: f'a dark photo of a {c}.',
    lambda c: f'itap of a {c}.',
    lambda c: f'graffiti of the {c}.',
    lambda c: f'a toy {c}.',
    lambda c: f'itap of my {c}.',
    lambda c: f'a photo of a cool {c}.',
    lambda c: f'a photo of a small {c}.',
    lambda c: f'a tattoo of the {c}.',
)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# a much smaller subset of above prompts
|
| 87 |
+
# from https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
|
| 88 |
+
# Seven representative templates; each is a callable mapping a class-name
# string to a caption, used when the full 80-template set is too expensive.
SIMPLE_IMAGENET_TEMPLATES = (
    lambda c: f'itap of a {c}.',
    lambda c: f'a bad photo of the {c}.',
    lambda c: f'a origami {c}.',
    lambda c: f'a photo of the large {c}.',
    lambda c: f'a {c} in a video game.',
    lambda c: f'art of the {c}.',
    lambda c: f'a photo of the small {c}.',
)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
IMAGENET_CLASSNAMES = (
|
| 100 |
+
"tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray",
|
| 101 |
+
"stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco",
|
| 102 |
+
"indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper",
|
| 103 |
+
"kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander",
|
| 104 |
+
"smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog",
|
| 105 |
+
"tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin",
|
| 106 |
+
"box turtle", "banded gecko", "green iguana", "Carolina anole",
|
| 107 |
+
"desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard",
|
| 108 |
+
"Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile",
|
| 109 |
+
"American alligator", "triceratops", "worm snake", "ring-necked snake",
|
| 110 |
+
"eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake",
|
| 111 |
+
"vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra",
|
| 112 |
+
"green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake",
|
| 113 |
+
"sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider",
|
| 114 |
+
"barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider",
|
| 115 |
+
"tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl",
|
| 116 |
+
"quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet",
|
| 117 |
+
"coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck",
|
| 118 |
+
"red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby",
|
| 119 |
+
"koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch",
|
| 120 |
+
"snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab",
|
| 121 |
+
"fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab",
|
| 122 |
+
"isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron",
|
| 123 |
+
"great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot",
|
| 124 |
+
"bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher",
|
| 125 |
+
"pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion",
|
| 126 |
+
"Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel",
|
| 127 |
+
"Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle",
|
| 128 |
+
"Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound",
|
| 129 |
+
"English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound",
|
| 130 |
+
"Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound",
|
| 131 |
+
"Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier",
|
| 132 |
+
"Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier",
|
| 133 |
+
"Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier",
|
| 134 |
+
"Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier",
|
| 135 |
+
"Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer",
|
| 136 |
+
"Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier",
|
| 137 |
+
"Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier",
|
| 138 |
+
"Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever",
|
| 139 |
+
"Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla",
|
| 140 |
+
"English Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel",
|
| 141 |
+
"English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel",
|
| 142 |
+
"Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard",
|
| 143 |
+
"Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie",
|
| 144 |
+
"Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann",
|
| 145 |
+
"Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog",
|
| 146 |
+
"Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff",
|
| 147 |
+
"French Bulldog", "Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky",
|
| 148 |
+
"Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog",
|
| 149 |
+
"Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon",
|
| 150 |
+
"Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle",
|
| 151 |
+
"Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf",
|
| 152 |
+
"red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox",
|
| 153 |
+
"kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat",
|
| 154 |
+
"Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger",
|
| 155 |
+
"cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose",
|
| 156 |
+
"meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle",
|
| 157 |
+
"dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper",
|
| 158 |
+
"cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper",
|
| 159 |
+
"lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly",
|
| 160 |
+
"monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly",
|
| 161 |
+
"starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit",
|
| 162 |
+
"hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse",
|
| 163 |
+
"zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison",
|
| 164 |
+
"ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)",
|
| 165 |
+
"gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat",
|
| 166 |
+
"black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan",
|
| 167 |
+
"gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque",
|
| 168 |
+
"langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin",
|
| 169 |
+
"howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey",
|
| 170 |
+
"ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda",
|
| 171 |
+
"giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish",
|
| 172 |
+
"sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown",
|
| 173 |
+
"accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance",
|
| 174 |
+
"amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle",
|
| 175 |
+
"backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo",
|
| 176 |
+
"baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel",
|
| 177 |
+
"wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel",
|
| 178 |
+
"bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)",
|
| 179 |
+
"beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", "bikini",
|
| 180 |
+
"ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet",
|
| 181 |
+
"bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra",
|
| 182 |
+
"breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest",
|
| 183 |
+
"high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe",
|
| 184 |
+
"can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton",
|
| 185 |
+
"car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran",
|
| 186 |
+
"CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw",
|
| 187 |
+
"storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking",
|
| 188 |
+
"church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker",
|
| 189 |
+
"coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard",
|
| 190 |
+
"candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot",
|
| 191 |
+
"cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed",
|
| 192 |
+
"Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer",
|
| 193 |
+
"rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table",
|
| 194 |
+
"dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig",
|
| 195 |
+
"drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar",
|
| 196 |
+
"electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder",
|
| 197 |
+
"feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute",
|
| 198 |
+
"folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed",
|
| 199 |
+
"freight car", "French horn", "frying pan", "fur coat", "garbage truck",
|
| 200 |
+
"gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola",
|
| 201 |
+
"gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine",
|
| 202 |
+
"hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer",
|
| 203 |
+
"handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet",
|
| 204 |
+
"holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar",
|
| 205 |
+
"horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep",
|
| 206 |
+
"T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat",
|
| 207 |
+
"ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library",
|
| 208 |
+
"lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion",
|
| 209 |
+
"music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag",
|
| 210 |
+
"mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask",
|
| 211 |
+
"matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone",
|
| 212 |
+
"microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile",
|
| 213 |
+
"mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor",
|
| 214 |
+
"moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa",
|
| 215 |
+
"mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail",
|
| 216 |
+
"neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina",
|
| 217 |
+
"odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart",
|
| 218 |
+
"oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush",
|
| 219 |
+
"pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench",
|
| 220 |
+
"parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case",
|
| 221 |
+
"pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube",
|
| 222 |
+
"picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball",
|
| 223 |
+
"pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag",
|
| 224 |
+
"plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho",
|
| 225 |
+
"pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug",
|
| 226 |
+
"printer", "prison", "missile", "projector", "hockey puck", "punching bag", "purse", "quill",
|
| 227 |
+
"quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel",
|
| 228 |
+
"recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator",
|
| 229 |
+
"remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser",
|
| 230 |
+
"rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal",
|
| 231 |
+
"sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard",
|
| 232 |
+
"CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store",
|
| 233 |
+
"shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap",
|
| 234 |
+
"shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door",
|
| 235 |
+
"slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock",
|
| 236 |
+
"solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater",
|
| 237 |
+
"space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight",
|
| 238 |
+
"stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf",
|
| 239 |
+
"stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa",
|
| 240 |
+
"submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge",
|
| 241 |
+
"mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe",
|
| 242 |
+
"table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball",
|
| 243 |
+
"thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof",
|
| 244 |
+
"toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store",
|
| 245 |
+
"tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod",
|
| 246 |
+
"triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard",
|
| 247 |
+
"umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling",
|
| 248 |
+
"velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball",
|
| 249 |
+
"waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink",
|
| 250 |
+
"washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle",
|
| 251 |
+
"hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing",
|
| 252 |
+
"wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website",
|
| 253 |
+
"comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu",
|
| 254 |
+
"plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette",
|
| 255 |
+
"bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli",
|
| 256 |
+
"cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber",
|
| 257 |
+
"artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange",
|
| 258 |
+
"lemon", "fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate",
|
| 259 |
+
"hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito",
|
| 260 |
+
"red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef",
|
| 261 |
+
"geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player",
|
| 262 |
+
"bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn",
|
| 263 |
+
"rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom",
|
| 264 |
+
"earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"
|
| 265 |
+
)
|
| 266 |
+
|
evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-310.pyc
ADDED
|
Binary file (1.79 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/__pycache__/fine_tuning_job_integration.cpython-310.pyc
ADDED
|
Binary file (337 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/jobs/__pycache__/fine_tuning_job_checkpoint.cpython-310.pyc
ADDED
|
Binary file (1.24 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import Optional
|
| 4 |
+
from typing_extensions import Literal
|
| 5 |
+
|
| 6 |
+
from ...._models import BaseModel
|
| 7 |
+
|
| 8 |
+
__all__ = ["FineTuningJobCheckpoint", "Metrics"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Metrics(BaseModel):
    """Training/validation metrics captured at one step of a fine-tuning job.

    Every field is declared ``Optional`` with a ``None`` default, so values
    absent from an API response deserialize to ``None``.
    """

    full_valid_loss: Optional[float] = None

    full_valid_mean_token_accuracy: Optional[float] = None

    step: Optional[float] = None

    train_loss: Optional[float] = None

    train_mean_token_accuracy: Optional[float] = None

    valid_loss: Optional[float] = None

    valid_mean_token_accuracy: Optional[float] = None
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class FineTuningJobCheckpoint(BaseModel):
    """A model checkpoint produced during a fine-tuning job.

    Corresponds to the API's ``fine_tuning.job.checkpoint`` object type
    (see the ``object`` field below).
    """

    id: str
    """The checkpoint identifier, which can be referenced in the API endpoints."""

    created_at: int
    """The Unix timestamp (in seconds) for when the checkpoint was created."""

    fine_tuned_model_checkpoint: str
    """The name of the fine-tuned checkpoint model that is created."""

    fine_tuning_job_id: str
    """The name of the fine-tuning job that this checkpoint was created from."""

    metrics: Metrics
    """Metrics at the step number during the fine-tuning job."""

    object: Literal["fine_tuning.job.checkpoint"]
    """The object type, which is always "fine_tuning.job.checkpoint"."""

    step_number: int
    """The step number that the checkpoint was created at."""
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libXau-b2e5323c.so.6.0.0
ADDED
|
Binary file (12.9 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavdevice-bf61e037.so.58.5.100
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:06e50e5ae19def6f1c73ffa308361ed4e6665f5cd53056892874848540a2017c
|
| 3 |
+
size 114320
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:79b37a526b50d6ebcd2255983198276718c29c0942d1fde96306e413041e01cb
|
| 3 |
+
size 3075448
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavformat-8b46ea57.so.58.20.100
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9a1eee8f823048e9059d3516d69c57b8a10604f7b722bfaf2327d021b4662518
|
| 3 |
+
size 2399400
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libavutil-2b26904a.so.56.22.100
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f257fbb5b251f0b8580b5d65296a4d391ae28b98a4ed715961db0df2aaa3c7b9
|
| 3 |
+
size 413208
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libbz2-13e8c345.so.1.0.4
ADDED
|
Binary file (70.9 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libpostproc-88b722f8.so.55.3.100
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:94f8b2c0b25e7ecce3e6648c20fc25839c0000b07616d797d02983dc75efe9f8
|
| 3 |
+
size 120064
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libswscale-8e37dcfd.so.5.3.100
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cc48193958276cf1ae7c1272554710a96271331f8d0b4f31061752caae8c5b13
|
| 3 |
+
size 537808
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-77222338.so.1.1.0
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:266a250225f2e7e62c3b21496090ff8bdf9c8ff7555c7fef82b2e06385d36dab
|
| 3 |
+
size 194008
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-shape-893f3868.so.0.0.0
ADDED
|
Binary file (21.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-shm-7ffb2544.so.0.0.0
ADDED
|
Binary file (21.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libxcb-xfixes-6523fc53.so.0.0.0
ADDED
|
Binary file (53.5 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/decord.libs/libz-eb09ad1d.so.1.2.3
ADDED
|
Binary file (92.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/einops_exts/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from einops_exts.einops_exts import check_shape
|
| 2 |
+
from einops_exts.einops_exts import rearrange_many, repeat_many, reduce_many
|
| 3 |
+
from einops_exts.einops_exts import rearrange_with_anon_dims, repeat_with_anon_dims, reduce_with_anon_dims
|
evalkit_tf437/lib/python3.10/site-packages/einops_exts/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (421 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/einops_exts/__pycache__/einops_exts.cpython-310.pyc
ADDED
|
Binary file (2.45 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/einops_exts/__pycache__/torch.cpython-310.pyc
ADDED
|
Binary file (1.52 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/einops_exts/einops_exts.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from torch import nn
|
| 3 |
+
from functools import wraps, partial
|
| 4 |
+
|
| 5 |
+
from einops import rearrange, reduce, repeat
|
| 6 |
+
|
| 7 |
+
# checking shape
|
| 8 |
+
# @nils-werner
|
| 9 |
+
# https://github.com/arogozhnikov/einops/issues/168#issuecomment-1042933838
|
| 10 |
+
|
| 11 |
+
def check_shape(tensor, pattern, **kwargs):
    """Assert that ``tensor`` conforms to ``pattern`` (with optional fixed
    axis sizes in ``kwargs``) by performing an identity rearrange.

    Returns the tensor unchanged when the shape matches; einops raises
    otherwise.
    """
    identity_pattern = f"{pattern} -> {pattern}"
    return rearrange(tensor, identity_pattern, **kwargs)
|
| 13 |
+
|
| 14 |
+
# do same einops operations on a list of tensors
|
| 15 |
+
|
| 16 |
+
def _many(fn):
|
| 17 |
+
@wraps(fn)
|
| 18 |
+
def inner(tensors, pattern, **kwargs):
|
| 19 |
+
return (fn(tensor, pattern, **kwargs) for tensor in tensors)
|
| 20 |
+
return inner
|
| 21 |
+
|
| 22 |
+
# do einops with unflattening of anonymously named dimensions
|
| 23 |
+
# (...flattened) -> ...flattened
|
| 24 |
+
|
| 25 |
+
def _with_anon_dims(fn):
|
| 26 |
+
@wraps(fn)
|
| 27 |
+
def inner(tensor, pattern, **kwargs):
|
| 28 |
+
regex = r'(\.\.\.[a-zA-Z]+)'
|
| 29 |
+
matches = re.findall(regex, pattern)
|
| 30 |
+
get_anon_dim_name = lambda t: t.lstrip('...')
|
| 31 |
+
dim_prefixes = tuple(map(get_anon_dim_name, set(matches)))
|
| 32 |
+
|
| 33 |
+
update_kwargs_dict = dict()
|
| 34 |
+
|
| 35 |
+
for prefix in dim_prefixes:
|
| 36 |
+
assert prefix in kwargs, f'dimension list "{prefix}" was not passed in'
|
| 37 |
+
dim_list = kwargs[prefix]
|
| 38 |
+
assert isinstance(dim_list, (list, tuple)), f'dimension list "{prefix}" needs to be a tuple of list of dimensions'
|
| 39 |
+
dim_names = list(map(lambda ind: f'{prefix}{ind}', range(len(dim_list))))
|
| 40 |
+
update_kwargs_dict[prefix] = dict(zip(dim_names, dim_list))
|
| 41 |
+
|
| 42 |
+
def sub_with_anonymous_dims(t):
|
| 43 |
+
dim_name_prefix = get_anon_dim_name(t.groups()[0])
|
| 44 |
+
return ' '.join(update_kwargs_dict[dim_name_prefix].keys())
|
| 45 |
+
|
| 46 |
+
pattern_new = re.sub(regex, sub_with_anonymous_dims, pattern)
|
| 47 |
+
|
| 48 |
+
for prefix, update_dict in update_kwargs_dict.items():
|
| 49 |
+
del kwargs[prefix]
|
| 50 |
+
kwargs.update(update_dict)
|
| 51 |
+
|
| 52 |
+
return fn(tensor, pattern_new, **kwargs)
|
| 53 |
+
return inner
|
| 54 |
+
|
| 55 |
+
# generate all helper functions
|
| 56 |
+
|
| 57 |
+
# Variants that apply one pattern lazily across a sequence of tensors.
rearrange_many = _many(rearrange)
repeat_many = _many(repeat)
reduce_many = _many(reduce)

# Variants that expand anonymous '...name' dimension groups in the pattern.
rearrange_with_anon_dims = _with_anon_dims(rearrange)
repeat_with_anon_dims = _with_anon_dims(repeat)
reduce_with_anon_dims = _with_anon_dims(reduce)
|
evalkit_tf437/lib/python3.10/site-packages/parso/__init__.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""
|
| 2 |
+
Parso is a Python parser that supports error recovery and round-trip parsing
|
| 3 |
+
for different Python versions (in multiple Python versions). Parso is also able
|
| 4 |
+
to list multiple syntax errors in your python file.
|
| 5 |
+
|
| 6 |
+
Parso has been battle-tested by jedi_. It was pulled out of jedi to be useful
|
| 7 |
+
for other projects as well.
|
| 8 |
+
|
| 9 |
+
Parso consists of a small API to parse Python and analyse the syntax tree.
|
| 10 |
+
|
| 11 |
+
.. _jedi: https://github.com/davidhalter/jedi
|
| 12 |
+
|
| 13 |
+
A simple example:
|
| 14 |
+
|
| 15 |
+
>>> import parso
|
| 16 |
+
>>> module = parso.parse('hello + 1', version="3.9")
|
| 17 |
+
>>> expr = module.children[0]
|
| 18 |
+
>>> expr
|
| 19 |
+
PythonNode(arith_expr, [<Name: hello@1,0>, <Operator: +>, <Number: 1>])
|
| 20 |
+
>>> print(expr.get_code())
|
| 21 |
+
hello + 1
|
| 22 |
+
>>> name = expr.children[0]
|
| 23 |
+
>>> name
|
| 24 |
+
<Name: hello@1,0>
|
| 25 |
+
>>> name.end_pos
|
| 26 |
+
(1, 5)
|
| 27 |
+
>>> expr.end_pos
|
| 28 |
+
(1, 9)
|
| 29 |
+
|
| 30 |
+
To list multiple issues:
|
| 31 |
+
|
| 32 |
+
>>> grammar = parso.load_grammar()
|
| 33 |
+
>>> module = grammar.parse('foo +\nbar\ncontinue')
|
| 34 |
+
>>> error1, error2 = grammar.iter_errors(module)
|
| 35 |
+
>>> error1.message
|
| 36 |
+
'SyntaxError: invalid syntax'
|
| 37 |
+
>>> error2.message
|
| 38 |
+
"SyntaxError: 'continue' not properly in loop"
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
from parso.parser import ParserSyntaxError
|
| 42 |
+
from parso.grammar import Grammar, load_grammar
|
| 43 |
+
from parso.utils import split_lines, python_bytes_to_unicode
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
__version__ = '0.8.4'
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def parse(code=None, **kwargs):
    """
    A utility function to avoid loading grammars.
    Params are documented in :py:meth:`parso.Grammar.parse`.

    :param str version: The version used by :py:func:`parso.load_grammar`.
    """
    grammar_version = kwargs.pop('version', None)
    return load_grammar(version=grammar_version).parse(code, **kwargs)
|
evalkit_tf437/lib/python3.10/site-packages/parso/_compatibility.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import platform
|
| 2 |
+
|
| 3 |
+
# True when running under PyPy; used elsewhere to skip pickling the parser
# cache ("Never pickle in pypy, it's slow as hell").
is_pypy = platform.python_implementation() == 'PyPy'
|
evalkit_tf437/lib/python3.10/site-packages/parso/cache.py
ADDED
|
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import hashlib
|
| 5 |
+
import gc
|
| 6 |
+
import shutil
|
| 7 |
+
import platform
|
| 8 |
+
import logging
|
| 9 |
+
import warnings
|
| 10 |
+
import pickle
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Dict, Any
|
| 13 |
+
|
| 14 |
+
LOG = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
_CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10 # 10 minutes
|
| 17 |
+
"""
|
| 18 |
+
Cached files should survive at least a few minutes.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
_CACHED_FILE_MAXIMUM_SURVIVAL = 60 * 60 * 24 * 30
|
| 22 |
+
"""
|
| 23 |
+
Maximum time for a cached file to survive if it is not
|
| 24 |
+
accessed within.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
_CACHED_SIZE_TRIGGER = 600
|
| 28 |
+
"""
|
| 29 |
+
This setting limits the amount of cached files. It's basically a way to start
|
| 30 |
+
garbage collection.
|
| 31 |
+
|
| 32 |
+
The reasoning for this limit being as big as it is, is the following:
|
| 33 |
+
|
| 34 |
+
Numpy, Pandas, Matplotlib and Tensorflow together use about 500 files. This
|
| 35 |
+
makes Jedi use ~500mb of memory. Since we might want a bit more than those few
|
| 36 |
+
libraries, we just increase it a bit.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
_PICKLE_VERSION = 33
|
| 40 |
+
"""
|
| 41 |
+
Version number (integer) for file system cache.
|
| 42 |
+
|
| 43 |
+
Increment this number when there are any incompatible changes in
|
| 44 |
+
the parser tree classes. For example, the following changes
|
| 45 |
+
are regarded as incompatible.
|
| 46 |
+
|
| 47 |
+
- A class name is changed.
|
| 48 |
+
- A class is moved to another module.
|
| 49 |
+
- A __slot__ of a class is changed.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
_VERSION_TAG = '%s-%s%s-%s' % (
|
| 53 |
+
platform.python_implementation(),
|
| 54 |
+
sys.version_info[0],
|
| 55 |
+
sys.version_info[1],
|
| 56 |
+
_PICKLE_VERSION
|
| 57 |
+
)
|
| 58 |
+
"""
|
| 59 |
+
Short name for distinguish Python implementations and versions.
|
| 60 |
+
|
| 61 |
+
It's a bit similar to `sys.implementation.cache_tag`.
|
| 62 |
+
See: http://docs.python.org/3/library/sys.html#sys.implementation
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _get_default_cache_path():
|
| 67 |
+
if platform.system().lower() == 'windows':
|
| 68 |
+
dir_ = Path(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso')
|
| 69 |
+
elif platform.system().lower() == 'darwin':
|
| 70 |
+
dir_ = Path('~', 'Library', 'Caches', 'Parso')
|
| 71 |
+
else:
|
| 72 |
+
dir_ = Path(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'parso')
|
| 73 |
+
return dir_.expanduser()
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
_default_cache_path = _get_default_cache_path()
|
| 77 |
+
"""
|
| 78 |
+
The path where the cache is stored.
|
| 79 |
+
|
| 80 |
+
On Linux, this defaults to ``~/.cache/parso/``, on OS X to
|
| 81 |
+
``~/Library/Caches/Parso/`` and on Windows to ``%LOCALAPPDATA%\\Parso\\Parso\\``.
|
| 82 |
+
On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
|
| 83 |
+
``$XDG_CACHE_HOME/parso`` is used instead of the default one.
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
_CACHE_CLEAR_THRESHOLD = 60 * 60 * 24
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _get_cache_clear_lock_path(cache_path=None):
|
| 90 |
+
"""
|
| 91 |
+
The path where the cache lock is stored.
|
| 92 |
+
|
| 93 |
+
Cache lock will prevent continous cache clearing and only allow garbage
|
| 94 |
+
collection once a day (can be configured in _CACHE_CLEAR_THRESHOLD).
|
| 95 |
+
"""
|
| 96 |
+
cache_path = cache_path or _default_cache_path
|
| 97 |
+
return cache_path.joinpath("PARSO-CACHE-LOCK")
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
parser_cache: Dict[str, Any] = {}
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class _NodeCacheItem:
|
| 104 |
+
def __init__(self, node, lines, change_time=None):
|
| 105 |
+
self.node = node
|
| 106 |
+
self.lines = lines
|
| 107 |
+
if change_time is None:
|
| 108 |
+
change_time = time.time()
|
| 109 |
+
self.change_time = change_time
|
| 110 |
+
self.last_used = change_time
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def load_module(hashed_grammar, file_io, cache_path=None):
    """
    Returns a module or None, if it fails.

    Consults the in-memory cache first; on a miss, falls back to the
    pickled cache on the file system.
    """
    p_time = file_io.get_last_modified()
    if p_time is None:
        # Source file no longer exists; nothing to load.
        return None

    grammar_map = parser_cache.get(hashed_grammar)
    cached = None if grammar_map is None else grammar_map.get(file_io.path)
    if cached is None:
        # RAM miss -> try the pickled cache on disk.
        return _load_from_file_system(
            hashed_grammar,
            file_io.path,
            p_time,
            cache_path=cache_path
        )
    if p_time <= cached.change_time:
        cached.last_used = time.time()
        return cached.node
    # Source changed after the cached tree was built: stale, give up
    # (matches the original behaviour of not consulting the disk here).
    return None
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
    """Load a pickled cache item for (grammar, path) from disk.

    Returns the cached node, or None when the pickle is missing or older
    than the source file's modification time ``p_time``.
    """
    cache_path = _get_hashed_path(hashed_grammar, path, cache_path=cache_path)
    try:
        if p_time > os.path.getmtime(cache_path):
            # Cache is outdated
            return None

        with open(cache_path, 'rb') as f:
            # GC is disabled while unpickling to speed up loading large trees.
            gc.disable()
            try:
                # NOTE(review): pickle.load on cache files assumes the cache
                # directory is trusted; a tampered file could execute code.
                module_cache_item = pickle.load(f)
            finally:
                gc.enable()
    except FileNotFoundError:
        return None
    else:
        # Promote the freshly-loaded item into the RAM cache as well.
        _set_cache_item(hashed_grammar, path, module_cache_item)
        LOG.debug('pickle loaded: %s', path)
        return module_cache_item.node
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _set_cache_item(hashed_grammar, path, module_cache_item):
    """Insert an item into the RAM cache, evicting stale entries first when
    the cache has grown past the size trigger.
    """
    total_entries = sum(len(file_map) for file_map in parser_cache.values())
    if total_entries >= _CACHED_SIZE_TRIGGER:
        # Garbage collection of old cache files: drop everything that has
        # not been accessed within the minimum survival window (10 minutes).
        cutoff_time = time.time() - _CACHED_FILE_MINIMUM_SURVIVAL
        for grammar_key, file_map in parser_cache.items():
            parser_cache[grammar_key] = {
                file_path: item
                for file_path, item in file_map.items()
                if item.last_used > cutoff_time
            }

    parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
    """Store a parsed module in the RAM cache and, best-effort, on disk.

    Disk pickling is skipped when the source's mtime cannot be read or when
    ``pickling`` is False; a PermissionError while writing only produces a
    warning, since the RAM copy is still valid.
    """
    path = file_io.path
    try:
        p_time = None if path is None else file_io.get_last_modified()
    except OSError:
        p_time = None
        # Without a reliable mtime a pickle could never be validated later.
        pickling = False

    item = _NodeCacheItem(module, lines, p_time)
    _set_cache_item(hashed_grammar, path, item)
    if pickling and path is not None:
        try:
            _save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
        except PermissionError:
            # It's not really a big issue if the cache cannot be saved to the
            # file system. It's still in RAM in that case. However we should
            # still warn the user that this is happening.
            warnings.warn(
                'Tried to save a file to %s, but got permission denied.' % path,
                Warning
            )
        else:
            # A successful save is also the periodic hook for sweeping
            # inactive cache files.
            _remove_cache_and_update_lock(cache_path=cache_path)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def _save_to_file_system(hashed_grammar, path, item, cache_path=None):
    """Pickle a cache item to its per-grammar hashed location on disk."""
    target = _get_hashed_path(hashed_grammar, path, cache_path=cache_path)
    with open(target, 'wb') as f:
        pickle.dump(item, f, pickle.HIGHEST_PROTOCOL)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def clear_cache(cache_path=None):
    """Delete the on-disk cache directory and empty the in-memory cache."""
    target = _default_cache_path if cache_path is None else cache_path
    shutil.rmtree(target)
    parser_cache.clear()
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def clear_inactive_cache(
        cache_path=None,
        inactivity_threshold=None,
):
    """Delete cached pickle files that have not been accessed recently.

    :param cache_path: Cache directory to sweep; defaults to the module's
        default cache path.
    :param inactivity_threshold: Seconds of inactivity (based on the file's
        atime) after which a cached file is removed. Defaults to
        ``_CACHED_FILE_MAXIMUM_SURVIVAL`` (30 days).
    :return: True when the sweep ran, False if the cache directory is missing.
    """
    if cache_path is None:
        cache_path = _default_cache_path
    if not cache_path.exists():
        return False
    # BUG FIX: the threshold parameter was previously accepted but ignored —
    # the loop always used _CACHED_FILE_MAXIMUM_SURVIVAL. It is now honoured;
    # a None sentinel keeps the old default behaviour backward-compatible.
    if inactivity_threshold is None:
        inactivity_threshold = _CACHED_FILE_MAXIMUM_SURVIVAL
    for dirname in os.listdir(cache_path):
        version_path = cache_path.joinpath(dirname)
        if not version_path.is_dir():
            continue
        for entry in os.scandir(version_path):
            if entry.stat().st_atime + inactivity_threshold <= time.time():
                try:
                    os.remove(entry.path)
                except OSError:  # silently ignore all failures
                    continue
    return True
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def _touch(path):
|
| 232 |
+
try:
|
| 233 |
+
os.utime(path, None)
|
| 234 |
+
except FileNotFoundError:
|
| 235 |
+
try:
|
| 236 |
+
file = open(path, 'a')
|
| 237 |
+
file.close()
|
| 238 |
+
except (OSError, IOError): # TODO Maybe log this?
|
| 239 |
+
return False
|
| 240 |
+
return True
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def _remove_cache_and_update_lock(cache_path=None):
    """Sweep inactive cache files at most once per _CACHE_CLEAR_THRESHOLD.

    Uses the lock file's mtime as a rate limiter: the sweep only runs when
    the lock is missing or older than the threshold, and the lock is touched
    before sweeping so concurrent processes mostly skip redundant work.
    """
    lock_path = _get_cache_clear_lock_path(cache_path=cache_path)
    try:
        clear_lock_time = os.path.getmtime(lock_path)
    except FileNotFoundError:
        clear_lock_time = None
    if (
        clear_lock_time is None  # first time
        or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
    ):
        if not _touch(lock_path):
            # First make sure that as few as possible other cleanup jobs also
            # get started. There is still a race condition but it's probably
            # not a big problem.
            return False

        clear_inactive_cache(cache_path=cache_path)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _get_hashed_path(hashed_grammar, path, cache_path=None):
    """Build the on-disk pickle filename for a (grammar, source path) pair."""
    directory = _get_cache_directory_path(cache_path=cache_path)
    # Hash the source path so arbitrary paths map to flat, safe filenames.
    digest = hashlib.sha256(str(path).encode("utf-8")).hexdigest()
    return os.path.join(directory, '%s-%s.pkl' % (hashed_grammar, digest))
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def _get_cache_directory_path(cache_path=None):
    """Return the version-tagged cache directory, creating it if missing."""
    base = _default_cache_path if cache_path is None else cache_path
    directory = base.joinpath(_VERSION_TAG)
    if not directory.exists():
        os.makedirs(directory)
    return directory
|
evalkit_tf437/lib/python3.10/site-packages/parso/file_io.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Union
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class FileIO:
    """Minimal file abstraction used by the parser to read source files."""

    def __init__(self, path: Union[os.PathLike, str]):
        # Normalize plain strings to pathlib objects.
        self.path = Path(path) if isinstance(path, str) else path

    def read(self):  # Returns bytes/str
        # We would like to read unicode here, but we cannot, because we are not
        # sure if it is a valid unicode file. Therefore just read whatever is
        # here.
        with open(self.path, 'rb') as f:
            return f.read()

    def get_last_modified(self):
        """
        Returns float - timestamp or None, if path doesn't exist.
        """
        try:
            return os.path.getmtime(self.path)
        except FileNotFoundError:
            return None

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.path)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class KnownContentFileIO(FileIO):
    """A FileIO whose content is supplied up front instead of read from disk."""

    def __init__(self, path, content):
        super().__init__(path)
        self._content = content

    def read(self):
        # Never touches the filesystem; returns the preloaded content.
        return self._content
|
evalkit_tf437/lib/python3.10/site-packages/parso/grammar.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import os
|
| 3 |
+
from typing import Generic, TypeVar, Union, Dict, Optional, Any
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
from parso._compatibility import is_pypy
|
| 7 |
+
from parso.pgen2 import generate_grammar
|
| 8 |
+
from parso.utils import split_lines, python_bytes_to_unicode, \
|
| 9 |
+
PythonVersionInfo, parse_version_string
|
| 10 |
+
from parso.python.diff import DiffParser
|
| 11 |
+
from parso.python.tokenize import tokenize_lines, tokenize
|
| 12 |
+
from parso.python.token import PythonTokenTypes
|
| 13 |
+
from parso.cache import parser_cache, load_module, try_to_save_module
|
| 14 |
+
from parso.parser import BaseParser
|
| 15 |
+
from parso.python.parser import Parser as PythonParser
|
| 16 |
+
from parso.python.errors import ErrorFinderConfig
|
| 17 |
+
from parso.python import pep8
|
| 18 |
+
from parso.file_io import FileIO, KnownContentFileIO
|
| 19 |
+
from parso.normalizer import RefactoringNormalizer, NormalizerConfig
|
| 20 |
+
|
| 21 |
+
_loaded_grammars: Dict[str, 'Grammar'] = {}
|
| 22 |
+
|
| 23 |
+
_NodeT = TypeVar("_NodeT")
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Grammar(Generic[_NodeT]):
    """
    :py:func:`parso.load_grammar` returns instances of this class.

    Creating custom non-Python grammars by calling this is not supported, yet.

    :param text: A BNF representation of your grammar.
    """
    _start_nonterminal: str
    _error_normalizer_config: Optional[ErrorFinderConfig] = None
    _token_namespace: Any = None
    _default_normalizer_config: NormalizerConfig = pep8.PEP8NormalizerConfig()

    def __init__(self, text: str, *, tokenizer, parser=BaseParser, diff_parser=None):
        self._pgen_grammar = generate_grammar(
            text,
            token_namespace=self._get_token_namespace()
        )
        self._parser = parser
        self._tokenizer = tokenizer
        self._diff_parser = diff_parser
        # Hash of the BNF text; used as the cache key for parsed modules.
        self._hashed = hashlib.sha256(text.encode("utf-8")).hexdigest()

    def parse(self,
              code: Union[str, bytes] = None,
              *,
              error_recovery=True,
              path: Union[os.PathLike, str] = None,
              start_symbol: str = None,
              cache=False,
              diff_cache=False,
              cache_path: Union[os.PathLike, str] = None,
              file_io: FileIO = None) -> _NodeT:
        """
        If you want to parse a Python file you want to start here, most likely.

        If you need finer grained control over the parsed instance, there will be
        other ways to access it.

        :param str code: A unicode or bytes string. When it's not possible to
            decode bytes to a string, returns a
            :py:class:`UnicodeDecodeError`.
        :param bool error_recovery: If enabled, any code will be returned. If
            it is invalid, it will be returned as an error node. If disabled,
            you will get a ParseError when encountering syntax errors in your
            code.
        :param str start_symbol: The grammar rule (nonterminal) that you want
            to parse. Only allowed to be used when error_recovery is False.
        :param str path: The path to the file you want to open. Only needed for caching.
        :param bool cache: Keeps a copy of the parser tree in RAM and on disk
            if a path is given. Returns the cached trees if the corresponding
            files on disk have not changed. Note that this stores pickle files
            on your file system (e.g. for Linux in ``~/.cache/parso/``).
        :param bool diff_cache: Diffs the cached python module against the new
            code and tries to parse only the parts that have changed. Returns
            the same (changed) module that is found in cache. Using this option
            requires you to not do anything anymore with the cached modules
            under that path, because the contents of it might change. This
            option is still somewhat experimental. If you want stability,
            please don't use it.
        :param bool cache_path: If given saves the parso cache in this
            directory. If not given, defaults to the default cache places on
            each platform.

        :return: A subclass of :py:class:`parso.tree.NodeOrLeaf`. Typically a
            :py:class:`parso.python.tree.Module`.
        """
        if code is None and path is None and file_io is None:
            raise TypeError("Please provide either code or a path.")

        if isinstance(path, str):
            path = Path(path)
        if isinstance(cache_path, str):
            cache_path = Path(cache_path)

        if start_symbol is None:
            start_symbol = self._start_nonterminal

        if error_recovery and start_symbol != 'file_input':
            raise NotImplementedError("This is currently not implemented.")

        if file_io is None:
            if code is None:
                file_io = FileIO(path)  # type: ignore[arg-type]
            else:
                file_io = KnownContentFileIO(path, code)

        # Fast path: return a previously cached tree when the file on disk
        # has not changed since it was parsed.
        if cache and file_io.path is not None:
            module_node = load_module(self._hashed, file_io, cache_path=cache_path)
            if module_node is not None:
                return module_node  # type: ignore[no-any-return]

        if code is None:
            code = file_io.read()
        code = python_bytes_to_unicode(code)

        lines = split_lines(code, keepends=True)
        if diff_cache:
            if self._diff_parser is None:
                raise TypeError("You have to define a diff parser to be able "
                                "to use this option.")
            try:
                module_cache_item = parser_cache[self._hashed][file_io.path]
            except KeyError:
                pass
            else:
                module_node = module_cache_item.node
                old_lines = module_cache_item.lines
                if old_lines == lines:
                    return module_node  # type: ignore[no-any-return]

                # Incrementally reparse only the changed regions.
                new_node = self._diff_parser(
                    self._pgen_grammar, self._tokenizer, module_node
                ).update(
                    old_lines=old_lines,
                    new_lines=lines
                )
                try_to_save_module(self._hashed, file_io, new_node, lines,
                                   # Never pickle in pypy, it's slow as hell.
                                   pickling=cache and not is_pypy,
                                   cache_path=cache_path)
                return new_node  # type: ignore[no-any-return]

        # Full parse: tokenize all lines and run the configured parser.
        tokens = self._tokenizer(lines)

        p = self._parser(
            self._pgen_grammar,
            error_recovery=error_recovery,
            start_nonterminal=start_symbol
        )
        root_node = p.parse(tokens=tokens)

        if cache or diff_cache:
            try_to_save_module(self._hashed, file_io, root_node, lines,
                               # Never pickle in pypy, it's slow as hell.
                               pickling=cache and not is_pypy,
                               cache_path=cache_path)
        return root_node  # type: ignore[no-any-return]

    def _get_token_namespace(self):
        # Subclasses must set _token_namespace (e.g. PythonTokenTypes).
        ns = self._token_namespace
        if ns is None:
            raise ValueError("The token namespace should be set.")
        return ns

    def iter_errors(self, node):
        """
        Given a :py:class:`parso.tree.NodeOrLeaf` returns a generator of
        :py:class:`parso.normalizer.Issue` objects. For Python this is
        a list of syntax/indentation errors.
        """
        if self._error_normalizer_config is None:
            raise ValueError("No error normalizer specified for this grammar.")

        return self._get_normalizer_issues(node, self._error_normalizer_config)

    def refactor(self, base_node, node_to_str_map):
        # Applies the node -> replacement-string mapping over the tree.
        return RefactoringNormalizer(node_to_str_map).walk(base_node)

    def _get_normalizer(self, normalizer_config):
        if normalizer_config is None:
            normalizer_config = self._default_normalizer_config
            if normalizer_config is None:
                raise ValueError("You need to specify a normalizer, because "
                                 "there's no default normalizer for this tree.")
        return normalizer_config.create_normalizer(self)

    def _normalize(self, node, normalizer_config=None):
        """
        TODO this is not public, yet.
        The returned code will be normalized, e.g. PEP8 for Python.
        """
        normalizer = self._get_normalizer(normalizer_config)
        return normalizer.walk(node)

    def _get_normalizer_issues(self, node, normalizer_config=None):
        # Walks the tree for side effects only; the issues list is collected
        # on the normalizer itself.
        normalizer = self._get_normalizer(normalizer_config)
        normalizer.walk(node)
        return normalizer.issues

    def __repr__(self):
        nonterminals = self._pgen_grammar.nonterminal_to_dfas.keys()
        txt = ' '.join(list(nonterminals)[:3]) + ' ...'
        return '<%s:%s>' % (self.__class__.__name__, txt)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class PythonGrammar(Grammar):
    """Grammar specialised for parsing Python source code."""
    _error_normalizer_config = ErrorFinderConfig()
    _token_namespace = PythonTokenTypes
    _start_nonterminal = 'file_input'

    def __init__(self, version_info: PythonVersionInfo, bnf_text: str):
        super().__init__(
            bnf_text,
            tokenizer=self._tokenize_lines,
            parser=PythonParser,
            diff_parser=DiffParser
        )
        self.version_info = version_info

    def _tokenize_lines(self, lines, **kwargs):
        # Tokenizes pre-split source lines for this grammar's Python version.
        return tokenize_lines(lines, version_info=self.version_info, **kwargs)

    def _tokenize(self, code):
        # Used by Jedi.
        return tokenize(code, version_info=self.version_info)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def load_grammar(*, version: str = None, path: str = None):
    """
    Loads a :py:class:`parso.Grammar`. The default version is the current Python
    version.

    :param str version: A python version string, e.g. ``version='3.8'``.
    :param str path: A path to a grammar file
    """
    version_info = parse_version_string(version)

    # Default to the bundled grammar file matching the requested version.
    file = path or os.path.join(
        'python',
        'grammar%s%s.txt' % (version_info.major, version_info.minor)
    )

    global _loaded_grammars
    path = os.path.join(os.path.dirname(__file__), file)
    try:
        # Grammars are parsed once per path and then reused.
        return _loaded_grammars[path]
    except KeyError:
        try:
            with open(path) as f:
                bnf_text = f.read()

            grammar = PythonGrammar(version_info, bnf_text)
            return _loaded_grammars.setdefault(path, grammar)
        except FileNotFoundError:
            # No bundled grammar file exists for this version.
            message = "Python version %s.%s is currently not supported." % (
                version_info.major, version_info.minor
            )
            raise NotImplementedError(message)
|
evalkit_tf437/lib/python3.10/site-packages/parso/normalizer.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextlib import contextmanager
|
| 2 |
+
from typing import Dict, List
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class _NormalizerMeta(type):
|
| 6 |
+
def __new__(cls, name, bases, dct):
|
| 7 |
+
new_cls = type.__new__(cls, name, bases, dct)
|
| 8 |
+
new_cls.rule_value_classes = {}
|
| 9 |
+
new_cls.rule_type_classes = {}
|
| 10 |
+
return new_cls
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Normalizer(metaclass=_NormalizerMeta):
    """Walks a parse tree, feeds nodes to registered rules and collects issues."""

    # Class-level fallbacks; every instance builds its own dicts in __init__.
    _rule_type_instances: Dict[str, List[type]] = {}
    _rule_value_instances: Dict[str, List[type]] = {}

    def __init__(self, grammar, config):
        self.grammar = grammar
        self._config = config
        self.issues = []

        self._rule_type_instances = self._instantiate_rules('rule_type_classes')
        self._rule_value_instances = self._instantiate_rules('rule_value_classes')

    def _instantiate_rules(self, attr):
        """Collect rule classes registered anywhere in the MRO and instantiate them."""
        instances = {}
        for klass in type(self).mro():
            for key, rule_classes in getattr(klass, attr, {}).items():
                instances.setdefault(key, []).extend(
                    rule_cls(self) for rule_cls in rule_classes
                )
        return instances

    def walk(self, node):
        """Run a full pass over ``node`` and return the normalized code string."""
        self.initialize(node)
        result = self.visit(node)
        self.finalize()
        return result

    def visit(self, node):
        try:
            children = node.children
        except AttributeError:
            # No ``children`` attribute means it is a leaf.
            return self.visit_leaf(node)
        with self.visit_node(node):
            return ''.join(map(self.visit, children))

    @contextmanager
    def visit_node(self, node):
        self._check_type_rules(node)
        yield

    def _check_type_rules(self, node):
        for rule in self._rule_type_instances.get(node.type, []):
            rule.feed_node(node)

    def visit_leaf(self, leaf):
        self._check_type_rules(leaf)

        for rule in self._rule_value_instances.get(leaf.value, []):
            rule.feed_node(leaf)

        return leaf.prefix + leaf.value

    def initialize(self, node):
        # Hook for subclasses; called before the walk starts.
        pass

    def finalize(self):
        # Hook for subclasses; called after the walk completes.
        pass

    def add_issue(self, node, code, message):
        issue = Issue(node, code, message)
        # De-duplicate: the same code at the same position is reported once.
        if issue not in self.issues:
            self.issues.append(issue)
        return True

    @classmethod
    def register_rule(cls, *, value=None, values=(), type=None, types=()):
        """
        Use it as a class decorator::

            normalizer = Normalizer('grammar', 'config')
            @normalizer.register_rule(value='foo')
            class MyRule(Rule):
                error_code = 42
        """
        all_values = list(values)
        all_types = list(types)
        if value is not None:
            all_values.append(value)
        if type is not None:
            all_types.append(type)

        if not (all_values or all_types):
            raise ValueError("You must register at least something.")

        def decorator(rule_cls):
            for v in all_values:
                cls.rule_value_classes.setdefault(v, []).append(rule_cls)
            for t in all_types:
                cls.rule_type_classes.setdefault(t, []).append(rule_cls)
            return rule_cls

        return decorator
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class NormalizerConfig:
    """Configuration object deciding which normalizer (if any) to create."""

    # Subclasses may set this to None to disable normalization entirely.
    normalizer_class = Normalizer

    def create_normalizer(self, grammar):
        """Instantiate the configured normalizer, or return None when disabled."""
        cls = self.normalizer_class
        if cls is None:
            return None
        return cls(grammar, self)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class Issue:
    """A single problem found by a normalizer, pinned to a source position."""

    def __init__(self, node, code, message):
        # An integer code that stands for the type of error.
        self.code = code
        # A message (string) for the issue.
        self.message = message
        # Start position of the error as a (line, column) tuple. As always
        # in parso the first line is 1 and the first column 0.
        self.start_pos = node.start_pos
        self.end_pos = node.end_pos

    def __eq__(self, other):
        # Two issues are equal if the same code occurs at the same position;
        # the message is deliberately not compared.
        return self.start_pos == other.start_pos and self.code == other.code

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Consistent with __eq__: code + start position.
        return hash((self.code, self.start_pos))

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.code)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class Rule:
    """Base class for normalizer rules; subclasses set ``code``/``message``."""

    code: int
    message: str

    def __init__(self, normalizer):
        self._normalizer = normalizer

    def is_issue(self, node):
        """Return a truthy value when ``node`` violates this rule."""
        raise NotImplementedError()

    def get_node(self, node):
        """Hook: which node the issue should be attached to (default: itself)."""
        return node

    def _get_message(self, message, node):
        if message is not None:
            return message
        if self.message is None:
            raise ValueError("The message on the class is not set.")
        return self.message

    def add_issue(self, node, code=None, message=None):
        """Report an issue on ``node``, defaulting to class-level code/message."""
        if code is None:
            if self.code is None:
                raise ValueError("The error code on the class is not set.")
            code = self.code

        self._normalizer.add_issue(node, code, self._get_message(message, node))

    def feed_node(self, node):
        # Called by the normalizer for every matching node.
        if self.is_issue(node):
            self.add_issue(self.get_node(node))
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class RefactoringNormalizer(Normalizer):
    """A normalizer that substitutes the text of selected nodes verbatim."""

    def __init__(self, node_to_str_map):
        # NOTE(review): deliberately does not call super().__init__ — no
        # grammar/config/rules are needed for pure text substitution.
        self._node_to_str_map = node_to_str_map

    def visit(self, node):
        if node in self._node_to_str_map:
            return self._node_to_str_map[node]
        return super().visit(node)

    def visit_leaf(self, leaf):
        if leaf in self._node_to_str_map:
            return self._node_to_str_map[leaf]
        return super().visit_leaf(leaf)
|
evalkit_tf437/lib/python3.10/site-packages/parso/parser.py
ADDED
|
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
|
| 2 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 3 |
+
|
| 4 |
+
# Modifications:
|
| 5 |
+
# Copyright David Halter and Contributors
|
| 6 |
+
# Modifications are dual-licensed: MIT and PSF.
|
| 7 |
+
# 99% of the code is different from pgen2, now.
|
| 8 |
+
|
| 9 |
+
"""
|
| 10 |
+
The ``Parser`` tries to convert the available Python code in an easy to read
|
| 11 |
+
format, something like an abstract syntax tree. The classes who represent this
|
| 12 |
+
tree, are sitting in the :mod:`parso.tree` module.
|
| 13 |
+
|
| 14 |
+
The Python module ``tokenize`` is a very important part in the ``Parser``,
|
| 15 |
+
because it splits the code into different words (tokens). Sometimes it looks a
|
| 16 |
+
bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
|
| 17 |
+
module for this? Well, ``ast`` does a very good job understanding proper Python
|
| 18 |
+
code, but fails to work as soon as there's a single line of broken code.
|
| 19 |
+
|
| 20 |
+
There's one important optimization that needs to be known: Statements are not
|
| 21 |
+
being parsed completely. ``Statement`` is just a representation of the tokens
|
| 22 |
+
within the statement. This lowers memory usage and cpu time and reduces the
|
| 23 |
+
complexity of the ``Parser`` (there's another parser sitting inside
|
| 24 |
+
``Statement``, which produces ``Array`` and ``Call``).
|
| 25 |
+
"""
|
| 26 |
+
from typing import Dict, Type
|
| 27 |
+
|
| 28 |
+
from parso import tree
|
| 29 |
+
from parso.pgen2.generator import ReservedString
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class ParserSyntaxError(Exception):
    """
    Contains error information about the parser tree.

    May be raised as an exception.
    """

    def __init__(self, message, error_leaf):
        # Intentionally no Exception.__init__ call: ``args`` is already set
        # by Exception.__new__ from the constructor arguments.
        self.message = message        # human-readable description
        self.error_leaf = error_leaf  # leaf where parsing failed
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class InternalParseError(Exception):
    """
    Exception to signal the parser is stuck and error recovery didn't help.
    Basically this shouldn't happen. It's a sign that something is really
    wrong.
    """

    def __init__(self, msg, type_, value, start_pos):
        Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" %
                           (msg, type_.name, value, start_pos))
        self.msg = msg
        # Bug fix: previously this assigned the ``type`` builtin
        # (``self.type = type``) instead of the token-type parameter.
        self.type = type_
        self.value = value
        self.start_pos = start_pos
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class Stack(list):
    """The parser's DFA stack: a list of ``StackNode`` with Jedi helpers."""

    def _allowed_transition_names_and_token_types(self):
        # An API just for Jedi: walk the stack top-down and yield every
        # transition that could legally come next.
        def _transitions():
            for stack_node in reversed(self):
                for transition in stack_node.dfa.transitions:
                    if isinstance(transition, ReservedString):
                        yield transition.value  # keyword / operator string
                    else:
                        yield transition  # a token type

                if not stack_node.dfa.is_final:
                    # Frames below can only continue once this DFA finishes.
                    break

        return list(_transitions())
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class StackNode:
    """One parser stack frame: the current DFA state plus collected children."""

    def __init__(self, dfa):
        self.dfa = dfa
        self.nodes = []

    @property
    def nonterminal(self):
        # The grammar rule this DFA was generated from.
        return self.dfa.from_rule

    def __repr__(self):
        return '%s(%s, %s)' % (type(self).__name__, self.dfa, self.nodes)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _token_to_transition(grammar, type_, value):
|
| 90 |
+
# Map from token to label
|
| 91 |
+
if type_.value.contains_syntax:
|
| 92 |
+
# Check for reserved words (keywords)
|
| 93 |
+
try:
|
| 94 |
+
return grammar.reserved_syntax_strings[value]
|
| 95 |
+
except KeyError:
|
| 96 |
+
pass
|
| 97 |
+
|
| 98 |
+
return type_
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class BaseParser:
    """Parser engine.

    A Parser instance contains state pertaining to the current token
    sequence, and should not be used concurrently by different threads
    to parse separate token sequences.

    See python/tokenize.py for how to get input tokens by a string.

    When a syntax error occurs, error_recovery() is called.
    """

    # Maps a nonterminal name to the tree class used for it; anything not
    # listed falls back to ``default_node``.
    node_map: Dict[str, Type[tree.BaseNode]] = {}
    default_node = tree.Node

    # Same idea for terminals/leaves.
    leaf_map: Dict[str, Type[tree.Leaf]] = {}
    default_leaf = tree.Leaf

    def __init__(self, pgen_grammar, start_nonterminal='file_input', error_recovery=False):
        # ``error_recovery`` only flags whether a subclass may attempt it;
        # the base implementation raises when it is enabled (see error_recovery()).
        self._pgen_grammar = pgen_grammar
        self._start_nonterminal = start_nonterminal
        self._error_recovery = error_recovery

    def parse(self, tokens):
        # Push the first DFA of the start nonterminal; the stack then grows
        # and shrinks as tokens are consumed in _add_token().
        first_dfa = self._pgen_grammar.nonterminal_to_dfas[self._start_nonterminal][0]
        self.stack = Stack([StackNode(first_dfa)])

        for token in tokens:
            self._add_token(token)

        # Reduce any frames still open at EOF; ``token`` below is the last
        # token from the loop above.
        while True:
            tos = self.stack[-1]
            if not tos.dfa.is_final:
                # We never broke out -- EOF is too soon -- Unfinished statement.
                # However, the error recovery might have added the token again, if
                # the stack is empty, we're fine.
                raise InternalParseError(
                    "incomplete input", token.type, token.string, token.start_pos
                )

            if len(self.stack) > 1:
                self._pop()
            else:
                # Only the root frame remains -> the parse is complete.
                return self.convert_node(tos.nonterminal, tos.nodes)

    def error_recovery(self, token):
        # Hook for subclasses; the base parser simply reports a syntax error.
        if self._error_recovery:
            raise NotImplementedError("Error Recovery is not implemented")
        else:
            type_, value, start_pos, prefix = token
            error_leaf = tree.ErrorLeaf(type_, value, start_pos, prefix)
            raise ParserSyntaxError('SyntaxError: invalid syntax', error_leaf)

    def convert_node(self, nonterminal, children):
        # Instantiate the most specific tree class registered for this rule,
        # falling back to the generic default node.
        try:
            node = self.node_map[nonterminal](children)
        except KeyError:
            node = self.default_node(nonterminal, children)
        return node

    def convert_leaf(self, type_, value, prefix, start_pos):
        # Instantiate the most specific leaf class registered for this token,
        # falling back to the generic default leaf.
        try:
            return self.leaf_map[type_](value, start_pos, prefix)
        except KeyError:
            return self.default_leaf(value, start_pos, prefix)

    def _add_token(self, token):
        """
        This is the only core function for parsing. Here happens basically
        everything. Everything is well prepared by the parser generator and we
        only apply the necessary steps here.
        """
        grammar = self._pgen_grammar
        stack = self.stack
        type_, value, start_pos, prefix = token
        transition = _token_to_transition(grammar, type_, value)

        while True:
            try:
                plan = stack[-1].dfa.transitions[transition]
                break
            except KeyError:
                if stack[-1].dfa.is_final:
                    # The current rule is complete; reduce it and retry the
                    # token one level up the stack.
                    self._pop()
                else:
                    self.error_recovery(token)
                    return
            except IndexError:
                # The stack is empty: every frame was popped but input remains.
                raise InternalParseError("too much input", type_, value, start_pos)

        stack[-1].dfa = plan.next_dfa

        for push in plan.dfa_pushes:
            stack.append(StackNode(push))

        leaf = self.convert_leaf(type_, value, prefix, start_pos)
        stack[-1].nodes.append(leaf)

    def _pop(self):
        tos = self.stack.pop()
        # If there's exactly one child, return that child instead of
        # creating a new node. We still create expr_stmt and
        # file_input though, because a lot of Jedi depends on its
        # logic.
        if len(tos.nodes) == 1:
            new_node = tos.nodes[0]
        else:
            new_node = self.convert_node(tos.dfa.from_rule, tos.nodes)

        self.stack[-1].nodes.append(new_node)
|
evalkit_tf437/lib/python3.10/site-packages/parso/py.typed
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/__init__.py
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/diff.cpython-310.pyc
ADDED
|
Binary file (20.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/parser.cpython-310.pyc
ADDED
|
Binary file (5.34 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/prefix.cpython-310.pyc
ADDED
|
Binary file (2.65 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/token.cpython-310.pyc
ADDED
|
Binary file (1.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/tokenize.cpython-310.pyc
ADDED
|
Binary file (14.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/__pycache__/tree.cpython-310.pyc
ADDED
|
Binary file (39.5 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/diff.py
ADDED
|
@@ -0,0 +1,886 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The diff parser is trying to be a faster version of the normal parser by trying
|
| 3 |
+
to reuse the nodes of a previous pass over the same file. This is also called
|
| 4 |
+
incremental parsing in parser literature. The difference is mostly that with
|
| 5 |
+
incremental parsing you get a range that needs to be reparsed. Here we
|
| 6 |
+
calculate that range ourselves by using difflib. After that it's essentially
|
| 7 |
+
incremental parsing.
|
| 8 |
+
|
| 9 |
+
The biggest issue of this approach is that we reuse nodes in a mutable way. The
|
| 10 |
+
intial design and idea is quite problematic for this parser, but it is also
|
| 11 |
+
pretty fast. Measurements showed that just copying nodes in Python is simply
|
| 12 |
+
quite a bit slower (especially for big files >3 kLOC). Therefore we did not
|
| 13 |
+
want to get rid of the mutable nodes, since this is usually not an issue.
|
| 14 |
+
|
| 15 |
+
This is by far the hardest software I ever wrote, exactly because the initial
|
| 16 |
+
design is crappy. When you have to account for a lot of mutable state, it
|
| 17 |
+
creates a ton of issues that you would otherwise not have. This file took
|
| 18 |
+
probably 3-6 months to write, which is insane for a parser.
|
| 19 |
+
|
| 20 |
+
There is a fuzzer in that helps test this whole thing. Please use it if you
|
| 21 |
+
make changes here. If you run the fuzzer like::
|
| 22 |
+
|
| 23 |
+
test/fuzz_diff_parser.py random -n 100000
|
| 24 |
+
|
| 25 |
+
you can be pretty sure that everything is still fine. I sometimes run the
|
| 26 |
+
fuzzer up to 24h to make sure everything is still ok.
|
| 27 |
+
"""
|
| 28 |
+
import re
|
| 29 |
+
import difflib
|
| 30 |
+
from collections import namedtuple
|
| 31 |
+
import logging
|
| 32 |
+
|
| 33 |
+
from parso.utils import split_lines
|
| 34 |
+
from parso.python.parser import Parser
|
| 35 |
+
from parso.python.tree import EndMarker
|
| 36 |
+
from parso.python.tokenize import PythonToken, BOM_UTF8_STRING
|
| 37 |
+
from parso.python.token import PythonTokenTypes
|
| 38 |
+
|
| 39 |
+
LOG = logging.getLogger(__name__)
|
| 40 |
+
DEBUG_DIFF_PARSER = False
|
| 41 |
+
|
| 42 |
+
_INDENTATION_TOKENS = 'INDENT', 'ERROR_DEDENT', 'DEDENT'
|
| 43 |
+
|
| 44 |
+
NEWLINE = PythonTokenTypes.NEWLINE
|
| 45 |
+
DEDENT = PythonTokenTypes.DEDENT
|
| 46 |
+
NAME = PythonTokenTypes.NAME
|
| 47 |
+
ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT
|
| 48 |
+
ENDMARKER = PythonTokenTypes.ENDMARKER
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _is_indentation_error_leaf(node):
    """True for error leaves produced purely by indent/dedent bookkeeping."""
    if node.type != 'error_leaf':
        return False
    return node.token_type in _INDENTATION_TOKENS
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _get_previous_leaf_if_indentation(leaf):
    """Skip backwards over indentation error leaves; may return None."""
    while leaf is not None and _is_indentation_error_leaf(leaf):
        leaf = leaf.get_previous_leaf()
    return leaf
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _get_next_leaf_if_indentation(leaf):
    """Skip forwards over indentation error leaves; may return None."""
    while leaf is not None and _is_indentation_error_leaf(leaf):
        leaf = leaf.get_next_leaf()
    return leaf
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _get_suite_indentation(tree_node):
    # The indentation of a suite is defined by its second child — the first
    # child is the NEWLINE after the colon.
    return _get_indentation(tree_node.children[1])
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _get_indentation(tree_node):
|
| 72 |
+
return tree_node.start_pos[1]
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _assert_valid_graph(node):
    """
    Checks if the parent/children relationship is correct.

    This is a check that only runs during debugging/testing.
    """
    try:
        children = node.children
    except AttributeError:
        # Leaf case (no ``children``).
        # Ignore INDENT is necessary, because indent/dedent tokens don't
        # contain value/prefix and are just around, because of the tokenizer.
        if node.type == 'error_leaf' and node.token_type in _INDENTATION_TOKENS:
            assert not node.value
            assert not node.prefix
            return

        # Calculate the content between two start positions.
        previous_leaf = _get_previous_leaf_if_indentation(node.get_previous_leaf())
        if previous_leaf is None:
            # First leaf of the file: only its own prefix precedes it.
            content = node.prefix
            previous_start_pos = 1, 0
        else:
            assert previous_leaf.end_pos <= node.start_pos, \
                (previous_leaf, node)

            content = previous_leaf.value + node.prefix
            previous_start_pos = previous_leaf.start_pos

        if '\n' in content or '\r' in content:
            # The content spans lines: the expected position is on the last
            # of those lines, at the length of its trailing fragment.
            splitted = split_lines(content)
            line = previous_start_pos[0] + len(splitted) - 1
            actual = line, len(splitted[-1])
        else:
            # Same line: just advance the column by the content length.
            actual = previous_start_pos[0], previous_start_pos[1] + len(content)
            if content.startswith(BOM_UTF8_STRING) \
                    and node.get_start_pos_of_prefix() == (1, 0):
                # Remove the byte order mark
                actual = actual[0], actual[1] - 1

        assert node.start_pos == actual, (node.start_pos, actual)
    else:
        # Inner-node case: check parent links, then recurse.
        for child in children:
            assert child.parent == node, (node, child)
            _assert_valid_graph(child)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def _assert_nodes_are_equal(node1, node2):
|
| 122 |
+
try:
|
| 123 |
+
children1 = node1.children
|
| 124 |
+
except AttributeError:
|
| 125 |
+
assert not hasattr(node2, 'children'), (node1, node2)
|
| 126 |
+
assert node1.value == node2.value, (node1, node2)
|
| 127 |
+
assert node1.type == node2.type, (node1, node2)
|
| 128 |
+
assert node1.prefix == node2.prefix, (node1, node2)
|
| 129 |
+
assert node1.start_pos == node2.start_pos, (node1, node2)
|
| 130 |
+
return
|
| 131 |
+
else:
|
| 132 |
+
try:
|
| 133 |
+
children2 = node2.children
|
| 134 |
+
except AttributeError:
|
| 135 |
+
assert False, (node1, node2)
|
| 136 |
+
for n1, n2 in zip(children1, children2):
|
| 137 |
+
_assert_nodes_are_equal(n1, n2)
|
| 138 |
+
assert len(children1) == len(children2), '\n' + repr(children1) + '\n' + repr(children2)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _get_debug_error_message(module, old_lines, new_lines):
    """Build a bug-report message comparing expected and actual diff-parser output."""
    import parso

    current_lines = split_lines(module.get_code(), keepends=True)
    current_diff = difflib.unified_diff(new_lines, current_lines)
    old_new_diff = difflib.unified_diff(old_lines, new_lines)
    return (
        "There's an issue with the diff parser. Please "
        "report (parso v%s) - Old/New:\n%s\nActual Diff (May be empty):\n%s"
        % (parso.__version__, ''.join(old_new_diff), ''.join(current_diff))
    )
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _get_last_line(node_or_leaf):
    """Return the last line that belongs to ``node_or_leaf``'s own code."""
    last_leaf = node_or_leaf.get_last_leaf()
    if _ends_with_newline(last_leaf):
        return last_leaf.start_pos[0]

    n = last_leaf.get_next_leaf()
    if n.type == 'endmarker' and '\n' in n.prefix:
        # This is a very special case and has to do with error recovery in
        # Parso. The problem is basically that there's no newline leaf at
        # the end sometimes (it's required in the grammar, but not needed
        # actually before endmarker, CPython just adds a newline to make
        # source code pass the parser, to account for that Parso error
        # recovery allows small_stmt instead of simple_stmt).
        return last_leaf.end_pos[0] + 1
    return last_leaf.end_pos[0]
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def _skip_dedent_error_leaves(leaf):
|
| 171 |
+
while leaf is not None and leaf.type == 'error_leaf' and leaf.token_type == 'DEDENT':
|
| 172 |
+
leaf = leaf.get_previous_leaf()
|
| 173 |
+
return leaf
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _ends_with_newline(leaf, suffix=''):
    """Does the logical line at ``leaf`` (plus ``suffix``) end with a newline?"""
    leaf = _skip_dedent_error_leaves(leaf)

    typ = leaf.token_type.lower() if leaf.type == 'error_leaf' else leaf.type

    return typ == 'newline' or suffix.endswith(('\n', '\r'))
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def _flows_finished(pgen_grammar, stack):
|
| 188 |
+
"""
|
| 189 |
+
if, while, for and try might not be finished, because another part might
|
| 190 |
+
still be parsed.
|
| 191 |
+
"""
|
| 192 |
+
for stack_node in stack:
|
| 193 |
+
if stack_node.nonterminal in ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt'):
|
| 194 |
+
return False
|
| 195 |
+
return True
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def _func_or_class_has_suite(node):
|
| 199 |
+
if node.type == 'decorated':
|
| 200 |
+
node = node.children[-1]
|
| 201 |
+
if node.type in ('async_funcdef', 'async_stmt'):
|
| 202 |
+
node = node.children[-1]
|
| 203 |
+
return node.type in ('classdef', 'funcdef') and node.children[-1].type == 'suite'
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def _suite_or_file_input_is_valid(pgen_grammar, stack):
    """Return True when the parser stack could legally stop at this point."""
    if not _flows_finished(pgen_grammar, stack):
        return False

    for stack_node in reversed(stack):
        nonterminal = stack_node.nonterminal
        if nonterminal == 'decorator':
            # A decorator is only valid together with the upcoming function,
            # so ending here would leave it dangling.
            return False
        if nonterminal == 'suite':
            # A suite that contains nothing but its introductory newline is
            # not a valid suite, yet.
            return len(stack_node.nodes) > 1
    # Not reaching a suite means that we're dealing with file_input levels
    # where there's no need for a valid statement in it. It can also be empty.
    return True
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def _is_flow_node(node):
|
| 224 |
+
if node.type == 'async_stmt':
|
| 225 |
+
node = node.children[1]
|
| 226 |
+
try:
|
| 227 |
+
value = node.children[0].value
|
| 228 |
+
except AttributeError:
|
| 229 |
+
return False
|
| 230 |
+
return value in ('if', 'for', 'while', 'try', 'with')
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
class _PositionUpdatingFinished(Exception):
    # Internal control-flow exception: raised by _update_positions() once the
    # designated last leaf has been updated, to abort the recursive tree walk
    # early.  Never meant to propagate to users.
    pass
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def _update_positions(nodes, line_offset, last_leaf):
    """Shift the ``line`` attribute of every leaf below ``nodes`` by
    ``line_offset``.

    Raises ``_PositionUpdatingFinished`` as soon as ``last_leaf`` has been
    updated, which lets the caller stop the traversal early.
    """
    for node in nodes:
        children = getattr(node, 'children', None)
        if children is None:
            # Leaves carry the actual position information.
            node.line += line_offset
            if node is last_leaf:
                raise _PositionUpdatingFinished
        else:
            _update_positions(children, line_offset, last_leaf)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class DiffParser:
    """
    An advanced form of parsing a file faster. Unfortunately comes with huge
    side effects. It changes the given module.
    """
    def __init__(self, pgen_grammar, tokenizer, module):
        # pgen_grammar: grammar passed straight through to Parser().
        # tokenizer: callable invoked as
        #   tokenizer(lines, start_pos=..., indents=..., is_first_token=...)
        #   (see _diff_tokenize) and as tokenizer(lines) in debug mode.
        # module: the module tree that update() mutates in place.
        self._pgen_grammar = pgen_grammar
        self._tokenizer = tokenizer
        self._module = module

    def _reset(self):
        """Reset per-update statistics and start a fresh _NodesTree."""
        self._copy_count = 0
        self._parser_count = 0

        self._nodes_tree = _NodesTree(self._module)

    def update(self, old_lines, new_lines):
        '''
        The algorithm works as follows:

        Equal:
            - Assure that the start is a newline, otherwise parse until we get
              one.
            - Copy from parsed_until_line + 1 to max(i2 + 1)
            - Make sure that the indentation is correct (e.g. add DEDENT)
            - Add old and change positions
        Insert:
            - Parse from parsed_until_line + 1 to min(j2 + 1), hopefully not
              much more.

        Returns the new module node.
        '''
        LOG.debug('diff parser start')
        # Reset the used names cache so they get regenerated.
        self._module._used_names = None

        self._parser_lines_new = new_lines

        self._reset()

        line_length = len(new_lines)
        sm = difflib.SequenceMatcher(None, old_lines, self._parser_lines_new)
        opcodes = sm.get_opcodes()
        LOG.debug('line_lengths old: %s; new: %s' % (len(old_lines), line_length))

        for operation, i1, i2, j1, j2 in opcodes:
            LOG.debug('-> code[%s] old[%s:%s] new[%s:%s]',
                      operation, i1 + 1, i2, j1 + 1, j2)

            if j2 == line_length and new_lines[-1] == '':
                # The empty part after the last newline is not relevant.
                j2 -= 1

            if operation == 'equal':
                line_offset = j1 - i1
                self._copy_from_old_parser(line_offset, i1 + 1, i2, j2)
            elif operation == 'replace':
                self._parse(until_line=j2)
            elif operation == 'insert':
                self._parse(until_line=j2)
            else:
                assert operation == 'delete'

        # With this action all change will finally be applied and we have a
        # changed module.
        self._nodes_tree.close()

        if DEBUG_DIFF_PARSER:
            # If there is reasonable suspicion that the diff parser is not
            # behaving well, this should be enabled.
            try:
                code = ''.join(new_lines)
                assert self._module.get_code() == code
                _assert_valid_graph(self._module)
                without_diff_parser_module = Parser(
                    self._pgen_grammar,
                    error_recovery=True
                ).parse(self._tokenizer(new_lines))
                _assert_nodes_are_equal(self._module, without_diff_parser_module)
            except AssertionError:
                print(_get_debug_error_message(self._module, old_lines, new_lines))
                raise

        last_pos = self._module.end_pos[0]
        if last_pos != line_length:
            raise Exception(
                ('(%s != %s) ' % (last_pos, line_length))
                + _get_debug_error_message(self._module, old_lines, new_lines)
            )
        LOG.debug('diff parser end')
        return self._module

    def _enabled_debugging(self, old_lines, lines_new):
        # Debug helper: warn when the rebuilt module's code does not round-trip
        # to the expected new source.
        if self._module.get_code() != ''.join(lines_new):
            LOG.warning('parser issue:\n%s\n%s', ''.join(old_lines), ''.join(lines_new))

    def _copy_from_old_parser(self, line_offset, start_line_old, until_line_old, until_line_new):
        """Copy nodes from the old tree for an 'equal' opcode region.

        Alternates between copying whole statements from the old tree and
        re-parsing single lines until the new tree covers until_line_new.
        """
        last_until_line = -1
        while until_line_new > self._nodes_tree.parsed_until_line:
            parsed_until_line_old = self._nodes_tree.parsed_until_line - line_offset
            line_stmt = self._get_old_line_stmt(parsed_until_line_old + 1)
            if line_stmt is None:
                # Parse 1 line at least. We don't need more, because we just
                # want to get into a state where the old parser has statements
                # again that can be copied (e.g. not lines within parentheses).
                self._parse(self._nodes_tree.parsed_until_line + 1)
            else:
                p_children = line_stmt.parent.children
                index = p_children.index(line_stmt)

                if start_line_old == 1 \
                        and p_children[0].get_first_leaf().prefix.startswith(BOM_UTF8_STRING):
                    # If there's a BOM in the beginning, just reparse. It's too
                    # complicated to account for it otherwise.
                    copied_nodes = []
                else:
                    from_ = self._nodes_tree.parsed_until_line + 1
                    copied_nodes = self._nodes_tree.copy_nodes(
                        p_children[index:],
                        until_line_old,
                        line_offset
                    )
                # Match all the nodes that are in the wanted range.
                if copied_nodes:
                    self._copy_count += 1

                    to = self._nodes_tree.parsed_until_line

                    LOG.debug('copy old[%s:%s] new[%s:%s]',
                              copied_nodes[0].start_pos[0],
                              copied_nodes[-1].end_pos[0] - 1, from_, to)
                else:
                    # We have copied as much as possible (but definitely not too
                    # much). Therefore we just parse a bit more.
                    self._parse(self._nodes_tree.parsed_until_line + 1)
            # Since there are potential bugs that might loop here endlessly, we
            # just stop here.
            assert last_until_line != self._nodes_tree.parsed_until_line, last_until_line
            last_until_line = self._nodes_tree.parsed_until_line

    def _get_old_line_stmt(self, old_line):
        """Return the statement of the old tree starting at old_line, or None
        when that line does not begin a copyable statement."""
        leaf = self._module.get_leaf_for_position((old_line, 0), include_prefixes=True)

        if _ends_with_newline(leaf):
            leaf = leaf.get_next_leaf()
        if leaf.get_start_pos_of_prefix()[0] == old_line:
            node = leaf
            while node.parent.type not in ('file_input', 'suite'):
                node = node.parent

            # Make sure that if only the `else:` line of an if statement is
            # copied that not the whole thing is going to be copied.
            if node.start_pos[0] >= old_line:
                return node
        # Must be on the same line. Otherwise we need to parse that bit.
        return None

    def _parse(self, until_line):
        """
        Parses at least until the given line, but might just parse more until a
        valid state is reached.
        """
        last_until_line = 0
        while until_line > self._nodes_tree.parsed_until_line:
            node = self._try_parse_part(until_line)
            nodes = node.children

            self._nodes_tree.add_parsed_nodes(nodes, self._keyword_token_indents)
            if self._replace_tos_indent is not None:
                self._nodes_tree.indents[-1] = self._replace_tos_indent

            LOG.debug(
                'parse_part from %s to %s (to %s in part parser)',
                nodes[0].get_start_pos_of_prefix()[0],
                self._nodes_tree.parsed_until_line,
                node.end_pos[0] - 1
            )
            # Since the tokenizer sometimes has bugs, we cannot be sure that
            # this loop terminates. Therefore assert that there's always a
            # change.
            assert last_until_line != self._nodes_tree.parsed_until_line, last_until_line
            last_until_line = self._nodes_tree.parsed_until_line

    def _try_parse_part(self, until_line):
        """
        Sets up a normal parser that uses a spezialized tokenizer to only parse
        until a certain position (or a bit longer if the statement hasn't
        ended.
        """
        self._parser_count += 1
        # TODO speed up, shouldn't copy the whole list all the time.
        # memoryview?
        parsed_until_line = self._nodes_tree.parsed_until_line
        lines_after = self._parser_lines_new[parsed_until_line:]
        tokens = self._diff_tokenize(
            lines_after,
            until_line,
            line_offset=parsed_until_line
        )
        self._active_parser = Parser(
            self._pgen_grammar,
            error_recovery=True
        )
        return self._active_parser.parse(tokens=tokens)

    def _diff_tokenize(self, lines, until_line, line_offset=0):
        """Generator wrapping the tokenizer; stops early (yielding a synthetic
        ENDMARKER) once until_line is reached in a valid parser state.

        Side effects: fills self._keyword_token_indents (start_pos -> indents
        snapshot for `class`/`def` keywords) and may set
        self._replace_tos_indent when an error dedent must be forced.
        """
        was_newline = False
        indents = self._nodes_tree.indents
        initial_indentation_count = len(indents)

        tokens = self._tokenizer(
            lines,
            start_pos=(line_offset + 1, 0),
            indents=indents,
            is_first_token=line_offset == 0,
        )
        stack = self._active_parser.stack
        self._replace_tos_indent = None
        self._keyword_token_indents = {}
        # print('start', line_offset + 1, indents)
        for token in tokens:
            # print(token, indents)
            typ = token.type
            if typ == DEDENT:
                if len(indents) < initial_indentation_count:
                    # We are done here, only thing that can come now is an
                    # endmarker or another dedented code block.
                    while True:
                        typ, string, start_pos, prefix = token = next(tokens)
                        if typ in (DEDENT, ERROR_DEDENT):
                            if typ == ERROR_DEDENT:
                                # We want to force an error dedent in the next
                                # parser/pass. To make this possible we just
                                # increase the location by one.
                                self._replace_tos_indent = start_pos[1] + 1
                                pass
                        else:
                            break

                    if '\n' in prefix or '\r' in prefix:
                        prefix = re.sub(r'[^\n\r]+\Z', '', prefix)
                    else:
                        assert start_pos[1] >= len(prefix), repr(prefix)
                        if start_pos[1] - len(prefix) == 0:
                            prefix = ''
                    yield PythonToken(
                        ENDMARKER, '',
                        start_pos,
                        prefix
                    )
                    break
            elif typ == NEWLINE and token.start_pos[0] >= until_line:
                was_newline = True
            elif was_newline:
                was_newline = False
                if len(indents) == initial_indentation_count:
                    # Check if the parser is actually in a valid suite state.
                    if _suite_or_file_input_is_valid(self._pgen_grammar, stack):
                        yield PythonToken(ENDMARKER, '', token.start_pos, '')
                        break

            if typ == NAME and token.string in ('class', 'def'):
                self._keyword_token_indents[token.start_pos] = list(indents)

            yield token
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
class _NodesTreeNode:
    """Wrapper around one parso tree node while the diff parser rebuilds the
    tree.  Collects groups of children (with prefix/line-offset bookkeeping)
    and writes them back into the wrapped node in finish()."""

    # One batch of children added together: the prefix to prepend to the first
    # leaf, the child nodes themselves, and the line offset to apply up to
    # last_line_offset_leaf.
    _ChildrenGroup = namedtuple(
        '_ChildrenGroup',
        'prefix children line_offset last_line_offset_leaf')

    def __init__(self, tree_node, parent=None, indentation=0):
        self.tree_node = tree_node
        self._children_groups = []
        self.parent = parent
        self._node_children = []
        self.indentation = indentation

    def finish(self):
        """Flush all collected children groups into tree_node.children,
        applying prefixes and line offsets, then recurse into child nodes."""
        children = []
        for prefix, children_part, line_offset, last_line_offset_leaf in self._children_groups:
            first_leaf = _get_next_leaf_if_indentation(
                children_part[0].get_first_leaf()
            )

            first_leaf.prefix = prefix + first_leaf.prefix
            if line_offset != 0:
                try:
                    _update_positions(
                        children_part, line_offset, last_line_offset_leaf)
                except _PositionUpdatingFinished:
                    pass
            children += children_part
        self.tree_node.children = children
        # Reset the parents
        for node in children:
            node.parent = self.tree_node

        for node_child in self._node_children:
            node_child.finish()

    def add_child_node(self, child_node):
        # child_node: another _NodesTreeNode nested below this one.
        self._node_children.append(child_node)

    def add_tree_nodes(self, prefix, children, line_offset=0,
                       last_line_offset_leaf=None):
        """Record a group of parso nodes to be attached on finish().

        line_offset is applied (in finish) to all leaves up to and including
        last_line_offset_leaf, which defaults to the last leaf of the group.
        """
        if last_line_offset_leaf is None:
            last_line_offset_leaf = children[-1].get_last_leaf()
        group = self._ChildrenGroup(
            prefix, children, line_offset, last_line_offset_leaf
        )
        self._children_groups.append(group)

    def get_last_line(self, suffix):
        """Return the last fully parsed line of this node (and its children),
        taking the pending prefix text ``suffix`` into account."""
        line = 0
        if self._children_groups:
            children_group = self._children_groups[-1]
            last_leaf = _get_previous_leaf_if_indentation(
                children_group.last_line_offset_leaf
            )

            line = last_leaf.end_pos[0] + children_group.line_offset

            # Newlines end on the next line, which means that they would cover
            # the next line. That line is not fully parsed at this point.
            if _ends_with_newline(last_leaf, suffix):
                line -= 1
        line += len(split_lines(suffix)) - 1

        if suffix and not suffix.endswith('\n') and not suffix.endswith('\r'):
            # This is the end of a file (that doesn't end with a newline).
            line += 1

        if self._node_children:
            return max(line, self._node_children[-1].get_last_line(suffix))
        return line

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
class _NodesTree:
    """Builds the new module tree during a diff parse.

    Keeps a working stack of _NodesTreeNode wrappers (innermost suite last),
    the current pending prefix, and the list of open indentation levels.
    Nodes are either copied from the old tree (copy_nodes) or inserted from a
    fresh partial parse (add_parsed_nodes); close() finalizes the module.
    """
    def __init__(self, module):
        self._base_node = _NodesTreeNode(module)
        self._working_stack = [self._base_node]
        self._module = module
        self._prefix_remainder = ''
        self.prefix = ''
        self.indents = [0]

    @property
    def parsed_until_line(self):
        # The last line that is fully represented in the tree so far.
        return self._working_stack[-1].get_last_line(self.prefix)

    def _update_insertion_node(self, indentation):
        """Pop suites deeper than ``indentation`` off the working stack and
        return the node new children should be inserted into."""
        for node in reversed(list(self._working_stack)):
            if node.indentation < indentation or node is self._working_stack[0]:
                return node
            self._working_stack.pop()

    def add_parsed_nodes(self, tree_nodes, keyword_token_indents):
        """Attach freshly parsed nodes (minus their endmarker) to the tree."""
        old_prefix = self.prefix
        tree_nodes = self._remove_endmarker(tree_nodes)
        if not tree_nodes:
            self.prefix = old_prefix + self.prefix
            return

        assert tree_nodes[0].type != 'newline'

        node = self._update_insertion_node(tree_nodes[0].start_pos[1])
        assert node.tree_node.type in ('suite', 'file_input')
        node.add_tree_nodes(old_prefix, tree_nodes)
        # tos = Top of stack
        self._update_parsed_node_tos(tree_nodes[-1], keyword_token_indents)

    def _update_parsed_node_tos(self, tree_node, keyword_token_indents):
        """Push a new top-of-stack wrapper for a trailing suite, so following
        nodes can be inserted into that (possibly unfinished) suite."""
        if tree_node.type == 'suite':
            def_leaf = tree_node.parent.children[0]
            new_tos = _NodesTreeNode(
                tree_node,
                indentation=keyword_token_indents[def_leaf.start_pos][-1],
            )
            new_tos.add_tree_nodes('', list(tree_node.children))

            self._working_stack[-1].add_child_node(new_tos)
            self._working_stack.append(new_tos)

            self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)
        elif _func_or_class_has_suite(tree_node):
            self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)

    def _remove_endmarker(self, tree_nodes):
        """
        Helps cleaning up the tree nodes that get inserted.

        Strips a trailing endmarker, remembering its prefix in self.prefix
        (up to the last newline) and self._prefix_remainder (the rest).
        """
        last_leaf = tree_nodes[-1].get_last_leaf()
        is_endmarker = last_leaf.type == 'endmarker'
        self._prefix_remainder = ''
        if is_endmarker:
            prefix = last_leaf.prefix
            separation = max(prefix.rfind('\n'), prefix.rfind('\r'))
            if separation > -1:
                # Remove the whitespace part of the prefix after a newline.
                # That is not relevant if parentheses were opened. Always parse
                # until the end of a line.
                last_leaf.prefix, self._prefix_remainder = \
                    last_leaf.prefix[:separation + 1], last_leaf.prefix[separation + 1:]

        self.prefix = ''

        if is_endmarker:
            self.prefix = last_leaf.prefix

            tree_nodes = tree_nodes[:-1]
        return tree_nodes

    def _get_matching_indent_nodes(self, tree_nodes, is_new_suite):
        # There might be a random dedent where we have to stop copying.
        # Invalid indents are ok, because the parser handled that
        # properly before. An invalid dedent can happen, because a few
        # lines above there was an invalid indent.
        node_iterator = iter(tree_nodes)
        if is_new_suite:
            yield next(node_iterator)

        first_node = next(node_iterator)
        indent = _get_indentation(first_node)
        if not is_new_suite and indent not in self.indents:
            return
        yield first_node

        for n in node_iterator:
            if _get_indentation(n) != indent:
                return
            yield n

    def copy_nodes(self, tree_nodes, until_line, line_offset):
        """
        Copies tree nodes from the old parser tree.

        Returns the list of copied tree nodes (empty if nothing could be
        copied); tree state is rolled back when the copy fails.
        """
        if tree_nodes[0].type in ('error_leaf', 'error_node'):
            # Avoid copying errors in the beginning. Can lead to a lot of
            # issues.
            return []

        indentation = _get_indentation(tree_nodes[0])
        old_working_stack = list(self._working_stack)
        old_prefix = self.prefix
        old_indents = self.indents
        self.indents = [i for i in self.indents if i <= indentation]

        self._update_insertion_node(indentation)

        new_nodes, self._working_stack, self.prefix, added_indents = self._copy_nodes(
            list(self._working_stack),
            tree_nodes,
            until_line,
            line_offset,
            self.prefix,
        )
        if new_nodes:
            self.indents += added_indents
        else:
            self._working_stack = old_working_stack
            self.prefix = old_prefix
            self.indents = old_indents
        return new_nodes

    def _copy_nodes(self, working_stack, nodes, until_line, line_offset,
                    prefix='', is_nested=False):
        """Recursive worker for copy_nodes().

        Returns a tuple (new_nodes, working_stack, prefix, added_indents).
        """
        new_nodes = []
        added_indents = []

        nodes = list(self._get_matching_indent_nodes(
            nodes,
            is_new_suite=is_nested,
        ))

        new_prefix = ''
        for node in nodes:
            if node.start_pos[0] > until_line:
                break

            if node.type == 'endmarker':
                break

            if node.type == 'error_leaf' and node.token_type in ('DEDENT', 'ERROR_DEDENT'):
                break
            # TODO this check might take a bit of time for large files. We
            # might want to change this to do more intelligent guessing or
            # binary search.
            if _get_last_line(node) > until_line:
                # We can split up functions and classes later.
                if _func_or_class_has_suite(node):
                    new_nodes.append(node)
                break
            try:
                c = node.children
            except AttributeError:
                pass
            else:
                # This case basically appears with error recovery of one line
                # suites like `def foo(): bar.-`. In this case we might not
                # include a newline in the statement and we need to take care
                # of that.
                n = node
                if n.type == 'decorated':
                    n = n.children[-1]
                if n.type in ('async_funcdef', 'async_stmt'):
                    n = n.children[-1]
                if n.type in ('classdef', 'funcdef'):
                    suite_node = n.children[-1]
                else:
                    suite_node = c[-1]

                if suite_node.type in ('error_leaf', 'error_node'):
                    break

            new_nodes.append(node)

        # Pop error nodes at the end from the list
        if new_nodes:
            while new_nodes:
                last_node = new_nodes[-1]
                if (last_node.type in ('error_leaf', 'error_node')
                        or _is_flow_node(new_nodes[-1])):
                    # Error leafs/nodes don't have a defined start/end. Error
                    # nodes might not end with a newline (e.g. if there's an
                    # open `(`). Therefore ignore all of them unless they are
                    # succeeded with valid parser state.
                    # If we copy flows at the end, they might be continued
                    # after the copy limit (in the new parser).
                    # In this while loop we try to remove until we find a newline.
                    new_prefix = ''
                    new_nodes.pop()
                    while new_nodes:
                        last_node = new_nodes[-1]
                        if last_node.get_last_leaf().type == 'newline':
                            break
                        new_nodes.pop()
                    continue
                if len(new_nodes) > 1 and new_nodes[-2].type == 'error_node':
                    # The problem here is that Parso error recovery sometimes
                    # influences nodes before this node.
                    # Since the new last node is an error node this will get
                    # cleaned up in the next while iteration.
                    new_nodes.pop()
                    continue
                break

        if not new_nodes:
            return [], working_stack, prefix, added_indents

        tos = working_stack[-1]
        last_node = new_nodes[-1]
        had_valid_suite_last = False
        # Pop incomplete suites from the list
        if _func_or_class_has_suite(last_node):
            suite = last_node
            while suite.type != 'suite':
                suite = suite.children[-1]

            indent = _get_suite_indentation(suite)
            added_indents.append(indent)

            suite_tos = _NodesTreeNode(suite, indentation=_get_indentation(last_node))
            # Don't need to pass line_offset here, it's already done by the
            # parent.
            suite_nodes, new_working_stack, new_prefix, ai = self._copy_nodes(
                working_stack + [suite_tos], suite.children, until_line, line_offset,
                is_nested=True,
            )
            added_indents += ai
            if len(suite_nodes) < 2:
                # A suite only with newline is not valid.
                new_nodes.pop()
                new_prefix = ''
            else:
                assert new_nodes
                tos.add_child_node(suite_tos)
                working_stack = new_working_stack
                had_valid_suite_last = True

        if new_nodes:
            if not _ends_with_newline(new_nodes[-1].get_last_leaf()) and not had_valid_suite_last:
                p = new_nodes[-1].get_next_leaf().prefix
                # We are not allowed to remove the newline at the end of the
                # line, otherwise it's going to be missing. This happens e.g.
                # if a bracket is around before that moves newlines to
                # prefixes.
                new_prefix = split_lines(p, keepends=True)[0]

            if had_valid_suite_last:
                last = new_nodes[-1]
                if last.type == 'decorated':
                    last = last.children[-1]
                if last.type in ('async_funcdef', 'async_stmt'):
                    last = last.children[-1]
                last_line_offset_leaf = last.children[-2].get_last_leaf()
                assert last_line_offset_leaf == ':'
            else:
                last_line_offset_leaf = new_nodes[-1].get_last_leaf()
            tos.add_tree_nodes(
                prefix, new_nodes, line_offset, last_line_offset_leaf,
            )
            prefix = new_prefix
            self._prefix_remainder = ''

        return new_nodes, working_stack, prefix, added_indents

    def close(self):
        """Finalize the rebuilt tree and append the module's endmarker."""
        self._base_node.finish()

        # Add an endmarker.
        try:
            last_leaf = self._module.get_last_leaf()
        except IndexError:
            end_pos = [1, 0]
        else:
            last_leaf = _skip_dedent_error_leaves(last_leaf)
            end_pos = list(last_leaf.end_pos)
        lines = split_lines(self.prefix)
        assert len(lines) > 0
        if len(lines) == 1:
            if lines[0].startswith(BOM_UTF8_STRING) and end_pos == [1, 0]:
                # The BOM is part of the prefix but occupies no column.
                end_pos[1] -= 1
            end_pos[1] += len(lines[0])
        else:
            end_pos[0] += len(lines) - 1
            end_pos[1] = len(lines[-1])

        endmarker = EndMarker('', tuple(end_pos), self.prefix + self._prefix_remainder)
        endmarker.parent = self._module
        self._module.children.append(endmarker)
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/errors.py
ADDED
|
@@ -0,0 +1,1326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import codecs
|
| 3 |
+
import sys
|
| 4 |
+
import warnings
|
| 5 |
+
import re
|
| 6 |
+
from contextlib import contextmanager
|
| 7 |
+
|
| 8 |
+
from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
|
| 9 |
+
from parso.python.tokenize import _get_token_collection
|
| 10 |
+
|
| 11 |
+
_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
|
| 12 |
+
_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
|
| 13 |
+
# This is the maximal block size given by python.
|
| 14 |
+
_MAX_BLOCK_SIZE = 20
|
| 15 |
+
_MAX_INDENT_COUNT = 100
|
| 16 |
+
ALLOWED_FUTURES = (
|
| 17 |
+
'nested_scopes', 'generators', 'division', 'absolute_import',
|
| 18 |
+
'with_statement', 'print_function', 'unicode_literals', 'generator_stop',
|
| 19 |
+
)
|
| 20 |
+
_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _get_rhs_name(node, version):
|
| 24 |
+
type_ = node.type
|
| 25 |
+
if type_ == "lambdef":
|
| 26 |
+
return "lambda"
|
| 27 |
+
elif type_ == "atom":
|
| 28 |
+
comprehension = _get_comprehension_type(node)
|
| 29 |
+
first, second = node.children[:2]
|
| 30 |
+
if comprehension is not None:
|
| 31 |
+
return comprehension
|
| 32 |
+
elif second.type == "dictorsetmaker":
|
| 33 |
+
if version < (3, 8):
|
| 34 |
+
return "literal"
|
| 35 |
+
else:
|
| 36 |
+
if second.children[1] == ":" or second.children[0] == "**":
|
| 37 |
+
if version < (3, 10):
|
| 38 |
+
return "dict display"
|
| 39 |
+
else:
|
| 40 |
+
return "dict literal"
|
| 41 |
+
else:
|
| 42 |
+
return "set display"
|
| 43 |
+
elif (
|
| 44 |
+
first == "("
|
| 45 |
+
and (second == ")"
|
| 46 |
+
or (len(node.children) == 3 and node.children[1].type == "testlist_comp"))
|
| 47 |
+
):
|
| 48 |
+
return "tuple"
|
| 49 |
+
elif first == "(":
|
| 50 |
+
return _get_rhs_name(_remove_parens(node), version=version)
|
| 51 |
+
elif first == "[":
|
| 52 |
+
return "list"
|
| 53 |
+
elif first == "{" and second == "}":
|
| 54 |
+
if version < (3, 10):
|
| 55 |
+
return "dict display"
|
| 56 |
+
else:
|
| 57 |
+
return "dict literal"
|
| 58 |
+
elif first == "{" and len(node.children) > 2:
|
| 59 |
+
return "set display"
|
| 60 |
+
elif type_ == "keyword":
|
| 61 |
+
if "yield" in node.value:
|
| 62 |
+
return "yield expression"
|
| 63 |
+
if version < (3, 8):
|
| 64 |
+
return "keyword"
|
| 65 |
+
else:
|
| 66 |
+
return str(node.value)
|
| 67 |
+
elif type_ == "operator" and node.value == "...":
|
| 68 |
+
if version < (3, 10):
|
| 69 |
+
return "Ellipsis"
|
| 70 |
+
else:
|
| 71 |
+
return "ellipsis"
|
| 72 |
+
elif type_ == "comparison":
|
| 73 |
+
return "comparison"
|
| 74 |
+
elif type_ in ("string", "number", "strings"):
|
| 75 |
+
return "literal"
|
| 76 |
+
elif type_ == "yield_expr":
|
| 77 |
+
return "yield expression"
|
| 78 |
+
elif type_ == "test":
|
| 79 |
+
return "conditional expression"
|
| 80 |
+
elif type_ in ("atom_expr", "power"):
|
| 81 |
+
if node.children[0] == "await":
|
| 82 |
+
return "await expression"
|
| 83 |
+
elif node.children[-1].type == "trailer":
|
| 84 |
+
trailer = node.children[-1]
|
| 85 |
+
if trailer.children[0] == "(":
|
| 86 |
+
return "function call"
|
| 87 |
+
elif trailer.children[0] == "[":
|
| 88 |
+
return "subscript"
|
| 89 |
+
elif trailer.children[0] == ".":
|
| 90 |
+
return "attribute"
|
| 91 |
+
elif (
|
| 92 |
+
("expr" in type_ and "star_expr" not in type_) # is a substring
|
| 93 |
+
or "_test" in type_
|
| 94 |
+
or type_ in ("term", "factor")
|
| 95 |
+
):
|
| 96 |
+
if version < (3, 10):
|
| 97 |
+
return "operator"
|
| 98 |
+
else:
|
| 99 |
+
return "expression"
|
| 100 |
+
elif type_ == "star_expr":
|
| 101 |
+
return "starred"
|
| 102 |
+
elif type_ == "testlist_star_expr":
|
| 103 |
+
return "tuple"
|
| 104 |
+
elif type_ == "fstring":
|
| 105 |
+
return "f-string expression"
|
| 106 |
+
return type_ # shouldn't reach here
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _iter_stmts(scope):
|
| 110 |
+
"""
|
| 111 |
+
Iterates over all statements and splits up simple_stmt.
|
| 112 |
+
"""
|
| 113 |
+
for child in scope.children:
|
| 114 |
+
if child.type == 'simple_stmt':
|
| 115 |
+
for child2 in child.children:
|
| 116 |
+
if child2.type == 'newline' or child2 == ';':
|
| 117 |
+
continue
|
| 118 |
+
yield child2
|
| 119 |
+
else:
|
| 120 |
+
yield child
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _get_comprehension_type(atom):
|
| 124 |
+
first, second = atom.children[:2]
|
| 125 |
+
if second.type == 'testlist_comp' and second.children[1].type in _COMP_FOR_TYPES:
|
| 126 |
+
if first == '[':
|
| 127 |
+
return 'list comprehension'
|
| 128 |
+
else:
|
| 129 |
+
return 'generator expression'
|
| 130 |
+
elif second.type == 'dictorsetmaker' and second.children[-1].type in _COMP_FOR_TYPES:
|
| 131 |
+
if second.children[1] == ':':
|
| 132 |
+
return 'dict comprehension'
|
| 133 |
+
else:
|
| 134 |
+
return 'set comprehension'
|
| 135 |
+
return None
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def _is_future_import(import_from):
|
| 139 |
+
# It looks like a __future__ import that is relative is still a future
|
| 140 |
+
# import. That feels kind of odd, but whatever.
|
| 141 |
+
# if import_from.level != 0:
|
| 142 |
+
# return False
|
| 143 |
+
from_names = import_from.get_from_names()
|
| 144 |
+
return [n.value for n in from_names] == ['__future__']
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def _remove_parens(atom):
|
| 148 |
+
"""
|
| 149 |
+
Returns the inner part of an expression like `(foo)`. Also removes nested
|
| 150 |
+
parens.
|
| 151 |
+
"""
|
| 152 |
+
try:
|
| 153 |
+
children = atom.children
|
| 154 |
+
except AttributeError:
|
| 155 |
+
pass
|
| 156 |
+
else:
|
| 157 |
+
if len(children) == 3 and children[0] == '(':
|
| 158 |
+
return _remove_parens(atom.children[1])
|
| 159 |
+
return atom
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _skip_parens_bottom_up(node):
|
| 163 |
+
"""
|
| 164 |
+
Returns an ancestor node of an expression, skipping all levels of parens
|
| 165 |
+
bottom-up.
|
| 166 |
+
"""
|
| 167 |
+
while node.parent is not None:
|
| 168 |
+
node = node.parent
|
| 169 |
+
if node.type != 'atom' or node.children[0] != '(':
|
| 170 |
+
return node
|
| 171 |
+
return None
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def _iter_params(parent_node):
|
| 175 |
+
return (n for n in parent_node.children if n.type == 'param' or n.type == 'operator')
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def _is_future_import_first(import_from):
|
| 179 |
+
"""
|
| 180 |
+
Checks if the import is the first statement of a file.
|
| 181 |
+
"""
|
| 182 |
+
found_docstring = False
|
| 183 |
+
for stmt in _iter_stmts(import_from.get_root_node()):
|
| 184 |
+
if stmt.type == 'string' and not found_docstring:
|
| 185 |
+
continue
|
| 186 |
+
found_docstring = True
|
| 187 |
+
|
| 188 |
+
if stmt == import_from:
|
| 189 |
+
return True
|
| 190 |
+
if stmt.type == 'import_from' and _is_future_import(stmt):
|
| 191 |
+
continue
|
| 192 |
+
return False
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _iter_definition_exprs_from_lists(exprlist):
|
| 196 |
+
def check_expr(child):
|
| 197 |
+
if child.type == 'atom':
|
| 198 |
+
if child.children[0] == '(':
|
| 199 |
+
testlist_comp = child.children[1]
|
| 200 |
+
if testlist_comp.type == 'testlist_comp':
|
| 201 |
+
yield from _iter_definition_exprs_from_lists(testlist_comp)
|
| 202 |
+
return
|
| 203 |
+
else:
|
| 204 |
+
# It's a paren that doesn't do anything, like 1 + (1)
|
| 205 |
+
yield from check_expr(testlist_comp)
|
| 206 |
+
return
|
| 207 |
+
elif child.children[0] == '[':
|
| 208 |
+
yield testlist_comp
|
| 209 |
+
return
|
| 210 |
+
yield child
|
| 211 |
+
|
| 212 |
+
if exprlist.type in _STAR_EXPR_PARENTS:
|
| 213 |
+
for child in exprlist.children[::2]:
|
| 214 |
+
yield from check_expr(child)
|
| 215 |
+
else:
|
| 216 |
+
yield from check_expr(exprlist)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def _get_expr_stmt_definition_exprs(expr_stmt):
|
| 220 |
+
exprs = []
|
| 221 |
+
for list_ in expr_stmt.children[:-2:2]:
|
| 222 |
+
if list_.type in ('testlist_star_expr', 'testlist'):
|
| 223 |
+
exprs += _iter_definition_exprs_from_lists(list_)
|
| 224 |
+
else:
|
| 225 |
+
exprs.append(list_)
|
| 226 |
+
return exprs
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def _get_for_stmt_definition_exprs(for_stmt):
|
| 230 |
+
exprlist = for_stmt.children[1]
|
| 231 |
+
return list(_iter_definition_exprs_from_lists(exprlist))
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _is_argument_comprehension(argument):
|
| 235 |
+
return argument.children[1].type in _COMP_FOR_TYPES
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def _any_fstring_error(version, node):
|
| 239 |
+
if version < (3, 9) or node is None:
|
| 240 |
+
return False
|
| 241 |
+
if node.type == "error_node":
|
| 242 |
+
return any(child.type == "fstring_start" for child in node.children)
|
| 243 |
+
elif node.type == "fstring":
|
| 244 |
+
return True
|
| 245 |
+
else:
|
| 246 |
+
return node.search_ancestor("fstring")
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class _Context:
|
| 250 |
+
def __init__(self, node, add_syntax_error, parent_context=None):
|
| 251 |
+
self.node = node
|
| 252 |
+
self.blocks = []
|
| 253 |
+
self.parent_context = parent_context
|
| 254 |
+
self._used_name_dict = {}
|
| 255 |
+
self._global_names = []
|
| 256 |
+
self._local_params_names = []
|
| 257 |
+
self._nonlocal_names = []
|
| 258 |
+
self._nonlocal_names_in_subscopes = []
|
| 259 |
+
self._add_syntax_error = add_syntax_error
|
| 260 |
+
|
| 261 |
+
def is_async_funcdef(self):
|
| 262 |
+
# Stupidly enough async funcdefs can have two different forms,
|
| 263 |
+
# depending if a decorator is used or not.
|
| 264 |
+
return self.is_function() \
|
| 265 |
+
and self.node.parent.type in ('async_funcdef', 'async_stmt')
|
| 266 |
+
|
| 267 |
+
def is_function(self):
|
| 268 |
+
return self.node.type == 'funcdef'
|
| 269 |
+
|
| 270 |
+
def add_name(self, name):
|
| 271 |
+
parent_type = name.parent.type
|
| 272 |
+
if parent_type == 'trailer':
|
| 273 |
+
# We are only interested in first level names.
|
| 274 |
+
return
|
| 275 |
+
|
| 276 |
+
if parent_type == 'global_stmt':
|
| 277 |
+
self._global_names.append(name)
|
| 278 |
+
elif parent_type == 'nonlocal_stmt':
|
| 279 |
+
self._nonlocal_names.append(name)
|
| 280 |
+
elif parent_type == 'funcdef':
|
| 281 |
+
self._local_params_names.extend(
|
| 282 |
+
[param.name.value for param in name.parent.get_params()]
|
| 283 |
+
)
|
| 284 |
+
else:
|
| 285 |
+
self._used_name_dict.setdefault(name.value, []).append(name)
|
| 286 |
+
|
| 287 |
+
def finalize(self):
|
| 288 |
+
"""
|
| 289 |
+
Returns a list of nonlocal names that need to be part of that scope.
|
| 290 |
+
"""
|
| 291 |
+
self._analyze_names(self._global_names, 'global')
|
| 292 |
+
self._analyze_names(self._nonlocal_names, 'nonlocal')
|
| 293 |
+
|
| 294 |
+
global_name_strs = {n.value: n for n in self._global_names}
|
| 295 |
+
for nonlocal_name in self._nonlocal_names:
|
| 296 |
+
try:
|
| 297 |
+
global_name = global_name_strs[nonlocal_name.value]
|
| 298 |
+
except KeyError:
|
| 299 |
+
continue
|
| 300 |
+
|
| 301 |
+
message = "name '%s' is nonlocal and global" % global_name.value
|
| 302 |
+
if global_name.start_pos < nonlocal_name.start_pos:
|
| 303 |
+
error_name = global_name
|
| 304 |
+
else:
|
| 305 |
+
error_name = nonlocal_name
|
| 306 |
+
self._add_syntax_error(error_name, message)
|
| 307 |
+
|
| 308 |
+
nonlocals_not_handled = []
|
| 309 |
+
for nonlocal_name in self._nonlocal_names_in_subscopes:
|
| 310 |
+
search = nonlocal_name.value
|
| 311 |
+
if search in self._local_params_names:
|
| 312 |
+
continue
|
| 313 |
+
if search in global_name_strs or self.parent_context is None:
|
| 314 |
+
message = "no binding for nonlocal '%s' found" % nonlocal_name.value
|
| 315 |
+
self._add_syntax_error(nonlocal_name, message)
|
| 316 |
+
elif not self.is_function() or \
|
| 317 |
+
nonlocal_name.value not in self._used_name_dict:
|
| 318 |
+
nonlocals_not_handled.append(nonlocal_name)
|
| 319 |
+
return self._nonlocal_names + nonlocals_not_handled
|
| 320 |
+
|
| 321 |
+
def _analyze_names(self, globals_or_nonlocals, type_):
|
| 322 |
+
def raise_(message):
|
| 323 |
+
self._add_syntax_error(base_name, message % (base_name.value, type_))
|
| 324 |
+
|
| 325 |
+
params = []
|
| 326 |
+
if self.node.type == 'funcdef':
|
| 327 |
+
params = self.node.get_params()
|
| 328 |
+
|
| 329 |
+
for base_name in globals_or_nonlocals:
|
| 330 |
+
found_global_or_nonlocal = False
|
| 331 |
+
# Somehow Python does it the reversed way.
|
| 332 |
+
for name in reversed(self._used_name_dict.get(base_name.value, [])):
|
| 333 |
+
if name.start_pos > base_name.start_pos:
|
| 334 |
+
# All following names don't have to be checked.
|
| 335 |
+
found_global_or_nonlocal = True
|
| 336 |
+
|
| 337 |
+
parent = name.parent
|
| 338 |
+
if parent.type == 'param' and parent.name == name:
|
| 339 |
+
# Skip those here, these definitions belong to the next
|
| 340 |
+
# scope.
|
| 341 |
+
continue
|
| 342 |
+
|
| 343 |
+
if name.is_definition():
|
| 344 |
+
if parent.type == 'expr_stmt' \
|
| 345 |
+
and parent.children[1].type == 'annassign':
|
| 346 |
+
if found_global_or_nonlocal:
|
| 347 |
+
# If it's after the global the error seems to be
|
| 348 |
+
# placed there.
|
| 349 |
+
base_name = name
|
| 350 |
+
raise_("annotated name '%s' can't be %s")
|
| 351 |
+
break
|
| 352 |
+
else:
|
| 353 |
+
message = "name '%s' is assigned to before %s declaration"
|
| 354 |
+
else:
|
| 355 |
+
message = "name '%s' is used prior to %s declaration"
|
| 356 |
+
|
| 357 |
+
if not found_global_or_nonlocal:
|
| 358 |
+
raise_(message)
|
| 359 |
+
# Only add an error for the first occurence.
|
| 360 |
+
break
|
| 361 |
+
|
| 362 |
+
for param in params:
|
| 363 |
+
if param.name.value == base_name.value:
|
| 364 |
+
raise_("name '%s' is parameter and %s"),
|
| 365 |
+
|
| 366 |
+
@contextmanager
|
| 367 |
+
def add_block(self, node):
|
| 368 |
+
self.blocks.append(node)
|
| 369 |
+
yield
|
| 370 |
+
self.blocks.pop()
|
| 371 |
+
|
| 372 |
+
def add_context(self, node):
|
| 373 |
+
return _Context(node, self._add_syntax_error, parent_context=self)
|
| 374 |
+
|
| 375 |
+
def close_child_context(self, child_context):
|
| 376 |
+
self._nonlocal_names_in_subscopes += child_context.finalize()
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
class ErrorFinder(Normalizer):
|
| 380 |
+
"""
|
| 381 |
+
Searches for errors in the syntax tree.
|
| 382 |
+
"""
|
| 383 |
+
def __init__(self, *args, **kwargs):
|
| 384 |
+
super().__init__(*args, **kwargs)
|
| 385 |
+
self._error_dict = {}
|
| 386 |
+
self.version = self.grammar.version_info
|
| 387 |
+
|
| 388 |
+
def initialize(self, node):
|
| 389 |
+
def create_context(node):
|
| 390 |
+
if node is None:
|
| 391 |
+
return None
|
| 392 |
+
|
| 393 |
+
parent_context = create_context(node.parent)
|
| 394 |
+
if node.type in ('classdef', 'funcdef', 'file_input'):
|
| 395 |
+
return _Context(node, self._add_syntax_error, parent_context)
|
| 396 |
+
return parent_context
|
| 397 |
+
|
| 398 |
+
self.context = create_context(node) or _Context(node, self._add_syntax_error)
|
| 399 |
+
self._indentation_count = 0
|
| 400 |
+
|
| 401 |
+
def visit(self, node):
|
| 402 |
+
if node.type == 'error_node':
|
| 403 |
+
with self.visit_node(node):
|
| 404 |
+
# Don't need to investigate the inners of an error node. We
|
| 405 |
+
# might find errors in there that should be ignored, because
|
| 406 |
+
# the error node itself already shows that there's an issue.
|
| 407 |
+
return ''
|
| 408 |
+
return super().visit(node)
|
| 409 |
+
|
| 410 |
+
@contextmanager
|
| 411 |
+
def visit_node(self, node):
|
| 412 |
+
self._check_type_rules(node)
|
| 413 |
+
|
| 414 |
+
if node.type in _BLOCK_STMTS:
|
| 415 |
+
with self.context.add_block(node):
|
| 416 |
+
if len(self.context.blocks) == _MAX_BLOCK_SIZE:
|
| 417 |
+
self._add_syntax_error(node, "too many statically nested blocks")
|
| 418 |
+
yield
|
| 419 |
+
return
|
| 420 |
+
elif node.type == 'suite':
|
| 421 |
+
self._indentation_count += 1
|
| 422 |
+
if self._indentation_count == _MAX_INDENT_COUNT:
|
| 423 |
+
self._add_indentation_error(node.children[1], "too many levels of indentation")
|
| 424 |
+
|
| 425 |
+
yield
|
| 426 |
+
|
| 427 |
+
if node.type == 'suite':
|
| 428 |
+
self._indentation_count -= 1
|
| 429 |
+
elif node.type in ('classdef', 'funcdef'):
|
| 430 |
+
context = self.context
|
| 431 |
+
self.context = context.parent_context
|
| 432 |
+
self.context.close_child_context(context)
|
| 433 |
+
|
| 434 |
+
def visit_leaf(self, leaf):
|
| 435 |
+
if leaf.type == 'error_leaf':
|
| 436 |
+
if leaf.token_type in ('INDENT', 'ERROR_DEDENT'):
|
| 437 |
+
# Indents/Dedents itself never have a prefix. They are just
|
| 438 |
+
# "pseudo" tokens that get removed by the syntax tree later.
|
| 439 |
+
# Therefore in case of an error we also have to check for this.
|
| 440 |
+
spacing = list(leaf.get_next_leaf()._split_prefix())[-1]
|
| 441 |
+
if leaf.token_type == 'INDENT':
|
| 442 |
+
message = 'unexpected indent'
|
| 443 |
+
else:
|
| 444 |
+
message = 'unindent does not match any outer indentation level'
|
| 445 |
+
self._add_indentation_error(spacing, message)
|
| 446 |
+
else:
|
| 447 |
+
if leaf.value.startswith('\\'):
|
| 448 |
+
message = 'unexpected character after line continuation character'
|
| 449 |
+
else:
|
| 450 |
+
match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value)
|
| 451 |
+
if match is None:
|
| 452 |
+
message = 'invalid syntax'
|
| 453 |
+
if (
|
| 454 |
+
self.version >= (3, 9)
|
| 455 |
+
and leaf.value in _get_token_collection(
|
| 456 |
+
self.version
|
| 457 |
+
).always_break_tokens
|
| 458 |
+
):
|
| 459 |
+
message = "f-string: " + message
|
| 460 |
+
else:
|
| 461 |
+
if len(match.group(1)) == 1:
|
| 462 |
+
message = 'EOL while scanning string literal'
|
| 463 |
+
else:
|
| 464 |
+
message = 'EOF while scanning triple-quoted string literal'
|
| 465 |
+
self._add_syntax_error(leaf, message)
|
| 466 |
+
return ''
|
| 467 |
+
elif leaf.value == ':':
|
| 468 |
+
parent = leaf.parent
|
| 469 |
+
if parent.type in ('classdef', 'funcdef'):
|
| 470 |
+
self.context = self.context.add_context(parent)
|
| 471 |
+
|
| 472 |
+
# The rest is rule based.
|
| 473 |
+
return super().visit_leaf(leaf)
|
| 474 |
+
|
| 475 |
+
def _add_indentation_error(self, spacing, message):
|
| 476 |
+
self.add_issue(spacing, 903, "IndentationError: " + message)
|
| 477 |
+
|
| 478 |
+
def _add_syntax_error(self, node, message):
|
| 479 |
+
self.add_issue(node, 901, "SyntaxError: " + message)
|
| 480 |
+
|
| 481 |
+
def add_issue(self, node, code, message):
|
| 482 |
+
# Overwrite the default behavior.
|
| 483 |
+
# Check if the issues are on the same line.
|
| 484 |
+
line = node.start_pos[0]
|
| 485 |
+
args = (code, message, node)
|
| 486 |
+
self._error_dict.setdefault(line, args)
|
| 487 |
+
|
| 488 |
+
def finalize(self):
|
| 489 |
+
self.context.finalize()
|
| 490 |
+
|
| 491 |
+
for code, message, node in self._error_dict.values():
|
| 492 |
+
self.issues.append(Issue(node, code, message))
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
class IndentationRule(Rule):
|
| 496 |
+
code = 903
|
| 497 |
+
|
| 498 |
+
def _get_message(self, message, node):
|
| 499 |
+
message = super()._get_message(message, node)
|
| 500 |
+
return "IndentationError: " + message
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
@ErrorFinder.register_rule(type='error_node')
|
| 504 |
+
class _ExpectIndentedBlock(IndentationRule):
|
| 505 |
+
message = 'expected an indented block'
|
| 506 |
+
|
| 507 |
+
def get_node(self, node):
|
| 508 |
+
leaf = node.get_next_leaf()
|
| 509 |
+
return list(leaf._split_prefix())[-1]
|
| 510 |
+
|
| 511 |
+
def is_issue(self, node):
|
| 512 |
+
# This is the beginning of a suite that is not indented.
|
| 513 |
+
return node.children[-1].type == 'newline'
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
class ErrorFinderConfig(NormalizerConfig):
|
| 517 |
+
normalizer_class = ErrorFinder
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
class SyntaxRule(Rule):
|
| 521 |
+
code = 901
|
| 522 |
+
|
| 523 |
+
def _get_message(self, message, node):
|
| 524 |
+
message = super()._get_message(message, node)
|
| 525 |
+
if (
|
| 526 |
+
"f-string" not in message
|
| 527 |
+
and _any_fstring_error(self._normalizer.version, node)
|
| 528 |
+
):
|
| 529 |
+
message = "f-string: " + message
|
| 530 |
+
return "SyntaxError: " + message
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
@ErrorFinder.register_rule(type='error_node')
|
| 534 |
+
class _InvalidSyntaxRule(SyntaxRule):
|
| 535 |
+
message = "invalid syntax"
|
| 536 |
+
fstring_message = "f-string: invalid syntax"
|
| 537 |
+
|
| 538 |
+
def get_node(self, node):
|
| 539 |
+
return node.get_next_leaf()
|
| 540 |
+
|
| 541 |
+
def is_issue(self, node):
|
| 542 |
+
error = node.get_next_leaf().type != 'error_leaf'
|
| 543 |
+
if (
|
| 544 |
+
error
|
| 545 |
+
and _any_fstring_error(self._normalizer.version, node)
|
| 546 |
+
):
|
| 547 |
+
self.add_issue(node, message=self.fstring_message)
|
| 548 |
+
else:
|
| 549 |
+
# Error leafs will be added later as an error.
|
| 550 |
+
return error
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
@ErrorFinder.register_rule(value='await')
|
| 554 |
+
class _AwaitOutsideAsync(SyntaxRule):
|
| 555 |
+
message = "'await' outside async function"
|
| 556 |
+
|
| 557 |
+
def is_issue(self, leaf):
|
| 558 |
+
return not self._normalizer.context.is_async_funcdef()
|
| 559 |
+
|
| 560 |
+
def get_error_node(self, node):
|
| 561 |
+
# Return the whole await statement.
|
| 562 |
+
return node.parent
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
@ErrorFinder.register_rule(value='break')
|
| 566 |
+
class _BreakOutsideLoop(SyntaxRule):
|
| 567 |
+
message = "'break' outside loop"
|
| 568 |
+
|
| 569 |
+
def is_issue(self, leaf):
|
| 570 |
+
in_loop = False
|
| 571 |
+
for block in self._normalizer.context.blocks:
|
| 572 |
+
if block.type in ('for_stmt', 'while_stmt'):
|
| 573 |
+
in_loop = True
|
| 574 |
+
return not in_loop
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
@ErrorFinder.register_rule(value='continue')
|
| 578 |
+
class _ContinueChecks(SyntaxRule):
|
| 579 |
+
message = "'continue' not properly in loop"
|
| 580 |
+
message_in_finally = "'continue' not supported inside 'finally' clause"
|
| 581 |
+
|
| 582 |
+
def is_issue(self, leaf):
|
| 583 |
+
in_loop = False
|
| 584 |
+
for block in self._normalizer.context.blocks:
|
| 585 |
+
if block.type in ('for_stmt', 'while_stmt'):
|
| 586 |
+
in_loop = True
|
| 587 |
+
if block.type == 'try_stmt':
|
| 588 |
+
last_block = block.children[-3]
|
| 589 |
+
if (
|
| 590 |
+
last_block == "finally"
|
| 591 |
+
and leaf.start_pos > last_block.start_pos
|
| 592 |
+
and self._normalizer.version < (3, 8)
|
| 593 |
+
):
|
| 594 |
+
self.add_issue(leaf, message=self.message_in_finally)
|
| 595 |
+
return False # Error already added
|
| 596 |
+
if not in_loop:
|
| 597 |
+
return True
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
@ErrorFinder.register_rule(value='from')
|
| 601 |
+
class _YieldFromCheck(SyntaxRule):
|
| 602 |
+
message = "'yield from' inside async function"
|
| 603 |
+
|
| 604 |
+
def get_node(self, leaf):
|
| 605 |
+
return leaf.parent.parent # This is the actual yield statement.
|
| 606 |
+
|
| 607 |
+
def is_issue(self, leaf):
|
| 608 |
+
return leaf.parent.type == 'yield_arg' \
|
| 609 |
+
and self._normalizer.context.is_async_funcdef()
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
@ErrorFinder.register_rule(type='name')
|
| 613 |
+
class _NameChecks(SyntaxRule):
|
| 614 |
+
message = 'cannot assign to __debug__'
|
| 615 |
+
message_none = 'cannot assign to None'
|
| 616 |
+
|
| 617 |
+
def is_issue(self, leaf):
|
| 618 |
+
self._normalizer.context.add_name(leaf)
|
| 619 |
+
|
| 620 |
+
if leaf.value == '__debug__' and leaf.is_definition():
|
| 621 |
+
return True
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
@ErrorFinder.register_rule(type='string')
|
| 625 |
+
class _StringChecks(SyntaxRule):
|
| 626 |
+
if sys.version_info < (3, 10):
|
| 627 |
+
message = "bytes can only contain ASCII literal characters."
|
| 628 |
+
else:
|
| 629 |
+
message = "bytes can only contain ASCII literal characters"
|
| 630 |
+
|
| 631 |
+
def is_issue(self, leaf):
|
| 632 |
+
string_prefix = leaf.string_prefix.lower()
|
| 633 |
+
if 'b' in string_prefix \
|
| 634 |
+
and any(c for c in leaf.value if ord(c) > 127):
|
| 635 |
+
# b'ä'
|
| 636 |
+
return True
|
| 637 |
+
|
| 638 |
+
if 'r' not in string_prefix:
|
| 639 |
+
# Raw strings don't need to be checked if they have proper
|
| 640 |
+
# escaping.
|
| 641 |
+
|
| 642 |
+
payload = leaf._get_payload()
|
| 643 |
+
if 'b' in string_prefix:
|
| 644 |
+
payload = payload.encode('utf-8')
|
| 645 |
+
func = codecs.escape_decode
|
| 646 |
+
else:
|
| 647 |
+
func = codecs.unicode_escape_decode
|
| 648 |
+
|
| 649 |
+
try:
|
| 650 |
+
with warnings.catch_warnings():
|
| 651 |
+
# The warnings from parsing strings are not relevant.
|
| 652 |
+
warnings.filterwarnings('ignore')
|
| 653 |
+
func(payload)
|
| 654 |
+
except UnicodeDecodeError as e:
|
| 655 |
+
self.add_issue(leaf, message='(unicode error) ' + str(e))
|
| 656 |
+
except ValueError as e:
|
| 657 |
+
self.add_issue(leaf, message='(value error) ' + str(e))
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
@ErrorFinder.register_rule(value='*')
|
| 661 |
+
class _StarCheck(SyntaxRule):
|
| 662 |
+
message = "named arguments must follow bare *"
|
| 663 |
+
|
| 664 |
+
def is_issue(self, leaf):
|
| 665 |
+
params = leaf.parent
|
| 666 |
+
if params.type == 'parameters' and params:
|
| 667 |
+
after = params.children[params.children.index(leaf) + 1:]
|
| 668 |
+
after = [child for child in after
|
| 669 |
+
if child not in (',', ')') and not child.star_count]
|
| 670 |
+
return len(after) == 0
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
@ErrorFinder.register_rule(value='**')
|
| 674 |
+
class _StarStarCheck(SyntaxRule):
|
| 675 |
+
# e.g. {**{} for a in [1]}
|
| 676 |
+
# TODO this should probably get a better end_pos including
|
| 677 |
+
# the next sibling of leaf.
|
| 678 |
+
message = "dict unpacking cannot be used in dict comprehension"
|
| 679 |
+
|
| 680 |
+
def is_issue(self, leaf):
|
| 681 |
+
if leaf.parent.type == 'dictorsetmaker':
|
| 682 |
+
comp_for = leaf.get_next_sibling().get_next_sibling()
|
| 683 |
+
return comp_for is not None and comp_for.type in _COMP_FOR_TYPES
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
@ErrorFinder.register_rule(value='yield')
|
| 687 |
+
@ErrorFinder.register_rule(value='return')
|
| 688 |
+
class _ReturnAndYieldChecks(SyntaxRule):
|
| 689 |
+
message = "'return' with value in async generator"
|
| 690 |
+
message_async_yield = "'yield' inside async function"
|
| 691 |
+
|
| 692 |
+
def get_node(self, leaf):
|
| 693 |
+
return leaf.parent
|
| 694 |
+
|
| 695 |
+
def is_issue(self, leaf):
|
| 696 |
+
if self._normalizer.context.node.type != 'funcdef':
|
| 697 |
+
self.add_issue(self.get_node(leaf), message="'%s' outside function" % leaf.value)
|
| 698 |
+
elif self._normalizer.context.is_async_funcdef() \
|
| 699 |
+
and any(self._normalizer.context.node.iter_yield_exprs()):
|
| 700 |
+
if leaf.value == 'return' and leaf.parent.type == 'return_stmt':
|
| 701 |
+
return True
|
| 702 |
+
|
| 703 |
+
|
| 704 |
+
@ErrorFinder.register_rule(type='strings')
class _BytesAndStringMix(SyntaxRule):
    # e.g. 's' b''
    message = "cannot mix bytes and nonbytes literals"

    def _is_bytes_literal(self, string):
        """Return True for bytes literals; f-strings are never bytes."""
        if string.type == 'fstring':
            return False
        return 'b' in string.string_prefix.lower()

    def is_issue(self, node):
        """True when a concatenated literal disagrees with the first one."""
        literals = node.children
        first_is_bytes = self._is_bytes_literal(literals[0])
        if any(first_is_bytes != self._is_bytes_literal(other)
               for other in literals[1:]):
            return True
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
@ErrorFinder.register_rule(type='import_as_names')
class _TrailingImportComma(SyntaxRule):
    # e.g. from foo import a,
    message = "trailing comma not allowed without surrounding parentheses"

    def is_issue(self, node):
        """A trailing comma is only legal when the names are parenthesized."""
        ends_with_comma = node.children[-1] == ','
        parenthesized = node.parent.children[-1] == ')'
        if ends_with_comma and not parenthesized:
            return True
|
| 730 |
+
|
| 731 |
+
|
| 732 |
+
@ErrorFinder.register_rule(type='import_from')
class _ImportStarInFunction(SyntaxRule):
    message = "import * only allowed at module level"

    def is_issue(self, node):
        """Star imports are an issue whenever we are not at module level."""
        if not node.is_star_import():
            return False
        # A parent context exists exactly when we are inside a function/class.
        return self._normalizer.context.parent_context is not None
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
@ErrorFinder.register_rule(type='import_from')
class _FutureImportRule(SyntaxRule):
    message = "from __future__ imports must occur at the beginning of the file"

    def is_issue(self, node):
        """Validate a ``from __future__ import ...`` statement.

        Returns True when the future import is not at the top of the file;
        otherwise adds one issue per imported name that is not a recognized
        future feature (with CPython's special-cased easter-egg messages).
        """
        if not _is_future_import(node):
            return None
        if not _is_future_import_first(node):
            return True

        # The allowed feature set does not depend on the imported name, so
        # build it once instead of on every loop iteration.
        allowed_futures = list(ALLOWED_FUTURES)
        if self._normalizer.version >= (3, 7):
            # 'annotations' (PEP 563) only exists from Python 3.7 on.
            allowed_futures.append('annotations')

        for from_name, future_name in node.get_paths():
            name = future_name.value
            if name == 'braces':
                self.add_issue(node, message="not a chance")
            elif name == 'barry_as_FLUFL':
                m = "Seriously I'm not implementing this :) ~ Dave"
                self.add_issue(node, message=m)
            elif name not in allowed_futures:
                message = "future feature %s is not defined" % name
                self.add_issue(node, message=message)
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
@ErrorFinder.register_rule(type='star_expr')
class _StarExprRule(SyntaxRule):
    # Validates where a starred expression (``*x``) may appear.
    message_iterable_unpacking = "iterable unpacking cannot be used in comprehension"

    def is_issue(self, node):
        def check_delete_starred(node):
            # Walk upwards through tuple/list/exprlist parents and
            # parenthesized atoms; reaching a del_stmt means this starred
            # expression is a deletion target.
            while node.parent is not None:
                node = node.parent
                if node.type == 'del_stmt':
                    return True
                if node.type not in (*_STAR_EXPR_PARENTS, 'atom'):
                    return False
            return False

        if self._normalizer.version >= (3, 9):
            ancestor = node.parent
        else:
            # Before 3.9, CPython looked through redundant parentheses.
            ancestor = _skip_parens_bottom_up(node)
        # starred expression not in tuple/list/set
        if ancestor.type not in (*_STAR_EXPR_PARENTS, 'dictorsetmaker') \
                and not (ancestor.type == 'atom' and ancestor.children[0] != '('):
            self.add_issue(node, message="can't use starred expression here")
            return

        if check_delete_starred(node):
            # The wording of this error changed in Python 3.9.
            if self._normalizer.version >= (3, 9):
                self.add_issue(node, message="cannot delete starred")
            else:
                self.add_issue(node, message="can't use starred expression here")
            return

        if node.parent.type == 'testlist_comp':
            # [*[] for a in [1]]
            if node.parent.children[1].type in _COMP_FOR_TYPES:
                self.add_issue(node, message=self.message_iterable_unpacking)
|
| 799 |
+
|
| 800 |
+
|
| 801 |
+
@ErrorFinder.register_rule(types=_STAR_EXPR_PARENTS)
class _StarExprParentRule(SyntaxRule):
    # Validates starred expressions inside assignment target lists,
    # e.g. ``a, *b = ...``.
    def is_issue(self, node):
        def is_definition(node, ancestor):
            # True when `node` sits on the left-hand side of an assignment,
            # i.e. before the last child of an enclosing expr_stmt.
            if ancestor is None:
                return False

            type_ = ancestor.type
            if type_ == 'trailer':
                return False

            if type_ == 'expr_stmt':
                return node.start_pos < ancestor.children[-1].start_pos

            return is_definition(node, ancestor.parent)

        if is_definition(node, node.parent):
            args = [c for c in node.children if c != ',']
            starred = [c for c in args if c.type == 'star_expr']
            if len(starred) > 1:
                # The wording of this error changed in Python 3.9.
                if self._normalizer.version < (3, 9):
                    message = "two starred expressions in assignment"
                else:
                    message = "multiple starred expressions in assignment"
                self.add_issue(starred[1], message=message)
            elif starred:
                # CPython limits how many targets may precede a starred one.
                count = args.index(starred[0])
                if count >= 256:
                    message = "too many expressions in star-unpacking assignment"
                    self.add_issue(starred[0], message=message)
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
@ErrorFinder.register_rule(type='annassign')
class _AnnotatorRule(SyntaxRule):
    # True: int
    # {}: float
    message = "illegal target for annotation"

    def get_node(self, node):
        # Report the whole annotated statement, not just the annassign part.
        return node.parent

    def is_issue(self, node):
        type_ = None
        lhs = node.parent.children[0]
        lhs = _remove_parens(lhs)
        try:
            children = lhs.children
        except AttributeError:
            # Simple leaves (names, numbers, ...) have no children.
            pass
        else:
            if ',' in children or lhs.type == 'atom' and children[0] == '(':
                type_ = 'tuple'
            elif lhs.type == 'atom' and children[0] == '[':
                type_ = 'list'
            # NOTE: `trailer` is only read below when lhs is an
            # atom_expr/power, which always has children.
            trailer = children[-1]

        if type_ is None:
            if not (lhs.type == 'name'
                    # subscript/attributes are allowed
                    or lhs.type in ('atom_expr', 'power')
                    and trailer.type == 'trailer'
                    and trailer.children[0] != '('):
                return True
        else:
            # x, y: str
            message = "only single target (not %s) can be annotated"
            self.add_issue(lhs.parent, message=message % type_)
|
| 868 |
+
|
| 869 |
+
|
| 870 |
+
@ErrorFinder.register_rule(type='argument')
class _ArgumentRule(SyntaxRule):
    def is_issue(self, node):
        # Check a single call argument for an invalid keyword target
        # (e.g. f(+x=1)) and for unparenthesized generator arguments in
        # class definition headers.
        first = node.children[0]
        if self._normalizer.version < (3, 8):
            # a((b)=c) is valid in <3.8
            first = _remove_parens(first)
        if node.children[1] == '=' and first.type != 'name':
            if first.type == 'lambdef':
                # f(lambda: 1=1)
                if self._normalizer.version < (3, 8):
                    message = "lambda cannot contain assignment"
                else:
                    message = 'expression cannot contain assignment, perhaps you meant "=="?'
            else:
                # f(+x=1)
                if self._normalizer.version < (3, 8):
                    message = "keyword can't be an expression"
                else:
                    message = 'expression cannot contain assignment, perhaps you meant "=="?'
            self.add_issue(first, message=message)

        if _is_argument_comprehension(node) and node.parent.type == 'classdef':
            self.add_issue(node, message='invalid syntax')
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
@ErrorFinder.register_rule(type='nonlocal_stmt')
class _NonlocalModuleLevelRule(SyntaxRule):
    message = "nonlocal declaration not allowed at module level"

    def is_issue(self, node):
        """At module level there is no enclosing scope for 'nonlocal' to bind to."""
        context = self._normalizer.context
        return context.parent_context is None
|
| 902 |
+
|
| 903 |
+
|
| 904 |
+
@ErrorFinder.register_rule(type='arglist')
class _ArglistRule(SyntaxRule):
    @property
    def message(self):
        # The wording of this error changed in Python 3.7.
        if self._normalizer.version < (3, 7):
            return "Generator expression must be parenthesized if not sole argument"
        else:
            return "Generator expression must be parenthesized"

    def is_issue(self, node):
        # Validate ordering and duplication of call arguments.
        arg_set = set()            # keyword names seen so far
        kw_only = False            # a keyword argument has been seen
        kw_unpacking_only = False  # a '**' unpacking has been seen
        for argument in node.children:
            if argument == ',':
                continue

            if argument.type == 'argument':
                first = argument.children[0]
                if _is_argument_comprehension(argument) and len(node.children) >= 2:
                    # a(a, b for b in c)
                    return True

                if first in ('*', '**'):
                    if first == '*':
                        if kw_unpacking_only:
                            # foo(**kwargs, *args)
                            message = "iterable argument unpacking " \
                                      "follows keyword argument unpacking"
                            self.add_issue(argument, message=message)
                    else:
                        kw_unpacking_only = True
                else:  # Is a keyword argument.
                    kw_only = True
                    if first.type == 'name':
                        if first.value in arg_set:
                            # f(x=1, x=2)
                            message = "keyword argument repeated"
                            if self._normalizer.version >= (3, 9):
                                message += ": {}".format(first.value)
                            self.add_issue(first, message=message)
                        else:
                            arg_set.add(first.value)
            else:
                if kw_unpacking_only:
                    # f(**x, y)
                    message = "positional argument follows keyword argument unpacking"
                    self.add_issue(argument, message=message)
                elif kw_only:
                    # f(x=2, y)
                    message = "positional argument follows keyword argument"
                    self.add_issue(argument, message=message)
|
| 956 |
+
|
| 957 |
+
|
| 958 |
+
@ErrorFinder.register_rule(type='parameters')
@ErrorFinder.register_rule(type='lambdef')
class _ParameterRule(SyntaxRule):
    # def f(x=3, y): pass
    message = "non-default argument follows default argument"

    def is_issue(self, node):
        # Tracks duplicate names and the default/non-default ordering rule.
        param_names = set()
        default_only = False
        star_seen = False
        for p in _iter_params(node):
            if p.type == 'operator':
                if p.value == '*':
                    # Bare '*': everything after is keyword-only, so the
                    # ordering restriction resets.
                    star_seen = True
                    default_only = False
                continue

            if p.name.value in param_names:
                message = "duplicate argument '%s' in function definition"
                self.add_issue(p.name, message=message % p.name.value)
            param_names.add(p.name.value)

            if not star_seen:
                if p.default is None and not p.star_count:
                    if default_only:
                        # A non-default parameter after a default one.
                        return True
                elif p.star_count:
                    star_seen = True
                    default_only = False
                else:
                    default_only = True
|
| 989 |
+
|
| 990 |
+
|
| 991 |
+
@ErrorFinder.register_rule(type='try_stmt')
class _TryStmtRule(SyntaxRule):
    message = "default 'except:' must be last"

    def is_issue(self, try_stmt):
        """Report a bare 'except:' clause that is followed by another except."""
        bare_except = None
        # Every third child starting at index 3 is a clause keyword/header.
        for clause in try_stmt.children[3::3]:
            if clause in ('else', 'finally'):
                break
            if clause == 'except':
                # A bare 'except' keyword (no exception class given).
                bare_except = clause
            elif bare_except is not None:
                # Another clause follows the bare except -> it was not last.
                self.add_issue(bare_except, message=self.message)
|
| 1004 |
+
|
| 1005 |
+
|
| 1006 |
+
@ErrorFinder.register_rule(type='fstring')
class _FStringRule(SyntaxRule):
    _fstring_grammar = None
    message_expr = "f-string expression part cannot include a backslash"
    message_nested = "f-string: expressions nested too deeply"
    message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"

    def _check_format_spec(self, format_spec, depth):
        # The format spec may itself contain nested f-string expressions.
        self._check_fstring_contents(format_spec.children[1:], depth)

    def _check_fstring_expr(self, fstring_expr, depth):
        # Validate one `{...}` expression at the given nesting depth.
        if depth >= 2:
            self.add_issue(fstring_expr, message=self.message_nested)

        expr = fstring_expr.children[1]
        if '\\' in expr.get_code():
            self.add_issue(expr, message=self.message_expr)

        children_2 = fstring_expr.children[2]
        if children_2.type == 'operator' and children_2.value == '=':
            # Self-documenting form (f'{x=}'): the conversion follows the '='.
            conversion = fstring_expr.children[3]
        else:
            conversion = children_2
        if conversion.type == 'fstring_conversion':
            name = conversion.children[1]
            if name.value not in ('s', 'r', 'a'):
                self.add_issue(name, message=self.message_conversion)

        format_spec = fstring_expr.children[-2]
        if format_spec.type == 'fstring_format_spec':
            self._check_format_spec(format_spec, depth + 1)

    def is_issue(self, fstring):
        # Skip the leading f-string start token and the trailing quote token.
        self._check_fstring_contents(fstring.children[1:-1])

    def _check_fstring_contents(self, children, depth=0):
        for fstring_content in children:
            if fstring_content.type == 'fstring_expr':
                self._check_fstring_expr(fstring_content, depth)
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
class _CheckAssignmentRule(SyntaxRule):
    def _check_assignment(self, node, is_deletion=False, is_namedexpr=False, is_aug_assign=False):
        """Recursively verify that `node` is a valid assignment/deletion target.

        `error` collects a short description of an offending construct and is
        reported once at the end; a few branches report their issue directly.
        Many message wordings depend on the target Python version.
        """
        error = None
        type_ = node.type
        if type_ == 'lambdef':
            error = 'lambda'
        elif type_ == 'atom':
            first, second = node.children[:2]
            error = _get_comprehension_type(node)
            if error is None:
                if second.type == 'dictorsetmaker':
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        # A ':' after the first element means a dict, not a set.
                        if second.children[1] == ':':
                            if self._normalizer.version < (3, 10):
                                error = 'dict display'
                            else:
                                error = 'dict literal'
                        else:
                            error = 'set display'
                elif first == "{" and second == "}":
                    # Empty dict literal: {}
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        if self._normalizer.version < (3, 10):
                            error = "dict display"
                        else:
                            error = "dict literal"
                elif first == "{" and len(node.children) > 2:
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        error = "set display"
                elif first in ('(', '['):
                    if second.type == 'yield_expr':
                        error = 'yield expression'
                    elif second.type == 'testlist_comp':
                        # ([a, b] := [1, 2])
                        # ((a, b) := [1, 2])
                        if is_namedexpr:
                            if first == '(':
                                error = 'tuple'
                            elif first == '[':
                                error = 'list'

                        # This is not a comprehension, they were handled
                        # further above.
                        for child in second.children[::2]:
                            self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
                    else:  # Everything handled, must be useless brackets.
                        self._check_assignment(second, is_deletion, is_namedexpr, is_aug_assign)
        elif type_ == 'keyword':
            if node.value == "yield":
                error = "yield expression"
            elif self._normalizer.version < (3, 8):
                error = 'keyword'
            else:
                error = str(node.value)
        elif type_ == 'operator':
            if node.value == '...':
                if self._normalizer.version < (3, 10):
                    error = 'Ellipsis'
                else:
                    error = 'ellipsis'
        elif type_ == 'comparison':
            error = 'comparison'
        elif type_ in ('string', 'number', 'strings'):
            error = 'literal'
        elif type_ == 'yield_expr':
            # This one seems to be a slightly different warning in Python.
            message = 'assignment to yield expression not possible'
            self.add_issue(node, message=message)
        elif type_ == 'test':
            error = 'conditional expression'
        elif type_ in ('atom_expr', 'power'):
            if node.children[0] == 'await':
                error = 'await expression'
            elif node.children[-2] == '**':
                if self._normalizer.version < (3, 10):
                    error = 'operator'
                else:
                    error = 'expression'
            else:
                # Has a trailer
                trailer = node.children[-1]
                assert trailer.type == 'trailer'
                if trailer.children[0] == '(':
                    error = 'function call'
                elif is_namedexpr and trailer.children[0] == '[':
                    error = 'subscript'
                elif is_namedexpr and trailer.children[0] == '.':
                    error = 'attribute'
        elif type_ == "fstring":
            if self._normalizer.version < (3, 8):
                error = 'literal'
            else:
                error = "f-string expression"
        elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
            # Check every element, skipping the comma tokens.
            for child in node.children[::2]:
                self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
        elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
              or '_test' in type_
              or type_ in ('term', 'factor')):
            if self._normalizer.version < (3, 10):
                error = 'operator'
            else:
                error = 'expression'
        elif type_ == "star_expr":
            if is_deletion:
                if self._normalizer.version >= (3, 9):
                    error = "starred"
                else:
                    self.add_issue(node, message="can't use starred expression here")
            else:
                if self._normalizer.version >= (3, 9):
                    ancestor = node.parent
                else:
                    # Before 3.9, CPython looked through redundant parentheses.
                    ancestor = _skip_parens_bottom_up(node)
                if ancestor.type not in _STAR_EXPR_PARENTS and not is_aug_assign \
                        and not (ancestor.type == 'atom' and ancestor.children[0] == '['):
                    message = "starred assignment target must be in a list or tuple"
                    self.add_issue(node, message=message)

            # Also validate the expression being starred.
            self._check_assignment(node.children[1])

        if error is not None:
            if is_namedexpr:
                message = 'cannot use assignment expressions with %s' % error
            else:
                cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
                message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
            self.add_issue(node, message=message)
|
| 1180 |
+
|
| 1181 |
+
|
| 1182 |
+
@ErrorFinder.register_rule(type='sync_comp_for')
class _CompForRule(_CheckAssignmentRule):
    message = "asynchronous comprehension outside of an asynchronous function"

    def is_issue(self, node):
        """Validate the for-targets and async usage of a comprehension clause."""
        targets = node.children[1]
        if targets.type != 'expr_list':  # Already handled.
            self._check_assignment(targets)

        # 'async for' is only allowed inside an async function.
        is_async = node.parent.children[0] == 'async'
        return is_async and not self._normalizer.context.is_async_funcdef()
|
| 1193 |
+
|
| 1194 |
+
|
| 1195 |
+
@ErrorFinder.register_rule(type='expr_stmt')
class _ExprStmtRule(_CheckAssignmentRule):
    message = "illegal expression for augmented assignment"
    extended_message = "'{target}' is an " + message

    def is_issue(self, node):
        augassign = node.children[1]
        is_aug_assign = augassign != '=' and augassign.type != 'annassign'

        if self._normalizer.version <= (3, 8) or not is_aug_assign:
            # Validate every target left of an '=' (skipping the '=' tokens).
            for before_equal in node.children[:-2:2]:
                self._check_assignment(before_equal, is_aug_assign=is_aug_assign)

        if is_aug_assign:
            target = _remove_parens(node.children[0])
            # a, a[b], a.b
            if target.type == "name" or (
                target.type in ("atom_expr", "power")
                and target.children[1].type == "trailer"
                and target.children[-1].children[0] != "("
            ):
                return False

            if self._normalizer.version <= (3, 8):
                return True
            else:
                # 3.9+ uses the extended message naming the bad target.
                self.add_issue(
                    node,
                    message=self.extended_message.format(
                        target=_get_rhs_name(node.children[0], self._normalizer.version)
                    ),
                )
|
| 1228 |
+
|
| 1229 |
+
|
| 1230 |
+
@ErrorFinder.register_rule(type='with_item')
class _WithItemRule(_CheckAssignmentRule):
    def is_issue(self, with_item):
        """Validate the target after 'as' in a with-item."""
        target = with_item.children[2]
        self._check_assignment(target)
|
| 1234 |
+
|
| 1235 |
+
|
| 1236 |
+
@ErrorFinder.register_rule(type='del_stmt')
class _DelStmtRule(_CheckAssignmentRule):
    def is_issue(self, del_stmt):
        """Validate the target of a del statement."""
        target = del_stmt.children[1]

        # expr_list targets are validated by the expr_list rule already.
        if target.type != 'expr_list':
            self._check_assignment(target, is_deletion=True)
|
| 1243 |
+
|
| 1244 |
+
|
| 1245 |
+
@ErrorFinder.register_rule(type='expr_list')
class _ExprListRule(_CheckAssignmentRule):
    def is_issue(self, expr_list):
        """Validate every expression in the list, skipping the comma tokens."""
        for target in expr_list.children[::2]:
            self._check_assignment(target)
|
| 1250 |
+
|
| 1251 |
+
|
| 1252 |
+
@ErrorFinder.register_rule(type='for_stmt')
class _ForStmtRule(_CheckAssignmentRule):
    def is_issue(self, for_stmt):
        # Some of the nodes here are already used, so no else if
        targets = for_stmt.children[1]
        if targets.type != 'expr_list':  # Already handled.
            self._check_assignment(targets)
|
| 1259 |
+
|
| 1260 |
+
|
| 1261 |
+
@ErrorFinder.register_rule(type='namedexpr_test')
class _NamedExprRule(_CheckAssignmentRule):
    # namedexpr_test: test [':=' test]

    def is_issue(self, namedexpr_test):
        # assigned name
        first = namedexpr_test.children[0]

        def search_namedexpr_in_comp_for(node):
            # Return the sync_comp_for whose iterable part contains `node`,
            # or None when the walrus is not inside an iterable expression.
            while True:
                parent = node.parent
                if parent is None:
                    return parent
                if parent.type == 'sync_comp_for' and parent.children[3] == node:
                    return parent
                node = parent

        if search_namedexpr_in_comp_for(namedexpr_test):
            # [i+1 for i in (i := range(5))]
            # [i+1 for i in (j := range(5))]
            # [i+1 for i in (lambda: (j := range(5)))()]
            message = 'assignment expression cannot be used in a comprehension iterable expression'
            self.add_issue(namedexpr_test, message=message)

        # defined names
        exprlist = list()

        def process_comp_for(comp_for):
            # Collect the iteration variables bound by one comprehension 'for'.
            if comp_for.type == 'sync_comp_for':
                comp = comp_for
            elif comp_for.type == 'comp_for':
                comp = comp_for.children[1]
            exprlist.extend(_get_for_stmt_definition_exprs(comp))

        def search_all_comp_ancestors(node):
            # Walk all enclosing comprehensions, gathering their targets.
            has_ancestors = False
            while True:
                node = node.search_ancestor('testlist_comp', 'dictorsetmaker')
                if node is None:
                    break
                for child in node.children:
                    if child.type in _COMP_FOR_TYPES:
                        process_comp_for(child)
                        has_ancestors = True
                        break
            return has_ancestors

        # check assignment expressions in comprehensions
        search_all = search_all_comp_ancestors(namedexpr_test)
        if search_all:
            if self._normalizer.context.node.type == 'classdef':
                message = 'assignment expression within a comprehension ' \
                          'cannot be used in a class body'
                self.add_issue(namedexpr_test, message=message)

            namelist = [expr.value for expr in exprlist if expr.type == 'name']
            if first.type == 'name' and first.value in namelist:
                # [i := 0 for i, j in range(5)]
                # [[(i := i) for j in range(5)] for i in range(5)]
                # [i for i, j in range(5) if True or (i := 1)]
                # [False and (i := 0) for i, j in range(5)]
                message = 'assignment expression cannot rebind ' \
                          'comprehension iteration variable %r' % first.value
                self.add_issue(namedexpr_test, message=message)

        self._check_assignment(first, is_namedexpr=True)
|
evalkit_tf437/lib/python3.10/site-packages/parso/python/grammar311.txt
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Grammar for Python
|
| 2 |
+
|
| 3 |
+
# NOTE WELL: You should also follow all the steps listed at
|
| 4 |
+
# https://devguide.python.org/grammar/
|
| 5 |
+
|
| 6 |
+
# Start symbols for the grammar:
|
| 7 |
+
# single_input is a single interactive statement;
|
| 8 |
+
# file_input is a module or sequence of commands read from an input file;
|
| 9 |
+
# eval_input is the input for the eval() functions.
|
| 10 |
+
# NB: compound_stmt in single_input is followed by extra NEWLINE!
|
| 11 |
+
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
|
| 12 |
+
file_input: stmt* ENDMARKER
|
| 13 |
+
eval_input: testlist NEWLINE* ENDMARKER
|
| 14 |
+
|
| 15 |
+
decorator: '@' namedexpr_test NEWLINE
|
| 16 |
+
decorators: decorator+
|
| 17 |
+
decorated: decorators (classdef | funcdef | async_funcdef)
|
| 18 |
+
|
| 19 |
+
async_funcdef: 'async' funcdef
|
| 20 |
+
funcdef: 'def' NAME parameters ['->' test] ':' suite
|
| 21 |
+
|
| 22 |
+
parameters: '(' [typedargslist] ')'
|
| 23 |
+
typedargslist: (
|
| 24 |
+
(tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
|
| 25 |
+
',' tfpdef ['=' test])* ([',' [
|
| 26 |
+
'*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
|
| 27 |
+
| '**' tfpdef [',']]])
|
| 28 |
+
| '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
|
| 29 |
+
| '**' tfpdef [',']]] )
|
| 30 |
+
| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
|
| 31 |
+
'*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
|
| 32 |
+
| '**' tfpdef [',']]]
|
| 33 |
+
| '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
|
| 34 |
+
| '**' tfpdef [','])
|
| 35 |
+
)
|
| 36 |
+
tfpdef: NAME [':' test]
|
| 37 |
+
varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
|
| 38 |
+
'*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
|
| 39 |
+
| '**' vfpdef [',']]]
|
| 40 |
+
| '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
|
| 41 |
+
| '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
|
| 42 |
+
'*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
|
| 43 |
+
| '**' vfpdef [',']]]
|
| 44 |
+
| '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
|
| 45 |
+
| '**' vfpdef [',']
|
| 46 |
+
)
|
| 47 |
+
vfpdef: NAME
|
| 48 |
+
|
| 49 |
+
stmt: simple_stmt | compound_stmt | NEWLINE
|
| 50 |
+
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
|
| 51 |
+
small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
|
| 52 |
+
import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
|
| 53 |
+
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
|
| 54 |
+
('=' (yield_expr|testlist_star_expr))*)
|
| 55 |
+
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
|
| 56 |
+
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
|
| 57 |
+
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
|
| 58 |
+
'<<=' | '>>=' | '**=' | '//=')
|
| 59 |
+
# For normal and annotated assignments, additional restrictions enforced by the interpreter
|
| 60 |
+
del_stmt: 'del' exprlist
|
| 61 |
+
pass_stmt: 'pass'
|
| 62 |
+
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
|
| 63 |
+
break_stmt: 'break'
|
| 64 |
+
continue_stmt: 'continue'
|
| 65 |
+
return_stmt: 'return' [testlist_star_expr]
|
| 66 |
+
yield_stmt: yield_expr
|
| 67 |
+
raise_stmt: 'raise' [test ['from' test]]
|
| 68 |
+
import_stmt: import_name | import_from
|
| 69 |
+
import_name: 'import' dotted_as_names
|
| 70 |
+
# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
|
| 71 |
+
import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
|
| 72 |
+
'import' ('*' | '(' import_as_names ')' | import_as_names))
|
| 73 |
+
import_as_name: NAME ['as' NAME]
|
| 74 |
+
dotted_as_name: dotted_name ['as' NAME]
|
| 75 |
+
import_as_names: import_as_name (',' import_as_name)* [',']
|
| 76 |
+
dotted_as_names: dotted_as_name (',' dotted_as_name)*
|
| 77 |
+
dotted_name: NAME ('.' NAME)*
|
| 78 |
+
global_stmt: 'global' NAME (',' NAME)*
|
| 79 |
+
nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
|
| 80 |
+
assert_stmt: 'assert' test [',' test]
|
| 81 |
+
|
| 82 |
+
compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
|
| 83 |
+
async_stmt: 'async' (funcdef | with_stmt | for_stmt)
|
| 84 |
+
if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
|
| 85 |
+
while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
|
| 86 |
+
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
|
| 87 |
+
try_stmt: ('try' ':' suite
|
| 88 |
+
((except_clause ':' suite)+
|
| 89 |
+
['else' ':' suite]
|
| 90 |
+
['finally' ':' suite] |
|
| 91 |
+
'finally' ':' suite))
|
| 92 |
+
with_stmt: 'with' with_item (',' with_item)* ':' suite
|
| 93 |
+
with_item: test ['as' expr]
|
| 94 |
+
# NB compile.c makes sure that the default except clause is last
|
| 95 |
+
except_clause: 'except' [test ['as' NAME]]
|
| 96 |
+
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
|
| 97 |
+
|
| 98 |
+
namedexpr_test: test [':=' test]
|
| 99 |
+
test: or_test ['if' or_test 'else' test] | lambdef
|
| 100 |
+
lambdef: 'lambda' [varargslist] ':' test
|
| 101 |
+
or_test: and_test ('or' and_test)*
|
| 102 |
+
and_test: not_test ('and' not_test)*
|
| 103 |
+
not_test: 'not' not_test | comparison
|
| 104 |
+
comparison: expr (comp_op expr)*
|
| 105 |
+
# <> isn't actually a valid comparison operator in Python. It's here for the
|
| 106 |
+
# sake of a __future__ import described in PEP 401 (which really works :-)
|
| 107 |
+
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
|
| 108 |
+
star_expr: '*' expr
|
| 109 |
+
expr: xor_expr ('|' xor_expr)*
|
| 110 |
+
xor_expr: and_expr ('^' and_expr)*
|
| 111 |
+
and_expr: shift_expr ('&' shift_expr)*
|
| 112 |
+
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
|
| 113 |
+
arith_expr: term (('+'|'-') term)*
|
| 114 |
+
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
|
| 115 |
+
factor: ('+'|'-'|'~') factor | power
|
| 116 |
+
power: atom_expr ['**' factor]
|
| 117 |
+
atom_expr: ['await'] atom trailer*
|
| 118 |
+
atom: ('(' [yield_expr|testlist_comp] ')' |
|
| 119 |
+
'[' [testlist_comp] ']' |
|
| 120 |
+
'{' [dictorsetmaker] '}' |
|
| 121 |
+
NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
|
| 122 |
+
testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
|
| 123 |
+
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
|
| 124 |
+
subscriptlist: subscript (',' subscript)* [',']
|
| 125 |
+
subscript: test [':=' test] | [test] ':' [test] [sliceop]
|
| 126 |
+
sliceop: ':' [test]
|
| 127 |
+
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
|
| 128 |
+
testlist: test (',' test)* [',']
|
| 129 |
+
dictorsetmaker: ( ((test ':' test | '**' expr)
|
| 130 |
+
(comp_for | (',' (test ':' test | '**' expr))* [','])) |
|
| 131 |
+
((test [':=' test] | star_expr)
|
| 132 |
+
(comp_for | (',' (test [':=' test] | star_expr))* [','])) )
|
| 133 |
+
|
| 134 |
+
classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
|
| 135 |
+
|
| 136 |
+
arglist: argument (',' argument)* [',']
|
| 137 |
+
|
| 138 |
+
# The reason that keywords are test nodes instead of NAME is that using NAME
|
| 139 |
+
# results in an ambiguity. ast.c makes sure it's a NAME.
|
| 140 |
+
# "test '=' test" is really "keyword '=' test", but we have no such token.
|
| 141 |
+
# These need to be in a single rule to avoid grammar that is ambiguous
|
| 142 |
+
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
|
| 143 |
+
# we explicitly match '*' here, too, to give it proper precedence.
|
| 144 |
+
# Illegal combinations and orderings are blocked in ast.c:
|
| 145 |
+
# multiple (test comp_for) arguments are blocked; keyword unpackings
|
| 146 |
+
# that precede iterable unpackings are blocked; etc.
|
| 147 |
+
argument: ( test [comp_for] |
|
| 148 |
+
test ':=' test |
|
| 149 |
+
test '=' test |
|
| 150 |
+
'**' test |
|
| 151 |
+
'*' test )
|
| 152 |
+
|
| 153 |
+
comp_iter: comp_for | comp_if
|
| 154 |
+
sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
|
| 155 |
+
comp_for: ['async'] sync_comp_for
|
| 156 |
+
comp_if: 'if' or_test [comp_iter]
|
| 157 |
+
|
| 158 |
+
# not used in grammar, but may appear in "node" passed from Parser to Compiler
|
| 159 |
+
encoding_decl: NAME
|
| 160 |
+
|
| 161 |
+
yield_expr: 'yield' [yield_arg]
|
| 162 |
+
yield_arg: 'from' test | testlist_star_expr
|
| 163 |
+
|
| 164 |
+
strings: (STRING | fstring)+
|
| 165 |
+
fstring: FSTRING_START fstring_content* FSTRING_END
|
| 166 |
+
fstring_content: FSTRING_STRING | fstring_expr
|
| 167 |
+
fstring_conversion: '!' NAME
|
| 168 |
+
fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
|
| 169 |
+
fstring_format_spec: ':' fstring_content*
|