ZTWHHH committed on
Commit
d8d9404
·
verified ·
1 Parent(s): a90f772

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. deepseek/lib/python3.10/site-packages/lmformatenforcer/__init__.py +23 -0
  2. deepseek/lib/python3.10/site-packages/lmformatenforcer/consts.py +20 -0
  3. deepseek/lib/python3.10/site-packages/lmformatenforcer/exceptions.py +3 -0
  4. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/__init__.cpython-310.pyc +0 -0
  5. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/exllamav2.cpython-310.pyc +0 -0
  6. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/haystackv1.cpython-310.pyc +0 -0
  7. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/haystackv2.cpython-310.pyc +0 -0
  8. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/trtllm.cpython-310.pyc +0 -0
  9. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/vllm.cpython-310.pyc +0 -0
  10. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/haystackv2.py +74 -0
  11. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/trtllm.py +84 -0
  12. deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/vllm.py +60 -0
  13. deepseek/lib/python3.10/site-packages/lmformatenforcer/jsonschemaparser.py +710 -0
  14. deepseek/lib/python3.10/site-packages/lmformatenforcer/tokenenforcer.py +166 -0
  15. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest1.cpython-310.pyc +0 -0
  16. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest10.cpython-310.pyc +0 -0
  17. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest11.cpython-310.pyc +0 -0
  18. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest12.cpython-310.pyc +0 -0
  19. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest2.cpython-310.pyc +0 -0
  20. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest3.cpython-310.pyc +0 -0
  21. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest4.cpython-310.pyc +0 -0
  22. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest5.cpython-310.pyc +0 -0
  23. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest6.cpython-310.pyc +0 -0
  24. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest7.cpython-310.pyc +0 -0
  25. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest8.cpython-310.pyc +0 -0
  26. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest9.cpython-310.pyc +0 -0
  27. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/chaos_pendulum.cpython-310.pyc +0 -0
  28. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/double_pendulum.cpython-310.pyc +0 -0
  29. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/mass_spring_damper.cpython-310.pyc +0 -0
  30. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/non_min_pendulum.cpython-310.pyc +0 -0
  31. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.al +33 -0
  32. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py +55 -0
  33. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.al +25 -0
  34. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py +39 -0
  35. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.al +19 -0
  36. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py +31 -0
  37. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.al +20 -0
  38. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py +36 -0
  39. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest1.al +8 -0
  40. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest1.py +15 -0
  41. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest10.py +64 -0
  42. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest11.al +6 -0
  43. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest2.py +22 -0
  44. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest3.py +37 -0
  45. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest5.al +32 -0
  46. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest6.al +41 -0
  47. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest6.py +36 -0
  48. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest9.py +55 -0
  49. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/fortran/__init__.py +1 -0
  50. deepseekvl2/lib/python3.10/site-packages/sympy/parsing/fortran/__pycache__/__init__.cpython-310.pyc +0 -0
deepseek/lib/python3.10/site-packages/lmformatenforcer/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Public package surface for lm-format-enforcer.

Re-exports the parser, enforcer and analyzer entry points so callers can
import everything from the package root.
"""
__all__ = [
    'CharacterLevelParser',
    'CharacterLevelParserConfig',
    'StringParser',
    'RegexParser',
    'UnionParser',
    'SequenceParser',
    'JsonSchemaParser',
    'TokenEnforcer',
    'TokenEnforcerTokenizerData',
    'LMFormatEnforcerException',
    'FormatEnforcerAnalyzer',
]

from .characterlevelparser import CharacterLevelParser, CharacterLevelParserConfig, StringParser, UnionParser, SequenceParser
from .regexparser import RegexParser
from .jsonschemaparser import JsonSchemaParser
from .tokenenforcer import TokenEnforcer, TokenEnforcerTokenizerData
from .exceptions import LMFormatEnforcerException

# The analyzer has optional dependencies; degrade gracefully instead of
# failing the whole package import when they are unavailable.
try:
    from .analyzer import FormatEnforcerAnalyzer
except ImportError as e:
    import logging
    logging.warning(e)
    FormatEnforcerAnalyzer = None
deepseek/lib/python3.10/site-packages/lmformatenforcer/consts.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Character sets and tunable defaults shared by the format-enforcement parsers.

# The full set of characters allowed in unconstrained ("freetext") output.
COMPLETE_ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()_+-=[]{};:,./<>? `'\""
# Default cap on consecutive whitespace characters the JSON parser will emit.
DEFAULT_MAX_CONSECUTIVE_WHITESPACES = 12
# By default, JSON object fields may appear in any order.
DEFAULT_FORCE_JSON_FIELD_ORDER = False
# Default upper bound on JSON array length when the schema does not specify one.
DEFAULT_MAX_JSON_ARRAY_LENGTH = 20
# Characters treated as insignificant whitespace between JSON tokens.
WHITESPACE_CHARACTERS = " \t\n\r"
BACKSLASH = "\\"
BACKSLASH_ESCAPING_CHARACTERS = '"\\/bfnrt' # Characters allowed after an escaping backslash, except unicode
# NOTE(review): name keeps the historical misspelling ("BACKSLACH") — renaming
# would break external callers that import it.
BACKSLACH_UNICODE_ESCAPE = "u"

CONFIG_ENV_VAR_MAX_CONSECUTIVE_WHITESPACES = 'LMFE_MAX_CONSECUTIVE_WHITESPACES'
"""Environment variable for externally controlling how many consecutive whitespaces the
JsonSchemaParser will allow. Default: 12"""

CONFIG_ENV_VAR_STRICT_JSON_FIELD_ORDER = 'LMFE_STRICT_JSON_FIELD_ORDER'
"""Environment variable for externally controlling whether the JsonSchemaParser will force
fields to appear in the order of the 'required' field in the schema. Default: false"""

CONFIG_ENV_VAR_MAX_JSON_ARRAY_LENGTH = 'LMFE_MAX_JSON_ARRAY_LENGTH'
"""Environment variable for externally controlling what is the maximal JSON array length,
if not specified by the schema. Default: 20"""
deepseek/lib/python3.10/site-packages/lmformatenforcer/exceptions.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
class LMFormatEnforcerException(Exception):
    """Root of the lm-format-enforcer exception hierarchy.

    Catching this type captures every error deliberately raised by the module.
    """
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes). View file
 
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/exllamav2.cpython-310.pyc ADDED
Binary file (3.11 kB). View file
 
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/haystackv1.cpython-310.pyc ADDED
Binary file (3.1 kB). View file
 
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/haystackv2.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/trtllm.cpython-310.pyc ADDED
Binary file (3.47 kB). View file
 
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/__pycache__/vllm.cpython-310.pyc ADDED
Binary file (2.61 kB). View file
 
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/haystackv2.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Fail fast with an actionable message when neither Haystack distribution is
# installed. Fix: the original message had an unterminated quote after
# "pip install haystack-ai".
try:
    from haystack import component
    from canals import Component
except ImportError:
    raise ImportError('haystack is not installed. Please install it with "pip install farm-haystack" or "pip install haystack-ai"')
6
+
7
+ import enum
8
+ from typing import Any, Callable, Dict, List, Optional
9
+ from lmformatenforcer import CharacterLevelParser
10
+
11
+
12
+ class _ModelType(enum.Enum):
13
+ HUGGINGFACE = 'HuggingFaceLocalGenerator'
14
+ # VLLM = 'vLLMLocalInvocationLayer' TODO: Add this when vLLM has Haystack V2 support
15
+
16
@component
class LMFormatEnforcerLocalGenerator:
    """A generator component for Haystack V2 API that activates the LMFormatEnforcer on the generated text.
    It wraps a local generator, and should be added to the pipeline instead of it"""

    def __init__(self, model_component: Component, character_level_parser: Optional[CharacterLevelParser] = None):
        """Initialize the generator component
        :param model_component: A local generator component to wrap
        :param character_level_parser: A CharacterLevelParser that will be used to enforce the format of the generated"""
        self.model_component = model_component
        self.character_level_parser = character_level_parser
        self._model_type = self._resolve_model_type()
        # Built lazily in warm_up(); None means "no enforcement".
        self.token_enforcer_fn: Optional[Callable] = None

    @component.output_types(replies=List[str])
    def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None):
        """Run the wrapped generator with format enforcement temporarily injected."""
        try:
            self._inject_enforcer_into_model()
            kwargs = {}
            if generation_kwargs:
                kwargs['generation_kwargs'] = generation_kwargs
            return self.model_component.run(prompt, **kwargs)
        finally:
            # Always undo the injection so the wrapped component is left untouched.
            self._release_model_injection()

    def warm_up(self):
        """Warm up the wrapped model (if it supports it) and build the token enforcer."""
        if hasattr(self.model_component, 'warm_up'):
            self.model_component.warm_up()
        self.token_enforcer_fn = self._prepare_token_enforcer_fn()

    def _prepare_token_enforcer_fn(self) -> Optional[Callable]:
        # No parser configured -> no enforcement function.
        if not self.character_level_parser:
            return None
        if self._model_type == _ModelType.HUGGINGFACE:
            tokenizer = self.model_component.pipeline.tokenizer
            from lmformatenforcer.integrations.transformers import build_transformers_prefix_allowed_tokens_fn
            return build_transformers_prefix_allowed_tokens_fn(tokenizer, self.character_level_parser)
        raise NotImplementedError(f"Token enforcer not implemented for model type {self._model_type.name}")

    def _resolve_model_type(self) -> _ModelType:
        # The enum values are the supported generator class names.
        generator_component_name = self.model_component.__class__.__name__
        try:
            return _ModelType(generator_component_name)
        except ValueError:
            supported_strings = ",".join(str(t.name) for t in _ModelType)
            raise ValueError(f"Unsupported local generator component layer: {generator_component_name}. "
                             f"Must be one of {supported_strings}")

    def _inject_enforcer_into_model(self):
        if not self.token_enforcer_fn:
            return
        if self._model_type == _ModelType.HUGGINGFACE:
            self.model_component.generation_kwargs['prefix_allowed_tokens_fn'] = self.token_enforcer_fn

    def _release_model_injection(self):
        if not self.token_enforcer_fn:
            return
        if self._model_type == _ModelType.HUGGINGFACE:
            # Fix: pop() instead of del so cleanup never raises KeyError if the
            # key was never injected (or was already removed) — e.g. when
            # injection failed partway through run().
            self.model_component.generation_kwargs.pop('prefix_allowed_tokens_fn', None)
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/trtllm.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import List, Optional, Tuple, Union
3
+ import torch
4
+ from transformers import PreTrainedTokenizerBase
5
+ from lmformatenforcer import CharacterLevelParser, FormatEnforcerAnalyzer
6
+ from lmformatenforcer.tokenenforcer import TokenEnforcer, TokenEnforcerTokenizerData
7
+
8
+
9
class TRTLLMLogitsProcessor:
    """TensorRT-LLM logits processor that masks out tokens the TokenEnforcer forbids.

    Intended to be passed to TensorRT-LLM's generate() (use_py_session should be True).
    """

    def __init__(self, token_enforcer: "TokenEnforcer", eos_token_id, analyze):
        """
        :param token_enforcer: Decides which token ids are allowed after each prefix.
        :param eos_token_id: A single id, or a list of ids, stripped from the prefix before querying.
        :param analyze: When True, record raw logits with a FormatEnforcerAnalyzer.
        """
        self.token_enforcer = token_enforcer
        self.analyzer = FormatEnforcerAnalyzer(token_enforcer) if analyze else None
        # Reusable mask tensor, lazily allocated to match the logits' device/dtype.
        self.mask: Optional[torch.Tensor] = None
        self.mask_val = -math.inf
        self.eos_token_id = eos_token_id

    def _trim(self, token_ids):
        """Drop EOS ids so the enforcer only sees the actual generated prefix.

        Fixes: the parameter previously shadowed the builtin `input`, and the
        EOS list was rebuilt for every element of the comprehension.
        """
        eos_ids = self.eos_token_id if isinstance(self.eos_token_id, list) else [self.eos_token_id]
        return [t for t in token_ids.tolist() if t not in eos_ids]

    def __call__(self, step: int, batch_input_ids: List[List[int]], logits: torch.Tensor) -> torch.Tensor:
        """Add -inf to every disallowed token of every sequence in the batch, in place."""
        for idx in range(len(batch_input_ids)):
            if self.analyzer:
                self.analyzer.report_raw_logits(batch_input_ids[idx], logits[idx].tolist())

            allowed_tokens = self.token_enforcer.get_allowed_tokens(self._trim(batch_input_ids[idx]))

            if self.mask is not None:
                self.mask.fill_(self.mask_val)
            else:
                # We create it here because full_like() also copies the device and dtype
                self.mask = torch.full_like(logits[idx], self.mask_val)
            self.mask[allowed_tokens] = 0
            logits[idx] = logits[idx] + self.mask

        return logits
37
+
38
+
39
+ def _build_regular_tokens_list(tokenizer) -> List[Tuple[int, str, bool]]:
40
+ # There are many classes that can be passed here, this logic should work on all of them.
41
+ if hasattr(tokenizer, 'get_tokenizer'):
42
+ tokenizer = tokenizer.get_tokenizer()
43
+ if hasattr(tokenizer, 'tokenizer'):
44
+ tokenizer = tokenizer.tokenizer
45
+ token_0 = [tokenizer.encode("0")[-1]]
46
+ regular_tokens = []
47
+ vocab_size = tokenizer.vocab_size
48
+ for token_idx in range(vocab_size):
49
+ if token_idx in tokenizer.all_special_ids:
50
+ continue
51
+ # We prepend token 0 and skip the first letter of the result to get a space if the token is a start word.
52
+ tensor_after_0 = torch.tensor(token_0 + [token_idx], dtype=torch.long)
53
+ decoded_after_0 = tokenizer.decode(tensor_after_0)[1:]
54
+ decoded_regular = tokenizer.decode(token_0)
55
+ is_word_start_token = len(decoded_after_0) > len(decoded_regular)
56
+ regular_tokens.append((token_idx, decoded_after_0, is_word_start_token))
57
+ return regular_tokens
58
+
59
+
60
def build_trtlmm_tokenizer_data(tokenizer: PreTrainedTokenizerBase) -> TokenEnforcerTokenizerData:
    """Build the TokenEnforcerTokenizerData from a tokenizer in order to cache it between instances"""
    # NOTE(review): the "trtlmm" spelling is kept — it is the established public name.

    def _decode(tokens: List[int]) -> str:
        # The TRT-LLM tokenizer decode path expects a tensor, not a plain id list.
        return tokenizer.decode(torch.tensor(tokens, dtype=torch.long))

    return TokenEnforcerTokenizerData(_build_regular_tokens_list(tokenizer), _decode, tokenizer.eos_token_id)
70
+
71
+
72
def build_trtllm_logits_processor(tokenizer: Union[PreTrainedTokenizerBase, TokenEnforcerTokenizerData],
                                  character_level_parser: CharacterLevelParser,
                                  analyze: bool = False) -> TRTLLMLogitsProcessor:
    """
    Build logits processor for feeding it into generate function (use_py_session should be True)
    """
    # Accept either pre-built tokenizer data (cached by the caller) or a raw tokenizer.
    tokenizer_data = tokenizer if isinstance(tokenizer, TokenEnforcerTokenizerData) \
        else build_trtlmm_tokenizer_data(tokenizer)
    enforcer = TokenEnforcer(tokenizer_data, character_level_parser)
    return TRTLLMLogitsProcessor(enforcer, tokenizer.eos_token_id, analyze)
deepseek/lib/python3.10/site-packages/lmformatenforcer/integrations/vllm.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# vLLM (and its torch/transformers dependencies) are optional; fail fast with
# an actionable install hint when they are missing.
try:
    import torch
    import vllm
    from vllm.transformers_utils.tokenizer import MistralTokenizer
    from transformers import PreTrainedTokenizerBase
except ImportError:
    raise ImportError('vllm is not installed. Please install it with "pip install vllm"')
8
+ from lmformatenforcer import CharacterLevelParser, TokenEnforcer, FormatEnforcerAnalyzer, TokenEnforcerTokenizerData
9
+ from lmformatenforcer.integrations.transformers import build_token_enforcer_tokenizer_data
10
+ from typing import List, Optional, Union
11
+ import math
12
+
13
+
14
class VLLMLogitsProcessor:
    """Per-step logits processor for vLLM: adds -inf to every token the enforcer disallows."""

    def __init__(self, token_enforcer: TokenEnforcer, analyze):
        self.token_enforcer = token_enforcer
        self.analyzer = FormatEnforcerAnalyzer(token_enforcer) if analyze else None
        # Lazily-created reusable mask matching the scores tensor's device/dtype.
        self.mask: Optional[torch.Tensor] = None

    def __call__(self, input_ids: List[int], scores: torch.Tensor) -> torch.Tensor:
        token_sequence = input_ids
        if self.analyzer:
            self.analyzer.report_raw_logits(token_sequence, scores.tolist())
        allowed_tokens = self.token_enforcer.get_allowed_tokens(token_sequence)
        if self.mask is None:
            # We create it here because full_like() also copies the device and dtype
            self.mask = torch.full_like(scores, -math.inf)
        else:
            self.mask.fill_(-math.inf)
        self.mask[allowed_tokens] = 0
        scores = scores + self.mask
        return scores
33
+
34
+
35
def build_vllm_token_enforcer_tokenizer_data(tokenizer: Union[vllm.LLM, PreTrainedTokenizerBase]) -> TokenEnforcerTokenizerData:
    # There are many classes that can be passed here, this logic should work on all of them.
    vocab_size = None
    if hasattr(tokenizer, 'llm_engine'):
        # A full vllm.LLM was passed: read the vocab size from its engine config.
        vocab_size = tokenizer.llm_engine.get_model_config().get_vocab_size()
    if hasattr(tokenizer, 'get_tokenizer'):
        tokenizer = tokenizer.get_tokenizer()
    # Mistral tokenizers must not be unwrapped further — hand them off directly.
    if isinstance(tokenizer, MistralTokenizer):
        return build_token_enforcer_tokenizer_data(tokenizer, vocab_size)
    if hasattr(tokenizer, 'tokenizer'):
        tokenizer = tokenizer.tokenizer
    return build_token_enforcer_tokenizer_data(tokenizer, vocab_size)
47
+
48
+
49
def build_vllm_logits_processor(llm: Union[vllm.LLM, PreTrainedTokenizerBase, TokenEnforcerTokenizerData],
                                character_level_parser: CharacterLevelParser,
                                analyze: bool=False) -> VLLMLogitsProcessor:
    """Build the logits processor that vLLM will use to filter the tokens generated by the model.

    Pass the result via vLLM's logits-processors hook (presumably
    SamplingParams.logits_processors — confirm against the vLLM version in use).
    NOTE(review): the previous docstring referenced llama.cpp; that was a
    copy-paste artifact from the llama.cpp integration."""
    if not isinstance(llm, TokenEnforcerTokenizerData):
        llm = build_vllm_token_enforcer_tokenizer_data(llm)
    token_enforcer = TokenEnforcer(llm, character_level_parser)
    return VLLMLogitsProcessor(token_enforcer, analyze)
58
+
59
+
60
# Public API of this integration module.
__all__ = ['build_vllm_logits_processor', 'build_vllm_token_enforcer_tokenizer_data']
deepseek/lib/python3.10/site-packages/lmformatenforcer/jsonschemaparser.py ADDED
@@ -0,0 +1,710 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from copy import deepcopy
2
+ import enum
3
+ import sys
4
+ from typing import Dict, Hashable, List, Optional, Union, cast
5
+
6
+
7
+ from .external.jsonschemaobject import JsonSchemaObject, json_schema_data_formats
8
+ from .exceptions import LMFormatEnforcerException
9
+ from .characterlevelparser import CharacterLevelParser, CharacterLevelParserConfig, ForceStopParser, SequenceParser, StringParser, UnionParser
10
+ from .consts import BACKSLASH, BACKSLASH_ESCAPING_CHARACTERS, WHITESPACE_CHARACTERS
11
+ from .regexparser import RegexParser
12
+
13
+ # No need to include the 'integer' option in the anyOf, as it is a subset of 'number'
14
+ _ANY_JSON_SCHEMA_DICT = {'anyOf': [{'type': type} for type in json_schema_data_formats.keys() if type != 'integer']}
15
+
16
class JsonSchemaParser(CharacterLevelParser):
    """Character-level parser that only permits characters which keep the output
    a valid JSON document conforming to the given JSON schema.

    Internally a stack of sub-parsers (object/list/string/... states) tracks the
    construct currently being parsed; add_character() returns a NEW parser so
    instances behave immutably from the caller's perspective.
    """

    # Schema accepting any JSON value ('integer' omitted: subsumed by 'number').
    ANY_JSON_OBJECT_SCHEMA: JsonSchemaObject = JsonSchemaObject(**_ANY_JSON_SCHEMA_DICT)

    class _Context:
        # State shared between all forks of one parsing session.
        model_class: JsonSchemaObject
        # We store the active parser in the context, so that if a node adds to the stack, it knows
        # to which parser's stack to add.
        active_parser: "JsonSchemaParser"
        alphabet_without_quotes: str
        # NOTE(review): class-level mutable dict — the regex cache is shared
        # process-wide across contexts; presumably safe because parsers for the
        # same pattern are interchangeable. Confirm if patterns ever become
        # config-dependent.
        regex_parser_cache: Dict[str, RegexParser] = {}

    object_stack: List[CharacterLevelParser]
    context: _Context
    num_consecutive_whitespaces: int
    last_parsed_string: str # Slight hack to allow communicating the parsed key to the object parser
    last_non_whitespace_character: str # Slight hack to allow list parser to know if there is an item on top

    def __init__(self,
                 json_schema: Union[dict, _Context, None],
                 config: Optional[CharacterLevelParserConfig] = None,
                 existing_stack: Optional[List[CharacterLevelParser]] = None,
                 num_consecutive_whitespaces: int = 0):
        """Create a CharacterLevelParser for parsing JSON.
        :param json_schema: The json schema to parse. Can be a dict of a JSON schema, or None if any json output is allowed.
        :param config: Optional parser configuration (alphabet, whitespace limits, ...).
        :param existing_stack: Internal - reuses a sub-parser stack when forking.
        :param num_consecutive_whitespaces: Internal - whitespace run length carried over from the fork source."""
        super().__init__(config)
        # Internal forks pass the shared _Context directly; external callers pass a schema dict (or None).
        if isinstance(json_schema, JsonSchemaParser._Context):
            self.context = json_schema
        else:
            self.context = JsonSchemaParser._Context()
            json_schema = json_schema or _ANY_JSON_SCHEMA_DICT
            self.context.model_class = JsonSchemaObject(**json_schema)
            self.context.active_parser = self
            self.context.alphabet_without_quotes = self.config.alphabet.replace('"', '')

        self.num_consecutive_whitespaces = num_consecutive_whitespaces
        if existing_stack is None:
            self.object_stack = [get_parser(self, self.context.model_class)]
        else:
            self.object_stack = existing_stack
        self.last_parsed_string = ""
        self.last_non_whitespace_character = ""

    def add_character(self, new_character: str) -> CharacterLevelParser:
        """Consume one character and return the successor parser (never mutates self)."""
        self.context.active_parser = self
        # Assumption: The top-most parser that can accept the character is the one that should accept it.
        # This is different from the SequenceParser, in which we need to split (union) into all options.
        receiving_idx = len(self.object_stack) - 1
        last_parsed_string = self.last_parsed_string
        # Walk down the stack past finished parsers that cannot accept the character.
        while receiving_idx >= 0 and new_character not in self.object_stack[receiving_idx].get_allowed_characters():
            finished_receiver = self.object_stack[receiving_idx]
            if isinstance(finished_receiver, StringParsingState):
                last_parsed_string = finished_receiver.parsed_string
            receiving_idx -= 1

        updated_stack = self.object_stack[:receiving_idx + 1]
        updated_parser = JsonSchemaParser(self.context, self.config, updated_stack, self.num_consecutive_whitespaces)
        updated_parser.context.active_parser = updated_parser
        updated_parser.last_parsed_string = last_parsed_string
        if receiving_idx >= 0:
            updated_parser.object_stack[receiving_idx] = updated_parser.object_stack[receiving_idx].add_character(new_character)
        # Track the whitespace run length (capped elsewhere via config.max_consecutive_whitespaces).
        if new_character in WHITESPACE_CHARACTERS:
            updated_parser.num_consecutive_whitespaces += 1
            updated_parser.last_non_whitespace_character = self.last_non_whitespace_character
        else:
            updated_parser.num_consecutive_whitespaces = 0
            updated_parser.last_non_whitespace_character = new_character

        if updated_parser.object_stack and isinstance(updated_parser.object_stack[-1], UnionParser) and \
            any(isinstance(parser, (ObjectParsingState, ListParsingState)) for parser in updated_parser.object_stack[-1].parsers):
            # If the top parser is a union parser with "advanced" (=parsers that modify the object stack) parsers inside,
            # we need to split the top level parser into the different options,
            # As each "fork" can live with a different object stack, and we need to make sure they have their own ones.
            option_json_schema_parsers = []
            for option_parser in updated_parser.object_stack[-1].parsers:
                option_stack = updated_parser.object_stack[:-1] + [option_parser]
                option_parser = JsonSchemaParser(self.context, self.config, option_stack, updated_parser.num_consecutive_whitespaces)
                option_parser.context.active_parser = option_parser
                option_parser.last_parsed_string = last_parsed_string
                option_parser.last_non_whitespace_character = updated_parser.last_non_whitespace_character
                option_json_schema_parsers.append(option_parser)
            return UnionParser(option_json_schema_parsers)

        # For some performance optimizations to work, we want to make sure we don't leave irrelevant
        # objects at the top of the stack, which we know will be passed over next timestep
        new_object_stack = updated_parser.object_stack
        while new_object_stack and new_object_stack[-1].can_end() and new_object_stack[-1].get_allowed_characters() == '':
            finished_receiver = new_object_stack[-1]
            if isinstance(finished_receiver, StringParsingState):
                updated_parser.last_parsed_string = finished_receiver.parsed_string
            del new_object_stack[-1]
            if new_object_stack:
                new_top_parser = new_object_stack[-1]
                if isinstance(new_top_parser, ListParsingState):
                    # Clone before mutating: list states may be shared between forks.
                    new_top_parser = new_top_parser._clone()
                    new_top_parser.num_items_seen += 1
                    new_object_stack[-1] = new_top_parser

        return updated_parser

    def get_allowed_characters(self) -> str:
        """Return the set of characters that may legally appear next."""
        self.context.active_parser = self

        allowed_character_strs = []
        for parser in reversed(self.object_stack):
            # Similar to SequenceParser, if the top object can end, we need to know to accept the next character of parser below, etc.
            allowed_character_strs.append(parser.get_allowed_characters())
            if not parser.can_end():
                break
        if len(allowed_character_strs) > 0:
            allowed_characters = "".join(allowed_character_strs)
        else:
            # In certain cases, beam search / sample crashes when there are less legal
            # continuation tokens than there are beams. Therefore, we allow whitespace
            # characters when the object stack is empty (= we are done parsing)
            allowed_characters = WHITESPACE_CHARACTERS

        if self.num_consecutive_whitespaces >= self.config.max_consecutive_whitespaces:
            # print("Filtering whitespace characters")
            allowed_characters = "".join(c for c in allowed_characters if c not in WHITESPACE_CHARACTERS)
        return allowed_characters

    def can_end(self) -> bool:
        """The document may end only when every parser on the stack can end."""
        return all(parser.can_end() for parser in self.object_stack)

    def shortcut_key(self) -> Optional[Hashable]:
        """Return a cache key when in unconstrained-string mode, else None."""
        if self.object_stack:
            current_parser = self.object_stack[-1]
            if isinstance(current_parser, StringParsingState):
                if not current_parser.allowed_strings and current_parser.seen_opening_quote and not current_parser.seen_closing_quote and not current_parser.regex_parser:
                    # Performance optimization: When we are parsing a string that is not from a list of allowed strings, most tokens
                    # are legal. The exploration can be more costly than the LM itself for large tokenizers (because this is pure python),
                    # so we signal that we are in a "freetext" mode, and reuse the allowed token list throughout the run.
                    cur_len = len(current_parser.parsed_string)
                    min_len = current_parser.min_length or 0
                    max_len = current_parser.max_length or sys.maxsize
                    assert min_len <= max_len, "Invalid schema for str: min length is larger than max length"
                    if cur_len < max_len:
                        return ('json_freetext', cur_len, min_len, max_len)
        return None
155
+
156
+
157
class BaseParsingState(CharacterLevelParser):
    # Common base for the concrete JSON parsing states; holds a back-reference
    # to the owning JsonSchemaParser so states can reach shared context.
    def __init__(self, root: JsonSchemaParser):
        # NOTE(review): deliberately does not call super().__init__() - states
        # presumably rely on the root parser's config; confirm upstream.
        self.root = root
160
+
161
+
162
+ def _merge_object_schemas(base_schema: JsonSchemaObject, option_schema: JsonSchemaObject) -> JsonSchemaObject:
163
+ base_schema_properties = base_schema.properties or {}
164
+ for property_name, property_value in base_schema_properties.items():
165
+ # We assume that if a property exists in both base and option, the option version will be
166
+ # more specific, therefore we only take missing entries
167
+ if property_name not in option_schema.properties:
168
+ option_schema.properties[property_name] = property_value
169
+ for required_property in base_schema.required:
170
+ if required_property not in option_schema.required:
171
+ option_schema.required.append(required_property)
172
+ return option_schema
173
+
174
+
175
def get_parser(
    parsing_state: JsonSchemaParser,
    value_schema: JsonSchemaObject
) -> CharacterLevelParser:
    """Build the CharacterLevelParser that enforces a single JSON schema node.

    Dispatch order matters: combinators (anyOf / allOf) first, then 'const',
    strings, oneOf, objects, $ref resolution, enums, and finally the remaining
    primitive types. Raises for unsupported schema constructs.

    :param parsing_state: The root JsonSchemaParser (shared context / config).
    :param value_schema: The schema node to build a parser for.
    :return: A parser state that only accepts JSON text matching the schema.
    """
    if value_schema is None:
        raise Exception("JsonSchemaParser: Value schema is None")
    if value_schema.anyOf:
        parsers = [get_parser(parsing_state, schema) for schema in value_schema.anyOf]
        return UnionParser(parsers)
    if value_schema.allOf:
        # allOf is approximated by merging all options into a single schema.
        merged_schema = value_schema.allOf[0]
        for schema in value_schema.allOf[1:]:
            merged_schema = _merge_object_schemas(merged_schema, schema)
        return get_parser(parsing_state, merged_schema)
    if value_schema.extras and 'const' in value_schema.extras:
        allowed_value = value_schema.extras['const']
        # Only string constants are surrounded by quotes in the JSON output.
        # (isinstance instead of `type(x) == str` - idiomatic and subclass-safe)
        is_string = isinstance(allowed_value, str)
        return StringParsingState(parsing_state,
                                  [allowed_value],
                                  require_opening_quote=is_string,
                                  require_closing_quote=is_string)
    if value_schema.type == "string":
        return StringParsingState(
            parsing_state,
            value_schema.enum,
            require_opening_quote=True,
            min_length=value_schema.minLength,
            max_length=value_schema.maxLength,
            pattern=value_schema.pattern,
        )
    if value_schema.oneOf:
        # We create a combined object schema for each option that includes the information from the parent
        # And then create a UnionParser based on the combined options
        merged_schemas = [_merge_object_schemas(value_schema, option_schema) for option_schema in value_schema.oneOf]
        object_parsing_options = [ObjectParsingState(merged_schema, parsing_state) for merged_schema in merged_schemas]
        return UnionParser(object_parsing_options)
    elif value_schema.type == "object":
        return ObjectParsingState(value_schema, parsing_state)
    elif value_schema.type is None and value_schema.ref:
        # Resolve a $ref pointer against the model's definitions and recurse.
        value_class_name = value_schema.ref.split('/')[-1]
        extras = parsing_state.context.model_class.extras
        # Pydantic V1 and V2 have different names for the definitions field
        if 'definitions' in extras:
            definitions = extras['definitions']
        elif '$defs' in extras:
            definitions = extras['$defs']
        else:
            raise ValueError("No definitions found in schema")
        class_dict = definitions[value_class_name]
        value_schema = JsonSchemaObject(**class_dict)
        return get_parser(parsing_state, value_schema)
    elif value_schema.enum:
        is_numeric = all(isinstance(i, (int, float)) for i in value_schema.enum)
        is_string = all(isinstance(i, str) for i in value_schema.enum)
        if is_string:
            return StringParsingState(
                parsing_state,
                value_schema.enum,
                require_opening_quote=True,
            )
        elif is_numeric:
            # Numeric enum members are emitted without surrounding quotes.
            return StringParsingState(
                parsing_state,
                [str(i) for i in value_schema.enum],
                require_opening_quote=False,
                require_closing_quote=False,
            )
        else:
            raise Exception("Unsupported enum type " + str(value_schema.enum))
    elif value_schema.type == "integer":
        return NumberParsingState(parsing_state, False)
    elif value_schema.type == "boolean":
        return StringParsingState(
            parsing_state,
            ["true", "false"],
            require_opening_quote=False,
            require_closing_quote=False,
        )
    elif value_schema.type == "null":
        return StringParsingState(
            parsing_state,
            ["null"],
            require_opening_quote=False,
            require_closing_quote=False,
        )
    elif value_schema.type == "number":
        return NumberParsingState(parsing_state, True)
    elif value_schema.type == "array":
        item_schema = value_schema.items or JsonSchemaParser.ANY_JSON_OBJECT_SCHEMA
        return ListParsingState(parsing_state, item_schema, value_schema.minItems, value_schema.maxItems)
    else:
        raise Exception("Unsupported type " + str(value_schema.type))
267
+
268
+
269
class ObjectParsingStage(enum.Enum):
    """The sequential stages of parsing a JSON object, from '{' through '}'."""
    START_OBJECT = "StartObject"
    PARSING_KEY_OR_END = "ParsingKey"
    PARSING_KEY_VALUE_SEPARATOR = "ParsingKeyValueSeparator"
    PARSING_VALUE = "ParsingValue"
    PARSING_SEPARATOR_OR_END = "ParsingSeparatorOrEnd"
    END_OBJECT = "EndObject"
276
+
277
+
278
class ObjectParsingState(BaseParsingState):
    """Parses a JSON object ("{ key: value, ... }") according to a schema node.

    This state consumes only the structural characters ('{', '"', ':', ',',
    '}') and surrounding whitespace. Actual key and value parsing is delegated
    to child parsers pushed onto the root context's object stack; the child
    hands back the terminating character ('"', ',' or '}') which drives the
    stage transitions here.
    """
    # The schema node that this object must conform to.
    schema_object: JsonSchemaObject
    # Current position in the object state machine.
    current_stage: ObjectParsingStage
    # Keys fully parsed so far (prevents duplicates, tracks 'required').
    existing_keys: List[str]
    # The key whose value is being parsed (None before the first ':').
    current_key: Optional[str]
    # True when the schema has no 'properties' (free-form dictionary).
    is_dictionary: bool

    def __init__(self, schema_object: JsonSchemaObject, root: JsonSchemaParser):
        super().__init__(root)
        self.schema_object = schema_object
        self.current_stage = ObjectParsingStage.START_OBJECT
        self.root = root
        self.existing_keys = []
        self.current_key = None
        # Javascript objects represent both classes and dictionaries, so we need to know which one we are parsing
        self.is_dictionary = self.schema_object.properties is None

    def clone(self) -> 'ObjectParsingState':
        """Return a shallow copy; existing_keys is copied so mutation is isolated."""
        clone = ObjectParsingState(self.schema_object, self.root)
        clone.current_stage = self.current_stage
        clone.existing_keys = self.existing_keys[:]
        clone.current_key = self.current_key
        clone.is_dictionary = self.is_dictionary
        return clone

    def add_character(self, new_character: str) -> CharacterLevelParser:
        """Advance the object state machine by one structural character."""
        if new_character.strip() == "":
            # In object scope, whitespaces can be ignored
            return self
        self = self.clone() # Immutability requirement
        if (
            self.current_stage == ObjectParsingStage.START_OBJECT
            and new_character == "{"
        ):
            self.current_stage = ObjectParsingStage.PARSING_KEY_OR_END
        elif self.current_stage == ObjectParsingStage.PARSING_KEY_OR_END:
            if new_character == "}":
                self.current_stage = ObjectParsingStage.END_OBJECT
            if new_character == '"':
                # possible_keys stays None for dictionaries: any key string is allowed.
                possible_keys = None
                if not self.is_dictionary:
                    required_keys = self.schema_object.required or []
                    next_required_key = next((key for key in required_keys if key not in self.existing_keys), None)
                    if self.root.config.force_json_field_order and next_required_key:
                        possible_keys = [next_required_key]
                    else:
                        possible_keys = list(self.schema_object.properties.keys())
                        possible_keys = list(
                            set(possible_keys).difference(self.existing_keys)
                        )
                # We send require_opening_quote=True and then add_character('"') instead of require_opening_quote=False
                # Because there is a difference between "don't need a quote" and "received it before creating the parser"
                key_parser = StringParsingState(
                    self.root, possible_keys, require_opening_quote=True, require_closing_quote=True
                )
                key_parser = key_parser.add_character('"')
                self.root.context.active_parser.object_stack.append(key_parser)
                self.current_stage = ObjectParsingStage.PARSING_KEY_VALUE_SEPARATOR
        elif self.current_stage == ObjectParsingStage.PARSING_KEY_VALUE_SEPARATOR:
            if new_character == ":":
                self.current_stage = ObjectParsingStage.PARSING_VALUE
                # The finished key parser recorded the key it consumed on the context.
                self.current_key = self.root.context.active_parser.last_parsed_string
                self.existing_keys.append(self.current_key)
                if self.is_dictionary:
                    if self.schema_object.additionalProperties:
                        value_schema = self.schema_object.additionalProperties
                    else:
                        value_schema = JsonSchemaParser.ANY_JSON_OBJECT_SCHEMA
                else:
                    value_schema = self.schema_object.properties[self.current_key]
                self.current_key_parser = get_parser(
                    self.root, value_schema
                )
                self.root.context.active_parser.object_stack.append(self.current_key_parser)
                self.current_key_parser = None
        elif self.current_stage == ObjectParsingStage.PARSING_VALUE:
            # If we receive a character during parsing value, it means that its the finishing character
            # of the value parser
            if new_character == '"':
                self.current_stage = ObjectParsingStage.PARSING_SEPARATOR_OR_END
            elif new_character == ",":
                self.current_stage = ObjectParsingStage.PARSING_KEY_OR_END
            elif new_character == "}":
                self.current_stage = ObjectParsingStage.END_OBJECT
        elif self.current_stage == ObjectParsingStage.PARSING_SEPARATOR_OR_END:
            if new_character == ",":
                self.current_stage = ObjectParsingStage.PARSING_KEY_OR_END
            elif new_character == "}":
                self.current_stage = ObjectParsingStage.END_OBJECT
        return self

    def get_allowed_characters(self) -> str:
        """Return the structural characters legal at the current stage.

        Whitespace is always permitted; '}' only once all required keys have
        been seen, and '"'/',' only while unparsed keys remain (or always,
        for free-form dictionaries).
        """
        possible_keys = (
            list(self.schema_object.properties.keys())
            if not self.is_dictionary
            else None
        )
        required_keys = self.schema_object.required or []
        can_end = set(self.existing_keys).issuperset(required_keys)
        can_parse_key = self.is_dictionary or set(possible_keys).difference(
            self.existing_keys
        )

        possible_characters = [c for c in WHITESPACE_CHARACTERS]
        if self.current_stage == ObjectParsingStage.START_OBJECT:
            possible_characters.append('{')
        elif self.current_stage == ObjectParsingStage.PARSING_KEY_OR_END:
            if can_end:
                possible_characters.append('}')
            if can_parse_key:
                possible_characters.append('"')
        elif self.current_stage == ObjectParsingStage.PARSING_KEY_VALUE_SEPARATOR:
            possible_characters.append(':')
        elif self.current_stage == ObjectParsingStage.PARSING_VALUE:
            # Sometimes the value parser considers finishing, so it needs to know which continuations are possible
            if can_end:
                possible_characters.append('}')
            if can_parse_key:
                possible_characters.append(',')
        elif self.current_stage == ObjectParsingStage.PARSING_SEPARATOR_OR_END:
            if can_end:
                possible_characters.append('}')
            if can_parse_key:
                possible_characters.append(',')
        return "".join(possible_characters)

    def can_end(self) -> bool:
        """The object may end only after its closing '}' was consumed."""
        return self.current_stage == ObjectParsingStage.END_OBJECT
406
+
407
+
408
class StringParsingStage:
    """String constants naming the stages of parsing a primitive token."""
    # NOTE(review): only START_TOKEN appears to be referenced in this file
    # (PrimitiveParsingState.__init__); the others are kept for completeness.
    START_TOKEN = "StartToken"
    PARSING_STRING = "ParsingString"
    END_TOKEN = "EndToken"
412
+
413
+
414
class PrimitiveParsingState(BaseParsingState):
    """Base state for primitive JSON values; accumulates parsed text immutably.

    Each consumed character produces a fresh clone (via the subclass-provided
    _clone) with the character appended to ``parsed_string``.
    """

    def __init__(self, root: JsonSchemaParser):
        super().__init__(root)
        self.stage = StringParsingStage.START_TOKEN
        self.parsed_string = ""

    def _clone(self) -> "PrimitiveParsingState":
        # Subclasses must supply a copy so add_character can stay immutable.
        raise NotImplementedError()

    def add_character(self, new_character: str) -> "PrimitiveParsingState":
        updated = self._clone()
        updated.parsed_string = updated.parsed_string + new_character
        return updated

    def can_end(self) -> bool:
        return True
430
+
431
+
432
class NumberParsingState(PrimitiveParsingState):
    """Parses a JSON number character by character.

    Tracks sign/decimal-point/exponent/digit state so that
    get_allowed_characters only ever offers continuations that keep the
    partial text a valid JSON number prefix.
    """
    def __init__(
        self,
        root: JsonSchemaParser,
        allow_floating_point: bool,
    ):
        # allow_floating_point: False for "integer" schemas, True for "number".
        super().__init__(root)
        self.allow_floating_point = allow_floating_point
        self.seen_decimal_point = False
        self.seen_whitespace_after_digits = False
        self.seen_exponent = False
        self.seen_digit = False

    def _clone(self) -> "NumberParsingState":
        """Copy all numeric-state flags; required by the immutable add_character flow."""
        clone = NumberParsingState(self.root, self.allow_floating_point)
        clone.parsed_string = self.parsed_string
        clone.seen_decimal_point = self.seen_decimal_point
        clone.seen_whitespace_after_digits = self.seen_whitespace_after_digits
        clone.seen_exponent = self.seen_exponent
        clone.seen_digit = self.seen_digit
        return clone

    def add_character(self, new_character: str) -> CharacterLevelParser:
        # Leading whitespace is skipped without cloning.
        if not self.parsed_string and new_character in WHITESPACE_CHARACTERS:
            return self
        self = cast(NumberParsingState, super().add_character(new_character))
        if new_character in WHITESPACE_CHARACTERS:
            if self.parsed_string:
                # Trailing whitespace terminates the number.
                self.seen_whitespace_after_digits = True
            return self
        if new_character == ".":
            # parsed_string already contains the '.', so length 1 means it is the first character.
            if not self.parsed_string or len(self.parsed_string) == 1:
                raise LMFormatEnforcerException("Numbers cannot start with a decimal point.")
            if self.seen_decimal_point:
                # NOTE(review): message says "two" but the check rejects a second decimal point.
                raise LMFormatEnforcerException("Numbers cannot contain more than two decimal points.")
            self.seen_decimal_point = True
        elif new_character in "eE":
            if self.seen_exponent or not self.seen_digit:
                raise LMFormatEnforcerException("Invalid number format")
            self.seen_exponent = True
        elif new_character.isdigit():
            self.seen_digit = True
        return self

    def get_allowed_characters(self) -> str:
        """Return the characters that keep the text a valid JSON number prefix."""
        if self.seen_whitespace_after_digits:
            # The number has ended; only more whitespace is allowed.
            return WHITESPACE_CHARACTERS
        allowed_characters = "0123456789"
        if not self.parsed_string:
            allowed_characters += "-" + WHITESPACE_CHARACTERS
        # A leading "0" (or "-0") cannot be followed by more digits.
        if self.parsed_string and len(self.parsed_string) == 1 and self.parsed_string[0] == "0":
            allowed_characters = WHITESPACE_CHARACTERS
        if self.parsed_string and len(self.parsed_string) == 2 and self.parsed_string == "-0":
            allowed_characters = "." + WHITESPACE_CHARACTERS
        if self.parsed_string and self.parsed_string[-1] in "eE":
            # An exponent sign is only legal directly after 'e'/'E'.
            allowed_characters += "-+"
        if self.seen_digit and not self.seen_exponent:
            allowed_characters += "eE"
        if self.allow_floating_point and not self.seen_decimal_point and self.seen_digit and not self.seen_exponent:
            allowed_characters += "."
        if self.parsed_string and self.parsed_string[-1].isdigit():
            allowed_characters += WHITESPACE_CHARACTERS
        return allowed_characters

    def can_end(self) -> bool:
        """A number may end only on a digit (or after terminating whitespace), never mid-exponent."""
        if self.seen_exponent and self.parsed_string[-1] in "eE+-":
            return False
        return bool(self.parsed_string) and (self.parsed_string[-1].isdigit() or self.seen_whitespace_after_digits)
500
+
501
+
502
class StringParsingState(PrimitiveParsingState):
    """Parses a JSON string, optionally constrained to a fixed set of allowed
    strings, a min/max length, or a regex pattern.

    Also used (with require_opening_quote/require_closing_quote=False) for
    unquoted literals such as booleans, null and numeric enum members.
    """
    # When non-empty, only these exact strings are accepted.
    allowed_strings: List[str]
    # Characters consumed so far (excluding the quotes).
    parsed_string: str
    seen_closing_quote: bool
    seen_opening_quote: bool
    # Length constraints; mutually exclusive with pattern.
    min_length: Optional[int]
    max_length: Optional[int]
    pattern: Optional[str]
    # Compiled-pattern parser, shared through the root's cache.
    regex_parser: Optional[RegexParser]

    def __init__(
        self,
        root: JsonSchemaParser,
        allowed_strings: List[str],
        require_opening_quote: bool,
        require_closing_quote: bool = True,
        min_length: Optional[int]=None,
        max_length: Optional[int]=None,
        pattern: Optional[str]=None,
        regex_parser: Optional[RegexParser]=None,
    ):
        super().__init__(root)
        self.allowed_strings = allowed_strings
        self.seen_closing_quote = False
        self.seen_opening_quote = not require_opening_quote
        self.require_closing_quote = require_closing_quote
        self.require_opening_quote = require_opening_quote
        self.min_length = min_length
        self.max_length = max_length
        self.pattern = pattern
        if self.pattern and (self.min_length or self.max_length):
            raise LMFormatEnforcerException("String schema contains both a pattern and a min/max length, which is not currently supported")
        self.regex_parser = regex_parser
        if self.pattern and not regex_parser:
            # Regex compilation is expensive; reuse parsers via the shared cache.
            if self.pattern not in self.root.context.regex_parser_cache:
                self.root.context.regex_parser_cache[self.pattern] = RegexParser(self.pattern, self.root.config)
            self.regex_parser = self.root.context.regex_parser_cache[self.pattern]


    def _clone(self) -> "StringParsingState":
        """Copy constructor used by the immutable add_character flow."""
        clone = StringParsingState(
            self.root,
            self.allowed_strings,
            self.require_opening_quote,
            self.require_closing_quote,
            self.min_length,
            self.max_length,
            self.pattern,
            self.regex_parser
        )
        clone.parsed_string = self.parsed_string
        clone.seen_closing_quote = self.seen_closing_quote
        clone.seen_opening_quote = self.seen_opening_quote
        return clone

    def add_character(self, new_character: str):
        # Whitespace before the opening quote / after the closing quote is ignored.
        if (not self.parsed_string or self.seen_closing_quote) and new_character in WHITESPACE_CHARACTERS:
            return self
        self = cast(StringParsingState, super().add_character(new_character))
        if new_character == '"':
            if not self.seen_opening_quote:
                self.seen_opening_quote = True
                self.parsed_string = ""
            else:
                self.seen_closing_quote = True
                # Strip the quote that super().add_character appended.
                self.parsed_string = self.parsed_string[:-1]
        if self.regex_parser and new_character != '"' and self.seen_opening_quote and not self.seen_closing_quote:
            self.regex_parser = self.regex_parser.add_character(new_character)
        if new_character == BACKSLASH:
            # After a backslash we immediately have the escaping character, and if its 'u', we have 4 hex digits
            escaping_character_parsers: List[CharacterLevelParser] = [StringParser(c) for c in BACKSLASH_ESCAPING_CHARACTERS]
            hex_digit_parser: CharacterLevelParser = UnionParser([StringParser(c) for c in "0123456789abcdefABCDEF"])
            unicode_components: List[CharacterLevelParser] = list([StringParser("u")] + [hex_digit_parser] * 4)
            unicode_escape_parser: CharacterLevelParser = SequenceParser(unicode_components)
            json_escaping_parser = UnionParser(escaping_character_parsers + [unicode_escape_parser])
            self.root.context.active_parser.object_stack.append(json_escaping_parser)
        return self

    def get_allowed_characters(self) -> str:
        """Return the characters legal at this point of the string."""
        if not self.seen_opening_quote:
            return '"' + WHITESPACE_CHARACTERS
        if self.seen_closing_quote:
            return WHITESPACE_CHARACTERS
        if self.regex_parser:
            regex_chars = self.regex_parser.get_allowed_characters()
            # We don't currently support regexes with quotes or escaping backslashes, so we remove them from the allowed characters
            regex_chars = regex_chars.replace('"', '').replace(BACKSLASH, '')
            if self.regex_parser.can_end():
                regex_chars += '"'
            return regex_chars
        if self.allowed_strings:
            # Offer only next-characters that continue one of the allowed strings.
            allowed_continuations = [
                s[len(self.parsed_string) :]
                for s in self.allowed_strings
                if s.startswith(self.parsed_string)
            ]
            allowed_next_characters = [allowed_continuation[0] for allowed_continuation in allowed_continuations if len(allowed_continuation) > 0]
            allowed_next_characters = list(set(allowed_next_characters))
            if self.parsed_string in self.allowed_strings and self.require_closing_quote:
                allowed_next_characters.append('"')
            if (not self.parsed_string) and (not self.seen_opening_quote or not self.require_opening_quote):
                allowed_next_characters.extend(WHITESPACE_CHARACTERS)
            return "".join(allowed_next_characters)
        else:
            # Free-text string: the quote is withheld until min_length is met,
            # and forced once max_length is reached.
            if self.min_length is not None and len(self.parsed_string) < self.min_length:
                return self.root.context.alphabet_without_quotes + BACKSLASH
            if self.max_length is not None and len(self.parsed_string) >= self.max_length:
                return '"'
            return self.root.config.alphabet + BACKSLASH

    def can_end(self) -> bool:
        """Quoted strings end only after the closing quote; unquoted literals when complete."""
        if self.require_closing_quote:
            return self.seen_closing_quote
        else:
            if self.allowed_strings:
                return self.parsed_string in self.allowed_strings
            else:
                return bool(self.parsed_string)
621
+
622
class ListParsingState(PrimitiveParsingState):
    """Parses a JSON array ("[...]") whose items conform to a single schema.

    Item parsing is delegated to child parsers pushed onto the root context's
    object stack; this state consumes the structural characters '[', ',', ']'
    and enforces minItems / maxItems via the allowed control characters.
    """
    # Schema that every array item must match.
    list_member_type: JsonSchemaObject
    seen_list_opener: bool = False
    seen_list_closer: bool = False
    # Count of comma-terminated items (see get_allowed_control_characters for the off-by-one handling).
    num_items_seen: int = 0

    def __init__(
        self,
        root: JsonSchemaParser,
        list_member_type: JsonSchemaObject,
        min_items: Optional[int],
        max_items: Optional[int],
    ):
        super().__init__(root)
        self.list_member_type = list_member_type
        self.min_items = min_items
        self.max_items = max_items
        # Apply the configured default cap when the schema does not set maxItems.
        default_max = root.config.max_json_array_length
        if self.max_items is None and default_max > 0 and (min_items is None or min_items < default_max):
            self.max_items = default_max

    def _clone(self) -> PrimitiveParsingState:
        """Copy constructor used by the immutable add_character flow."""
        new = ListParsingState(self.root, self.list_member_type, self.min_items, self.max_items)
        new.parsed_string = self.parsed_string
        new.num_items_seen = self.num_items_seen
        new.seen_list_opener = self.seen_list_opener
        new.seen_list_closer = self.seen_list_closer
        return new

    def add_character(self, new_character: str) -> "ListParsingState":
        self = cast(ListParsingState, super().add_character(new_character))
        if new_character == "[":
            self.seen_list_opener = True
            item_parser = get_parser(self.root, self.list_member_type)
            requires_items = self.min_items is not None and self.min_items > 0
            if requires_items:
                parser_to_push = item_parser
            else:
                # If we don't require items, we can also end immediately, the Union + ForceStopParser combination achieves this
                empty_list_parser = ForceStopParser(allow_whitespace=True)
                if isinstance(item_parser, UnionParser):
                    item_parser.parsers.append(empty_list_parser)
                    parser_to_push = item_parser
                else:
                    parser_to_push = UnionParser([item_parser, empty_list_parser])
            self.root.context.active_parser.object_stack.append(parser_to_push)
        elif new_character == "]":
            self.seen_list_closer = True
        elif new_character == ",":
            if not self.seen_list_closer:
                self.num_items_seen += 1
                # A comma always starts the next item: push a fresh item parser.
                self.root.context.active_parser.object_stack.append(
                    get_parser(
                        self.root,
                        self.list_member_type,
                    )
                )
        return self

    def get_allowed_characters(self) -> str:
        if not self.seen_list_opener:
            return "[" + WHITESPACE_CHARACTERS
        elif not self.seen_list_closer:
            return self.get_allowed_control_characters() + WHITESPACE_CHARACTERS
        else:
            return ""

    def can_end(self) -> bool:
        return self.seen_list_closer

    def get_allowed_control_characters(self):
        """Return ',' / ']' as permitted by minItems / maxItems at this point."""
        num_items = self.num_items_seen
        top_parser = self.root.context.active_parser.object_stack[-1]
        is_on_top = top_parser == self or isinstance(top_parser, UnionParser) and self in top_parser.parsers
        if (not is_on_top) and self.root.context.active_parser.last_non_whitespace_character != "[":
            # If there is an active parser above us, and the last character is not [,
            # there is an active item parser on the stack that we did not count yet.
            num_items += 1
        control_characters = ""
        has_enough_items = self.min_items is None or num_items >= self.min_items
        can_add_another_item = self.max_items is None or num_items < self.max_items

        if num_items > 0 and can_add_another_item:
            control_characters += ","
        if has_enough_items:
            control_characters += "]"
        return control_characters
710
+
deepseek/lib/python3.10/site-packages/lmformatenforcer/tokenenforcer.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass, field
2
+ import sys
3
+ from typing import Callable, Dict, Hashable, List, Optional, Tuple, Union
4
+ import logging
5
+
6
+ from .exceptions import LMFormatEnforcerException
7
+ from .characterlevelparser import CharacterLevelParser, ForceStopParser, CharacterLevelParserConfig
8
+ from .tokenizerprefixtree import TokenizerPrefixTree, TokenizerPrefixTreeNode
9
+
10
+
11
class TokenEnforcerTokenizerData:
    """Per-tokenizer preprocessing for the TokenEnforcer.

    Building this object does some calculation (notably the tokenizer prefix
    tree), so reuse one instance across multiple TokenEnforcers that share a
    tokenizer.
    """
    def __init__(self,
                 regular_tokens: List[Tuple[int, str, bool]],
                 decoder: Callable[[List[int]], str],
                 eos_token_id: Union[int, List[int]]):
        """
        Create the tokenizer data that the TokenEnforcer needs. This can be reused for multiple TokenEnforcers if they work with the same tokenizer.
        :param regular_tokens: A list of tuples (token_id, token_string, is_new_word_token) for all the regular (not special) tokens in the tokenizer vocabulary.
        Note that token_string is expected to include leading / trailing whitespaces if relevant.
        :param decoder: A function that decodes a list of token ids into a string.
        :param eos_token_id: The token id(s) of the end-of-string token(s).
        """
        self.regular_tokens = regular_tokens
        self.tokenizer_tree = TokenizerPrefixTree(regular_tokens)
        self.decoder = decoder
        self.eos_token_id = eos_token_id
        # The alphabet is every single-character token string at the tree root.
        single_char_strs = [s for s in self.tokenizer_tree.root.children.keys() if len(s) == 1]
        self.tokenizer_alphabet = "".join(single_char_strs)
30
+
31
+
32
class TokenEnforcer:
    """TokenEnforcer provides a token filtering mechanism, given a CharacterLevelParser and some information about the tokenizer.
    It is the main entry point for extending lm-format-enforcer to new inference libraries. See __init__() and get_allowed_tokens()"""
    @dataclass
    class OutputTensorState:
        # The character-level parser state reached after applying this token sequence.
        parser: CharacterLevelParser
        # Cached result of _compute_allowed_tokens for this state.
        allowed_tokens: List[int] = field(default_factory=list)
        # Tokens of the current (partially decoded) word, needed for incremental decoding.
        current_word_tokens: List[int] = field(default_factory=list)

    def __init__(self, tokenizer_data: TokenEnforcerTokenizerData, parser: CharacterLevelParser):
        """
        Create a new TokenEnforcer.
        :param tokenizer_data: Per tokenizer data that the token enforcer needs in order to operate.
        :param parser: A CharacterLevelParser that defines the allowed strings.
        """
        self.prefix_states: Dict[Tuple, TokenEnforcer.OutputTensorState] = {}
        self.root_parser = parser
        self.tokenizer_tree = tokenizer_data.tokenizer_tree
        self.decoder = tokenizer_data.decoder
        self.eos_token_id = tokenizer_data.eos_token_id
        self.regular_tokens = tokenizer_data.regular_tokens
        # Keyed by parser.cache_key(); avoids re-traversing the prefix tree for equivalent states.
        self.allowed_token_cache: Dict[Hashable, List[int]] = {}

        # The parser's alphabet is restricted to characters the tokenizer can actually emit.
        config = CharacterLevelParserConfig(alphabet=tokenizer_data.tokenizer_alphabet)
        parser.config = config

    def get_allowed_tokens(self, token_sequence: List[int]) -> List[int]:
        """
        Get a list of allowed tokens, given a list of tokens that were already generated.
        :param token_sequence: The tokens that were already generated, and the next token will be generated for.
        :return: A list of token ids that are allowed to be selected next.
        """
        # In order to elegantly support beam search and batching, we don't store per-batch information.
        # Instead, we store a hash of all the states (unique token tensors) we encountered so far.
        # When we encounter a new unique token tensor, we find the token tensor that led to it, and continue from there.
        sent_tuple = tuple(token_sequence)
        prev_step_tuple = sent_tuple[:-1]

        if sent_tuple in self.prefix_states:
            # We already calculated for this node, return cached list
            return self.prefix_states[sent_tuple].allowed_tokens
        elif prev_step_tuple not in self.prefix_states:
            # We have not encountered the tensor up to the before-last entry. This means that this is the first call - the instruction / prompt tensor.
            # Initialize the root node
            state = TokenEnforcer.OutputTensorState(parser=self.root_parser)
            self.prefix_states[sent_tuple] = state
            self._compute_allowed_tokens(sent_tuple, state)
            return state.allowed_tokens
        else:
            # Find the state that led to this node. We explicitly don't use the concept of "timestep" because of beam search
            prev_step_state = self.prefix_states[prev_step_tuple]
            new_state = self._apply_new_characters(prev_step_state, token_sequence)
            self.prefix_states[sent_tuple] = new_state
            self._compute_allowed_tokens(sent_tuple, new_state)
            return new_state.allowed_tokens

    def _compute_allowed_tokens(self, state_tokens: Tuple, state: 'TokenEnforcer.OutputTensorState'):
        """Populate state.allowed_tokens for the given parser state (with caching and error fallback)."""
        try:
            allowed_tokens: List[int] = []
            cache_key = state.parser.cache_key()
            if cache_key is not None and cache_key in self.allowed_token_cache:
                state.allowed_tokens = self.allowed_token_cache[cache_key]
                return
            shortcut_key = state.parser.shortcut_key()
            self._collect_allowed_tokens(state.parser, self.tokenizer_tree.root, allowed_tokens, shortcut_key)
            if state.parser.can_end():
                allowed_tokens.extend(self.eos_token_id if isinstance(self.eos_token_id, list) else [self.eos_token_id])
            if not allowed_tokens:
                raise ValueError(f"Parser reached state with no allowed tokens")
            # root_state = next(state for state in self.prefix_states.values() if state.parser == self.root_parser)
            # print(f"Allowing {len(allowed_tokens)} tokens after {state.str_so_far[len(root_state.str_so_far):]}")
            state.allowed_tokens = allowed_tokens
            if cache_key is not None:
                self.allowed_token_cache[cache_key] = allowed_tokens
        except LMFormatEnforcerException:
            # Getting an LMFormatEnforcerException means that we know what the user did wrong,
            # and we can give a nice error message for them to fix.
            raise
        except Exception:
            # Other exceptions are potential bugs and should be reported
            logging.basicConfig(level=logging.ERROR)  # Initialize if no loggers
            prefix = self.decoder(list(state_tokens))
            logging.exception(f"Unknown LMFormatEnforcer Problem. Prefix: '{prefix}'\n"
                              "Terminating the parser. Please open an issue at \n"
                              "https://github.com/noamgat/lm-format-enforcer/issues with the prefix and "
                              "CharacterLevelParser parameters")
            # Fail open by allowing only EOS, so generation terminates cleanly.
            state.allowed_tokens = self.eos_token_id if isinstance(self.eos_token_id, list) else [self.eos_token_id]

    def _collect_allowed_tokens(self, parser: CharacterLevelParser, tree_node: TokenizerPrefixTreeNode, allowed_tokens: List[int], shortcut_key: Optional[Hashable]):
        """Recursively intersect the parser's allowed characters with the tokenizer prefix tree."""
        allowed_tokens.extend(tree_node.tokens)
        allowed_characters = parser.get_allowed_characters()
        relevant_characters = tree_node.children.keys()
        # This next line is the heart of the traversal algorithm. We only explore paths that are shared by both the parser and the tokenizer.
        characters_to_explore = set(relevant_characters).intersection(allowed_characters)

        # Performance optimization: If we are in JSON freetext, all of the tokens that don't contain quote, or end with quote, are legal, so we take
        # their cached list. If the quote character is allowed, we only need to dynamically explore the cases where the string starts with a quote.
        # This breaks the elegance of the API, but otherwise it is a huge performance hit.
        if isinstance(shortcut_key, tuple) and shortcut_key[0] == 'json_freetext':
            assert len(shortcut_key) == 4
            _, cur_len, min_len, max_len = shortcut_key
            cache = self.tokenizer_tree.json_freetext_tokens

            min_remaining = min(cache.max_token_len, max(0, min_len - cur_len))  # no " allowed before this many chars
            max_allowed_len = min(cache.max_token_len, max_len - cur_len)  # max new characters allowed (before ")

            allowed_tokens.extend(cache.lookup_allowed_tokens(min_remaining, max_allowed_len))
            characters_to_explore = characters_to_explore.intersection(['"'])

        for character in characters_to_explore:
            next_parser = parser.add_character(character)
            next_tree_node = tree_node.children[character]
            self._collect_allowed_tokens(next_parser, next_tree_node, allowed_tokens, None)

    def _apply_new_characters(self, state: 'TokenEnforcer.OutputTensorState', token_sequence: List[int]):
        """Advance a previous state's parser by the characters the newest token contributed."""
        new_state = TokenEnforcer.OutputTensorState(parser=state.parser)
        new_token = token_sequence[-1]
        if new_token in self.tokenizer_tree.new_word_tokens:
            new_state.current_word_tokens = [new_token]
            new_characters = self.tokenizer_tree.tokens_to_strs[new_token]
        else:
            # Mid-word token: decode before/after and diff to get the new characters,
            # since subword tokens may not decode independently.
            new_state.current_word_tokens = state.current_word_tokens + [new_token]
            prev_decoded = self.decoder(state.current_word_tokens)
            new_decoded = self.decoder(new_state.current_word_tokens)
            new_characters = new_decoded[len(prev_decoded):]
        for character in new_characters:
            try:
                new_state.parser = new_state.parser.add_character(character)
            except Exception as e:
                # This can happen in beam / batch scenarios, when some of the batches finished but others are continuing.
                logging.debug(f"Received an invalid character '{character}', switching to ForceStopParser (Exception:{e})")
                new_state.parser = ForceStopParser()
        return new_state
165
+
166
+
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest1.cpython-310.pyc ADDED
Binary file (841 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest10.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest11.cpython-310.pyc ADDED
Binary file (764 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest12.cpython-310.pyc ADDED
Binary file (798 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest2.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest3.cpython-310.pyc ADDED
Binary file (1.48 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest4.cpython-310.pyc ADDED
Binary file (875 Bytes). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest5.cpython-310.pyc ADDED
Binary file (2.59 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest6.cpython-310.pyc ADDED
Binary file (2.18 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest7.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest8.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/__pycache__/ruletest9.cpython-310.pyc ADDED
Binary file (1.96 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/chaos_pendulum.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/double_pendulum.cpython-310.pyc ADDED
Binary file (1.57 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/mass_spring_damper.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/non_min_pendulum.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.al ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ CONSTANTS G,LB,W,H
2
+ MOTIONVARIABLES' THETA'',PHI'',OMEGA',ALPHA'
3
+ NEWTONIAN N
4
+ BODIES A,B
5
+ SIMPROT(N,A,2,THETA)
6
+ SIMPROT(A,B,3,PHI)
7
+ POINT O
8
+ LA = (LB-H/2)/2
9
+ P_O_AO> = LA*A3>
10
+ P_O_BO> = LB*A3>
11
+ OMEGA = THETA'
12
+ ALPHA = PHI'
13
+ W_A_N> = OMEGA*N2>
14
+ W_B_A> = ALPHA*A3>
15
+ V_O_N> = 0>
16
+ V2PTS(N, A, O, AO)
17
+ V2PTS(N, A, O, BO)
18
+ MASS A=MA, B=MB
19
+ IAXX = 1/12*MA*(2*LA)^2
20
+ IAYY = IAXX
21
+ IAZZ = 0
22
+ IBXX = 1/12*MB*H^2
23
+ IBYY = 1/12*MB*(W^2+H^2)
24
+ IBZZ = 1/12*MB*W^2
25
+ INERTIA A, IAXX, IAYY, IAZZ
26
+ INERTIA B, IBXX, IBYY, IBZZ
27
+ GRAVITY(G*N3>)
28
+ ZERO = FR() + FRSTAR()
29
+ KANE()
30
+ INPUT LB=0.2,H=0.1,W=0.2,MA=0.01,MB=0.1,G=9.81
31
+ INPUT THETA = 90 DEG, PHI = 0.5 DEG, OMEGA=0, ALPHA=0
32
+ INPUT TFINAL=10, INTEGSTP=0.02
33
+ CODE DYNAMICS() some_filename.c
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ g, lb, w, h = _sm.symbols('g lb w h', real=True)
7
+ theta, phi, omega, alpha = _me.dynamicsymbols('theta phi omega alpha')
8
+ theta_d, phi_d, omega_d, alpha_d = _me.dynamicsymbols('theta_ phi_ omega_ alpha_', 1)
9
+ theta_dd, phi_dd = _me.dynamicsymbols('theta_ phi_', 2)
10
+ frame_n = _me.ReferenceFrame('n')
11
+ body_a_cm = _me.Point('a_cm')
12
+ body_a_cm.set_vel(frame_n, 0)
13
+ body_a_f = _me.ReferenceFrame('a_f')
14
+ body_a = _me.RigidBody('a', body_a_cm, body_a_f, _sm.symbols('m'), (_me.outer(body_a_f.x,body_a_f.x),body_a_cm))
15
+ body_b_cm = _me.Point('b_cm')
16
+ body_b_cm.set_vel(frame_n, 0)
17
+ body_b_f = _me.ReferenceFrame('b_f')
18
+ body_b = _me.RigidBody('b', body_b_cm, body_b_f, _sm.symbols('m'), (_me.outer(body_b_f.x,body_b_f.x),body_b_cm))
19
+ body_a_f.orient(frame_n, 'Axis', [theta, frame_n.y])
20
+ body_b_f.orient(body_a_f, 'Axis', [phi, body_a_f.z])
21
+ point_o = _me.Point('o')
22
+ la = (lb-h/2)/2
23
+ body_a_cm.set_pos(point_o, la*body_a_f.z)
24
+ body_b_cm.set_pos(point_o, lb*body_a_f.z)
25
+ body_a_f.set_ang_vel(frame_n, omega*frame_n.y)
26
+ body_b_f.set_ang_vel(body_a_f, alpha*body_a_f.z)
27
+ point_o.set_vel(frame_n, 0)
28
+ body_a_cm.v2pt_theory(point_o,frame_n,body_a_f)
29
+ body_b_cm.v2pt_theory(point_o,frame_n,body_a_f)
30
+ ma = _sm.symbols('ma')
31
+ body_a.mass = ma
32
+ mb = _sm.symbols('mb')
33
+ body_b.mass = mb
34
+ iaxx = 1/12*ma*(2*la)**2
35
+ iayy = iaxx
36
+ iazz = 0
37
+ ibxx = 1/12*mb*h**2
38
+ ibyy = 1/12*mb*(w**2+h**2)
39
+ ibzz = 1/12*mb*w**2
40
+ body_a.inertia = (_me.inertia(body_a_f, iaxx, iayy, iazz, 0, 0, 0), body_a_cm)
41
+ body_b.inertia = (_me.inertia(body_b_f, ibxx, ibyy, ibzz, 0, 0, 0), body_b_cm)
42
+ force_a = body_a.mass*(g*frame_n.z)
43
+ force_b = body_b.mass*(g*frame_n.z)
44
+ kd_eqs = [theta_d - omega, phi_d - alpha]
45
+ forceList = [(body_a.masscenter,body_a.mass*(g*frame_n.z)), (body_b.masscenter,body_b.mass*(g*frame_n.z))]
46
+ kane = _me.KanesMethod(frame_n, q_ind=[theta,phi], u_ind=[omega, alpha], kd_eqs = kd_eqs)
47
+ fr, frstar = kane.kanes_equations([body_a, body_b], forceList)
48
+ zero = fr+frstar
49
+ from pydy.system import System
50
+ sys = System(kane, constants = {g:9.81, lb:0.2, w:0.2, h:0.1, ma:0.01, mb:0.1},
51
+ specifieds={},
52
+ initial_conditions={theta:_np.deg2rad(90), phi:_np.deg2rad(0.5), omega:0, alpha:0},
53
+ times = _np.linspace(0.0, 10, 10/0.02))
54
+
55
+ y=sys.integrate()
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.al ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MOTIONVARIABLES' Q{2}', U{2}'
2
+ CONSTANTS L,M,G
3
+ NEWTONIAN N
4
+ FRAMES A,B
5
+ SIMPROT(N, A, 3, Q1)
6
+ SIMPROT(N, B, 3, Q2)
7
+ W_A_N>=U1*N3>
8
+ W_B_N>=U2*N3>
9
+ POINT O
10
+ PARTICLES P,R
11
+ P_O_P> = L*A1>
12
+ P_P_R> = L*B1>
13
+ V_O_N> = 0>
14
+ V2PTS(N, A, O, P)
15
+ V2PTS(N, B, P, R)
16
+ MASS P=M, R=M
17
+ Q1' = U1
18
+ Q2' = U2
19
+ GRAVITY(G*N1>)
20
+ ZERO = FR() + FRSTAR()
21
+ KANE()
22
+ INPUT M=1,G=9.81,L=1
23
+ INPUT Q1=.1,Q2=.2,U1=0,U2=0
24
+ INPUT TFINAL=10, INTEGSTP=.01
25
+ CODE DYNAMICS() some_filename.c
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ q1, q2, u1, u2 = _me.dynamicsymbols('q1 q2 u1 u2')
7
+ q1_d, q2_d, u1_d, u2_d = _me.dynamicsymbols('q1_ q2_ u1_ u2_', 1)
8
+ l, m, g = _sm.symbols('l m g', real=True)
9
+ frame_n = _me.ReferenceFrame('n')
10
+ frame_a = _me.ReferenceFrame('a')
11
+ frame_b = _me.ReferenceFrame('b')
12
+ frame_a.orient(frame_n, 'Axis', [q1, frame_n.z])
13
+ frame_b.orient(frame_n, 'Axis', [q2, frame_n.z])
14
+ frame_a.set_ang_vel(frame_n, u1*frame_n.z)
15
+ frame_b.set_ang_vel(frame_n, u2*frame_n.z)
16
+ point_o = _me.Point('o')
17
+ particle_p = _me.Particle('p', _me.Point('p_pt'), _sm.Symbol('m'))
18
+ particle_r = _me.Particle('r', _me.Point('r_pt'), _sm.Symbol('m'))
19
+ particle_p.point.set_pos(point_o, l*frame_a.x)
20
+ particle_r.point.set_pos(particle_p.point, l*frame_b.x)
21
+ point_o.set_vel(frame_n, 0)
22
+ particle_p.point.v2pt_theory(point_o,frame_n,frame_a)
23
+ particle_r.point.v2pt_theory(particle_p.point,frame_n,frame_b)
24
+ particle_p.mass = m
25
+ particle_r.mass = m
26
+ force_p = particle_p.mass*(g*frame_n.x)
27
+ force_r = particle_r.mass*(g*frame_n.x)
28
+ kd_eqs = [q1_d - u1, q2_d - u2]
29
+ forceList = [(particle_p.point,particle_p.mass*(g*frame_n.x)), (particle_r.point,particle_r.mass*(g*frame_n.x))]
30
+ kane = _me.KanesMethod(frame_n, q_ind=[q1,q2], u_ind=[u1, u2], kd_eqs = kd_eqs)
31
+ fr, frstar = kane.kanes_equations([particle_p, particle_r], forceList)
32
+ zero = fr+frstar
33
+ from pydy.system import System
34
+ sys = System(kane, constants = {l:1, m:1, g:9.81},
35
+ specifieds={},
36
+ initial_conditions={q1:.1, q2:.2, u1:0, u2:0},
37
+ times = _np.linspace(0.0, 10, 10/.01))
38
+
39
+ y=sys.integrate()
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.al ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ CONSTANTS M,K,B,G
2
+ MOTIONVARIABLES' POSITION',SPEED'
3
+ VARIABLES O
4
+ FORCE = O*SIN(T)
5
+ NEWTONIAN CEILING
6
+ POINTS ORIGIN
7
+ V_ORIGIN_CEILING> = 0>
8
+ PARTICLES BLOCK
9
+ P_ORIGIN_BLOCK> = POSITION*CEILING1>
10
+ MASS BLOCK=M
11
+ V_BLOCK_CEILING>=SPEED*CEILING1>
12
+ POSITION' = SPEED
13
+ FORCE_MAGNITUDE = M*G-K*POSITION-B*SPEED+FORCE
14
+ FORCE_BLOCK>=EXPLICIT(FORCE_MAGNITUDE*CEILING1>)
15
+ ZERO = FR() + FRSTAR()
16
+ KANE()
17
+ INPUT TFINAL=10.0, INTEGSTP=0.01
18
+ INPUT M=1.0, K=1.0, B=0.2, G=9.8, POSITION=0.1, SPEED=-1.0, O=2
19
+ CODE DYNAMICS() dummy_file.c
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ m, k, b, g = _sm.symbols('m k b g', real=True)
7
+ position, speed = _me.dynamicsymbols('position speed')
8
+ position_d, speed_d = _me.dynamicsymbols('position_ speed_', 1)
9
+ o = _me.dynamicsymbols('o')
10
+ force = o*_sm.sin(_me.dynamicsymbols._t)
11
+ frame_ceiling = _me.ReferenceFrame('ceiling')
12
+ point_origin = _me.Point('origin')
13
+ point_origin.set_vel(frame_ceiling, 0)
14
+ particle_block = _me.Particle('block', _me.Point('block_pt'), _sm.Symbol('m'))
15
+ particle_block.point.set_pos(point_origin, position*frame_ceiling.x)
16
+ particle_block.mass = m
17
+ particle_block.point.set_vel(frame_ceiling, speed*frame_ceiling.x)
18
+ force_magnitude = m*g-k*position-b*speed+force
19
+ force_block = (force_magnitude*frame_ceiling.x).subs({position_d:speed})
20
+ kd_eqs = [position_d - speed]
21
+ forceList = [(particle_block.point,(force_magnitude*frame_ceiling.x).subs({position_d:speed}))]
22
+ kane = _me.KanesMethod(frame_ceiling, q_ind=[position], u_ind=[speed], kd_eqs = kd_eqs)
23
+ fr, frstar = kane.kanes_equations([particle_block], forceList)
24
+ zero = fr+frstar
25
+ from pydy.system import System
26
+ sys = System(kane, constants = {m:1.0, k:1.0, b:0.2, g:9.8},
27
+ specifieds={_me.dynamicsymbols('t'):lambda x, t: t, o:2},
28
+ initial_conditions={position:0.1, speed:-1*1.0},
29
+ times = _np.linspace(0.0, 10.0, 10.0/0.01))
30
+
31
+ y=sys.integrate()
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.al ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MOTIONVARIABLES' Q{2}''
2
+ CONSTANTS L,M,G
3
+ NEWTONIAN N
4
+ POINT PN
5
+ V_PN_N> = 0>
6
+ THETA1 = ATAN(Q2/Q1)
7
+ FRAMES A
8
+ SIMPROT(N, A, 3, THETA1)
9
+ PARTICLES P
10
+ P_PN_P> = Q1*N1>+Q2*N2>
11
+ MASS P=M
12
+ V_P_N>=DT(P_P_PN>, N)
13
+ F_V = DOT(EXPRESS(V_P_N>,A), A1>)
14
+ GRAVITY(G*N1>)
15
+ DEPENDENT[1] = F_V
16
+ CONSTRAIN(DEPENDENT[Q1'])
17
+ ZERO=FR()+FRSTAR()
18
+ F_C = MAG(P_P_PN>)-L
19
+ CONFIG[1]=F_C
20
+ ZERO[2]=CONFIG[1]
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ q1, q2 = _me.dynamicsymbols('q1 q2')
7
+ q1_d, q2_d = _me.dynamicsymbols('q1_ q2_', 1)
8
+ q1_dd, q2_dd = _me.dynamicsymbols('q1_ q2_', 2)
9
+ l, m, g = _sm.symbols('l m g', real=True)
10
+ frame_n = _me.ReferenceFrame('n')
11
+ point_pn = _me.Point('pn')
12
+ point_pn.set_vel(frame_n, 0)
13
+ theta1 = _sm.atan(q2/q1)
14
+ frame_a = _me.ReferenceFrame('a')
15
+ frame_a.orient(frame_n, 'Axis', [theta1, frame_n.z])
16
+ particle_p = _me.Particle('p', _me.Point('p_pt'), _sm.Symbol('m'))
17
+ particle_p.point.set_pos(point_pn, q1*frame_n.x+q2*frame_n.y)
18
+ particle_p.mass = m
19
+ particle_p.point.set_vel(frame_n, (point_pn.pos_from(particle_p.point)).dt(frame_n))
20
+ f_v = _me.dot((particle_p.point.vel(frame_n)).express(frame_a), frame_a.x)
21
+ force_p = particle_p.mass*(g*frame_n.x)
22
+ dependent = _sm.Matrix([[0]])
23
+ dependent[0] = f_v
24
+ velocity_constraints = [i for i in dependent]
25
+ u_q1_d = _me.dynamicsymbols('u_q1_d')
26
+ u_q2_d = _me.dynamicsymbols('u_q2_d')
27
+ kd_eqs = [q1_d-u_q1_d, q2_d-u_q2_d]
28
+ forceList = [(particle_p.point,particle_p.mass*(g*frame_n.x))]
29
+ kane = _me.KanesMethod(frame_n, q_ind=[q1,q2], u_ind=[u_q2_d], u_dependent=[u_q1_d], kd_eqs = kd_eqs, velocity_constraints = velocity_constraints)
30
+ fr, frstar = kane.kanes_equations([particle_p], forceList)
31
+ zero = fr+frstar
32
+ f_c = point_pn.pos_from(particle_p.point).magnitude()-l
33
+ config = _sm.Matrix([[0]])
34
+ config[0] = f_c
35
+ zero = zero.row_insert(zero.shape[0], _sm.Matrix([[0]]))
36
+ zero[zero.shape[0]-1] = config[0]
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest1.al ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ % ruletest1.al
2
+ CONSTANTS F = 3, G = 9.81
3
+ CONSTANTS A, B
4
+ CONSTANTS S, S1, S2+, S3+, S4-
5
+ CONSTANTS K{4}, L{1:3}, P{1:2,1:3}
6
+ CONSTANTS C{2,3}
7
+ E1 = A*F + S2 - G
8
+ E2 = F^2 + K3*K2*G
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest1.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ f = _sm.S(3)
7
+ g = _sm.S(9.81)
8
+ a, b = _sm.symbols('a b', real=True)
9
+ s, s1 = _sm.symbols('s s1', real=True)
10
+ s2, s3 = _sm.symbols('s2 s3', real=True, nonnegative=True)
11
+ s4 = _sm.symbols('s4', real=True, nonpositive=True)
12
+ k1, k2, k3, k4, l1, l2, l3, p11, p12, p13, p21, p22, p23 = _sm.symbols('k1 k2 k3 k4 l1 l2 l3 p11 p12 p13 p21 p22 p23', real=True)
13
+ c11, c12, c13, c21, c22, c23 = _sm.symbols('c11 c12 c13 c21 c22 c23', real=True)
14
+ e1 = a*f+s2-g
15
+ e2 = f**2+k3*k2*g
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest10.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ x, y = _me.dynamicsymbols('x y')
7
+ a, b = _sm.symbols('a b', real=True)
8
+ e = a*(b*x+y)**2
9
+ m = _sm.Matrix([e,e]).reshape(2, 1)
10
+ e = e.expand()
11
+ m = _sm.Matrix([i.expand() for i in m]).reshape((m).shape[0], (m).shape[1])
12
+ e = _sm.factor(e, x)
13
+ m = _sm.Matrix([_sm.factor(i,x) for i in m]).reshape((m).shape[0], (m).shape[1])
14
+ eqn = _sm.Matrix([[0]])
15
+ eqn[0] = a*x+b*y
16
+ eqn = eqn.row_insert(eqn.shape[0], _sm.Matrix([[0]]))
17
+ eqn[eqn.shape[0]-1] = 2*a*x-3*b*y
18
+ print(_sm.solve(eqn,x,y))
19
+ rhs_y = _sm.solve(eqn,x,y)[y]
20
+ e = (x+y)**2+2*x**2
21
+ e.collect(x)
22
+ a, b, c = _sm.symbols('a b c', real=True)
23
+ m = _sm.Matrix([a,b,c,0]).reshape(2, 2)
24
+ m2 = _sm.Matrix([i.subs({a:1,b:2,c:3}) for i in m]).reshape((m).shape[0], (m).shape[1])
25
+ eigvalue = _sm.Matrix([i.evalf() for i in (m2).eigenvals().keys()])
26
+ eigvec = _sm.Matrix([i[2][0].evalf() for i in (m2).eigenvects()]).reshape(m2.shape[0], m2.shape[1])
27
+ frame_n = _me.ReferenceFrame('n')
28
+ frame_a = _me.ReferenceFrame('a')
29
+ frame_a.orient(frame_n, 'Axis', [x, frame_n.x])
30
+ frame_a.orient(frame_n, 'Axis', [_sm.pi/2, frame_n.x])
31
+ c1, c2, c3 = _sm.symbols('c1 c2 c3', real=True)
32
+ v = c1*frame_a.x+c2*frame_a.y+c3*frame_a.z
33
+ point_o = _me.Point('o')
34
+ point_p = _me.Point('p')
35
+ point_o.set_pos(point_p, c1*frame_a.x)
36
+ v = (v).express(frame_n)
37
+ point_o.set_pos(point_p, (point_o.pos_from(point_p)).express(frame_n))
38
+ frame_a.set_ang_vel(frame_n, c3*frame_a.z)
39
+ print(frame_n.ang_vel_in(frame_a))
40
+ point_p.v2pt_theory(point_o,frame_n,frame_a)
41
+ particle_p1 = _me.Particle('p1', _me.Point('p1_pt'), _sm.Symbol('m'))
42
+ particle_p2 = _me.Particle('p2', _me.Point('p2_pt'), _sm.Symbol('m'))
43
+ particle_p2.point.v2pt_theory(particle_p1.point,frame_n,frame_a)
44
+ point_p.a2pt_theory(particle_p1.point,frame_n,frame_a)
45
+ body_b1_cm = _me.Point('b1_cm')
46
+ body_b1_cm.set_vel(frame_n, 0)
47
+ body_b1_f = _me.ReferenceFrame('b1_f')
48
+ body_b1 = _me.RigidBody('b1', body_b1_cm, body_b1_f, _sm.symbols('m'), (_me.outer(body_b1_f.x,body_b1_f.x),body_b1_cm))
49
+ body_b2_cm = _me.Point('b2_cm')
50
+ body_b2_cm.set_vel(frame_n, 0)
51
+ body_b2_f = _me.ReferenceFrame('b2_f')
52
+ body_b2 = _me.RigidBody('b2', body_b2_cm, body_b2_f, _sm.symbols('m'), (_me.outer(body_b2_f.x,body_b2_f.x),body_b2_cm))
53
+ g = _sm.symbols('g', real=True)
54
+ force_p1 = particle_p1.mass*(g*frame_n.x)
55
+ force_p2 = particle_p2.mass*(g*frame_n.x)
56
+ force_b1 = body_b1.mass*(g*frame_n.x)
57
+ force_b2 = body_b2.mass*(g*frame_n.x)
58
+ z = _me.dynamicsymbols('z')
59
+ v = x*frame_a.x+y*frame_a.z
60
+ point_o.set_pos(point_p, x*frame_a.x+y*frame_a.y)
61
+ v = (v).subs({x:2*z, y:z})
62
+ point_o.set_pos(point_p, (point_o.pos_from(point_p)).subs({x:2*z, y:z}))
63
+ force_o = -1*(x*y*frame_a.x)
64
+ force_p1 = particle_p1.mass*(g*frame_n.x)+ x*y*frame_a.x
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest11.al ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ VARIABLES X, Y
2
+ CONSTANTS A{1:2, 1:2}, B{1:2}
3
+ EQN[1] = A11*x + A12*y - B1
4
+ EQN[2] = A21*x + A22*y - B2
5
+ INPUT A11=2, A12=5, A21=3, A22=4, B1=7, B2=6
6
+ CODE ALGEBRAIC(EQN, X, Y) some_filename.c
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest2.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ x1, x2 = _me.dynamicsymbols('x1 x2')
7
+ f1 = x1*x2+3*x1**2
8
+ f2 = x1*_me.dynamicsymbols._t+x2*_me.dynamicsymbols._t**2
9
+ x, y = _me.dynamicsymbols('x y')
10
+ x_d, y_d = _me.dynamicsymbols('x_ y_', 1)
11
+ y_dd = _me.dynamicsymbols('y_', 2)
12
+ q1, q2, q3, u1, u2 = _me.dynamicsymbols('q1 q2 q3 u1 u2')
13
+ p1, p2 = _me.dynamicsymbols('p1 p2')
14
+ p1_d, p2_d = _me.dynamicsymbols('p1_ p2_', 1)
15
+ w1, w2, w3, r1, r2 = _me.dynamicsymbols('w1 w2 w3 r1 r2')
16
+ w1_d, w2_d, w3_d, r1_d, r2_d = _me.dynamicsymbols('w1_ w2_ w3_ r1_ r2_', 1)
17
+ r1_dd, r2_dd = _me.dynamicsymbols('r1_ r2_', 2)
18
+ c11, c12, c21, c22 = _me.dynamicsymbols('c11 c12 c21 c22')
19
+ d11, d12, d13 = _me.dynamicsymbols('d11 d12 d13')
20
+ j1, j2 = _me.dynamicsymbols('j1 j2')
21
+ n = _sm.symbols('n')
22
+ n = _sm.I
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest3.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ frame_a = _me.ReferenceFrame('a')
7
+ frame_b = _me.ReferenceFrame('b')
8
+ frame_n = _me.ReferenceFrame('n')
9
+ x1, x2, x3 = _me.dynamicsymbols('x1 x2 x3')
10
+ l = _sm.symbols('l', real=True)
11
+ v1 = x1*frame_a.x+x2*frame_a.y+x3*frame_a.z
12
+ v2 = x1*frame_b.x+x2*frame_b.y+x3*frame_b.z
13
+ v3 = x1*frame_n.x+x2*frame_n.y+x3*frame_n.z
14
+ v = v1+v2+v3
15
+ point_c = _me.Point('c')
16
+ point_d = _me.Point('d')
17
+ point_po1 = _me.Point('po1')
18
+ point_po2 = _me.Point('po2')
19
+ point_po3 = _me.Point('po3')
20
+ particle_l = _me.Particle('l', _me.Point('l_pt'), _sm.Symbol('m'))
21
+ particle_p1 = _me.Particle('p1', _me.Point('p1_pt'), _sm.Symbol('m'))
22
+ particle_p2 = _me.Particle('p2', _me.Point('p2_pt'), _sm.Symbol('m'))
23
+ particle_p3 = _me.Particle('p3', _me.Point('p3_pt'), _sm.Symbol('m'))
24
+ body_s_cm = _me.Point('s_cm')
25
+ body_s_cm.set_vel(frame_n, 0)
26
+ body_s_f = _me.ReferenceFrame('s_f')
27
+ body_s = _me.RigidBody('s', body_s_cm, body_s_f, _sm.symbols('m'), (_me.outer(body_s_f.x,body_s_f.x),body_s_cm))
28
+ body_r1_cm = _me.Point('r1_cm')
29
+ body_r1_cm.set_vel(frame_n, 0)
30
+ body_r1_f = _me.ReferenceFrame('r1_f')
31
+ body_r1 = _me.RigidBody('r1', body_r1_cm, body_r1_f, _sm.symbols('m'), (_me.outer(body_r1_f.x,body_r1_f.x),body_r1_cm))
32
+ body_r2_cm = _me.Point('r2_cm')
33
+ body_r2_cm.set_vel(frame_n, 0)
34
+ body_r2_f = _me.ReferenceFrame('r2_f')
35
+ body_r2 = _me.RigidBody('r2', body_r2_cm, body_r2_f, _sm.symbols('m'), (_me.outer(body_r2_f.x,body_r2_f.x),body_r2_cm))
36
+ v4 = x1*body_s_f.x+x2*body_s_f.y+x3*body_s_f.z
37
+ body_s_cm.set_pos(point_c, l*frame_n.x)
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest5.al ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ % ruletest5.al
2
+ VARIABLES X', Y'
3
+
4
+ E1 = (X+Y)^2 + (X-Y)^3
5
+ E2 = (X-Y)^2
6
+ E3 = X^2 + Y^2 + 2*X*Y
7
+
8
+ M1 = [E1;E2]
9
+ M2 = [(X+Y)^2,(X-Y)^2]
10
+ M3 = M1 + [X;Y]
11
+
12
+ AM = EXPAND(M1)
13
+ CM = EXPAND([(X+Y)^2,(X-Y)^2])
14
+ EM = EXPAND(M1 + [X;Y])
15
+ F = EXPAND(E1)
16
+ G = EXPAND(E2)
17
+
18
+ A = FACTOR(E3, X)
19
+ BM = FACTOR(M1, X)
20
+ CM = FACTOR(M1 + [X;Y], X)
21
+
22
+ A = D(E3, X)
23
+ B = D(E3, Y)
24
+ CM = D(M2, X)
25
+ DM = D(M1 + [X;Y], X)
26
+ FRAMES A, B
27
+ A_B = [1,0,0;1,0,0;1,0,0]
28
+ V1> = X*A1> + Y*A2> + X*Y*A3>
29
+ E> = D(V1>, X, B)
30
+ FM = DT(M1)
31
+ GM = DT([(X+Y)^2,(X-Y)^2])
32
+ H> = DT(V1>, B)
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest6.al ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ % ruletest6.al
2
+ VARIABLES Q{2}
3
+ VARIABLES X,Y,Z
4
+ Q1 = X^2 + Y^2
5
+ Q2 = X-Y
6
+ E = Q1 + Q2
7
+ A = EXPLICIT(E)
8
+ E2 = COS(X)
9
+ E3 = COS(X*Y)
10
+ A = TAYLOR(E2, 0:2, X=0)
11
+ B = TAYLOR(E3, 0:2, X=0, Y=0)
12
+
13
+ E = EXPAND((X+Y)^2)
14
+ A = EVALUATE(E, X=1, Y=Z)
15
+ BM = EVALUATE([E;2*E], X=1, Y=Z)
16
+
17
+ E = Q1 + Q2
18
+ A = EVALUATE(E, X=2, Y=Z^2)
19
+
20
+ CONSTANTS J,K,L
21
+ P1 = POLYNOMIAL([J,K,L],X)
22
+ P2 = POLYNOMIAL(J*X+K,X,1)
23
+
24
+ ROOT1 = ROOTS(P1, X, 2)
25
+ ROOT2 = ROOTS([1;2;3])
26
+
27
+ M = [1,2,3,4;5,6,7,8;9,10,11,12;13,14,15,16]
28
+
29
+ AM = TRANSPOSE(M) + M
30
+ BM = EIG(M)
31
+ C1 = DIAGMAT(4, 1)
32
+ C2 = DIAGMAT(3, 4, 2)
33
+ DM = INV(M+C1)
34
+ E = DET(M+C1) + TRACE([1,0;0,1])
35
+ F = ELEMENT(M, 2, 3)
36
+
37
+ A = COLS(M)
38
+ BM = COLS(M, 1)
39
+ CM = COLS(M, 1, 2:4, 3)
40
+ DM = ROWS(M, 1)
41
+ EM = ROWS(M, 1, 2:4, 3)
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest6.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ q1, q2 = _me.dynamicsymbols('q1 q2')
7
+ x, y, z = _me.dynamicsymbols('x y z')
8
+ e = q1+q2
9
+ a = (e).subs({q1:x**2+y**2, q2:x-y})
10
+ e2 = _sm.cos(x)
11
+ e3 = _sm.cos(x*y)
12
+ a = (e2).series(x, 0, 2).removeO()
13
+ b = (e3).series(x, 0, 2).removeO().series(y, 0, 2).removeO()
14
+ e = ((x+y)**2).expand()
15
+ a = (e).subs({q1:x**2+y**2,q2:x-y}).subs({x:1,y:z})
16
+ bm = _sm.Matrix([i.subs({x:1,y:z}) for i in _sm.Matrix([e,2*e]).reshape(2, 1)]).reshape((_sm.Matrix([e,2*e]).reshape(2, 1)).shape[0], (_sm.Matrix([e,2*e]).reshape(2, 1)).shape[1])
17
+ e = q1+q2
18
+ a = (e).subs({q1:x**2+y**2,q2:x-y}).subs({x:2,y:z**2})
19
+ j, k, l = _sm.symbols('j k l', real=True)
20
+ p1 = _sm.Poly(_sm.Matrix([j,k,l]).reshape(1, 3), x)
21
+ p2 = _sm.Poly(j*x+k, x)
22
+ root1 = [i.evalf() for i in _sm.solve(p1, x)]
23
+ root2 = [i.evalf() for i in _sm.solve(_sm.Poly(_sm.Matrix([1,2,3]).reshape(3, 1), x),x)]
24
+ m = _sm.Matrix([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]).reshape(4, 4)
25
+ am = (m).T+m
26
+ bm = _sm.Matrix([i.evalf() for i in (m).eigenvals().keys()])
27
+ c1 = _sm.diag(1,1,1,1)
28
+ c2 = _sm.Matrix([2 if i==j else 0 for i in range(3) for j in range(4)]).reshape(3, 4)
29
+ dm = (m+c1)**(-1)
30
+ e = (m+c1).det()+(_sm.Matrix([1,0,0,1]).reshape(2, 2)).trace()
31
+ f = (m)[1,2]
32
+ a = (m).cols
33
+ bm = (m).col(0)
34
+ cm = _sm.Matrix([(m).T.row(0),(m).T.row(1),(m).T.row(2),(m).T.row(3),(m).T.row(2)])
35
+ dm = (m).row(0)
36
+ em = _sm.Matrix([(m).row(0),(m).row(1),(m).row(2),(m).row(3),(m).row(2)])
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/autolev/test-examples/ruletest9.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sympy.physics.mechanics as _me
2
+ import sympy as _sm
3
+ import math as m
4
+ import numpy as _np
5
+
6
+ frame_n = _me.ReferenceFrame('n')
7
+ frame_a = _me.ReferenceFrame('a')
8
+ a = 0
9
+ d = _me.inertia(frame_a, 1, 1, 1)
10
+ point_po1 = _me.Point('po1')
11
+ point_po2 = _me.Point('po2')
12
+ particle_p1 = _me.Particle('p1', _me.Point('p1_pt'), _sm.Symbol('m'))
13
+ particle_p2 = _me.Particle('p2', _me.Point('p2_pt'), _sm.Symbol('m'))
14
+ c1, c2, c3 = _me.dynamicsymbols('c1 c2 c3')
15
+ c1_d, c2_d, c3_d = _me.dynamicsymbols('c1_ c2_ c3_', 1)
16
+ body_r_cm = _me.Point('r_cm')
17
+ body_r_cm.set_vel(frame_n, 0)
18
+ body_r_f = _me.ReferenceFrame('r_f')
19
+ body_r = _me.RigidBody('r', body_r_cm, body_r_f, _sm.symbols('m'), (_me.outer(body_r_f.x,body_r_f.x),body_r_cm))
20
+ point_po2.set_pos(particle_p1.point, c1*frame_a.x)
21
+ v = 2*point_po2.pos_from(particle_p1.point)+c2*frame_a.y
22
+ frame_a.set_ang_vel(frame_n, c3*frame_a.z)
23
+ v = 2*frame_a.ang_vel_in(frame_n)+c2*frame_a.y
24
+ body_r_f.set_ang_vel(frame_n, c3*frame_a.z)
25
+ v = 2*body_r_f.ang_vel_in(frame_n)+c2*frame_a.y
26
+ frame_a.set_ang_acc(frame_n, (frame_a.ang_vel_in(frame_n)).dt(frame_a))
27
+ v = 2*frame_a.ang_acc_in(frame_n)+c2*frame_a.y
28
+ particle_p1.point.set_vel(frame_a, c1*frame_a.x+c3*frame_a.y)
29
+ body_r_cm.set_acc(frame_n, c2*frame_a.y)
30
+ v_a = _me.cross(body_r_cm.acc(frame_n), particle_p1.point.vel(frame_a))
31
+ x_b_c = v_a
32
+ x_b_d = 2*x_b_c
33
+ a_b_c_d_e = x_b_d*2
34
+ a_b_c = 2*c1*c2*c3
35
+ a_b_c += 2*c1
36
+ a_b_c = 3*c1
37
+ q1, q2, u1, u2 = _me.dynamicsymbols('q1 q2 u1 u2')
38
+ q1_d, q2_d, u1_d, u2_d = _me.dynamicsymbols('q1_ q2_ u1_ u2_', 1)
39
+ x, y = _me.dynamicsymbols('x y')
40
+ x_d, y_d = _me.dynamicsymbols('x_ y_', 1)
41
+ x_dd, y_dd = _me.dynamicsymbols('x_ y_', 2)
42
+ yy = _me.dynamicsymbols('yy')
43
+ yy = x*x_d**2+1
44
+ m = _sm.Matrix([[0]])
45
+ m[0] = 2*x
46
+ m = m.row_insert(m.shape[0], _sm.Matrix([[0]]))
47
+ m[m.shape[0]-1] = 2*y
48
+ a = 2*m[0]
49
+ m = _sm.Matrix([1,2,3,4,5,6,7,8,9]).reshape(3, 3)
50
+ m[0,1] = 5
51
+ a = m[0, 1]*2
52
+ force_ro = q1*frame_n.x
53
+ torque_a = q2*frame_n.z
54
+ force_ro = q1*frame_n.x + q2*frame_n.y
55
+ f = force_ro*2
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/fortran/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Used for translating Fortran source code into a SymPy expression. """
deepseekvl2/lib/python3.10/site-packages/sympy/parsing/fortran/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (256 Bytes). View file