koichi12 commited on
Commit
bb9058c
·
verified ·
1 Parent(s): b163abe

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llm_tutorial/merged_models/en-ja-base_0.33/tokenizer_config.json +83 -0
  2. llm_tutorial/merged_models/en-ja-base_dare-linear/README.md +46 -0
  3. llm_tutorial/merged_models/en-ja-base_dare-linear/mergekit_config.yml +16 -0
  4. llm_tutorial/merged_models/en-ja-base_dare/special_tokens_map.json +10 -0
  5. llm_tutorial/merged_models/en-ja_ja-en_base_linear+instruct/special_tokens_map.json +51 -0
  6. llm_tutorial/merged_models/en-ja_ja-en_instruct_linear_0.33/README.md +43 -0
  7. llm_tutorial/merged_models/en-ja_ja-en_instruct_linear_0.33/special_tokens_map.json +51 -0
  8. llm_tutorial/merged_models/en-ja_ja-en_instruct_linear_0.5/special_tokens_map.json +51 -0
  9. llm_tutorial/merged_models/en-ja_merged/mergekit_config.yml +9 -0
  10. llm_tutorial/merged_models/en-ja_merged/tokenizer_config.json +83 -0
  11. llm_tutorial/merged_models4/ja-en_en-ja_ties01-instruct/mergekit_config.yml +12 -0
  12. llm_tutorial/merged_models4/ja-en_en-ja_ties01/README.md +42 -0
  13. llm_tutorial/merged_models4/ja-en_en-ja_ties01/model.safetensors.index.json +1 -0
  14. llm_tutorial/merged_models4/ja-en_en-ja_ties01/tokenizer.json +0 -0
  15. llm_tutorial/merged_models4/ja-en_en-ja_ties02/config.json +30 -0
  16. llm_tutorial/merged_models4/ja-en_en-ja_ties02/tokenizer.json +0 -0
  17. llm_tutorial/merged_models4/ja-en_en-ja_ties03-instruct/README.md +42 -0
  18. llm_tutorial/merged_models4/ja-en_en-ja_ties03/README.md +42 -0
  19. llm_tutorial/merged_models4/ja-en_en-ja_ties03/config.json +30 -0
  20. llm_tutorial/merged_models4/ja-en_en-ja_ties03/mergekit_config.yml +12 -0
  21. llm_tutorial/merged_models4/ja-en_en-ja_ties03/model.safetensors.index.json +1 -0
  22. llm_tutorial/merged_models4/ja-en_en-ja_ties04/README.md +42 -0
  23. llm_tutorial/merged_models4/ja-en_en-ja_ties04/mergekit_config.yml +12 -0
  24. llm_tutorial/merged_models4/ja-en_en-ja_ties04/model.safetensors.index.json +1 -0
  25. llm_tutorial/merged_models4/ja-en_en-ja_ties04/special_tokens_map.json +10 -0
  26. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/categories/tests/__pycache__/test_baseclasses.cpython-311.pyc +0 -0
  27. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/categories/tests/__pycache__/test_drawing.cpython-311.pyc +0 -0
  28. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/categories/tests/test_baseclasses.py +209 -0
  29. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/__pycache__/pynodes.cpython-311.pyc +0 -0
  30. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/__pycache__/pyutils.cpython-311.pyc +0 -0
  31. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/abstract_nodes.py +18 -0
  32. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/ast.py +1906 -0
  33. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/cnodes.py +156 -0
  34. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/cutils.py +8 -0
  35. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/futils.py +40 -0
  36. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/matrix_nodes.py +71 -0
  37. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/pynodes.py +11 -0
  38. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/scipy_nodes.py +79 -0
  39. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/combinatorics/tests/__pycache__/__init__.cpython-311.pyc +0 -0
  40. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/__pycache__/delta.cpython-311.pyc +0 -0
  41. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/delta.py +330 -0
  42. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/expr_with_intlimits.py +354 -0
  43. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/expr_with_limits.py +603 -0
  44. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/products.py +610 -0
  45. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/summations.py +1646 -0
  46. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/matrices/kind.py +97 -0
  47. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/ntheory/tests/__pycache__/test_factor_.cpython-311.pyc +0 -0
  48. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/plotting/backends/__pycache__/__init__.cpython-311.pyc +0 -0
  49. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/plotting/backends/__pycache__/base_backend.cpython-311.pyc +0 -0
  50. tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/plotting/backends/matplotlibbackend/__init__.py +5 -0
llm_tutorial/merged_models/en-ja-base_0.33/tokenizer_config.json ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "3": {
30
+ "content": "<MASK|LLM-jp>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "4": {
38
+ "content": "<PAD|LLM-jp>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "5": {
46
+ "content": "<CLS|LLM-jp>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "6": {
54
+ "content": "<SEP|LLM-jp>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "7": {
62
+ "content": "<EOD|LLM-jp>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ }
69
+ },
70
+ "bos_token": "<s>",
71
+ "clean_up_tokenization_spaces": false,
72
+ "cls_token": "<CLS|LLM-jp>",
73
+ "eod_token": "</s>",
74
+ "eos_token": "</s>",
75
+ "extra_ids": 0,
76
+ "mask_token": "<MASK|LLM-jp>",
77
+ "model_max_length": 1000000000000000019884624838656,
78
+ "pad_token": "<PAD|LLM-jp>",
79
+ "sep_token": "<SEP|LLM-jp>",
80
+ "sp_model_kwargs": {},
81
+ "tokenizer_class": "PreTrainedTokenizerFast",
82
+ "unk_token": "<unk>"
83
+ }
llm_tutorial/merged_models/en-ja-base_dare-linear/README.md ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: []
3
+ library_name: transformers
4
+ tags:
5
+ - mergekit
6
+ - merge
7
+
8
+ ---
9
+ # en-ja-base_dare-linear
10
+
11
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
12
+
13
+ ## Merge Details
14
+ ### Merge Method
15
+
16
+ This model was merged using the linear [DARE](https://arxiv.org/abs/2311.03099) merge method using /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b as a base.
17
+
18
+ ### Models Merged
19
+
20
+ The following models were included in the merge:
21
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_actual_3M-pairs/iter_0000698
22
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698
23
+
24
+ ### Configuration
25
+
26
+ The following YAML configuration was used to produce this model:
27
+
28
+ ```yaml
29
+ models:
30
+ - model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
31
+ # No parameters necessary for base model
32
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_actual_3M-pairs/iter_0000698
33
+ parameters:
34
+ density: 0.53
35
+ weight: 0.3
36
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698
37
+ parameters:
38
+ density: 0.53
39
+ weight: 0.3
40
+ merge_method: dare_linear
41
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
42
+ parameters:
43
+ int8_mask: true
44
+ dtype: bfloat16
45
+
46
+ ```
llm_tutorial/merged_models/en-ja-base_dare-linear/mergekit_config.yml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ models:
2
+ - model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
3
+ # No parameters necessary for base model
4
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_actual_3M-pairs/iter_0000698
5
+ parameters:
6
+ density: 0.53
7
+ weight: 0.3
8
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698
9
+ parameters:
10
+ density: 0.53
11
+ weight: 0.3
12
+ merge_method: dare_linear
13
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
14
+ parameters:
15
+ int8_mask: true
16
+ dtype: bfloat16
llm_tutorial/merged_models/en-ja-base_dare/special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<CLS|LLM-jp>",
4
+ "eod_token": "</s>",
5
+ "eos_token": "</s>",
6
+ "mask_token": "<MASK|LLM-jp>",
7
+ "pad_token": "<PAD|LLM-jp>",
8
+ "sep_token": "<SEP|LLM-jp>",
9
+ "unk_token": "<unk>"
10
+ }
llm_tutorial/merged_models/en-ja_ja-en_base_linear+instruct/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<CLS|LLM-jp>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<MASK|LLM-jp>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<PAD|LLM-jp>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "<SEP|LLM-jp>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
llm_tutorial/merged_models/en-ja_ja-en_instruct_linear_0.33/README.md ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: []
3
+ library_name: transformers
4
+ tags:
5
+ - mergekit
6
+ - merge
7
+
8
+ ---
9
+ # en-ja_ja-en_instruct_linear_0.33
10
+
11
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
12
+
13
+ ## Merge Details
14
+ ### Merge Method
15
+
16
+ This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method.
17
+
18
+ ### Models Merged
19
+
20
+ The following models were included in the merge:
21
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698
22
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_actual_3M-pairs/iter_0000698
23
+ * /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b-instruct
24
+
25
+ ### Configuration
26
+
27
+ The following YAML configuration was used to produce this model:
28
+
29
+ ```yaml
30
+ models:
31
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698
32
+ parameters:
33
+ weight: 0.33
34
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_actual_3M-pairs/iter_0000698
35
+ parameters:
36
+ weight: 0.33
37
+ - model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b-instruct
38
+ parameters:
39
+ weight: 0.33
40
+ merge_method: linear
41
+ dtype: float16
42
+
43
+ ```
llm_tutorial/merged_models/en-ja_ja-en_instruct_linear_0.33/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<CLS|LLM-jp>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<MASK|LLM-jp>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<PAD|LLM-jp>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "<SEP|LLM-jp>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
llm_tutorial/merged_models/en-ja_ja-en_instruct_linear_0.5/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<CLS|LLM-jp>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<MASK|LLM-jp>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<PAD|LLM-jp>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "<SEP|LLM-jp>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
llm_tutorial/merged_models/en-ja_merged/mergekit_config.yml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ models:
2
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698
3
+ parameters:
4
+ weight: 0.5
5
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_actual_3M-pairs/iter_0000698
6
+ parameters:
7
+ weight: 0.5
8
+ merge_method: linear
9
+ dtype: float16
llm_tutorial/merged_models/en-ja_merged/tokenizer_config.json ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "3": {
30
+ "content": "<MASK|LLM-jp>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "4": {
38
+ "content": "<PAD|LLM-jp>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "5": {
46
+ "content": "<CLS|LLM-jp>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "6": {
54
+ "content": "<SEP|LLM-jp>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "7": {
62
+ "content": "<EOD|LLM-jp>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ }
69
+ },
70
+ "bos_token": "<s>",
71
+ "clean_up_tokenization_spaces": false,
72
+ "cls_token": "<CLS|LLM-jp>",
73
+ "eod_token": "</s>",
74
+ "eos_token": "</s>",
75
+ "extra_ids": 0,
76
+ "mask_token": "<MASK|LLM-jp>",
77
+ "model_max_length": 1000000000000000019884624838656,
78
+ "pad_token": "<PAD|LLM-jp>",
79
+ "sep_token": "<SEP|LLM-jp>",
80
+ "sp_model_kwargs": {},
81
+ "tokenizer_class": "PreTrainedTokenizerFast",
82
+ "unk_token": "<unk>"
83
+ }
llm_tutorial/merged_models4/ja-en_en-ja_ties01-instruct/mergekit_config.yml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ models:
2
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en-instruct_3M-pairs/iter_0000698
3
+ parameters:
4
+ weight: 0.5
5
+ density: 0.1
6
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja-instruct_3M-pairs/iter_0000698
7
+ parameters:
8
+ weight: 0.5
9
+ density: 0.1
10
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b-instruct
11
+ merge_method: ties
12
+ dtype: float16
llm_tutorial/merged_models4/ja-en_en-ja_ties01/README.md ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: []
3
+ library_name: transformers
4
+ tags:
5
+ - mergekit
6
+ - merge
7
+
8
+ ---
9
+ # ja-en_en-ja_ties01
10
+
11
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
12
+
13
+ ## Merge Details
14
+ ### Merge Method
15
+
16
+ This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b as a base.
17
+
18
+ ### Models Merged
19
+
20
+ The following models were included in the merge:
21
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
22
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
23
+
24
+ ### Configuration
25
+
26
+ The following YAML configuration was used to produce this model:
27
+
28
+ ```yaml
29
+ models:
30
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
31
+ parameters:
32
+ density: 0.1
33
+ weight: 0.5
34
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
35
+ parameters:
36
+ density: 0.1
37
+ weight: 0.5
38
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
39
+ merge_method: ties
40
+ dtype: float16
41
+
42
+ ```
llm_tutorial/merged_models4/ja-en_en-ja_ties01/model.safetensors.index.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"metadata": {"mergekit_version": "0.0.5.1", "total_size": 7565826048}, "weight_map": {"lm_head.weight": "model-00001-of-00002.safetensors", "model.embed_tokens.weight": "model-00001-of-00002.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", 
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.up_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.down_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.v_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", 
"model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", 
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.up_proj.weight": 
"model-00002-of-00002.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.4.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.input_layernorm.weight": 
"model-00002-of-00002.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.norm.weight": "model-00002-of-00002.safetensors"}}
llm_tutorial/merged_models4/ja-en_en-ja_ties01/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
llm_tutorial/merged_models4/ja-en_en-ja_ties02/config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "bos_token_id": 1,
9
+ "eos_token_id": 2,
10
+ "head_dim": 128,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 3072,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 8192,
15
+ "max_position_embeddings": 4096,
16
+ "mlp_bias": false,
17
+ "model_type": "llama",
18
+ "num_attention_heads": 24,
19
+ "num_hidden_layers": 28,
20
+ "num_key_value_heads": 24,
21
+ "pretraining_tp": 1,
22
+ "rms_norm_eps": 1e-05,
23
+ "rope_scaling": null,
24
+ "rope_theta": 10000,
25
+ "tie_word_embeddings": false,
26
+ "torch_dtype": "float16",
27
+ "transformers_version": "4.46.2",
28
+ "use_cache": true,
29
+ "vocab_size": 99584
30
+ }
llm_tutorial/merged_models4/ja-en_en-ja_ties02/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
llm_tutorial/merged_models4/ja-en_en-ja_ties03-instruct/README.md ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: []
3
+ library_name: transformers
4
+ tags:
5
+ - mergekit
6
+ - merge
7
+
8
+ ---
9
+ # ja-en_en-ja_ties03-instruct
10
+
11
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
12
+
13
+ ## Merge Details
14
+ ### Merge Method
15
+
16
+ This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b-instruct as a base.
17
+
18
+ ### Models Merged
19
+
20
+ The following models were included in the merge:
21
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja-instruct_3M-pairs/iter_0000698
22
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en-instruct_3M-pairs/iter_0000698
23
+
24
+ ### Configuration
25
+
26
+ The following YAML configuration was used to produce this model:
27
+
28
+ ```yaml
29
+ models:
30
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en-instruct_3M-pairs/iter_0000698
31
+ parameters:
32
+ weight: 0.5
33
+ density: 0.3
34
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja-instruct_3M-pairs/iter_0000698
35
+ parameters:
36
+ weight: 0.5
37
+ density: 0.3
38
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b-instruct
39
+ merge_method: ties
40
+ dtype: float16
41
+
42
+ ```
llm_tutorial/merged_models4/ja-en_en-ja_ties03/README.md ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: []
3
+ library_name: transformers
4
+ tags:
5
+ - mergekit
6
+ - merge
7
+
8
+ ---
9
+ # ja-en_en-ja_ties03
10
+
11
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
12
+
13
+ ## Merge Details
14
+ ### Merge Method
15
+
16
+ This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b as a base.
17
+
18
+ ### Models Merged
19
+
20
+ The following models were included in the merge:
21
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
22
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
23
+
24
+ ### Configuration
25
+
26
+ The following YAML configuration was used to produce this model:
27
+
28
+ ```yaml
29
+ models:
30
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
31
+ parameters:
32
+ density: 0.3
33
+ weight: 0.5
34
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
35
+ parameters:
36
+ density: 0.3
37
+ weight: 0.5
38
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
39
+ merge_method: ties
40
+ dtype: float16
41
+
42
+ ```
llm_tutorial/merged_models4/ja-en_en-ja_ties03/config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "bos_token_id": 1,
9
+ "eos_token_id": 2,
10
+ "head_dim": 128,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 3072,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 8192,
15
+ "max_position_embeddings": 4096,
16
+ "mlp_bias": false,
17
+ "model_type": "llama",
18
+ "num_attention_heads": 24,
19
+ "num_hidden_layers": 28,
20
+ "num_key_value_heads": 24,
21
+ "pretraining_tp": 1,
22
+ "rms_norm_eps": 1e-05,
23
+ "rope_scaling": null,
24
+ "rope_theta": 10000,
25
+ "tie_word_embeddings": false,
26
+ "torch_dtype": "float16",
27
+ "transformers_version": "4.46.2",
28
+ "use_cache": true,
29
+ "vocab_size": 99584
30
+ }
llm_tutorial/merged_models4/ja-en_en-ja_ties03/mergekit_config.yml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ models:
2
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
3
+ parameters:
4
+ density: 0.3
5
+ weight: 0.5
6
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
7
+ parameters:
8
+ density: 0.3
9
+ weight: 0.5
10
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
11
+ merge_method: ties
12
+ dtype: float16
llm_tutorial/merged_models4/ja-en_en-ja_ties03/model.safetensors.index.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"metadata": {"mergekit_version": "0.0.5.1", "total_size": 7565826048}, "weight_map": {"lm_head.weight": "model-00001-of-00002.safetensors", "model.embed_tokens.weight": "model-00001-of-00002.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", 
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.up_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.down_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.v_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", 
"model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", 
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.up_proj.weight": 
"model-00002-of-00002.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.4.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.input_layernorm.weight": 
"model-00002-of-00002.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.norm.weight": "model-00002-of-00002.safetensors"}}
llm_tutorial/merged_models4/ja-en_en-ja_ties04/README.md ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: []
3
+ library_name: transformers
4
+ tags:
5
+ - mergekit
6
+ - merge
7
+
8
+ ---
9
+ # ja-en_en-ja_ties04
10
+
11
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
12
+
13
+ ## Merge Details
14
+ ### Merge Method
15
+
16
+ This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b as a base.
17
+
18
+ ### Models Merged
19
+
20
+ The following models were included in the merge:
21
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
22
+ * /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
23
+
24
+ ### Configuration
25
+
26
+ The following YAML configuration was used to produce this model:
27
+
28
+ ```yaml
29
+ models:
30
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
31
+ parameters:
32
+ density: 0.4
33
+ weight: 0.5
34
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
35
+ parameters:
36
+ density: 0.4
37
+ weight: 0.5
38
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
39
+ merge_method: ties
40
+ dtype: float16
41
+
42
+ ```
llm_tutorial/merged_models4/ja-en_en-ja_ties04/mergekit_config.yml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ models:
2
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs_3.5e-5/iter_0000698
3
+ parameters:
4
+ density: 0.4
5
+ weight: 0.5
6
+ - model: /home/koiwa/work/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_en-ja_3M-pairs_3.5e-5/iter_0000698
7
+ parameters:
8
+ density: 0.4
9
+ weight: 0.5
10
+ base_model: /work01/llm_tutorial/pretrained_lm/llm-jp/llm-jp-v3-3.7b
11
+ merge_method: ties
12
+ dtype: float16
llm_tutorial/merged_models4/ja-en_en-ja_ties04/model.safetensors.index.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"metadata": {"mergekit_version": "0.0.5.1", "total_size": 7565826048}, "weight_map": {"lm_head.weight": "model-00001-of-00002.safetensors", "model.embed_tokens.weight": "model-00001-of-00002.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", 
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.up_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.down_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.v_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", 
"model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", 
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.up_proj.weight": 
"model-00002-of-00002.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.4.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.input_layernorm.weight": 
"model-00002-of-00002.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.norm.weight": "model-00002-of-00002.safetensors"}}
llm_tutorial/merged_models4/ja-en_en-ja_ties04/special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<CLS|LLM-jp>",
4
+ "eod_token": "</s>",
5
+ "eos_token": "</s>",
6
+ "mask_token": "<MASK|LLM-jp>",
7
+ "pad_token": "<PAD|LLM-jp>",
8
+ "sep_token": "<SEP|LLM-jp>",
9
+ "unk_token": "<unk>"
10
+ }
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/categories/tests/__pycache__/test_baseclasses.cpython-311.pyc ADDED
Binary file (11.8 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/categories/tests/__pycache__/test_drawing.cpython-311.pyc ADDED
Binary file (39.1 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/categories/tests/test_baseclasses.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.categories import (Object, Morphism, IdentityMorphism,
2
+ NamedMorphism, CompositeMorphism,
3
+ Diagram, Category)
4
+ from sympy.categories.baseclasses import Class
5
+ from sympy.testing.pytest import raises
6
+ from sympy.core.containers import (Dict, Tuple)
7
+ from sympy.sets import EmptySet
8
+ from sympy.sets.sets import FiniteSet
9
+
10
+
11
+ def test_morphisms():
12
+ A = Object("A")
13
+ B = Object("B")
14
+ C = Object("C")
15
+ D = Object("D")
16
+
17
+ # Test the base morphism.
18
+ f = NamedMorphism(A, B, "f")
19
+ assert f.domain == A
20
+ assert f.codomain == B
21
+ assert f == NamedMorphism(A, B, "f")
22
+
23
+ # Test identities.
24
+ id_A = IdentityMorphism(A)
25
+ id_B = IdentityMorphism(B)
26
+ assert id_A.domain == A
27
+ assert id_A.codomain == A
28
+ assert id_A == IdentityMorphism(A)
29
+ assert id_A != id_B
30
+
31
+ # Test named morphisms.
32
+ g = NamedMorphism(B, C, "g")
33
+ assert g.name == "g"
34
+ assert g != f
35
+ assert g == NamedMorphism(B, C, "g")
36
+ assert g != NamedMorphism(B, C, "f")
37
+
38
+ # Test composite morphisms.
39
+ assert f == CompositeMorphism(f)
40
+
41
+ k = g.compose(f)
42
+ assert k.domain == A
43
+ assert k.codomain == C
44
+ assert k.components == Tuple(f, g)
45
+ assert g * f == k
46
+ assert CompositeMorphism(f, g) == k
47
+
48
+ assert CompositeMorphism(g * f) == g * f
49
+
50
+ # Test the associativity of composition.
51
+ h = NamedMorphism(C, D, "h")
52
+
53
+ p = h * g
54
+ u = h * g * f
55
+
56
+ assert h * k == u
57
+ assert p * f == u
58
+ assert CompositeMorphism(f, g, h) == u
59
+
60
+ # Test flattening.
61
+ u2 = u.flatten("u")
62
+ assert isinstance(u2, NamedMorphism)
63
+ assert u2.name == "u"
64
+ assert u2.domain == A
65
+ assert u2.codomain == D
66
+
67
+ # Test identities.
68
+ assert f * id_A == f
69
+ assert id_B * f == f
70
+ assert id_A * id_A == id_A
71
+ assert CompositeMorphism(id_A) == id_A
72
+
73
+ # Test bad compositions.
74
+ raises(ValueError, lambda: f * g)
75
+
76
+ raises(TypeError, lambda: f.compose(None))
77
+ raises(TypeError, lambda: id_A.compose(None))
78
+ raises(TypeError, lambda: f * None)
79
+ raises(TypeError, lambda: id_A * None)
80
+
81
+ raises(TypeError, lambda: CompositeMorphism(f, None, 1))
82
+
83
+ raises(ValueError, lambda: NamedMorphism(A, B, ""))
84
+ raises(NotImplementedError, lambda: Morphism(A, B))
85
+
86
+
87
+ def test_diagram():
88
+ A = Object("A")
89
+ B = Object("B")
90
+ C = Object("C")
91
+
92
+ f = NamedMorphism(A, B, "f")
93
+ g = NamedMorphism(B, C, "g")
94
+ id_A = IdentityMorphism(A)
95
+ id_B = IdentityMorphism(B)
96
+
97
+ empty = EmptySet
98
+
99
+ # Test the addition of identities.
100
+ d1 = Diagram([f])
101
+
102
+ assert d1.objects == FiniteSet(A, B)
103
+ assert d1.hom(A, B) == (FiniteSet(f), empty)
104
+ assert d1.hom(A, A) == (FiniteSet(id_A), empty)
105
+ assert d1.hom(B, B) == (FiniteSet(id_B), empty)
106
+
107
+ assert d1 == Diagram([id_A, f])
108
+ assert d1 == Diagram([f, f])
109
+
110
+ # Test the addition of composites.
111
+ d2 = Diagram([f, g])
112
+ homAC = d2.hom(A, C)[0]
113
+
114
+ assert d2.objects == FiniteSet(A, B, C)
115
+ assert g * f in d2.premises.keys()
116
+ assert homAC == FiniteSet(g * f)
117
+
118
+ # Test equality, inequality and hash.
119
+ d11 = Diagram([f])
120
+
121
+ assert d1 == d11
122
+ assert d1 != d2
123
+ assert hash(d1) == hash(d11)
124
+
125
+ d11 = Diagram({f: "unique"})
126
+ assert d1 != d11
127
+
128
+ # Make sure that (re-)adding composites (with new properties)
129
+ # works as expected.
130
+ d = Diagram([f, g], {g * f: "unique"})
131
+ assert d.conclusions == Dict({g * f: FiniteSet("unique")})
132
+
133
+ # Check the hom-sets when there are premises and conclusions.
134
+ assert d.hom(A, C) == (FiniteSet(g * f), FiniteSet(g * f))
135
+ d = Diagram([f, g], [g * f])
136
+ assert d.hom(A, C) == (FiniteSet(g * f), FiniteSet(g * f))
137
+
138
+ # Check how the properties of composite morphisms are computed.
139
+ d = Diagram({f: ["unique", "isomorphism"], g: "unique"})
140
+ assert d.premises[g * f] == FiniteSet("unique")
141
+
142
+ # Check that conclusion morphisms with new objects are not allowed.
143
+ d = Diagram([f], [g])
144
+ assert d.conclusions == Dict({})
145
+
146
+ # Test an empty diagram.
147
+ d = Diagram()
148
+ assert d.premises == Dict({})
149
+ assert d.conclusions == Dict({})
150
+ assert d.objects == empty
151
+
152
+ # Check a SymPy Dict object.
153
+ d = Diagram(Dict({f: FiniteSet("unique", "isomorphism"), g: "unique"}))
154
+ assert d.premises[g * f] == FiniteSet("unique")
155
+
156
+ # Check the addition of components of composite morphisms.
157
+ d = Diagram([g * f])
158
+ assert f in d.premises
159
+ assert g in d.premises
160
+
161
+ # Check subdiagrams.
162
+ d = Diagram([f, g], {g * f: "unique"})
163
+
164
+ d1 = Diagram([f])
165
+ assert d.is_subdiagram(d1)
166
+ assert not d1.is_subdiagram(d)
167
+
168
+ d = Diagram([NamedMorphism(B, A, "f'")])
169
+ assert not d.is_subdiagram(d1)
170
+ assert not d1.is_subdiagram(d)
171
+
172
+ d1 = Diagram([f, g], {g * f: ["unique", "something"]})
173
+ assert not d.is_subdiagram(d1)
174
+ assert not d1.is_subdiagram(d)
175
+
176
+ d = Diagram({f: "blooh"})
177
+ d1 = Diagram({f: "bleeh"})
178
+ assert not d.is_subdiagram(d1)
179
+ assert not d1.is_subdiagram(d)
180
+
181
+ d = Diagram([f, g], {f: "unique", g * f: "veryunique"})
182
+ d1 = d.subdiagram_from_objects(FiniteSet(A, B))
183
+ assert d1 == Diagram([f], {f: "unique"})
184
+ raises(ValueError, lambda: d.subdiagram_from_objects(FiniteSet(A,
185
+ Object("D"))))
186
+
187
+ raises(ValueError, lambda: Diagram({IdentityMorphism(A): "unique"}))
188
+
189
+
190
+ def test_category():
191
+ A = Object("A")
192
+ B = Object("B")
193
+ C = Object("C")
194
+
195
+ f = NamedMorphism(A, B, "f")
196
+ g = NamedMorphism(B, C, "g")
197
+
198
+ d1 = Diagram([f, g])
199
+ d2 = Diagram([f])
200
+
201
+ objects = d1.objects | d2.objects
202
+
203
+ K = Category("K", objects, commutative_diagrams=[d1, d2])
204
+
205
+ assert K.name == "K"
206
+ assert K.objects == Class(objects)
207
+ assert K.commutative_diagrams == FiniteSet(d1, d2)
208
+
209
+ raises(ValueError, lambda: Category(""))
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/__pycache__/pynodes.cpython-311.pyc ADDED
Binary file (884 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/__pycache__/pyutils.cpython-311.pyc ADDED
Binary file (1.76 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/abstract_nodes.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This module provides containers for python objects that are valid
2
+ printing targets but are not a subclass of SymPy's Printable.
3
+ """
4
+
5
+
6
+ from sympy.core.containers import Tuple
7
+
8
+
9
+ class List(Tuple):
10
+ """Represents a (frozen) (Python) list (for code printing purposes)."""
11
+ def __eq__(self, other):
12
+ if isinstance(other, list):
13
+ return self == List(*other)
14
+ else:
15
+ return self.args == other
16
+
17
+ def __hash__(self):
18
+ return super().__hash__()
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/ast.py ADDED
@@ -0,0 +1,1906 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Types used to represent a full function/module as an Abstract Syntax Tree.
3
+
4
+ Most types are small, and are merely used as tokens in the AST. A tree diagram
5
+ has been included below to illustrate the relationships between the AST types.
6
+
7
+
8
+ AST Type Tree
9
+ -------------
10
+ ::
11
+
12
+ *Basic*
13
+ |
14
+ |
15
+ CodegenAST
16
+ |
17
+ |--->AssignmentBase
18
+ | |--->Assignment
19
+ | |--->AugmentedAssignment
20
+ | |--->AddAugmentedAssignment
21
+ | |--->SubAugmentedAssignment
22
+ | |--->MulAugmentedAssignment
23
+ | |--->DivAugmentedAssignment
24
+ | |--->ModAugmentedAssignment
25
+ |
26
+ |--->CodeBlock
27
+ |
28
+ |
29
+ |--->Token
30
+ |--->Attribute
31
+ |--->For
32
+ |--->String
33
+ | |--->QuotedString
34
+ | |--->Comment
35
+ |--->Type
36
+ | |--->IntBaseType
37
+ | | |--->_SizedIntType
38
+ | | |--->SignedIntType
39
+ | | |--->UnsignedIntType
40
+ | |--->FloatBaseType
41
+ | |--->FloatType
42
+ | |--->ComplexBaseType
43
+ | |--->ComplexType
44
+ |--->Node
45
+ | |--->Variable
46
+ | | |---> Pointer
47
+ | |--->FunctionPrototype
48
+ | |--->FunctionDefinition
49
+ |--->Element
50
+ |--->Declaration
51
+ |--->While
52
+ |--->Scope
53
+ |--->Stream
54
+ |--->Print
55
+ |--->FunctionCall
56
+ |--->BreakToken
57
+ |--->ContinueToken
58
+ |--->NoneToken
59
+ |--->Return
60
+
61
+
62
+ Predefined types
63
+ ----------------
64
+
65
+ A number of ``Type`` instances are provided in the ``sympy.codegen.ast`` module
66
+ for convenience. Perhaps the two most common ones for code-generation (of numeric
67
+ codes) are ``float32`` and ``float64`` (known as single and double precision respectively).
68
+ There are also precision generic versions of Types (for which the codeprinters selects the
69
+ underlying data type at time of printing): ``real``, ``integer``, ``complex_``, ``bool_``.
70
+
71
+ The other ``Type`` instances defined are:
72
+
73
+ - ``intc``: Integer type used by C's "int".
74
+ - ``intp``: Integer type used by C's "unsigned".
75
+ - ``int8``, ``int16``, ``int32``, ``int64``: n-bit integers.
76
+ - ``uint8``, ``uint16``, ``uint32``, ``uint64``: n-bit unsigned integers.
77
+ - ``float80``: known as "extended precision" on modern x86/amd64 hardware.
78
+ - ``complex64``: Complex number represented by two ``float32`` numbers
79
+ - ``complex128``: Complex number represented by two ``float64`` numbers
80
+
81
+ Using the nodes
82
+ ---------------
83
+
84
+ It is possible to construct simple algorithms using the AST nodes. Let's construct a loop applying
85
+ Newton's method::
86
+
87
+ >>> from sympy import symbols, cos
88
+ >>> from sympy.codegen.ast import While, Assignment, aug_assign, Print, QuotedString
89
+ >>> t, dx, x = symbols('tol delta val')
90
+ >>> expr = cos(x) - x**3
91
+ >>> whl = While(abs(dx) > t, [
92
+ ... Assignment(dx, -expr/expr.diff(x)),
93
+ ... aug_assign(x, '+', dx),
94
+ ... Print([x])
95
+ ... ])
96
+ >>> from sympy import pycode
97
+ >>> py_str = pycode(whl)
98
+ >>> print(py_str)
99
+ while (abs(delta) > tol):
100
+ delta = (val**3 - math.cos(val))/(-3*val**2 - math.sin(val))
101
+ val += delta
102
+ print(val)
103
+ >>> import math
104
+ >>> tol, val, delta = 1e-5, 0.5, float('inf')
105
+ >>> exec(py_str)
106
+ 1.1121416371
107
+ 0.909672693737
108
+ 0.867263818209
109
+ 0.865477135298
110
+ 0.865474033111
111
+ >>> print('%3.1g' % (math.cos(val) - val**3))
112
+ -3e-11
113
+
114
+ If we want to generate Fortran code for the same while loop we simple call ``fcode``::
115
+
116
+ >>> from sympy import fcode
117
+ >>> print(fcode(whl, standard=2003, source_format='free'))
118
+ do while (abs(delta) > tol)
119
+ delta = (val**3 - cos(val))/(-3*val**2 - sin(val))
120
+ val = val + delta
121
+ print *, val
122
+ end do
123
+
124
+ There is a function constructing a loop (or a complete function) like this in
125
+ :mod:`sympy.codegen.algorithms`.
126
+
127
+ """
128
+
129
+ from __future__ import annotations
130
+ from typing import Any
131
+
132
+ from collections import defaultdict
133
+
134
+ from sympy.core.relational import (Ge, Gt, Le, Lt)
135
+ from sympy.core import Symbol, Tuple, Dummy
136
+ from sympy.core.basic import Basic
137
+ from sympy.core.expr import Expr, Atom
138
+ from sympy.core.numbers import Float, Integer, oo
139
+ from sympy.core.sympify import _sympify, sympify, SympifyError
140
+ from sympy.utilities.iterables import (iterable, topological_sort,
141
+ numbered_symbols, filter_symbols)
142
+
143
+
144
def _mk_Tuple(args):
    """
    Build a SymPy Tuple from an iterable, promoting any Python ``str``
    element to an AST ``String``.

    Parameters
    ==========

    args: iterable
        Arguments to :class:`sympy.Tuple`.

    Returns
    =======

    sympy.Tuple
    """
    converted = (String(arg) if isinstance(arg, str) else arg for arg in args)
    return Tuple(*converted)
162
+
163
+
164
class CodegenAST(Basic):
    """Abstract base class for all codegen AST nodes."""
    __slots__ = ()
166
+
167
+
168
class Token(CodegenAST):
    """ Base class for the AST types.

    Explanation
    ===========

    Defining fields are set in ``_fields``. Attributes (defined in _fields)
    are only allowed to contain instances of Basic (unless atomic, see
    ``String``). The arguments to ``__new__()`` correspond to the attributes in
    the order defined in ``_fields``. The ``defaults`` class attribute is a
    dictionary mapping attribute names to their default values.

    Subclasses should not need to override the ``__new__()`` method. They may
    define a class or static method named ``_construct_<attr>`` for each
    attribute to process the value passed to ``__new__()``. Attributes listed
    in the class attribute ``not_in_args`` are not passed to :class:`~.Basic`.
    """

    __slots__: tuple[str, ...] = ()
    _fields = __slots__
    defaults: dict[str, Any] = {}
    not_in_args: list[str] = []
    indented_args = ['body']

    @property
    def is_Atom(self):
        # A Token with no declared fields carries no sub-nodes.
        return len(self._fields) == 0

    @classmethod
    def _get_constructor(cls, attr):
        """ Get the constructor function for an attribute by name. """
        # Falls back to the identity function when no _construct_<attr> exists.
        return getattr(cls, '_construct_%s' % attr, lambda x: x)

    @classmethod
    def _construct(cls, attr, arg):
        """ Construct an attribute value from argument passed to ``__new__()``. """
        # arg may be ``NoneToken()``, so comparison is done using == instead of ``is`` operator
        if arg == None:
            return cls.defaults.get(attr, none)
        else:
            if isinstance(arg, Dummy):  # SymPy's replace uses Dummy instances
                return arg
            else:
                return cls._get_constructor(attr)(arg)

    def __new__(cls, *args, **kwargs):
        # Pass through existing instances when given as sole argument
        if len(args) == 1 and not kwargs and isinstance(args[0], cls):
            return args[0]

        if len(args) > len(cls._fields):
            raise ValueError("Too many arguments (%d), expected at most %d" % (len(args), len(cls._fields)))

        attrvals = []

        # Process positional arguments
        for attrname, argval in zip(cls._fields, args):
            if attrname in kwargs:
                raise TypeError('Got multiple values for attribute %r' % attrname)

            attrvals.append(cls._construct(attrname, argval))

        # Process keyword arguments (and fill in declared defaults)
        for attrname in cls._fields[len(args):]:
            if attrname in kwargs:
                argval = kwargs.pop(attrname)

            elif attrname in cls.defaults:
                argval = cls.defaults[attrname]

            else:
                raise TypeError('No value for %r given and attribute has no default' % attrname)

            attrvals.append(cls._construct(attrname, argval))

        # Anything left in kwargs did not match a declared field.
        if kwargs:
            raise ValueError("Unknown keyword arguments: %s" % ' '.join(kwargs))

        # Parent constructor: fields listed in not_in_args are withheld
        # from Basic's args tuple (they are stored only as attributes).
        basic_args = [
            val for attr, val in zip(cls._fields, attrvals)
            if attr not in cls.not_in_args
        ]
        obj = CodegenAST.__new__(cls, *basic_args)

        # Set attributes
        for attr, arg in zip(cls._fields, attrvals):
            setattr(obj, attr, arg)

        return obj

    def __eq__(self, other):
        # Equal iff same class and every declared field compares equal.
        if not isinstance(other, self.__class__):
            return False
        for attr in self._fields:
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True

    def _hashable_content(self):
        return tuple([getattr(self, attr) for attr in self._fields])

    def __hash__(self):
        # __eq__ is overridden above, so __hash__ must be restated explicitly.
        return super().__hash__()

    def _joiner(self, k, indent_level):
        # Fields named in indented_args (e.g. 'body') get newline joins.
        return (',\n' + ' '*indent_level) if k in self.indented_args else ', '

    def _indented(self, printer, k, v, *args, **kwargs):
        # Render field value v of field k, honoring the current indent level.
        il = printer._context['indent_level']
        def _print(arg):
            if isinstance(arg, Token):
                return printer._print(arg, *args, joiner=self._joiner(k, il), **kwargs)
            else:
                return printer._print(arg, *args, **kwargs)

        if isinstance(v, Tuple):
            joined = self._joiner(k, il).join([_print(arg) for arg in v.args])
            if k in self.indented_args:
                return '(\n' + ' '*il + joined + ',\n' + ' '*(il - 4) + ')'
            else:
                # Single-element tuples keep the trailing comma.
                return ('({0},)' if len(v.args) == 1 else '({0})').format(joined)
        else:
            return _print(v)

    def _sympyrepr(self, printer, *args, joiner=', ', **kwargs):
        from sympy.printing.printer import printer_context
        exclude = kwargs.get('exclude', ())
        values = [getattr(self, k) for k in self._fields]
        indent_level = printer._context.get('indent_level', 0)

        arg_reprs = []

        for i, (attr, value) in enumerate(zip(self._fields, values)):
            if attr in exclude:
                continue

            # Skip attributes which have the default value
            if attr in self.defaults and value == self.defaults[attr]:
                continue

            ilvl = indent_level + 4 if attr in self.indented_args else 0
            with printer_context(printer, indent_level=ilvl):
                indented = self._indented(printer, attr, value, *args, **kwargs)
            # First argument is printed positionally, the rest as keywords.
            arg_reprs.append(('{1}' if i == 0 else '{0}={1}').format(attr, indented.lstrip()))

        return "{}({})".format(self.__class__.__name__, joiner.join(arg_reprs))

    _sympystr = _sympyrepr

    def __repr__(self):  # sympy.core.Basic.__repr__ uses sstr
        from sympy.printing import srepr
        return srepr(self)

    def kwargs(self, exclude=(), apply=None):
        """ Get instance's attributes as dict of keyword arguments.

        Parameters
        ==========

        exclude : collection of str
            Collection of keywords to exclude.

        apply : callable, optional
            Function to apply to all values.
        """
        kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude}
        if apply is not None:
            return {k: apply(v) for k, v in kwargs.items()}
        else:
            return kwargs
339
+
340
class BreakToken(Token):
    """ AST node for a loop-break statement.

    Printed as ``break`` in C/Python and as ``exit`` in Fortran. Prefer the
    ready-made singleton ``break_`` over constructing new instances.

    Examples
    ========

    >>> from sympy import ccode, fcode
    >>> from sympy.codegen.ast import break_
    >>> ccode(break_)
    'break'
    >>> fcode(break_, source_format='free')
    'exit'
    """


break_ = BreakToken()
357
+
358
+
359
class ContinueToken(Token):
    """ AST node for a loop-continue statement.

    Printed as ``continue`` in C/Python and as ``cycle`` in Fortran. Prefer
    the ready-made singleton ``continue_`` over constructing new instances.

    Examples
    ========

    >>> from sympy import ccode, fcode
    >>> from sympy.codegen.ast import continue_
    >>> ccode(continue_)
    'continue'
    >>> fcode(continue_, source_format='free')
    'cycle'
    """


continue_ = ContinueToken()
376
+
377
class NoneToken(Token):
    """ AST counterpart of Python's ``NoneType``.

    The singleton ``none`` corresponds to Python's ``None``.

    Examples
    ========

    >>> from sympy.codegen.ast import none, Variable
    >>> from sympy import pycode
    >>> print(pycode(Variable('x').as_Declaration(value=none)))
    x = None

    """
    def __eq__(self, other):
        # Equal both to Python's None and to any other NoneToken.
        return other is None or isinstance(other, NoneToken)

    def _hashable_content(self):
        # Stateless: nothing contributes to the hash beyond the class.
        return ()

    def __hash__(self):
        # Restate Token's hash since __eq__ is overridden here.
        return super().__hash__()


none = NoneToken()
402
+
403
+
404
class AssignmentBase(CodegenAST):
    """ Abstract base class for Assignment and AugmentedAssignment.

    Attributes:
    ===========

    op : str
        Symbol for assignment operator, e.g. "=", "+=", etc.
    """

    def __new__(cls, lhs, rhs):
        # Sympify both sides, then validate before delegating to Basic.
        lhs = _sympify(lhs)
        rhs = _sympify(rhs)
        cls._check_args(lhs, rhs)
        return super().__new__(cls, lhs, rhs)

    @property
    def lhs(self):
        # Left-hand side: first stored arg.
        return self.args[0]

    @property
    def rhs(self):
        # Right-hand side: second stored arg.
        return self.args[1]

    @classmethod
    def _check_args(cls, lhs, rhs):
        """ Check arguments to __new__ and raise exception if any problems found.

        Derived classes may wish to override this.
        """
        from sympy.matrices.expressions.matexpr import (
            MatrixElement, MatrixSymbol)
        from sympy.tensor.indexed import Indexed
        from sympy.tensor.array.expressions import ArrayElement

        # Tuple of things that can be on the lhs of an assignment
        assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed, Element, Variable,
                      ArrayElement)
        if not isinstance(lhs, assignable):
            raise TypeError("Cannot assign to lhs of type %s." % type(lhs))

        # Indexed types implement shape, but don't define it until later. This
        # causes issues in assignment validation. For now, matrices are defined
        # as anything with a shape that is not an Indexed
        def _is_matrix(obj):
            return hasattr(obj, 'shape') and not isinstance(obj, Indexed)

        lhs_is_mat = _is_matrix(lhs)
        rhs_is_mat = _is_matrix(rhs)

        # Both sides must agree in kind, and matrices must agree in shape.
        if lhs_is_mat and not rhs_is_mat:
            raise ValueError("Cannot assign a scalar to a matrix.")
        if lhs_is_mat and lhs.shape != rhs.shape:
            raise ValueError("Dimensions of lhs and rhs do not align.")
        if rhs_is_mat and not lhs_is_mat:
            raise ValueError("Cannot assign a matrix to a scalar.")
461
+
462
+
463
class Assignment(AssignmentBase):
    """
    Plain variable assignment (``lhs := rhs``) for code generation.

    Parameters
    ==========

    lhs : Expr
        The assignment target. Must be a singular, code-addressable object:
        Symbol, MatrixSymbol, MatrixElement, Indexed, or a subclass thereof.

    rhs : Expr
        The assigned value. Any type is accepted, provided its shape matches
        that of the lhs — e.g. a Matrix may be assigned to a MatrixSymbol but
        not to a plain Symbol.

    Examples
    ========

    >>> from sympy import symbols, MatrixSymbol, Matrix
    >>> from sympy.codegen.ast import Assignment
    >>> x, y, z = symbols('x, y, z')
    >>> Assignment(x, y)
    Assignment(x, y)
    >>> Assignment(x, 0)
    Assignment(x, 0)
    >>> A = MatrixSymbol('A', 1, 3)
    >>> mat = Matrix([x, y, z]).T
    >>> Assignment(A, mat)
    Assignment(A, Matrix([[x, y, z]]))
    >>> Assignment(A[0, 1], x)
    Assignment(A[0, 1], x)
    """

    op = ':='
501
+
502
+
503
class AugmentedAssignment(AssignmentBase):
    """
    Base class for augmented assignments (``lhs <binop>= rhs``).

    Attributes:
    ===========

    binop : str
        Symbol for the binary operation applied in the assignment, e.g.
        "+", "*", etc. Set by concrete subclasses.
    """
    binop = None  # type: str

    @property
    def op(self):
        # The full operator is the binary op followed by '='.
        return self.binop + '='
519
+
520
+
521
class AddAugmentedAssignment(AugmentedAssignment):
    """Augmented assignment with ``+=``."""
    binop = '+'


class SubAugmentedAssignment(AugmentedAssignment):
    """Augmented assignment with ``-=``."""
    binop = '-'


class MulAugmentedAssignment(AugmentedAssignment):
    """Augmented assignment with ``*=``."""
    binop = '*'


class DivAugmentedAssignment(AugmentedAssignment):
    """Augmented assignment with ``/=``."""
    binop = '/'


class ModAugmentedAssignment(AugmentedAssignment):
    """Augmented assignment with ``%=``."""
    binop = '%'
539
+
540
+
541
# Mapping from binary op strings to AugmentedAssignment subclasses
augassign_classes = {
    kls.binop: kls for kls in (
        AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment,
        DivAugmentedAssignment, ModAugmentedAssignment,
    )
}
548
+
549
+
550
def aug_assign(lhs, op, rhs):
    """
    Create 'lhs op= rhs'.

    Explanation
    ===========

    Convenience constructor for augmented variable assignment. Equivalent to
    instantiating the matching AugmentedAssignment subclass directly, e.g.
    AddAugmentedAssignment(x, y).

    Parameters
    ==========

    lhs : Expr
        The assignment target. Must be a singular, code-addressable object:
        Symbol, MatrixSymbol, MatrixElement, Indexed, or a subclass thereof.

    op : str
        Operator (+, -, /, \\*, %).

    rhs : Expr
        The assigned value. Any type is accepted, provided its shape matches
        that of the lhs.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.codegen.ast import aug_assign
    >>> x, y = symbols('x, y')
    >>> aug_assign(x, '+', y)
    AddAugmentedAssignment(x, y)
    """
    # Dispatch on the operator string via the module-level registry.
    cls = augassign_classes.get(op)
    if cls is None:
        raise ValueError("Unrecognized operator %s" % op)
    return cls(lhs, rhs)
591
+
592
+
593
class CodeBlock(CodegenAST):
    """
    Represents a block of code.

    Explanation
    ===========

    For now only assignments are supported. This restriction will be lifted in
    the future.

    Useful attributes on this object are:

    ``left_hand_sides``:
        Tuple of left-hand sides of assignments, in order.
    ``right_hand_sides``:
        Tuple of right-hand sides of assignments, in order.
    ``free_symbols``: Free symbols of the expressions in the right-hand sides
        which do not appear in the left-hand side of an assignment.

    Useful methods on this object are:

    ``topological_sort``:
        Class method. Return a CodeBlock with assignments
        sorted so that variables are assigned before they
        are used.
    ``cse``:
        Return a new CodeBlock with common subexpressions eliminated and
        pulled out as assignments.

    Examples
    ========

    >>> from sympy import symbols, ccode
    >>> from sympy.codegen.ast import CodeBlock, Assignment
    >>> x, y = symbols('x y')
    >>> c = CodeBlock(Assignment(x, 1), Assignment(y, x + 1))
    >>> print(ccode(c))
    x = 1;
    y = x + 1;

    """
    def __new__(cls, *args):
        # Collect lhs/rhs of any Assignment arguments for quick access later.
        left_hand_sides = []
        right_hand_sides = []
        for i in args:
            if isinstance(i, Assignment):
                lhs, rhs = i.args
                left_hand_sides.append(lhs)
                right_hand_sides.append(rhs)

        obj = CodegenAST.__new__(cls, *args)

        obj.left_hand_sides = Tuple(*left_hand_sides)
        obj.right_hand_sides = Tuple(*right_hand_sides)
        return obj

    def __iter__(self):
        return iter(self.args)

    def _sympyrepr(self, printer, *args, **kwargs):
        il = printer._context.get('indent_level', 0)
        joiner = ',\n' + ' '*il
        joined = joiner.join(map(printer._print, self.args))
        return ('{}(\n'.format(' '*(il-4) + self.__class__.__name__,) +
                ' '*il + joined + '\n' + ' '*(il - 4) + ')')

    _sympystr = _sympyrepr

    @property
    def free_symbols(self):
        # Symbols assigned within the block are bound, hence not free.
        return super().free_symbols - set(self.left_hand_sides)

    @classmethod
    def topological_sort(cls, assignments):
        """
        Return a CodeBlock with topologically sorted assignments so that
        variables are assigned before they are used.

        Examples
        ========

        The existing order of assignments is preserved as much as possible.

        This function assumes that variables are assigned to only once.

        This is a class constructor so that the default constructor for
        CodeBlock can error when variables are used before they are assigned.

        >>> from sympy import symbols
        >>> from sympy.codegen.ast import CodeBlock, Assignment
        >>> x, y, z = symbols('x y z')

        >>> assignments = [
        ...     Assignment(x, y + z),
        ...     Assignment(y, z + 1),
        ...     Assignment(z, 2),
        ... ]
        >>> CodeBlock.topological_sort(assignments)
        CodeBlock(
            Assignment(z, 2),
            Assignment(y, z + 1),
            Assignment(x, y + z)
        )

        """

        if not all(isinstance(i, Assignment) for i in assignments):
            # Will support more things later
            raise NotImplementedError("CodeBlock.topological_sort only supports Assignments")

        if any(isinstance(i, AugmentedAssignment) for i in assignments):
            raise NotImplementedError("CodeBlock.topological_sort does not yet work with AugmentedAssignments")

        # Create a graph where the nodes are assignments and there is a directed edge
        # between nodes that use a variable and nodes that assign that
        # variable, like

        # [(x := 1, y := x + 1), (x := 1, z := y + z), (y := x + 1, z := y + z)]

        # If we then topologically sort these nodes, they will be in
        # assignment order, like

        # x := 1
        # y := x + 1
        # z := y + z

        # A = The nodes
        #
        # enumerate keeps nodes in the same order they are already in if
        # possible. It will also allow us to handle duplicate assignments to
        # the same variable when those are implemented.
        A = list(enumerate(assignments))

        # var_map = {variable: [nodes for which this variable is assigned to]}
        # like {x: [(1, x := y + z), (4, x := 2 * w)], ...}
        var_map = defaultdict(list)
        for node in A:
            i, a = node
            var_map[a.lhs].append(node)

        # E = Edges in the graph
        E = []
        for dst_node in A:
            i, a = dst_node
            for s in a.rhs.free_symbols:
                for src_node in var_map[s]:
                    E.append((src_node, dst_node))

        ordered_assignments = topological_sort([A, E])

        # De-enumerate the result
        return cls(*[a for i, a in ordered_assignments])

    def cse(self, symbols=None, optimizations=None, postprocess=None,
            order='canonical'):
        """
        Return a new code block with common subexpressions eliminated.

        Explanation
        ===========

        See the docstring of :func:`sympy.simplify.cse_main.cse` for more
        information.

        Examples
        ========

        >>> from sympy import symbols, sin
        >>> from sympy.codegen.ast import CodeBlock, Assignment
        >>> x, y, z = symbols('x y z')

        >>> c = CodeBlock(
        ...     Assignment(x, 1),
        ...     Assignment(y, sin(x) + 1),
        ...     Assignment(z, sin(x) - 1),
        ... )
        ...
        >>> c.cse()
        CodeBlock(
            Assignment(x, 1),
            Assignment(x0, sin(x)),
            Assignment(y, x0 + 1),
            Assignment(z, x0 - 1)
        )

        """
        from sympy.simplify.cse_main import cse

        # Check that the CodeBlock only contains assignments to unique variables
        if not all(isinstance(i, Assignment) for i in self.args):
            # Will support more things later
            raise NotImplementedError("CodeBlock.cse only supports Assignments")

        if any(isinstance(i, AugmentedAssignment) for i in self.args):
            raise NotImplementedError("CodeBlock.cse does not yet work with AugmentedAssignments")

        for i, lhs in enumerate(self.left_hand_sides):
            if lhs in self.left_hand_sides[:i]:
                raise NotImplementedError("Duplicate assignments to the same "
                                          "variable are not yet supported (%s)" % lhs)

        # Ensure new symbols for subexpressions do not conflict with existing
        existing_symbols = self.atoms(Symbol)
        if symbols is None:
            symbols = numbered_symbols()
        symbols = filter_symbols(symbols, existing_symbols)

        replacements, reduced_exprs = cse(list(self.right_hand_sides),
            symbols=symbols, optimizations=optimizations, postprocess=postprocess,
            order=order)

        new_block = [Assignment(var, expr) for var, expr in
                     zip(self.left_hand_sides, reduced_exprs)]
        new_assignments = [Assignment(var, expr) for var, expr in replacements]
        # Re-sort so the freshly introduced temporaries precede their uses.
        return self.topological_sort(new_assignments + new_block)
808
+
809
+
810
class For(Token):
    """Represents a 'for-loop' in the code.

    Expressions are of the form:
        "for target in iter:
            body..."

    Parameters
    ==========

    target : symbol
    iter : iterable
    body : CodeBlock or iterable
!        When passed an iterable it is used to instantiate a CodeBlock.

    Examples
    ========

    >>> from sympy import symbols, Range
    >>> from sympy.codegen.ast import aug_assign, For
    >>> x, i, j, k = symbols('x i j k')
    >>> for_i = For(i, Range(10), [aug_assign(x, '+', i*j*k)])
    >>> for_i  # doctest: -NORMALIZE_WHITESPACE
    For(i, iterable=Range(0, 10, 1), body=CodeBlock(
        AddAugmentedAssignment(x, i*j*k)
    ))
    >>> for_ji = For(j, Range(7), [for_i])
    >>> for_ji  # doctest: -NORMALIZE_WHITESPACE
    For(j, iterable=Range(0, 7, 1), body=CodeBlock(
        For(i, iterable=Range(0, 10, 1), body=CodeBlock(
            AddAugmentedAssignment(x, i*j*k)
        ))
    ))
    >>> for_kji =For(k, Range(5), [for_ji])
    >>> for_kji # doctest: -NORMALIZE_WHITESPACE
    For(k, iterable=Range(0, 5, 1), body=CodeBlock(
        For(j, iterable=Range(0, 7, 1), body=CodeBlock(
            For(i, iterable=Range(0, 10, 1), body=CodeBlock(
                AddAugmentedAssignment(x, i*j*k)
            ))
        ))
    ))
    """
    __slots__ = _fields = ('target', 'iterable', 'body')
    _construct_target = staticmethod(_sympify)

    @classmethod
    def _construct_body(cls, itr):
        # A plain iterable of statements is wrapped into a CodeBlock.
        return itr if isinstance(itr, CodeBlock) else CodeBlock(*itr)

    @classmethod
    def _construct_iterable(cls, itr):
        if not iterable(itr):
            raise TypeError("iterable must be an iterable")
        if isinstance(itr, list):  # _sympify errors on lists because they are mutable
            itr = tuple(itr)
        return _sympify(itr)
870
+
871
+
872
class String(Atom, Token):
    """ SymPy object representing a string.

    Atomic object which is not an expression (as opposed to Symbol).

    Parameters
    ==========

    text : str

    Examples
    ========

    >>> from sympy.codegen.ast import String
    >>> f = String('foo')
    >>> f
    foo
    >>> str(f)
    'foo'
    >>> f.text
    'foo'
    >>> print(repr(f))
    String('foo')

    """
    __slots__ = _fields = ('text',)
    # text is kept only as an attribute, never in Basic's args.
    not_in_args = ['text']
    is_Atom = True

    @classmethod
    def _construct_text(cls, text):
        # Only genuine str values are accepted; no coercion is attempted.
        if not isinstance(text, str):
            raise TypeError("Argument text is not a string type.")
        return text

    def _sympystr(self, printer, *args, **kwargs):
        # str() rendering is the raw, unquoted text.
        return self.text

    def kwargs(self, exclude=(), apply=None):
        # A String has no keyword-reconstructible attributes.
        return {}

    # to be removed when Atom is given a suitable func
    @property
    def func(self):
        return lambda: self

    def _latex(self, printer):
        from sympy.printing.latex import latex_escape
        return r'\texttt{{"{}"}}'.format(latex_escape(self.text))
921
+
922
class QuotedString(String):
    """ A String that printers render wrapped in quotes. """
924
+
925
class Comment(String):
    """ A String that printers render as a source-code comment. """
927
+
928
class Node(Token):
    """ Subclass of Token, carrying the attribute 'attrs' (Tuple)

    Examples
    ========

    >>> from sympy.codegen.ast import Node, value_const, pointer_const
    >>> n1 = Node([value_const])
    >>> n1.attr_params('value_const') # get the parameters of attribute (by name)
    ()
    >>> from sympy.codegen.fnodes import dimension
    >>> n2 = Node([value_const, dimension(5, 3)])
    >>> n2.attr_params(value_const) # get the parameters of attribute (by Attribute instance)
    ()
    >>> n2.attr_params('dimension') # get the parameters of attribute (by name)
    (5, 3)
    >>> n2.attr_params(pointer_const) is None
    True

    """

    __slots__: tuple[str, ...] = ('attrs',)
    _fields = __slots__

    defaults: dict[str, Any] = {'attrs': Tuple()}

    _construct_attrs = staticmethod(_mk_Tuple)

    def attr_params(self, looking_for):
        """ Return the parameters of the Attribute named ``looking_for``
        in ``self.attrs``, or None when no such attribute is present. """
        # Accepts either a name or an Attribute instance; compare by name.
        wanted = str(looking_for)
        for attr in self.attrs:
            if str(attr.name) == wanted:
                return attr.parameters
        return None
962
+
963
class Type(Token):
    """ Represents a type.

    Explanation
    ===========

    The naming is a super-set of NumPy naming. Type has a classmethod
    ``from_expr`` which offer type deduction. It also has a method
    ``cast_check`` which casts the argument to its type, possibly raising an
    exception if rounding error is not within tolerances, or if the value is not
    representable by the underlying data type (e.g. unsigned integers).

    Parameters
    ==========

    name : str
        Name of the type, e.g. ``object``, ``int16``, ``float16`` (where the latter two
        would use the ``Type`` sub-classes ``IntType`` and ``FloatType`` respectively).
        If a ``Type`` instance is given, the said instance is returned.

    Examples
    ========

    >>> from sympy.codegen.ast import Type
    >>> t = Type.from_expr(42)
    >>> t
    integer
    >>> print(repr(t))
    IntBaseType(String('integer'))
    >>> from sympy.codegen.ast import uint8
    >>> uint8.cast_check(-1) # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    ValueError: Minimum value for data type bigger than new value.
    >>> from sympy.codegen.ast import float32
    >>> v6 = 0.123456
    >>> float32.cast_check(v6)
    0.123456
    >>> v10 = 12345.67894
    >>> float32.cast_check(v10) # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    ValueError: Casting gives a significantly different value.
    >>> boost_mp50 = Type('boost::multiprecision::cpp_dec_float_50')
    >>> from sympy import cxxcode
    >>> from sympy.codegen.ast import Declaration, Variable
    >>> cxxcode(Declaration(Variable('x', type=boost_mp50)))
    'boost::multiprecision::cpp_dec_float_50 x'

    References
    ==========

    .. [1] https://numpy.org/doc/stable/user/basics.types.html

    """
    __slots__: tuple[str, ...] = ('name',)
    _fields = __slots__

    _construct_name = String

    def _sympystr(self, printer, *args, **kwargs):
        return str(self.name)

    @classmethod
    def from_expr(cls, expr):
        """ Deduces type from an expression or a ``Symbol``.

        Parameters
        ==========

        expr : number or SymPy object
            The type will be deduced from type or properties.

        Examples
        ========

        >>> from sympy.codegen.ast import Type, integer, complex_
        >>> Type.from_expr(2) == integer
        True
        >>> from sympy import Symbol
        >>> Type.from_expr(Symbol('z', complex=True)) == complex_
        True
        >>> Type.from_expr(sum) # doctest: +ELLIPSIS
        Traceback (most recent call last):
          ...
        ValueError: Could not deduce type from expr.

        Raises
        ======

        ValueError when type deduction fails.

        """
        # Order matters: Python bool is a subclass of int, but the explicit
        # isinstance(expr, bool) test below is only reached for non-int SymPy
        # relational objects; plain Python bools deduce as integer here (as in
        # the original implementation).
        if isinstance(expr, (float, Float)):
            return real
        if isinstance(expr, (int, Integer)) or getattr(expr, 'is_integer', False):
            return integer
        if getattr(expr, 'is_real', False):
            return real
        if isinstance(expr, complex) or getattr(expr, 'is_complex', False):
            return complex_
        if isinstance(expr, bool) or getattr(expr, 'is_Relational', False):
            return bool_
        else:
            raise ValueError("Could not deduce type from expr.")

    def _check(self, value):
        # Base implementation accepts everything; subclasses override with
        # range/precision checks.
        pass

    def cast_check(self, value, rtol=None, atol=0, precision_targets=None):
        """ Casts a value to the data type of the instance.

        Parameters
        ==========

        value : number
        rtol : floating point number
            Relative tolerance. (will be deduced if not given).
        atol : floating point number
            Absolute tolerance (in addition to ``rtol``).
        precision_targets : dict
            Maps substitutions for Type, e.g. {integer: int64, real: float32}.
            NOTE(review): currently unused by this implementation — confirm
            whether callers rely on it before removing.

        Examples
        ========

        >>> from sympy.codegen.ast import integer, float32, int8
        >>> integer.cast_check(3.0) == 3
        True
        >>> float32.cast_check(1e-40) # doctest: +ELLIPSIS
        Traceback (most recent call last):
          ...
        ValueError: Minimum value for data type bigger than new value.
        >>> int8.cast_check(256) # doctest: +ELLIPSIS
        Traceback (most recent call last):
          ...
        ValueError: Maximum value for data type smaller than new value.
        >>> v10 = 12345.67894
        >>> float32.cast_check(v10) # doctest: +ELLIPSIS
        Traceback (most recent call last):
          ...
        ValueError: Casting gives a significantly different value.
        >>> from sympy.codegen.ast import float64
        >>> float64.cast_check(v10)
        12345.67894
        >>> from sympy import Float
        >>> v18 = Float('0.123456789012345646')
        >>> float64.cast_check(v18)
        Traceback (most recent call last):
          ...
        ValueError: Casting gives a significantly different value.
        >>> from sympy.codegen.ast import float80
        >>> float80.cast_check(v18)
        0.123456789012345649

        """
        val = sympify(value)

        ten = Integer(10)
        exp10 = getattr(self, 'decimal_dig', None)

        if rtol is None:
            # Default tolerance from the type's decimal precision when known.
            rtol = 1e-15 if exp10 is None else 2.0*ten**(-exp10)

        def tol(num):
            return atol + rtol*abs(num)

        new_val = self.cast_nocheck(value)
        self._check(new_val)

        delta = new_val - val
        if abs(delta) > tol(val):  # rounding, e.g. int(3.5) != 3.5
            raise ValueError("Casting gives a significantly different value.")

        return new_val

    def _latex(self, printer):
        from sympy.printing.latex import latex_escape
        type_name = latex_escape(self.__class__.__name__)
        name = latex_escape(self.name.text)
        return r"\text{{{}}}\left(\texttt{{{}}}\right)".format(type_name, name)
1144
+
1145
+
1146
class IntBaseType(Type):
    """ Integer base type, contains no size information. """
    __slots__ = ()

    def cast_nocheck(self, i):
        # Truncate toward zero, mirroring C-style integer conversion.
        return Integer(int(i))


class _SizedIntType(IntBaseType):
    __slots__ = ('nbits',)
    _fields = Type._fields + __slots__

    _construct_nbits = Integer

    def _check(self, value):
        # Reject anything outside the representable range for this bit width.
        if value < self.min:
            raise ValueError("Value is too small: %d < %d" % (value, self.min))
        if value > self.max:
            raise ValueError("Value is too big: %d > %d" % (value, self.max))


class SignedIntType(_SizedIntType):
    """ Represents a signed integer type. """
    __slots__ = ()

    @property
    def min(self):
        # Two's-complement lower bound.
        return -2**(self.nbits - 1)

    @property
    def max(self):
        # Two's-complement upper bound.
        return 2**(self.nbits - 1) - 1


class UnsignedIntType(_SizedIntType):
    """ Represents an unsigned integer type. """
    __slots__ = ()

    @property
    def min(self):
        return 0

    @property
    def max(self):
        return 2**self.nbits - 1


# Cached SymPy Integer used below for exact power-of-two arithmetic.
two = Integer(2)
1189
+
1190
class FloatBaseType(Type):
    """ Represents a floating point number type. """
    __slots__ = ()
    # Plain conversion through sympy.Float; subclasses add precision handling.
    cast_nocheck = Float
1194
+
1195
class FloatType(FloatBaseType):
    """ Represents a floating point type with fixed bit width.

    Base 2 & one sign bit is assumed.

    Parameters
    ==========

    name : str
        Name of the type.
    nbits : integer
        Number of bits used (storage).
    nmant : integer
        Number of bits used to represent the mantissa.
    nexp : integer
        Number of bits used to represent the exponent.

    Examples
    ========

    >>> from sympy import S
    >>> from sympy.codegen.ast import FloatType
    >>> half_precision = FloatType('f16', nbits=16, nmant=10, nexp=5)
    >>> half_precision.max
    65504
    >>> half_precision.tiny == S(2)**-14
    True
    >>> half_precision.eps == S(2)**-10
    True
    >>> half_precision.dig == 3
    True
    >>> half_precision.decimal_dig == 5
    True
    >>> half_precision.cast_check(1.0)
    1.0
    >>> half_precision.cast_check(1e5) # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    ValueError: Maximum value for data type smaller than new value.
    """

    __slots__ = ('nbits', 'nmant', 'nexp',)
    _fields = Type._fields + __slots__

    _construct_nbits = _construct_nmant = _construct_nexp = Integer

    @property
    def max_exponent(self):
        """ The largest positive number n, such that 2**(n - 1) is a representable finite value. """
        # cf. C++'s ``std::numeric_limits::max_exponent``
        return two**(self.nexp - 1)

    @property
    def min_exponent(self):
        """ The lowest negative number n, such that 2**(n - 1) is a valid normalized number. """
        # cf. C++'s ``std::numeric_limits::min_exponent``
        return 3 - self.max_exponent

    @property
    def max(self):
        """ Maximum value representable. """
        return (1 - two**-(self.nmant+1))*two**self.max_exponent

    @property
    def tiny(self):
        """ The minimum positive normalized value. """
        # See C macros: FLT_MIN, DBL_MIN, LDBL_MIN
        # or C++'s ``std::numeric_limits::min``
        # or numpy.finfo(dtype).tiny
        return two**(self.min_exponent - 1)

    @property
    def eps(self):
        """ Difference between 1.0 and the next representable value. """
        return two**(-self.nmant)

    @property
    def dig(self):
        """ Number of decimal digits that are guaranteed to be preserved in text.

        When converting text -> float -> text, you are guaranteed that at least ``dig``
        number of digits are preserved with respect to rounding or overflow.
        """
        from sympy.functions import floor, log
        return floor(self.nmant * log(2)/log(10))

    @property
    def decimal_dig(self):
        """ Number of digits needed to store & load without loss.

        Explanation
        ===========

        Number of decimal digits needed to guarantee that two consecutive conversions
        (float -> text -> float) to be idempotent. This is useful when one does not want
        to lose precision due to rounding errors when storing a floating point value
        as text.
        """
        from sympy.functions import ceiling, log
        return ceiling((self.nmant + 1) * log(2)/log(10) + 1)

    def cast_nocheck(self, value):
        """ Casts without checking if out of bounds or subnormal. """
        if value == oo:  # float(oo) or oo
            return float(oo)
        elif value == -oo:  # float(-oo) or -oo
            return float(-oo)
        # Round through evalf at this type's decimal precision.
        return Float(str(sympify(value).evalf(self.decimal_dig)), self.decimal_dig)

    def _check(self, value):
        if value < -self.max:
            raise ValueError("Value is too small: %d < %d" % (value, -self.max))
        if value > self.max:
            raise ValueError("Value is too big: %d > %d" % (value, self.max))
        if abs(value) < self.tiny:
            raise ValueError("Smallest (absolute) value for data type bigger than new value.")
1313
+
1314
class ComplexBaseType(FloatBaseType):

    __slots__ = ()

    def cast_nocheck(self, value):
        """ Casts without checking if out of bounds or subnormal. """
        from sympy.functions import re, im
        # Cast the real and imaginary components separately, then recombine.
        real_part = super().cast_nocheck(re(value))
        imag_part = super().cast_nocheck(im(value))
        return real_part + imag_part*1j

    def _check(self, value):
        from sympy.functions import re, im
        # Both components must pass the underlying floating-point checks.
        super()._check(re(value))
        super()._check(im(value))


class ComplexType(ComplexBaseType, FloatType):
    """ Represents a complex floating point number. """
    __slots__ = ()
1335
+
1336
+
1337
# NumPy types:
intc = IntBaseType('intc')
intp = IntBaseType('intp')
int8 = SignedIntType('int8', 8)
int16 = SignedIntType('int16', 16)
int32 = SignedIntType('int32', 32)
int64 = SignedIntType('int64', 64)
uint8 = UnsignedIntType('uint8', 8)
uint16 = UnsignedIntType('uint16', 16)
uint32 = UnsignedIntType('uint32', 32)
uint64 = UnsignedIntType('uint64', 64)
float16 = FloatType('float16', 16, nexp=5, nmant=10)      # IEEE 754 binary16, Half precision
float32 = FloatType('float32', 32, nexp=8, nmant=23)      # IEEE 754 binary32, Single precision
float64 = FloatType('float64', 64, nexp=11, nmant=52)     # IEEE 754 binary64, Double precision
float80 = FloatType('float80', 80, nexp=15, nmant=63)     # x86 extended precision (1 integer part bit), "long double"
float128 = FloatType('float128', 128, nexp=15, nmant=112) # IEEE 754 binary128, Quadruple precision
float256 = FloatType('float256', 256, nexp=19, nmant=236) # IEEE 754 binary256, Octuple precision

# Complex types reuse the component float type's parameters (all but name/nbits).
complex64 = ComplexType('complex64', nbits=64, **float32.kwargs(exclude=('name', 'nbits')))
complex128 = ComplexType('complex128', nbits=128, **float64.kwargs(exclude=('name', 'nbits')))

# Generic types (precision may be chosen by code printers):
untyped = Type('untyped')
real = FloatBaseType('real')
integer = IntBaseType('integer')
complex_ = ComplexBaseType('complex')
bool_ = Type('bool')
1364
+
1365
+
1366
class Attribute(Token):
    """ Attribute (possibly parametrized)

    For use with :class:`sympy.codegen.ast.Node` (which takes instances of
    ``Attribute`` as ``attrs``).

    Parameters
    ==========

    name : str
    parameters : Tuple

    Examples
    ========

    >>> from sympy.codegen.ast import Attribute
    >>> volatile = Attribute('volatile')
    >>> volatile
    volatile
    >>> print(repr(volatile))
    Attribute(String('volatile'))
    >>> a = Attribute('foo', [1, 2, 3])
    >>> a
    foo(1, 2, 3)
    >>> a.parameters == (1, 2, 3)
    True
    """
    __slots__ = _fields = ('name', 'parameters')
    defaults = {'parameters': Tuple()}

    _construct_name = String
    _construct_parameters = staticmethod(_mk_Tuple)

    def _sympystr(self, printer, *args, **kwargs):
        out = str(self.name)
        if self.parameters:
            rendered = [printer._print(p, *args, **kwargs) for p in self.parameters]
            out += '(%s)' % ', '.join(rendered)
        return out


value_const = Attribute('value_const')
pointer_const = Attribute('pointer_const')
1408
+
1409
+
1410
class Variable(Node):
    """ Represents a variable.

    Parameters
    ==========

    symbol : Symbol
    type : Type (optional)
        Type of the variable.
    attrs : iterable of Attribute instances
        Will be stored as a Tuple.

    Examples
    ========

    >>> from sympy import Symbol
    >>> from sympy.codegen.ast import Variable, float32, integer
    >>> x = Symbol('x')
    >>> v = Variable(x, type=float32)
    >>> v.attrs
    ()
    >>> v == Variable('x')
    False
    >>> v == Variable('x', type=float32)
    True
    >>> v
    Variable(x, type=float32)

    One may also construct a ``Variable`` instance with the type deduced from
    assumptions about the symbol using the ``deduced`` classmethod:

    >>> i = Symbol('i', integer=True)
    >>> v = Variable.deduced(i)
    >>> v.type == integer
    True
    >>> v == Variable('i')
    False
    >>> from sympy.codegen.ast import value_const
    >>> value_const in v.attrs
    False
    >>> w = Variable('w', attrs=[value_const])
    >>> w
    Variable(w, attrs=(value_const,))
    >>> value_const in w.attrs
    True
    >>> w.as_Declaration(value=42)
    Declaration(Variable(w, value=42, attrs=(value_const,)))

    """

    __slots__ = ('symbol', 'type', 'value')
    _fields = __slots__ + Node._fields

    defaults = Node.defaults.copy()
    defaults.update({'type': untyped, 'value': none})

    _construct_symbol = staticmethod(sympify)
    _construct_value = staticmethod(sympify)

    @classmethod
    def deduced(cls, symbol, value=None, attrs=Tuple(), cast_check=True):
        """ Alt. constructor with type deduction from ``Type.from_expr``.

        Deduces type primarily from ``symbol``, secondarily from ``value``.

        Parameters
        ==========

        symbol : Symbol
        value : expr
            (optional) value of the variable.
        attrs : iterable of Attribute instances
        cast_check : bool
            Whether to apply ``Type.cast_check`` on ``value``.

        Examples
        ========

        >>> from sympy import Symbol
        >>> from sympy.codegen.ast import Variable, complex_
        >>> n = Symbol('n', integer=True)
        >>> str(Variable.deduced(n).type)
        'integer'
        >>> x = Symbol('x', real=True)
        >>> v = Variable.deduced(x)
        >>> v.type
        real
        >>> z = Symbol('z', complex=True)
        >>> Variable.deduced(z).type == complex_
        True

        """
        # Already a Variable: nothing to deduce.
        if isinstance(symbol, Variable):
            return symbol

        # Primary deduction from the symbol's assumptions; fall back to the
        # value when the symbol carries no usable assumptions.
        try:
            deduced_type = Type.from_expr(symbol)
        except ValueError:
            deduced_type = Type.from_expr(value)

        if value is not None and cast_check:
            value = deduced_type.cast_check(value)
        return cls(symbol, type=deduced_type, value=value, attrs=attrs)

    def as_Declaration(self, **kwargs):
        """ Convenience method for creating a Declaration instance.

        Explanation
        ===========

        If the variable of the Declaration need to wrap a modified
        variable keyword arguments may be passed (overriding e.g.
        the ``value`` of the Variable instance).

        Examples
        ========

        >>> from sympy.codegen.ast import Variable, NoneToken
        >>> x = Variable('x')
        >>> decl1 = x.as_Declaration()
        >>> # value is special NoneToken() which must be tested with == operator
        >>> decl1.variable.value is None # won't work
        False
        >>> decl1.variable.value == None # not PEP-8 compliant
        True
        >>> decl1.variable.value == NoneToken() # OK
        True
        >>> decl2 = x.as_Declaration(value=42.0)
        >>> decl2.variable.value == 42.0
        True

        """
        # Rebuild the variable with any overridden keyword arguments applied.
        merged = self.kwargs()
        merged.update(kwargs)
        return Declaration(self.func(**merged))

    def _relation(self, rhs, op):
        try:
            rhs = _sympify(rhs)
        except SympifyError:
            raise TypeError("Invalid comparison %s < %s" % (self, rhs))
        return op(self, rhs, evaluate=False)

    def __lt__(self, other):
        return self._relation(other, Lt)

    def __le__(self, other):
        return self._relation(other, Le)

    def __ge__(self, other):
        return self._relation(other, Ge)

    def __gt__(self, other):
        return self._relation(other, Gt)
1557
+
1558
class Pointer(Variable):
    """ Represents a pointer. See ``Variable``.

    Examples
    ========

    Can create instances of ``Element``:

    >>> from sympy import Symbol
    >>> from sympy.codegen.ast import Pointer
    >>> i = Symbol('i', integer=True)
    >>> p = Pointer('x')
    >>> p[i+1]
    Element(x, indices=(i + 1,))

    """
    __slots__ = ()

    def __getitem__(self, key):
        # A non-iterable key raises TypeError during Tuple construction;
        # wrap it as a 1-tuple of indices in that case.
        try:
            return Element(self.symbol, key)
        except TypeError:
            return Element(self.symbol, (key,))


class Element(Token):
    """ Element in (a possibly N-dimensional) array.

    Examples
    ========

    >>> from sympy.codegen.ast import Element
    >>> elem = Element('x', 'ijk')
    >>> elem.symbol.name == 'x'
    True
    >>> elem.indices
    (i, j, k)
    >>> from sympy import ccode
    >>> ccode(elem)
    'x[i][j][k]'
    >>> ccode(Element('x', 'ijk', strides='lmn', offset='o'))
    'x[i*l + j*m + k*n + o]'

    """
    __slots__ = _fields = ('symbol', 'indices', 'strides', 'offset')
    defaults = {'strides': none, 'offset': none}

    _construct_symbol = staticmethod(sympify)
    _construct_offset = staticmethod(sympify)

    @staticmethod
    def _construct_indices(arg):
        return Tuple(*arg)

    @staticmethod
    def _construct_strides(arg):
        return Tuple(*arg)
1608
+
1609
+
1610
class Declaration(Token):
    """ Represents a variable declaration

    Parameters
    ==========

    variable : Variable

    Examples
    ========

    >>> from sympy.codegen.ast import Declaration, NoneToken, untyped
    >>> z = Declaration('z')
    >>> z.variable.type == untyped
    True
    >>> # value is special NoneToken() which must be tested with == operator
    >>> z.variable.value is None # won't work
    False
    >>> z.variable.value == None # not PEP-8 compliant
    True
    >>> z.variable.value == NoneToken() # OK
    True
    """
    __slots__ = _fields = ('variable',)
    # Anything accepted by Variable's constructor may be passed directly.
    _construct_variable = Variable
1635
+
1636
+
1637
class While(Token):
    """ Represents a 'while-loop' in the code.

    Expressions are of the form:
        "while condition:
             body..."

    Parameters
    ==========

    condition : expression convertible to Boolean
    body : CodeBlock or iterable
        When passed an iterable it is used to instantiate a CodeBlock.

    Examples
    ========

    >>> from sympy import symbols, Gt, Abs
    >>> from sympy.codegen import aug_assign, Assignment, While
    >>> x, dx = symbols('x dx')
    >>> expr = 1 - x**2
    >>> whl = While(Gt(Abs(dx), 1e-9), [
    ...     Assignment(dx, -expr/expr.diff(x)),
    ...     aug_assign(x, '+', dx)
    ... ])

    """
    __slots__ = _fields = ('condition', 'body')
    # _sympify validates and converts the condition to a SymPy object.
    _construct_condition = staticmethod(lambda cond: _sympify(cond))

    @classmethod
    def _construct_body(cls, itr):
        # Accept a ready-made CodeBlock or any iterable of statements.
        if isinstance(itr, CodeBlock):
            return itr
        else:
            return CodeBlock(*itr)
1673
+
1674
+
1675
class Scope(Token):
    """ Represents a scope in the code.

    Parameters
    ==========

    body : CodeBlock or iterable
        When passed an iterable it is used to instantiate a CodeBlock.

    """
    __slots__ = _fields = ('body',)

    @classmethod
    def _construct_body(cls, itr):
        # Accept a ready-made CodeBlock or any iterable of statements.
        return itr if isinstance(itr, CodeBlock) else CodeBlock(*itr)
1693
+
1694
+
1695
class Stream(Token):
    """ Represents a stream.

    There are two predefined Stream instances ``stdout`` & ``stderr``.

    Parameters
    ==========

    name : str

    Examples
    ========

    >>> from sympy import pycode, Symbol
    >>> from sympy.codegen.ast import Print, stderr, QuotedString
    >>> print(pycode(Print(['x'], file=stderr)))
    print(x, file=sys.stderr)
    >>> x = Symbol('x')
    >>> print(pycode(Print([QuotedString('x')], file=stderr))) # print literally "x"
    print("x", file=sys.stderr)

    """
    __slots__ = _fields = ('name',)
    _construct_name = String


# Predefined streams for the common case.
stdout = Stream('stdout')
stderr = Stream('stderr')
1722
+
1723
+
1724
class Print(Token):
    r""" Represents print command in the code.

    Parameters
    ==========

    formatstring : str
    *args : Basic instances (or convertible to such through sympify)

    Examples
    ========

    >>> from sympy.codegen.ast import Print
    >>> from sympy import pycode
    >>> print(pycode(Print('x y'.split(), "coordinate: %12.5g %12.5g\\n")))
    print("coordinate: %12.5g %12.5g\n" % (x, y), end="")

    """

    __slots__ = _fields = ('print_args', 'format_string', 'file')
    # Both format string and file are optional (NoneToken by default).
    defaults = {'format_string': none, 'file': none}

    _construct_print_args = staticmethod(_mk_Tuple)
    _construct_format_string = QuotedString
    _construct_file = Stream
1749
+
1750
+
1751
class FunctionPrototype(Node):
    """ Represents a function prototype

    Allows the user to generate forward declaration in e.g. C/C++.

    Parameters
    ==========

    return_type : Type
    name : str
    parameters: iterable of Variable instances
    attrs : iterable of Attribute instances

    Examples
    ========

    >>> from sympy import ccode, symbols
    >>> from sympy.codegen.ast import real, FunctionPrototype
    >>> x, y = symbols('x y', real=True)
    >>> fp = FunctionPrototype(real, 'foo', [x, y])
    >>> ccode(fp)
    'double foo(double x, double y)'

    """

    __slots__ = ('return_type', 'name', 'parameters')
    _fields: tuple[str, ...] = __slots__ + Node._fields

    _construct_return_type = Type
    _construct_name = String

    @staticmethod
    def _construct_parameters(args):
        # Normalize every entry to a Variable instance.
        def as_variable(arg):
            if isinstance(arg, Declaration):
                return arg.variable
            if isinstance(arg, Variable):
                return arg
            return Variable.deduced(arg)

        return Tuple(*[as_variable(arg) for arg in args])

    @classmethod
    def from_FunctionDefinition(cls, func_def):
        if not isinstance(func_def, FunctionDefinition):
            raise TypeError("func_def is not an instance of FunctionDefinition")
        # Drop the body; everything else carries over to the prototype.
        return cls(**func_def.kwargs(exclude=('body',)))
1798
+
1799
+
1800
class FunctionDefinition(FunctionPrototype):
    """ Represents a function definition in the code.

    Parameters
    ==========

    return_type : Type
    name : str
    parameters: iterable of Variable instances
    body : CodeBlock or iterable
    attrs : iterable of Attribute instances

    Examples
    ========

    >>> from sympy import ccode, symbols
    >>> from sympy.codegen.ast import real, FunctionPrototype
    >>> x, y = symbols('x y', real=True)
    >>> fp = FunctionPrototype(real, 'foo', [x, y])
    >>> ccode(fp)
    'double foo(double x, double y)'
    >>> from sympy.codegen.ast import FunctionDefinition, Return
    >>> body = [Return(x*y)]
    >>> fd = FunctionDefinition.from_FunctionPrototype(fp, body)
    >>> print(ccode(fd))
    double foo(double x, double y){
       return x*y;
    }
    """

    __slots__ = ('body', )
    # Insert 'body' before the trailing Node fields inherited via the prototype.
    _fields = FunctionPrototype._fields[:-1] + __slots__ + Node._fields

    @classmethod
    def _construct_body(cls, itr):
        # Accept a ready-made CodeBlock or any iterable of statements.
        return itr if isinstance(itr, CodeBlock) else CodeBlock(*itr)

    @classmethod
    def from_FunctionPrototype(cls, func_proto, body):
        if not isinstance(func_proto, FunctionPrototype):
            raise TypeError("func_proto is not an instance of FunctionPrototype")
        return cls(body=body, **func_proto.kwargs())
1845
+
1846
+
1847
class Return(Token):
    """ Represents a return command in the code.

    Parameters
    ==========

    return : Basic

    Examples
    ========

    >>> from sympy.codegen.ast import Return
    >>> from sympy.printing.pycode import pycode
    >>> from sympy import Symbol
    >>> x = Symbol('x')
    >>> print(pycode(Return(x)))
    return x

    """
    __slots__ = _fields = ('return',)
    # Field named 'return' (a Python keyword) is only ever accessed via Token
    # machinery, never as an attribute literal.
    _construct_return = staticmethod(_sympify)
1868
+
1869
+
1870
class FunctionCall(Token, Expr):
    """ Represents a call to a function in the code.

    Parameters
    ==========

    name : str
    function_args : Tuple

    Examples
    ========

    >>> from sympy.codegen.ast import FunctionCall
    >>> from sympy import pycode
    >>> fcall = FunctionCall('foo', 'bar baz'.split())
    >>> print(pycode(fcall))
    foo(bar, baz)

    """
    __slots__ = _fields = ('name', 'function_args')

    _construct_name = String

    @staticmethod
    def _construct_function_args(args):
        return Tuple(*args)
1893
+
1894
+
1895
class Raise(Token):
    """ Prints as 'raise ...' in Python, 'throw ...' in C++"""
    __slots__ = _fields = ('exception',)


class RuntimeError_(Token):
    """ Represents 'std::runtime_error' in C++ and 'RuntimeError' in Python.

    Note that the latter is uncommon, and you might want to use e.g. ValueError.
    """
    __slots__ = _fields = ('message',)
    _construct_message = String
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/cnodes.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ AST nodes specific to the C family of languages
3
+ """
4
+
5
+ from sympy.codegen.ast import (
6
+ Attribute, Declaration, Node, String, Token, Type, none,
7
+ FunctionCall, CodeBlock
8
+ )
9
+ from sympy.core.basic import Basic
10
+ from sympy.core.containers import Tuple
11
+ from sympy.core.sympify import sympify
12
+
13
void = Type('void')

# Common C storage/qualifier attributes.
restrict = Attribute('restrict')  # guarantees no pointer aliasing
volatile = Attribute('volatile')
static = Attribute('static')
18
+
19
+
20
def alignof(arg):
    """ Generate a FunctionCall instance for calling 'alignof' """
    # Bare strings (e.g. a type name) are wrapped as String tokens.
    return FunctionCall('alignof', [String(arg) if isinstance(arg, str) else arg])


def sizeof(arg):
    """ Generate a FunctionCall instance for calling 'sizeof'

    Examples
    ========

    >>> from sympy.codegen.ast import real
    >>> from sympy.codegen.cnodes import sizeof
    >>> from sympy import ccode
    >>> ccode(sizeof(real))
    'sizeof(double)'
    """
    # Bare strings (e.g. a type name) are wrapped as String tokens.
    return FunctionCall('sizeof', [String(arg) if isinstance(arg, str) else arg])
38
+
39
+
40
class CommaOperator(Basic):
    """ Represents the comma operator in C """
    def __new__(cls, *args):
        # Sympify every operand before handing off to Basic.
        sympified = [sympify(arg) for arg in args]
        return Basic.__new__(cls, *sympified)


class Label(Node):
    """ Label for use with e.g. goto statement.

    Examples
    ========

    >>> from sympy import ccode, Symbol
    >>> from sympy.codegen.cnodes import Label, PreIncrement
    >>> print(ccode(Label('foo')))
    foo:
    >>> print(ccode(Label('bar', [PreIncrement(Symbol('a'))])))
    bar:
    ++(a);

    """
    __slots__ = _fields = ('name', 'body')
    defaults = {'body': none}
    _construct_name = String

    @classmethod
    def _construct_body(cls, itr):
        # Accept a ready-made CodeBlock or any iterable of statements.
        return itr if isinstance(itr, CodeBlock) else CodeBlock(*itr)
71
+
72
+
73
class goto(Token):
    """ Represents goto in C """
    __slots__ = _fields = ('label',)
    # A bare name given as the label is promoted to a Label instance.
    _construct_label = Label
77
+
78
+
79
class PreDecrement(Basic):
    """ Represents the pre-decrement operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PreDecrement
    >>> from sympy import ccode
    >>> ccode(PreDecrement(x))
    '--(x)'

    """
    nargs = 1  # unary operator


class PostDecrement(Basic):
    """ Represents the post-decrement operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PostDecrement
    >>> from sympy import ccode
    >>> ccode(PostDecrement(x))
    '(x)--'

    """
    nargs = 1  # unary operator


class PreIncrement(Basic):
    """ Represents the pre-increment operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PreIncrement
    >>> from sympy import ccode
    >>> ccode(PreIncrement(x))
    '++(x)'

    """
    nargs = 1  # unary operator


class PostIncrement(Basic):
    """ Represents the post-increment operator

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cnodes import PostIncrement
    >>> from sympy import ccode
    >>> ccode(PostIncrement(x))
    '(x)++'

    """
    nargs = 1  # unary operator
141
+
142
+
143
class struct(Node):
    """ Represents a struct in C """
    __slots__ = _fields = ('name', 'declarations')
    # Anonymous structs are allowed: name defaults to NoneToken.
    defaults = {'name': none}
    _construct_name = String

    @classmethod
    def _construct_declarations(cls, args):
        # Every member is normalized to a Declaration.
        return Tuple(*map(Declaration, args))


class union(struct):
    """ Represents a union in C """
    __slots__ = ()
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/cutils.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ from sympy.printing.c import C99CodePrinter
2
+
3
def render_as_source_file(content, Printer=C99CodePrinter, settings=None):
    """ Renders a C source file (with required #include statements) """
    code_printer = Printer(settings or {})
    rendered = code_printer.doprint(content)
    # The printer records which headers the printed constructs require.
    include_lines = ['#include <%s>' % header for header in code_printer.headers]
    return '\n'.join(include_lines) + '\n\n' + rendered
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/futils.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import chain
2
+ from sympy.codegen.fnodes import Module
3
+ from sympy.core.symbol import Dummy
4
+ from sympy.printing.fortran import FCodePrinter
5
+
6
+ """ This module collects utilities for rendering Fortran code. """
7
+
8
+
9
def render_as_module(definitions, name, declarations=(), printer_settings=None):
    """ Creates a ``Module`` instance and renders it as a string.

    This generates Fortran source code for a module with the correct ``use`` statements.

    Parameters
    ==========

    definitions : iterable
        Passed to :class:`sympy.codegen.fnodes.Module`.
    name : str
        Passed to :class:`sympy.codegen.fnodes.Module`.
    declarations : iterable
        Passed to :class:`sympy.codegen.fnodes.Module`. It will be extended with
        use statements, 'implicit none' and public list generated from ``definitions``.
    printer_settings : dict
        Passed to ``FCodePrinter`` (default: ``{'standard': 2003, 'source_format': 'free'}``).

    """
    printer_settings = printer_settings or {'standard': 2003, 'source_format': 'free'}
    printer = FCodePrinter(printer_settings)
    # The Dummy acts as a unique placeholder in the declaration section; its
    # printed form is replaced below with the generated use/implicit/public lines.
    dummy = Dummy()
    if isinstance(definitions, Module):
        raise ValueError("This function expects to construct a module on its own.")
    mod = Module(name, chain(declarations, [dummy]), definitions)
    # Print first so that printer.module_uses gets populated by the definitions.
    fstr = printer.doprint(mod)
    module_use_str = ' %s\n' % ' \n'.join(['use %s, only: %s' % (k, ', '.join(v)) for
                                           k, v in printer.module_uses.items()])
    module_use_str += ' implicit none\n'
    module_use_str += ' private\n'
    # Only nodes that actually carry a name are exported.
    module_use_str += ' public %s\n' % ', '.join([str(node.name) for node in definitions if getattr(node, 'name', None)])
    return fstr.replace(printer.doprint(dummy), module_use_str)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/matrix_nodes.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Additional AST nodes for operations on matrices. The nodes in this module
3
+ are meant to represent optimization of matrix expressions within codegen's
4
+ target languages that cannot be represented by SymPy expressions.
5
+
6
+ As an example, we can use :meth:`sympy.codegen.rewriting.optimize` and the
7
+ ``matin_opt`` optimization provided in :mod:`sympy.codegen.rewriting` to
8
+ transform matrix multiplication under certain assumptions:
9
+
10
+ >>> from sympy import symbols, MatrixSymbol
11
+ >>> n = symbols('n', integer=True)
12
+ >>> A = MatrixSymbol('A', n, n)
13
+ >>> x = MatrixSymbol('x', n, 1)
14
+ >>> expr = A**(-1) * x
15
+ >>> from sympy import assuming, Q
16
+ >>> from sympy.codegen.rewriting import matinv_opt, optimize
17
+ >>> with assuming(Q.fullrank(A)):
18
+ ... optimize(expr, [matinv_opt])
19
+ MatrixSolve(A, vector=x)
20
+ """
21
+
22
+ from .ast import Token
23
+ from sympy.matrices import MatrixExpr
24
+ from sympy.core.sympify import sympify
25
+
26
+
27
class MatrixSolve(Token, MatrixExpr):
    """Represents an operation to solve a linear matrix equation.

    Parameters
    ==========

    matrix : MatrixSymbol

      Matrix representing the coefficients of variables in the linear
      equation. This matrix must be square and full-rank (i.e. all columns must
      be linearly independent) for the solving operation to be valid.

    vector : MatrixSymbol

      One-column matrix representing the solutions to the equations
      represented in ``matrix``.

    Examples
    ========

    >>> from sympy import symbols, MatrixSymbol
    >>> from sympy.codegen.matrix_nodes import MatrixSolve
    >>> n = symbols('n', integer=True)
    >>> A = MatrixSymbol('A', n, n)
    >>> x = MatrixSymbol('x', n, 1)
    >>> from sympy.printing.numpy import NumPyPrinter
    >>> NumPyPrinter().doprint(MatrixSolve(A, x))
    'numpy.linalg.solve(A, x)'
    >>> from sympy import octave_code
    >>> octave_code(MatrixSolve(A, x))
    'A \\\\ x'

    """
    __slots__ = _fields = ('matrix', 'vector')

    # Both arguments are sympified as-is on construction.
    _construct_matrix = staticmethod(sympify)
    _construct_vector = staticmethod(sympify)

    @property
    def shape(self):
        # The solution has the same shape as the right-hand-side vector.
        return self.vector.shape

    def _eval_derivative(self, x):
        # d/dx [A^{-1} b] = A^{-1} (b' - A' A^{-1} b), expressed with
        # MatrixSolve so no explicit inverse appears in generated code.
        A, b = self.matrix, self.vector
        return MatrixSolve(A, b.diff(x) - A.diff(x) * MatrixSolve(A, b))
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/pynodes.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .abstract_nodes import List as AbstractList
2
+ from .ast import Token
3
+
4
+
5
class List(AbstractList):
    """Python-specific list node; behavior is inherited from the abstract List."""
    pass
7
+
8
+
9
class NumExprEvaluate(Token):
    """represents a call to :class:`numexpr`s :func:`evaluate`"""
    # Single field: the expression handed to numexpr.evaluate.
    __slots__ = _fields = ('expr',)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/codegen/scipy_nodes.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.core.function import Add, ArgumentIndexError, Function
2
+ from sympy.core.power import Pow
3
+ from sympy.core.singleton import S
4
+ from sympy.functions.elementary.exponential import log
5
+ from sympy.functions.elementary.trigonometric import cos, sin
6
+
7
+
8
def _cosm1(x, *, evaluate=True):
    """Build ``cos(x) - 1`` as an explicit Add node (optionally unevaluated)."""
    cos_term = cos(x, evaluate=evaluate)
    return Add(cos_term, -S.One, evaluate=evaluate)
10
+
11
+
12
class cosm1(Function):
    """ Minus one plus cosine of x, i.e. cos(x) - 1. For use when x is close to zero.

    Helper class for use with e.g. scipy.special.cosm1
    See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.cosm1.html
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        # d/dx (cos(x) - 1) = -sin(x)
        if argindex == 1:
            return -sin(*self.args)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_rewrite_as_cos(self, x, **kwargs):
        return _cosm1(x)

    def _eval_evalf(self, *args, **kwargs):
        # Evaluate numerically via the cos(x) - 1 form.
        return self.rewrite(cos).evalf(*args, **kwargs)

    def _eval_simplify(self, **kwargs):
        x, = self.args
        candidate = _cosm1(x.simplify(**kwargs))
        # Only accept the expanded form if simplification actually changed it;
        # otherwise keep the numerically-stable cosm1 wrapper.
        if candidate != _cosm1(x, evaluate=False):
            return candidate
        else:
            return cosm1(x)
42
+
43
+
44
def _powm1(x, y, *, evaluate=True):
    """Build ``x**y - 1`` as an explicit Add node (optionally unevaluated)."""
    pow_term = Pow(x, y, evaluate=evaluate)
    return Add(pow_term, -S.One, evaluate=evaluate)
46
+
47
+
48
class powm1(Function):
    """ Minus one plus x to the power of y, i.e. x**y - 1. For use when x is close to one or y is close to zero.

    Helper class for use with e.g. scipy.special.powm1
    See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.powm1.html
    """
    nargs = 2

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        # d/dx (x**y - 1) = y*x**(y-1); d/dy (x**y - 1) = log(x)*x**y
        if argindex == 1:
            return Pow(self.args[0], self.args[1])*self.args[1]/self.args[0]
        elif argindex == 2:
            return log(self.args[0])*Pow(*self.args)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_rewrite_as_Pow(self, x, y, **kwargs):
        return _powm1(x, y)

    def _eval_evalf(self, *args, **kwargs):
        # Evaluate numerically via the x**y - 1 form.
        return self.rewrite(Pow).evalf(*args, **kwargs)

    def _eval_simplify(self, **kwargs):
        x, y = self.args
        candidate = _powm1(x.simplify(**kwargs), y.simplify(**kwargs))
        # Only accept the expanded form if simplification actually changed it;
        # otherwise keep the numerically-stable powm1 wrapper.
        if candidate != _powm1(x, y, evaluate=False):
            return candidate
        else:
            return powm1(x, y)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/combinatorics/tests/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (226 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/__pycache__/delta.cpython-311.pyc ADDED
Binary file (15.3 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/delta.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module implements sums and products containing the Kronecker Delta function.
3
+
4
+ References
5
+ ==========
6
+
7
+ .. [1] https://mathworld.wolfram.com/KroneckerDelta.html
8
+
9
+ """
10
+ from .products import product
11
+ from .summations import Sum, summation
12
+ from sympy.core import Add, Mul, S, Dummy
13
+ from sympy.core.cache import cacheit
14
+ from sympy.core.sorting import default_sort_key
15
+ from sympy.functions import KroneckerDelta, Piecewise, piecewise_fold
16
+ from sympy.polys.polytools import factor
17
+ from sympy.sets.sets import Interval
18
+ from sympy.solvers.solvers import solve
19
+
20
+
21
@cacheit
def _expand_delta(expr, index):
    """
    Expand the first Add containing a simple KroneckerDelta.
    """
    if not expr.is_Mul:
        return expr
    delta = None
    func = Add
    terms = [S.One]
    for h in expr.args:
        if delta is None and h.is_Add and _has_simple_delta(h, index):
            # First qualifying Add factor: distribute the product over its
            # terms, so each resulting term carries its own delta.
            delta = True
            func = h.func
            terms = [terms[0]*t for t in h.args]
        else:
            # Multiply every accumulated term by this (non-distributed) factor.
            terms = [t*h for t in terms]
    return func(*terms)
39
+
40
+
41
@cacheit
def _extract_delta(expr, index):
    """
    Extract a simple KroneckerDelta from the expression.

    Explanation
    ===========

    Returns the tuple ``(delta, newexpr)`` where:

    - ``delta`` is a simple KroneckerDelta expression if one was found,
      or ``None`` if no simple KroneckerDelta expression was found.

    - ``newexpr`` is a Mul containing the remaining terms; ``expr`` is
      returned unchanged if no simple KroneckerDelta expression was found.

    Examples
    ========

    >>> from sympy import KroneckerDelta
    >>> from sympy.concrete.delta import _extract_delta
    >>> from sympy.abc import x, y, i, j, k
    >>> _extract_delta(4*x*y*KroneckerDelta(i, j), i)
    (KroneckerDelta(i, j), 4*x*y)
    >>> _extract_delta(4*x*y*KroneckerDelta(i, j), k)
    (None, 4*x*y*KroneckerDelta(i, j))

    See Also
    ========

    sympy.functions.special.tensor_functions.KroneckerDelta
    deltaproduct
    deltasummation
    """
    if not _has_simple_delta(expr, index):
        return (None, expr)
    if isinstance(expr, KroneckerDelta):
        # The whole expression is the delta; nothing remains.
        return (expr, S.One)
    if not expr.is_Mul:
        raise ValueError("Incorrect expr")
    delta = None
    terms = []

    # Pull out the first simple-delta factor; keep everything else.
    for arg in expr.args:
        if delta is None and _is_simple_delta(arg, index):
            delta = arg
        else:
            terms.append(arg)
    return (delta, expr.func(*terms))
90
+
91
+
92
@cacheit
def _has_simple_delta(expr, index):
    """
    Returns True if ``expr`` is an expression that contains a KroneckerDelta
    that is simple in the index ``index``, meaning that this KroneckerDelta
    is nonzero for a single value of the index ``index``.
    """
    if not expr.has(KroneckerDelta):
        return False
    if _is_simple_delta(expr, index):
        return True
    if expr.is_Add or expr.is_Mul:
        # Recurse into sums and products looking for a simple delta factor/term.
        return any(_has_simple_delta(arg, index) for arg in expr.args)
    return False
107
+
108
+
109
@cacheit
def _is_simple_delta(delta, index):
    """
    Returns True if ``delta`` is a KroneckerDelta and is nonzero for a single
    value of the index ``index``.
    """
    if not (isinstance(delta, KroneckerDelta) and delta.has(index)):
        return False
    # "Simple" means the difference of the delta's arguments is linear in
    # the index, so the delta fires at exactly one index value.
    poly = (delta.args[0] - delta.args[1]).as_poly(index)
    return poly.degree() == 1 if poly else False
120
+
121
+
122
@cacheit
def _remove_multiple_delta(expr):
    """
    Evaluate products of KroneckerDelta's.
    """
    if expr.is_Add:
        return expr.func(*list(map(_remove_multiple_delta, expr.args)))
    if not expr.is_Mul:
        return expr
    eqs = []
    newargs = []
    # Split the product into delta constraints (as equations) and the rest.
    for arg in expr.args:
        if isinstance(arg, KroneckerDelta):
            eqs.append(arg.args[0] - arg.args[1])
        else:
            newargs.append(arg)
    if not eqs:
        return expr
    solns = solve(eqs, dict=True)
    if len(solns) == 0:
        # Inconsistent constraints: the product of deltas is identically zero.
        return S.Zero
    elif len(solns) == 1:
        # Rebuild a minimal set of deltas from the unique solution and recurse
        # in case the rebuilt product simplifies further.
        for key in solns[0].keys():
            newargs.append(KroneckerDelta(key, solns[0][key]))
        expr2 = expr.func(*newargs)
        if expr != expr2:
            return _remove_multiple_delta(expr2)
    return expr
150
+
151
+
152
@cacheit
def _simplify_delta(expr):
    """
    Rewrite a KroneckerDelta's indices in its simplest form.
    """
    if isinstance(expr, KroneckerDelta):
        try:
            # Solving arg0 - arg1 = 0 canonicalizes the delta's indices.
            slns = solve(expr.args[0] - expr.args[1], dict=True)
            if slns and len(slns) == 1:
                return Mul(*[KroneckerDelta(*(key, value))
                             for key, value in slns[0].items()])
        except NotImplementedError:
            # solve() could not handle the relation; keep the delta as-is.
            pass
    return expr
166
+
167
+
168
@cacheit
def deltaproduct(f, limit):
    """
    Handle products containing a KroneckerDelta.

    See Also
    ========

    deltasummation
    sympy.functions.special.tensor_functions.KroneckerDelta
    sympy.concrete.products.product
    """
    # Empty product (upper bound below lower bound).
    if ((limit[2] - limit[1]) < 0) == True:
        return S.One

    if not f.has(KroneckerDelta):
        return product(f, limit)

    if f.is_Add:
        # Identify the term in the Add that has a simple KroneckerDelta
        delta = None
        terms = []
        for arg in sorted(f.args, key=default_sort_key):
            if delta is None and _has_simple_delta(arg, limit[0]):
                delta = arg
            else:
                terms.append(arg)
        newexpr = f.func(*terms)
        k = Dummy("kprime", integer=True)
        # The delta term can fire at most once across the product range, so
        # split the product at each candidate position where it may fire.
        if isinstance(limit[1], int) and isinstance(limit[2], int):
            # Concrete integer bounds: enumerate the split positions directly.
            result = deltaproduct(newexpr, limit) + sum(deltaproduct(newexpr, (limit[0], limit[1], ik - 1)) *
                                                        delta.subs(limit[0], ik) *
                                                        deltaproduct(newexpr, (limit[0], ik + 1, limit[2])) for ik in range(int(limit[1]), int(limit[2] + 1))
            )
        else:
            # Symbolic bounds: express the split position as a summation.
            result = deltaproduct(newexpr, limit) + deltasummation(
                deltaproduct(newexpr, (limit[0], limit[1], k - 1)) *
                delta.subs(limit[0], k) *
                deltaproduct(newexpr, (limit[0], k + 1, limit[2])),
                (k, limit[1], limit[2]),
                no_piecewise=_has_simple_delta(newexpr, limit[0])
            )
        return _remove_multiple_delta(result)

    delta, _ = _extract_delta(f, limit[0])

    if not delta:
        # No directly extractable delta: try distributing the Mul first.
        g = _expand_delta(f, limit[0])
        if f != g:
            try:
                return factor(deltaproduct(g, limit))
            except AssertionError:
                return deltaproduct(g, limit)
        return product(f, limit)

    # A simple delta multiplies the whole product: it is nonzero only when the
    # range is the single point limit[1] (first term) or empty (second term).
    return _remove_multiple_delta(f.subs(limit[0], limit[1])*KroneckerDelta(limit[2], limit[1])) + \
        S.One*_simplify_delta(KroneckerDelta(limit[2], limit[1] - 1))
225
+
226
+
227
@cacheit
def deltasummation(f, limit, no_piecewise=False):
    """
    Handle summations containing a KroneckerDelta.

    Explanation
    ===========

    The idea for summation is the following:

    - If we are dealing with a KroneckerDelta expression, i.e. KroneckerDelta(g(x), j),
      we try to simplify it.

      If we could simplify it, then we sum the resulting expression.
      We already know we can sum a simplified expression, because only
      simple KroneckerDelta expressions are involved.

      If we could not simplify it, there are two cases:

      1) The expression is a simple expression: we return the summation,
         taking care if we are dealing with a Derivative or with a proper
         KroneckerDelta.

      2) The expression is not simple (i.e. KroneckerDelta(cos(x))): we can do
         nothing at all.

    - If the expr is a multiplication expr having a KroneckerDelta term:

      First we expand it.

      If the expansion did work, then we try to sum the expansion.

      If not, we try to extract a simple KroneckerDelta term, then we have two
      cases:

      1) We have a simple KroneckerDelta term, so we return the summation.

      2) We did not have a simple term, but we do have an expression with
         simplified KroneckerDelta terms, so we sum this expression.

    Examples
    ========

    >>> from sympy import oo, symbols
    >>> from sympy.abc import k
    >>> i, j = symbols('i, j', integer=True, finite=True)
    >>> from sympy.concrete.delta import deltasummation
    >>> from sympy import KroneckerDelta
    >>> deltasummation(KroneckerDelta(i, k), (k, -oo, oo))
    1
    >>> deltasummation(KroneckerDelta(i, k), (k, 0, oo))
    Piecewise((1, i >= 0), (0, True))
    >>> deltasummation(KroneckerDelta(i, k), (k, 1, 3))
    Piecewise((1, (i >= 1) & (i <= 3)), (0, True))
    >>> deltasummation(k*KroneckerDelta(i, j)*KroneckerDelta(j, k), (k, -oo, oo))
    j*KroneckerDelta(i, j)
    >>> deltasummation(j*KroneckerDelta(i, j), (j, -oo, oo))
    i
    >>> deltasummation(i*KroneckerDelta(i, j), (i, -oo, oo))
    j

    See Also
    ========

    deltaproduct
    sympy.functions.special.tensor_functions.KroneckerDelta
    sympy.concrete.sums.summation
    """
    # Empty summation range.
    if ((limit[2] - limit[1]) < 0) == True:
        return S.Zero

    if not f.has(KroneckerDelta):
        return summation(f, limit)

    x = limit[0]

    g = _expand_delta(f, x)
    if g.is_Add:
        # Sum each term separately, then fold nested Piecewise results.
        return piecewise_fold(
            g.func(*[deltasummation(h, limit, no_piecewise) for h in g.args]))

    # try to extract a simple KroneckerDelta term
    delta, expr = _extract_delta(g, x)

    if (delta is not None) and (delta.delta_range is not None):
        dinf, dsup = delta.delta_range
        # The summation bounds cover the delta's known support, so the
        # Piecewise guard on the bounds is unnecessary.
        if (limit[1] - dinf <= 0) == True and (limit[2] - dsup >= 0) == True:
            no_piecewise = True

    if not delta:
        return summation(f, limit)

    solns = solve(delta.args[0] - delta.args[1], x)
    if len(solns) == 0:
        # Delta can never fire within any value of x.
        return S.Zero
    elif len(solns) != 1:
        # Multiple firing points: leave the summation unevaluated.
        return Sum(f, limit)
    value = solns[0]
    if no_piecewise:
        return expr.subs(x, value)
    # Guard on whether the firing point lies within the summation bounds.
    return Piecewise(
        (expr.subs(x, value), Interval(*limit[1:3]).as_relational(value)),
        (S.Zero, True)
    )
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/expr_with_intlimits.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.concrete.expr_with_limits import ExprWithLimits
2
+ from sympy.core.singleton import S
3
+ from sympy.core.relational import Eq
4
+
5
class ReorderError(NotImplementedError):
    """
    Exception raised when trying to reorder dependent limits.
    """
    def __init__(self, expr, msg):
        message = "%s could not be reordered: %s." % (expr, msg)
        super().__init__(message)
12
+
13
class ExprWithIntLimits(ExprWithLimits):
    """
    Superclass for Product and Sum.

    See Also
    ========

    sympy.concrete.expr_with_limits.ExprWithLimits
    sympy.concrete.products.Product
    sympy.concrete.summations.Sum
    """
    __slots__ = ()

    def change_index(self, var, trafo, newvar=None):
        r"""
        Change index of a Sum or Product.

        Perform a linear transformation `x \mapsto a x + b` on the index variable
        `x`. For `a` the only values allowed are `\pm 1`. A new variable to be used
        after the change of index can also be specified.

        Explanation
        ===========

        ``change_index(expr, var, trafo, newvar=None)`` where ``var`` specifies the
        index variable `x` to transform. The transformation ``trafo`` must be linear
        and given in terms of ``var``. If the optional argument ``newvar`` is
        provided then ``var`` gets replaced by ``newvar`` in the final expression.

        Examples
        ========

        >>> from sympy import Sum, Product, simplify
        >>> from sympy.abc import x, y, a, b, c, d, u, v, i, j, k, l

        >>> S = Sum(x, (x, a, b))
        >>> S.doit()
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> Sn = S.change_index(x, x + 1, y)
        >>> Sn
        Sum(y - 1, (y, a + 1, b + 1))
        >>> Sn.doit()
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> Sn = S.change_index(x, -x, y)
        >>> Sn
        Sum(-y, (y, -b, -a))
        >>> Sn.doit()
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> Sn = S.change_index(x, x+u)
        >>> Sn
        Sum(-u + x, (x, a + u, b + u))
        >>> Sn.doit()
        -a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
        >>> simplify(Sn.doit())
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> Sn = S.change_index(x, -x - u, y)
        >>> Sn
        Sum(-u - y, (y, -b - u, -a - u))
        >>> Sn.doit()
        -a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
        >>> simplify(Sn.doit())
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> P = Product(i*j**2, (i, a, b), (j, c, d))
        >>> P
        Product(i*j**2, (i, a, b), (j, c, d))
        >>> P2 = P.change_index(i, i+3, k)
        >>> P2
        Product(j**2*(k - 3), (k, a + 3, b + 3), (j, c, d))
        >>> P3 = P2.change_index(j, -j, l)
        >>> P3
        Product(l**2*(k - 3), (k, a + 3, b + 3), (l, -d, -c))

        When dealing with symbols only, we can make a
        general linear transformation:

        >>> Sn = S.change_index(x, u*x+v, y)
        >>> Sn
        Sum((-v + y)/u, (y, b*u + v, a*u + v))
        >>> Sn.doit()
        -v*(a*u - b*u + 1)/u + (a**2*u**2/2 + a*u*v + a*u/2 - b**2*u**2/2 - b*u*v + b*u/2 + v)/u
        >>> simplify(Sn.doit())
        a**2*u/2 + a/2 - b**2*u/2 + b/2

        However, the last result can be inconsistent with usual
        summation where the index increment is always 1. This is
        obvious as we get back the original value only for ``u``
        equal +1 or -1.

        See Also
        ========

        sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index,
        reorder_limit,
        sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder,
        sympy.concrete.summations.Sum.reverse_order,
        sympy.concrete.products.Product.reverse_order
        """
        if newvar is None:
            newvar = var

        limits = []
        for limit in self.limits:
            if limit[0] == var:
                # Extract slope (alpha) and intercept (beta) of the linear map.
                p = trafo.as_poly(var)
                if p.degree() != 1:
                    raise ValueError("Index transformation is not linear")
                alpha = p.coeff_monomial(var)
                beta = p.coeff_monomial(S.One)
                if alpha.is_number:
                    if alpha == S.One:
                        limits.append((newvar, alpha*limit[1] + beta, alpha*limit[2] + beta))
                    elif alpha == S.NegativeOne:
                        # A negative slope reverses the summation direction,
                        # so the transformed bounds are swapped.
                        limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta))
                    else:
                        raise ValueError("Linear transformation results in non-linear summation stepsize")
                else:
                    # Note that the case of alpha being symbolic can give issues if alpha < 0.
                    limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta))
            else:
                limits.append(limit)

        # Substitute the inverse map into the summand, then rename the index.
        function = self.function.subs(var, (var - beta)/alpha)
        function = function.subs(var, newvar)

        return self.func(function, *limits)


    def index(expr, x):
        """
        Return the index of a dummy variable in the list of limits.

        Explanation
        ===========

        ``index(expr, x)`` returns the index of the dummy variable ``x`` in the
        limits of ``expr``. Note that we start counting with 0 at the inner-most
        limits tuple.

        Examples
        ========

        >>> from sympy.abc import x, y, a, b, c, d
        >>> from sympy import Sum, Product
        >>> Sum(x*y, (x, a, b), (y, c, d)).index(x)
        0
        >>> Sum(x*y, (x, a, b), (y, c, d)).index(y)
        1
        >>> Product(x*y, (x, a, b), (y, c, d)).index(x)
        0
        >>> Product(x*y, (x, a, b), (y, c, d)).index(y)
        1

        See Also
        ========

        reorder_limit, reorder, sympy.concrete.summations.Sum.reverse_order,
        sympy.concrete.products.Product.reverse_order
        """
        variables = [limit[0] for limit in expr.limits]

        # A repeated (or absent) dummy is ambiguous, so require exactly one.
        if variables.count(x) != 1:
            raise ValueError(expr, "Number of instances of variable not equal to one")
        else:
            return variables.index(x)

    def reorder(expr, *arg):
        """
        Reorder limits in a expression containing a Sum or a Product.

        Explanation
        ===========

        ``expr.reorder(*arg)`` reorders the limits in the expression ``expr``
        according to the list of tuples given by ``arg``. These tuples can
        contain numerical indices or index variable names or involve both.

        Examples
        ========

        >>> from sympy import Sum, Product
        >>> from sympy.abc import x, y, z, a, b, c, d, e, f

        >>> Sum(x*y, (x, a, b), (y, c, d)).reorder((x, y))
        Sum(x*y, (y, c, d), (x, a, b))

        >>> Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder((x, y), (x, z), (y, z))
        Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))

        >>> P = Product(x*y*z, (x, a, b), (y, c, d), (z, e, f))
        >>> P.reorder((x, y), (x, z), (y, z))
        Product(x*y*z, (z, e, f), (y, c, d), (x, a, b))

        We can also select the index variables by counting them, starting
        with the inner-most one:

        >>> Sum(x**2, (x, a, b), (x, c, d)).reorder((0, 1))
        Sum(x**2, (x, c, d), (x, a, b))

        And of course we can mix both schemes:

        >>> Sum(x*y, (x, a, b), (y, c, d)).reorder((y, x))
        Sum(x*y, (y, c, d), (x, a, b))
        >>> Sum(x*y, (x, a, b), (y, c, d)).reorder((y, 0))
        Sum(x*y, (y, c, d), (x, a, b))

        See Also
        ========

        reorder_limit, index, sympy.concrete.summations.Sum.reverse_order,
        sympy.concrete.products.Product.reverse_order
        """
        new_expr = expr

        # Apply the requested swaps one pair at a time.
        for r in arg:
            if len(r) != 2:
                raise ValueError(r, "Invalid number of arguments")

            index1 = r[0]
            index2 = r[1]

            # Non-integer entries are index variables; map them to positions.
            if not isinstance(r[0], int):
                index1 = expr.index(r[0])
            if not isinstance(r[1], int):
                index2 = expr.index(r[1])

            new_expr = new_expr.reorder_limit(index1, index2)

        return new_expr


    def reorder_limit(expr, x, y):
        """
        Interchange two limit tuples of a Sum or Product expression.

        Explanation
        ===========

        ``expr.reorder_limit(x, y)`` interchanges two limit tuples. The
        arguments ``x`` and ``y`` are integers corresponding to the index
        variables of the two limits which are to be interchanged. The
        expression ``expr`` has to be either a Sum or a Product.

        Examples
        ========

        >>> from sympy.abc import x, y, z, a, b, c, d, e, f
        >>> from sympy import Sum, Product

        >>> Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2)
        Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))
        >>> Sum(x**2, (x, a, b), (x, c, d)).reorder_limit(1, 0)
        Sum(x**2, (x, c, d), (x, a, b))

        >>> Product(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2)
        Product(x*y*z, (z, e, f), (y, c, d), (x, a, b))

        See Also
        ========

        index, reorder, sympy.concrete.summations.Sum.reverse_order,
        sympy.concrete.products.Product.reverse_order
        """
        var = {limit[0] for limit in expr.limits}
        limit_x = expr.limits[x]
        limit_y = expr.limits[y]

        # The swap is only valid if neither limit's bounds depend on any of
        # the summation variables (i.e. the limits are independent).
        if (len(set(limit_x[1].free_symbols).intersection(var)) == 0 and
            len(set(limit_x[2].free_symbols).intersection(var)) == 0 and
            len(set(limit_y[1].free_symbols).intersection(var)) == 0 and
            len(set(limit_y[2].free_symbols).intersection(var)) == 0):

            limits = []
            for i, limit in enumerate(expr.limits):
                if i == x:
                    limits.append(limit_y)
                elif i == y:
                    limits.append(limit_x)
                else:
                    limits.append(limit)

            return type(expr)(expr.function, *limits)
        else:
            raise ReorderError(expr, "could not interchange the two limits specified")

    @property
    def has_empty_sequence(self):
        """
        Returns True if the Sum or Product is computed for an empty sequence.

        Examples
        ========

        >>> from sympy import Sum, Product, Symbol
        >>> m = Symbol('m')
        >>> Sum(m, (m, 1, 0)).has_empty_sequence
        True

        >>> Sum(m, (m, 1, 1)).has_empty_sequence
        False

        >>> M = Symbol('M', integer=True, positive=True)
        >>> Product(m, (m, 1, M)).has_empty_sequence
        False

        >>> Product(m, (m, 2, M)).has_empty_sequence

        >>> Product(m, (m, M + 1, M)).has_empty_sequence
        True

        >>> N = Symbol('N', integer=True, positive=True)
        >>> Sum(m, (m, N, M)).has_empty_sequence

        >>> N = Symbol('N', integer=True, negative=True)
        >>> Sum(m, (m, N, M)).has_empty_sequence
        False

        See Also
        ========

        has_reversed_limits
        has_finite_limits

        """
        # Tri-state result: True / False / None (undecidable symbolically).
        ret_None = False
        for lim in self.limits:
            # An integer range (a, b) is empty exactly when a - b == 1
            # given unit step; Eq may stay symbolic for free symbols.
            dif = lim[1] - lim[2]
            eq = Eq(dif, 1)
            if eq == True:
                return True
            elif eq == False:
                continue
            else:
                ret_None = True

        if ret_None:
            return None
        return False
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/expr_with_limits.py ADDED
@@ -0,0 +1,603 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.core.add import Add
2
+ from sympy.core.containers import Tuple
3
+ from sympy.core.expr import Expr
4
+ from sympy.core.function import AppliedUndef, UndefinedFunction
5
+ from sympy.core.mul import Mul
6
+ from sympy.core.relational import Equality, Relational
7
+ from sympy.core.singleton import S
8
+ from sympy.core.symbol import Symbol, Dummy
9
+ from sympy.core.sympify import sympify
10
+ from sympy.functions.elementary.piecewise import (piecewise_fold,
11
+ Piecewise)
12
+ from sympy.logic.boolalg import BooleanFunction
13
+ from sympy.matrices.matrixbase import MatrixBase
14
+ from sympy.sets.sets import Interval, Set
15
+ from sympy.sets.fancysets import Range
16
+ from sympy.tensor.indexed import Idx
17
+ from sympy.utilities import flatten
18
+ from sympy.utilities.iterables import sift, is_sequence
19
+ from sympy.utilities.exceptions import sympy_deprecation_warning
20
+
21
+
22
def _common_new(cls, function, *symbols, discrete, **assumptions):
    """Return either a special return value or the tuple,
    (function, limits, orientation). This code is common to
    both ExprWithLimits and AddWithLimits.

    Parameters
    ==========

    cls : type
        The concrete class being constructed (e.g. Integral, Sum, Product);
        used both for denesting and to re-dispatch on the Eq() branch.
    function : Expr
        The expression to which the limits apply; sympified here.
    symbols : tuple
        Raw limit specifications, canonicalized by ``_process_limits``.
    discrete : bool
        True for Sum/Product-style limits (Range), False for Integral-style
        ones (Interval/Relational); forwarded to ``_process_limits``.
    """
    function = sympify(function)

    if isinstance(function, Equality):
        # This transforms e.g. Integral(Eq(x, y)) to Eq(Integral(x), Integral(y))
        # but that is only valid for definite integrals.
        limits, orientation = _process_limits(*symbols, discrete=discrete)
        if not (limits and all(len(limit) == 3 for limit in limits)):
            sympy_deprecation_warning(
                """
                Creating a indefinite integral with an Eq() argument is
                deprecated.

                This is because indefinite integrals do not preserve equality
                due to the arbitrary constants. If you want an equality of
                indefinite integrals, use Eq(Integral(a, x), Integral(b, x))
                explicitly.
                """,
                deprecated_since_version="1.6",
                active_deprecations_target="deprecated-indefinite-integral-eq",
                stacklevel=5,
            )

        lhs = function.lhs
        rhs = function.rhs
        return Equality(cls(lhs, *symbols, **assumptions), \
            cls(rhs, *symbols, **assumptions))

    if function is S.NaN:
        return S.NaN

    if symbols:
        limits, orientation = _process_limits(*symbols, discrete=discrete)
        for i, li in enumerate(limits):
            if len(li) == 4:
                # 4-tuples carry a change-of-variables expression in the
                # last slot (see _process_limits); apply it to the function
                # and keep only the canonical 3-tuple limit.
                function = function.subs(li[0], li[-1])
                limits[i] = Tuple(*li[:-1])
    else:
        # symbol not provided -- we can still try to compute a general form
        free = function.free_symbols
        if len(free) != 1:
            raise ValueError(
                "specify dummy variables for %s" % function)
        limits, orientation = [Tuple(s) for s in free], 1

    # denest any nested calls
    while cls == type(function):
        limits = list(function.limits) + limits
        function = function.function

    # Any embedded piecewise functions need to be brought out to the
    # top level. We only fold Piecewise that contain the integration
    # variable.
    reps = {}
    symbols_of_integration = {i[0] for i in limits}
    for p in function.atoms(Piecewise):
        if not p.has(*symbols_of_integration):
            reps[p] = Dummy()
    # mask off those that don't
    function = function.xreplace(reps)
    # do the fold
    function = piecewise_fold(function)
    # remove the masking
    function = function.xreplace({v: k for k, v in reps.items()})

    return function, limits, orientation
91
+
92
+
93
def _process_limits(*symbols, discrete=None):
    """Process the list of symbols and convert them to canonical limits,
    storing them as Tuple(symbol, lower, upper). The orientation of
    the function is also returned when the upper limit is missing
    so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
    In the case that a limit is specified as (symbol, Range), a list of
    length 4 may be returned if a change of variables is needed; the
    expression that should replace the symbol in the expression is
    the fourth element in the list.

    Raises
    ======

    TypeError
        When a set-style limit of the wrong flavor is given (Range for a
        non-discrete expression, Interval/Relational for a discrete one).
    ValueError
        For malformed limits, or when a bound would push an Idx outside
        its declared range.
    """
    limits = []
    orientation = 1
    # err_msg is prepared up front so each wrong-flavor branch below can
    # raise a message appropriate to the discrete/continuous context.
    if discrete is None:
        err_msg = 'discrete must be True or False'
    elif discrete:
        err_msg = 'use Range, not Interval or Relational'
    else:
        err_msg = 'use Interval or Relational, not Range'
    for V in symbols:
        if isinstance(V, (Relational, BooleanFunction)):
            if discrete:
                raise TypeError(err_msg)
            variable = V.atoms(Symbol).pop()
            V = (variable, V.as_set())
        elif isinstance(V, Symbol) or getattr(V, '_diff_wrt', False):
            if isinstance(V, Idx):
                if V.lower is None or V.upper is None:
                    limits.append(Tuple(V))
                else:
                    limits.append(Tuple(V, V.lower, V.upper))
            else:
                limits.append(Tuple(V))
            continue
        if is_sequence(V) and not isinstance(V, Set):
            if len(V) == 2 and isinstance(V[1], Set):
                V = list(V)
                if isinstance(V[1], Interval):  # includes Reals
                    if discrete:
                        raise TypeError(err_msg)
                    V[1:] = V[1].inf, V[1].sup
                elif isinstance(V[1], Range):
                    if not discrete:
                        raise TypeError(err_msg)
                    lo = V[1].inf
                    hi = V[1].sup
                    dx = abs(V[1].step)  # direction doesn't matter
                    if dx == 1:
                        V[1:] = [lo, hi]
                    else:
                        # non-unit step requires a change of variables;
                        # the substitution expression is appended as a
                        # fourth element (consumed by _common_new).
                        if lo is not S.NegativeInfinity:
                            V = [V[0]] + [0, (hi - lo)//dx, dx*V[0] + lo]
                        else:
                            V = [V[0]] + [0, S.Infinity, -dx*V[0] + hi]
                else:
                    # more complicated sets would require splitting, e.g.
                    # Union(Interval(1, 3), interval(6,10))
                    raise NotImplementedError(
                        'expecting Range' if discrete else
                        'Relational or single Interval' )
            V = sympify(flatten(V))  # list of sympified elements/None
            if isinstance(V[0], (Symbol, Idx)) or getattr(V[0], '_diff_wrt', False):
                newsymbol = V[0]
                if len(V) == 3:
                    # general case
                    if V[2] is None and V[1] is not None:
                        # (x, a, None) -> (x, None, a) with flipped sign
                        orientation *= -1
                    V = [newsymbol] + [i for i in V[1:] if i is not None]

                lenV = len(V)
                if not isinstance(newsymbol, Idx) or lenV == 3:
                    if lenV == 4:
                        limits.append(Tuple(*V))
                        continue
                    if lenV == 3:
                        if isinstance(newsymbol, Idx):
                            # Idx represents an integer which may have
                            # specified values it can take on; if it is
                            # given such a value, an error is raised here
                            # if the summation would try to give it a larger
                            # or smaller value than permitted. None and Symbolic
                            # values will not raise an error.
                            lo, hi = newsymbol.lower, newsymbol.upper
                            try:
                                if lo is not None and not bool(V[1] >= lo):
                                    raise ValueError("Summation will set Idx value too low.")
                            except TypeError:
                                pass
                            try:
                                if hi is not None and not bool(V[2] <= hi):
                                    raise ValueError("Summation will set Idx value too high.")
                            except TypeError:
                                pass
                        limits.append(Tuple(*V))
                        continue
                    if lenV == 1 or (lenV == 2 and V[1] is None):
                        limits.append(Tuple(newsymbol))
                        continue
                    elif lenV == 2:
                        limits.append(Tuple(newsymbol, V[1]))
                        continue

        raise ValueError('Invalid limits given: %s' % str(symbols))

    return limits, orientation
197
+
198
+
199
class ExprWithLimits(Expr):
    """Base class for expressions with limits (e.g. Sum, Product).

    Stores the function as ``args[0]`` and each limit as a subsequent
    Tuple(symbol, lower, upper).  Unlike AddWithLimits, the indefinite
    form (a limit without bounds) is not permitted here.
    """
    __slots__ = ('is_commutative',)

    def __new__(cls, function, *symbols, **assumptions):
        from sympy.concrete.products import Product
        pre = _common_new(cls, function, *symbols,
            discrete=issubclass(cls, Product), **assumptions)
        if isinstance(pre, tuple):
            function, limits, _ = pre
        else:
            # _common_new short-circuited (NaN, or an Equality rewrite)
            return pre

        # limits must have upper and lower bounds; the indefinite form
        # is not supported. This restriction does not apply to AddWithLimits
        if any(len(l) != 3 or None in l for l in limits):
            raise ValueError('ExprWithLimits requires values for lower and upper bounds.')

        obj = Expr.__new__(cls, **assumptions)
        arglist = [function]
        arglist.extend(limits)
        obj._args = tuple(arglist)
        obj.is_commutative = function.is_commutative  # limits already checked

        return obj

    @property
    def function(self):
        """Return the function applied across limits.

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x
        >>> Integral(x**2, (x,)).function
        x**2

        See Also
        ========

        limits, variables, free_symbols
        """
        return self._args[0]

    @property
    def kind(self):
        # The kind of the whole expression follows the summand/integrand.
        return self.function.kind

    @property
    def limits(self):
        """Return the limits of expression.

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x, i
        >>> Integral(x**i, (i, 1, 3)).limits
        ((i, 1, 3),)

        See Also
        ========

        function, variables, free_symbols
        """
        return self._args[1:]

    @property
    def variables(self):
        """Return a list of the limit variables.

        >>> from sympy import Sum
        >>> from sympy.abc import x, i
        >>> Sum(x**i, (i, 1, 3)).variables
        [i]

        See Also
        ========

        function, limits, free_symbols
        as_dummy : Rename dummy variables
        sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
        """
        return [l[0] for l in self.limits]

    @property
    def bound_symbols(self):
        """Return only variables that are dummy variables.

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x, i, j, k
        >>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols
        [i, j]

        See Also
        ========

        function, limits, free_symbols
        as_dummy : Rename dummy variables
        sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
        """
        # length-1 limits (just a symbol, no bounds) are not bound
        return [l[0] for l in self.limits if len(l) != 1]

    @property
    def free_symbols(self):
        """
        This method returns the symbols in the object, excluding those
        that take on a specific value (i.e. the dummy symbols).

        Examples
        ========

        >>> from sympy import Sum
        >>> from sympy.abc import x, y
        >>> Sum(x, (x, y, 1)).free_symbols
        {y}
        """
        # don't test for any special values -- nominal free symbols
        # should be returned, e.g. don't return set() if the
        # function is zero -- treat it like an unevaluated expression.
        function, limits = self.function, self.limits
        # mask off non-symbol integration variables that have
        # more than themself as a free symbol
        reps = {i[0]: i[0] if i[0].free_symbols == {i[0]} else Dummy()
            for i in self.limits}
        function = function.xreplace(reps)
        isyms = function.free_symbols
        for xab in limits:
            v = reps[xab[0]]
            if len(xab) == 1:
                isyms.add(v)
                continue
            # take out the target symbol
            if v in isyms:
                isyms.remove(v)
            # add in the new symbols
            for i in xab[1:]:
                isyms.update(i.free_symbols)
        # undo the masking before returning
        reps = {v: k for k, v in reps.items()}
        return {reps.get(_, _) for _ in isyms}

    @property
    def is_number(self):
        """Return True if the Sum has no free symbols, else False."""
        return not self.free_symbols

    def _eval_interval(self, x, a, b):
        # Replace the limit over x with the bounds (a, b); used by
        # definite-evaluation machinery.
        limits = [(i if i[0] != x else (x, a, b)) for i in self.limits]
        integrand = self.function
        return self.func(integrand, *limits)

    def _eval_subs(self, old, new):
        """
        Perform substitutions over non-dummy variables
        of an expression with limits. Also, can be used
        to specify point-evaluation of an abstract antiderivative.

        Examples
        ========

        >>> from sympy import Sum, oo
        >>> from sympy.abc import s, n
        >>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
        Sum(n**(-2), (n, 1, oo))

        >>> from sympy import Integral
        >>> from sympy.abc import x, a
        >>> Integral(a*x**2, x).subs(x, 4)
        Integral(a*x**2, (x, 4))

        See Also
        ========

        variables : Lists the integration variables
        transform : Perform mapping on the dummy variable for integrals
        change_index : Perform mapping on the sum and product dummy variables

        """
        func, limits = self.function, list(self.limits)

        # If one of the expressions we are replacing is used as a func index
        # one of two things happens.
        # - the old variable first appears as a free variable
        #   so we perform all free substitutions before it becomes
        #   a func index.
        # - the old variable first appears as a func index, in
        #   which case we ignore.  See change_index.

        # Reorder limits to match standard mathematical practice for scoping
        limits.reverse()

        if not isinstance(old, Symbol) or \
                old.free_symbols.intersection(self.free_symbols):
            sub_into_func = True
            for i, xab in enumerate(limits):
                if 1 == len(xab) and old == xab[0]:
                    if new._diff_wrt:
                        xab = (new,)
                    else:
                        xab = (old, old)
                limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
                if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
                    sub_into_func = False
                    break
            if isinstance(old, (AppliedUndef, UndefinedFunction)):
                sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
                sy1 = set(self.variables).intersection(set(old.args))
                if not sy2.issubset(sy1):
                    raise ValueError(
                        "substitution cannot create dummy dependencies")
                sub_into_func = True
            if sub_into_func:
                func = func.subs(old, new)
        else:
            # old is a Symbol and a dummy variable of some limit
            for i, xab in enumerate(limits):
                if len(xab) == 3:
                    limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
                if old == xab[0]:
                    break
        # simplify redundant limits (x, x)  to (x, )
        for i, xab in enumerate(limits):
            if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
                limits[i] = Tuple(xab[0], )

        # Reorder limits back to representation-form
        limits.reverse()

        return self.func(func, *limits)

    @property
    def has_finite_limits(self):
        """
        Returns True if the limits are known to be finite, either by the
        explicit bounds, assumptions on the bounds, or assumptions on the
        variables.  False if known to be infinite, based on the bounds.
        None if not enough information is available to determine.

        Examples
        ========

        >>> from sympy import Sum, Integral, Product, oo, Symbol
        >>> x = Symbol('x')
        >>> Sum(x, (x, 1, 8)).has_finite_limits
        True

        >>> Integral(x, (x, 1, oo)).has_finite_limits
        False

        >>> M = Symbol('M')
        >>> Sum(x, (x, 1, M)).has_finite_limits

        >>> N = Symbol('N', integer=True)
        >>> Product(x, (x, 1, N)).has_finite_limits
        True

        See Also
        ========

        has_reversed_limits

        """

        ret_None = False
        for lim in self.limits:
            if len(lim) == 3:
                if any(l.is_infinite for l in lim[1:]):
                    # Any of the bounds are +/-oo
                    return False
                elif any(l.is_infinite is None for l in lim[1:]):
                    # Maybe there are assumptions on the variable?
                    if lim[0].is_infinite is None:
                        ret_None = True
            else:
                if lim[0].is_infinite is None:
                    ret_None = True

        if ret_None:
            return None
        return True

    @property
    def has_reversed_limits(self):
        """
        Returns True if the limits are known to be in reversed order, either
        by the explicit bounds, assumptions on the bounds, or assumptions on the
        variables.  False if known to be in normal order, based on the bounds.
        None if not enough information is available to determine.

        Examples
        ========

        >>> from sympy import Sum, Integral, Product, oo, Symbol
        >>> x = Symbol('x')
        >>> Sum(x, (x, 8, 1)).has_reversed_limits
        True

        >>> Sum(x, (x, 1, oo)).has_reversed_limits
        False

        >>> M = Symbol('M')
        >>> Integral(x, (x, 1, M)).has_reversed_limits

        >>> N = Symbol('N', integer=True, positive=True)
        >>> Sum(x, (x, 1, N)).has_reversed_limits
        False

        >>> Product(x, (x, 2, N)).has_reversed_limits

        >>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits
        False

        See Also
        ========

        sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence

        """
        ret_None = False
        for lim in self.limits:
            if len(lim) == 3:
                var, a, b = lim
                dif = b - a
                if dif.is_extended_negative:
                    return True
                elif dif.is_extended_nonnegative:
                    continue
                else:
                    ret_None = True
            else:
                # a limit without both bounds cannot be classified
                return None
        if ret_None:
            return None
        return False
536
+
537
+
538
class AddWithLimits(ExprWithLimits):
    r"""Represents unevaluated oriented additions.
    Parent class for Integral and Sum.
    """

    __slots__ = ()

    def __new__(cls, function, *symbols, **assumptions):
        from sympy.concrete.summations import Sum
        pre = _common_new(cls, function, *symbols,
            discrete=issubclass(cls, Sum), **assumptions)
        if isinstance(pre, tuple):
            function, limits, orientation = pre
        else:
            # _common_new short-circuited (NaN, or an Equality rewrite)
            return pre

        obj = Expr.__new__(cls, **assumptions)
        arglist = [orientation*function]  # orientation not used in ExprWithLimits
        arglist.extend(limits)
        obj._args = tuple(arglist)
        obj.is_commutative = function.is_commutative  # limits already checked

        return obj

    def _eval_adjoint(self):
        # adjoint commutes with the addition only for real limits
        if all(x.is_real for x in flatten(self.limits)):
            return self.func(self.function.adjoint(), *self.limits)
        return None

    def _eval_conjugate(self):
        # conjugate commutes with the addition only for real limits
        if all(x.is_real for x in flatten(self.limits)):
            return self.func(self.function.conjugate(), *self.limits)
        return None

    def _eval_transpose(self):
        # transpose commutes with the addition only for real limits
        if all(x.is_real for x in flatten(self.limits)):
            return self.func(self.function.transpose(), *self.limits)
        return None

    def _eval_factor(self, **hints):
        # Pull factors independent of the bound variables outside the
        # Sum/Integral; multi-limit expressions recurse on the inner limits.
        if 1 == len(self.limits):
            summand = self.function.factor(**hints)
            if summand.is_Mul:
                out = sift(summand.args, lambda w: w.is_commutative \
                    and not set(self.variables) & w.free_symbols)
                return Mul(*out[True])*self.func(Mul(*out[False]), \
                    *self.limits)
        else:
            summand = self.func(self.function, *self.limits[0:-1]).factor()
            if not summand.has(self.variables[-1]):
                return self.func(1, [self.limits[-1]]).doit()*summand
            elif isinstance(summand, Mul):
                return self.func(summand, self.limits[-1]).factor()
        return self

    def _eval_expand_basic(self, **hints):
        # Distribute the Sum/Integral over an Add (valid when limits are
        # finite or expansion is forced) and map over matrix entries.
        summand = self.function.expand(**hints)
        force = hints.get('force', False)
        if (summand.is_Add and (force or summand.is_commutative and
                 self.has_finite_limits is not False)):
            return Add(*[self.func(i, *self.limits) for i in summand.args])
        elif isinstance(summand, MatrixBase):
            return summand.applyfunc(lambda x: self.func(x, *self.limits))
        elif summand != self.function:
            return self.func(summand, *self.limits)
        return self
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/products.py ADDED
@@ -0,0 +1,610 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple as tTuple
2
+
3
+ from .expr_with_intlimits import ExprWithIntLimits
4
+ from .summations import Sum, summation, _dummy_with_inherited_properties_concrete
5
+ from sympy.core.expr import Expr
6
+ from sympy.core.exprtools import factor_terms
7
+ from sympy.core.function import Derivative
8
+ from sympy.core.mul import Mul
9
+ from sympy.core.singleton import S
10
+ from sympy.core.symbol import Dummy, Symbol
11
+ from sympy.functions.combinatorial.factorials import RisingFactorial
12
+ from sympy.functions.elementary.exponential import exp, log
13
+ from sympy.functions.special.tensor_functions import KroneckerDelta
14
+ from sympy.polys import quo, roots
15
+
16
+
17
+ class Product(ExprWithIntLimits):
18
+ r"""
19
+ Represents unevaluated products.
20
+
21
+ Explanation
22
+ ===========
23
+
24
+ ``Product`` represents a finite or infinite product, with the first
25
+ argument being the general form of terms in the series, and the second
26
+ argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
27
+ taking all integer values from ``start`` through ``end``. In accordance
28
+ with long-standing mathematical convention, the end term is included in
29
+ the product.
30
+
31
+ Finite products
32
+ ===============
33
+
34
+ For finite products (and products with symbolic limits assumed to be finite)
35
+ we follow the analogue of the summation convention described by Karr [1],
36
+ especially definition 3 of section 1.4. The product:
37
+
38
+ .. math::
39
+
40
+ \prod_{m \leq i < n} f(i)
41
+
42
+ has *the obvious meaning* for `m < n`, namely:
43
+
44
+ .. math::
45
+
46
+ \prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
47
+
48
+ with the upper limit value `f(n)` excluded. The product over an empty set is
49
+ one if and only if `m = n`:
50
+
51
+ .. math::
52
+
53
+ \prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
54
+
55
+ Finally, for all other products over empty sets we assume the following
56
+ definition:
57
+
58
+ .. math::
59
+
60
+ \prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
61
+
62
+ It is important to note that above we define all products with the upper
63
+ limit being exclusive. This is in contrast to the usual mathematical notation,
64
+ but does not affect the product convention. Indeed we have:
65
+
66
+ .. math::
67
+
68
+ \prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
69
+
70
+ where the difference in notation is intentional to emphasize the meaning,
71
+ with limits typeset on the top being inclusive.
72
+
73
+ Examples
74
+ ========
75
+
76
+ >>> from sympy.abc import a, b, i, k, m, n, x
77
+ >>> from sympy import Product, oo
78
+ >>> Product(k, (k, 1, m))
79
+ Product(k, (k, 1, m))
80
+ >>> Product(k, (k, 1, m)).doit()
81
+ factorial(m)
82
+ >>> Product(k**2,(k, 1, m))
83
+ Product(k**2, (k, 1, m))
84
+ >>> Product(k**2,(k, 1, m)).doit()
85
+ factorial(m)**2
86
+
87
+ Wallis' product for pi:
88
+
89
+ >>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
90
+ >>> W
91
+ Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
92
+
93
+ Direct computation currently fails:
94
+
95
+ >>> W.doit()
96
+ Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
97
+
98
+ But we can approach the infinite product by a limit of finite products:
99
+
100
+ >>> from sympy import limit
101
+ >>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
102
+ >>> W2
103
+ Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
104
+ >>> W2e = W2.doit()
105
+ >>> W2e
106
+ 4**n*factorial(n)**2/(2**(2*n)*RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
107
+ >>> limit(W2e, n, oo)
108
+ pi/2
109
+
110
+ By the same formula we can compute sin(pi/2):
111
+
112
+ >>> from sympy import combsimp, pi, gamma, simplify
113
+ >>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n))
114
+ >>> P = P.subs(x, pi/2)
115
+ >>> P
116
+ pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
117
+ >>> Pe = P.doit()
118
+ >>> Pe
119
+ pi**2*RisingFactorial(1 - pi/2, n)*RisingFactorial(1 + pi/2, n)/(2*factorial(n)**2)
120
+ >>> limit(Pe, n, oo).gammasimp()
121
+ sin(pi**2/2)
122
+ >>> Pe.rewrite(gamma)
123
+ (-1)**n*pi**2*gamma(pi/2)*gamma(n + 1 + pi/2)/(2*gamma(1 + pi/2)*gamma(-n + pi/2)*gamma(n + 1)**2)
124
+
125
+ Products with the lower limit being larger than the upper one:
126
+
127
+ >>> Product(1/i, (i, 6, 1)).doit()
128
+ 120
129
+ >>> Product(i, (i, 2, 5)).doit()
130
+ 120
131
+
132
+ The empty product:
133
+
134
+ >>> Product(i, (i, n, n-1)).doit()
135
+ 1
136
+
137
+ An example showing that the symbolic result of a product is still
138
+ valid for seemingly nonsensical values of the limits. Then the Karr
139
+ convention allows us to give a perfectly valid interpretation to
140
+ those products by interchanging the limits according to the above rules:
141
+
142
+ >>> P = Product(2, (i, 10, n)).doit()
143
+ >>> P
144
+ 2**(n - 9)
145
+ >>> P.subs(n, 5)
146
+ 1/16
147
+ >>> Product(2, (i, 10, 5)).doit()
148
+ 1/16
149
+ >>> 1/Product(2, (i, 6, 9)).doit()
150
+ 1/16
151
+
152
+ An explicit example of the Karr summation convention applied to products:
153
+
154
+ >>> P1 = Product(x, (i, a, b)).doit()
155
+ >>> P1
156
+ x**(-a + b + 1)
157
+ >>> P2 = Product(x, (i, b+1, a-1)).doit()
158
+ >>> P2
159
+ x**(a - b - 1)
160
+ >>> simplify(P1 * P2)
161
+ 1
162
+
163
+ And another one:
164
+
165
+ >>> P1 = Product(i, (i, b, a)).doit()
166
+ >>> P1
167
+ RisingFactorial(b, a - b + 1)
168
+ >>> P2 = Product(i, (i, a+1, b-1)).doit()
169
+ >>> P2
170
+ RisingFactorial(a + 1, -a + b - 1)
171
+ >>> P1 * P2
172
+ RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
173
+ >>> combsimp(P1 * P2)
174
+ 1
175
+
176
+ See Also
177
+ ========
178
+
179
+ Sum, summation
180
+ product
181
+
182
+ References
183
+ ==========
184
+
185
+ .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
186
+ Volume 28 Issue 2, April 1981, Pages 305-350
187
+ https://dl.acm.org/doi/10.1145/322248.322255
188
+ .. [2] https://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
189
+ .. [3] https://en.wikipedia.org/wiki/Empty_product
190
+ """
191
+
192
+ __slots__ = ()
193
+
194
+ limits: tTuple[tTuple[Symbol, Expr, Expr]]
195
+
196
    def __new__(cls, function, *symbols, **assumptions):
        """Create an unevaluated Product; all limit canonicalization is
        delegated to ExprWithIntLimits.__new__."""
        obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
        return obj
199
+
200
    def _eval_rewrite_as_Sum(self, *args, **kwargs):
        # prod(f) == exp(Sum(log(f))); used by Product(...).rewrite(Sum).
        return exp(Sum(log(self.function), *self.limits))
202
+
203
    @property
    def term(self):
        # The general term being multiplied (args[0]); ``function`` is kept
        # as an alias for API compatibility with ExprWithLimits.
        return self._args[0]
    function = term
207
+
208
    def _eval_is_zero(self):
        # Three-valued assumption handler (True/False/None = unknown).
        if self.has_empty_sequence:
            # empty product is 1, never zero
            return False

        z = self.term.is_zero
        if z is True:
            return True
        if self.has_finite_limits:
            # A Product is zero only if its term is zero assuming finite limits.
            return z
218
+
219
    def _eval_is_extended_real(self):
        # empty product is 1 (real); otherwise follow the term
        if self.has_empty_sequence:
            return True

        return self.function.is_extended_real
224
+
225
    def _eval_is_positive(self):
        # empty product is 1 (> 0); a finite product of positives is positive.
        # Implicitly returns None (unknown) otherwise.
        if self.has_empty_sequence:
            return True
        if self.function.is_positive and self.has_finite_limits:
            return True
230
+
231
    def _eval_is_nonnegative(self):
        # empty product is 1 (>= 0); a finite product of nonnegatives is
        # nonnegative.  Implicitly returns None (unknown) otherwise.
        if self.has_empty_sequence:
            return True
        if self.function.is_nonnegative and self.has_finite_limits:
            return True
236
+
237
    def _eval_is_extended_nonnegative(self):
        # empty product is 1; extended-nonnegative terms give an
        # extended-nonnegative product even with infinite limits.
        if self.has_empty_sequence:
            return True
        if self.function.is_extended_nonnegative:
            return True
242
+
243
    def _eval_is_extended_nonpositive(self):
        # only the empty-product case (value 1... NOTE(review): 1 is not
        # nonpositive; this mirrors upstream behavior -- verify against
        # sympy's assumption conventions before changing.
        if self.has_empty_sequence:
            return True
246
+
247
    def _eval_is_finite(self):
        # finitely many finite factors -> finite; otherwise None (unknown)
        if self.has_finite_limits and self.function.is_finite:
            return True
250
+
251
    def doit(self, **hints):
        """Evaluate the product, limit by limit.

        Each definite limit first gets a dummy variable carrying the
        assumptions implied by its bounds; reversed limits are flipped per
        the Karr convention (invert the term, swap the bounds); then
        ``_eval_product`` is attempted for each limit in turn.
        """
        # first make sure any definite limits have product
        # variables with matching assumptions
        reps = {}
        for xab in self.limits:
            d = _dummy_with_inherited_properties_concrete(xab)
            if d:
                reps[xab[0]] = d
        if reps:
            undo = {v: k for k, v in reps.items()}
            did = self.xreplace(reps).doit(**hints)
            if isinstance(did, tuple):  # when separate=True
                did = tuple([i.xreplace(undo) for i in did])
            else:
                did = did.xreplace(undo)
            return did

        from sympy.simplify.powsimp import powsimp
        f = self.function
        for index, limit in enumerate(self.limits):
            i, a, b = limit
            dif = b - a
            if dif.is_integer and dif.is_negative:
                # Karr convention: Product(f, (i, a, b)) with b < a equals
                # 1/Product(f, (i, b+1, a-1))
                a, b = b + 1, a - 1
                f = 1 / f

            g = self._eval_product(f, (i, a, b))
            if g in (None, S.NaN):
                # could not evaluate this limit; return unevaluated over
                # the remaining limits
                return self.func(powsimp(f), *self.limits[index:])
            else:
                f = g

        if hints.get('deep', True):
            return f.doit(**hints)
        else:
            return powsimp(f)
287
+
288
    def _eval_adjoint(self):
        # adjoint distributes over the product only for commutative terms
        if self.is_commutative:
            return self.func(self.function.adjoint(), *self.limits)
        return None
292
+
293
    def _eval_conjugate(self):
        # conjugation always distributes over a product
        return self.func(self.function.conjugate(), *self.limits)
295
+
296
    def _eval_product(self, term, limits):
        """Try to evaluate Product(term, (k, a, n)) symbolically.

        Returns the closed form, or None if no strategy applies.  The
        strategies, in order: constant term, single-point range, Kronecker
        deltas, small explicit ranges, polynomial terms (via their roots),
        factorable Adds, Muls (term by term), Pows, and nested Products.
        """

        (k, a, n) = limits

        if k not in term.free_symbols:
            # constant term: product is term**(number of factors)
            if (term - 1).is_zero:
                return S.One
            return term**(n - a + 1)

        if a == n:
            # single factor
            return term.subs(k, a)

        from .delta import deltaproduct, _has_simple_delta
        if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
            return deltaproduct(term, limits)

        dif = n - a
        definite = dif.is_Integer
        if definite and (dif < 100):
            # small explicit range: multiply out directly
            return self._eval_product_direct(term, limits)

        elif term.is_polynomial(k):
            # prod(c*(k - r1)**m1*...) expressed with RisingFactorials of
            # the polynomial's roots
            poly = term.as_poly(k)

            A = B = Q = S.One

            all_roots = roots(poly)

            M = 0
            for r, m in all_roots.items():
                M += m
                A *= RisingFactorial(a - r, n - a + 1)**m
                Q *= (n - r)**m

            if M < poly.degree():
                # some roots were not found; handle the remaining quotient
                # factor recursively
                arg = quo(poly, Q.as_poly(k))
                B = self.func(arg, (k, a, n)).doit()

            return poly.LC()**(n - a + 1) * A * B

        elif term.is_Add:
            factored = factor_terms(term, fraction=True)
            if factored.is_Mul:
                return self._eval_product(factored, (k, a, n))

        elif term.is_Mul:
            # Factor in part without the summation variable and part with
            without_k, with_k = term.as_coeff_mul(k)

            if len(with_k) >= 2:
                # More than one term including k, so still a multiplication
                exclude, include = [], []
                for t in with_k:
                    p = self._eval_product(t, (k, a, n))

                    if p is not None:
                        exclude.append(p)
                    else:
                        include.append(t)

                if not exclude:
                    return None
                else:
                    arg = term._new_rawargs(*include)
                    A = Mul(*exclude)
                    B = self.func(arg, (k, a, n)).doit()
                    return without_k**(n - a + 1)*A * B
            else:
                # Just a single term
                p = self._eval_product(with_k[0], (k, a, n))
                if p is None:
                    p = self.func(with_k[0], (k, a, n)).doit()
                return without_k**(n - a + 1)*p


        elif term.is_Pow:
            if not term.base.has(k):
                # prod(c**f(k)) == c**sum(f(k))
                s = summation(term.exp, (k, a, n))

                return term.base**s
            elif not term.exp.has(k):
                # prod(f(k)**c) == prod(f(k))**c
                p = self._eval_product(term.base, (k, a, n))

                if p is not None:
                    return p**term.exp

        elif isinstance(term, Product):
            evaluated = term.doit()
            f = self._eval_product(evaluated, limits)
            if f is None:
                return self.func(evaluated, limits)
            else:
                return f

        if definite:
            # large but explicit integer range: fall back to direct expansion
            return self._eval_product_direct(term, limits)
392
+
393
+ def _eval_simplify(self, **kwargs):
394
+ from sympy.simplify.simplify import product_simplify
395
+ rv = product_simplify(self, **kwargs)
396
+ return rv.doit() if kwargs['doit'] else rv
397
+
398
+ def _eval_transpose(self):
399
+ if self.is_commutative:
400
+ return self.func(self.function.transpose(), *self.limits)
401
+ return None
402
+
403
+ def _eval_product_direct(self, term, limits):
404
+ (k, a, n) = limits
405
+ return Mul(*[term.subs(k, a + i) for i in range(n - a + 1)])
406
+
407
    def _eval_derivative(self, x):
        """Differentiate the product with respect to ``x``.

        Returns zero when ``x`` does not appear at all, ``None`` when ``x``
        occurs in the outermost bounds (the product is then discontinuous in
        ``x``), and otherwise applies the product rule: a Sum over positions
        ``h`` of the product with the ``h``-th factor differentiated.
        """
        if isinstance(x, Symbol) and x not in self.free_symbols:
            return S.Zero
        f, limits = self.function, list(self.limits)
        limit = limits.pop(-1)
        if limits:
            # Inner limits stay wrapped as a nested Product inside f.
            f = self.func(f, *limits)
        i, a, b = limit
        if x in a.free_symbols or x in b.free_symbols:
            # Discontinuous in x: leave the derivative unevaluated.
            return None
        h = Dummy()
        # Product rule: sum over h of (factors below h) * (factors above h)
        # * d/dx of the h-th factor.
        rv = Sum( Product(f, (i, a, h - 1)) * Product(f, (i, h + 1, b)) * Derivative(f, x, evaluate=True).subs(i, h), (h, a, b))
        return rv
420
+
421
    def is_convergent(self):
        r"""
        See docs of :obj:`.Sum.is_convergent()` for explanation of convergence
        in SymPy.

        Explanation
        ===========

        The infinite product:

        .. math::

            \prod_{1 \leq i < \infty} f(i)

        is defined by the sequence of partial products:

        .. math::

            \prod_{i=1}^{n} f(i) = f(1) f(2) \cdots f(n)

        as n increases without bound. The product converges to a non-zero
        value if and only if the sum:

        .. math::

            \sum_{1 \leq i < \infty} \log{f(n)}

        converges.

        Examples
        ========

        >>> from sympy import Product, Symbol, cos, pi, exp, oo
        >>> n = Symbol('n', integer=True)
        >>> Product(n/(n + 1), (n, 1, oo)).is_convergent()
        False
        >>> Product(1/n**2, (n, 1, oo)).is_convergent()
        False
        >>> Product(cos(pi/n), (n, 1, oo)).is_convergent()
        True
        >>> Product(exp(-n**2), (n, 1, oo)).is_convergent()
        False

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Infinite_product
        """
        sequence_term = self.function
        log_sum = log(sequence_term)
        lim = self.limits
        try:
            # Reduce to the convergence of the sum of logs (see docstring).
            is_conv = Sum(log_sum, *lim).is_convergent()
        except NotImplementedError:
            # Fallback: absolute convergence of Sum(f - 1) is a sufficient
            # condition for the convergence of Product(f).
            if Sum(sequence_term - 1, *lim).is_absolutely_convergent() is S.true:
                return S.true
            raise NotImplementedError("The algorithm to find the product convergence of %s "
                                      "is not yet implemented" % (sequence_term))
        return is_conv
480
+
481
    def reverse_order(expr, *indices):
        """
        Reverse the order of a limit in a Product.

        Explanation
        ===========

        ``reverse_order(expr, *indices)`` reverses some limits in the expression
        ``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
        the argument ``indices`` specify some indices whose limits get reversed.
        These selectors are either variable names or numerical indices counted
        starting from the inner-most limit tuple.

        Examples
        ========

        >>> from sympy import gamma, Product, simplify, Sum
        >>> from sympy.abc import x, y, a, b, c, d
        >>> P = Product(x, (x, a, b))
        >>> Pr = P.reverse_order(x)
        >>> Pr
        Product(1/x, (x, b + 1, a - 1))
        >>> Pr = Pr.doit()
        >>> Pr
        1/RisingFactorial(b + 1, a - b - 1)
        >>> simplify(Pr.rewrite(gamma))
        Piecewise((gamma(b + 1)/gamma(a), b > -1), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))
        >>> P = P.doit()
        >>> P
        RisingFactorial(a, -a + b + 1)
        >>> simplify(P.rewrite(gamma))
        Piecewise((gamma(b + 1)/gamma(a), a > 0), ((-1)**(-a + b + 1)*gamma(1 - a)/gamma(-b), True))

        While one should prefer variable names when specifying which limits
        to reverse, the index counting notation comes in handy in case there
        are several symbols with the same name.

        >>> S = Sum(x*y, (x, a, b), (y, c, d))
        >>> S
        Sum(x*y, (x, a, b), (y, c, d))
        >>> S0 = S.reverse_order(0)
        >>> S0
        Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
        >>> S1 = S0.reverse_order(1)
        >>> S1
        Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))

        Of course we can mix both notations:

        >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
        Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
        >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
        Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))

        See Also
        ========

        sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index,
        reorder_limit,
        sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder

        References
        ==========

        .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
               Volume 28 Issue 2, April 1981, Pages 305-350
               https://dl.acm.org/doi/10.1145/322248.322255

        """
        l_indices = list(indices)

        # Normalize symbolic selectors (variable names) to positional indices.
        for i, indx in enumerate(l_indices):
            if not isinstance(indx, int):
                l_indices[i] = expr.index(indx)

        e = 1
        limits = []
        for i, limit in enumerate(expr.limits):
            l = limit
            if i in l_indices:
                # Each reversal flips the sign of the exponent and swaps the
                # shifted bounds: (a, b) -> (b + 1, a - 1).
                e = -e
                l = (limit[0], limit[2] + 1, limit[1] - 1)
            limits.append(l)

        return Product(expr.function ** e, *limits)
566
+
567
+
568
def product(*args, **kwargs):
    r"""
    Compute the product.

    Explanation
    ===========

    The notation for symbols is similar to the notation used in Sum or
    Integral. product(f, (i, a, b)) computes the product of f with
    respect to i from a to b, i.e.,

    ::

                                     b
                                   _____
        product(f(n), (i, a, b)) = |   | f(n)
                                   |   |
                                   i = a

    If it cannot compute the product, it returns an unevaluated Product object.
    Repeated products can be computed by introducing additional symbols tuples::

    Examples
    ========

    >>> from sympy import product, symbols
    >>> i, n, m, k = symbols('i n m k', integer=True)

    >>> product(i, (i, 1, k))
    factorial(k)
    >>> product(m, (i, 1, k))
    m**k
    >>> product(i, (i, 1, k), (k, 1, n))
    Product(factorial(k), (k, 1, n))

    """

    result = Product(*args, **kwargs)

    if not isinstance(result, Product):
        # Construction already collapsed to a plain expression.
        return result
    return result.doit(deep=False)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/concrete/summations.py ADDED
@@ -0,0 +1,1646 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple as tTuple
2
+
3
+ from sympy.calculus.singularities import is_decreasing
4
+ from sympy.calculus.accumulationbounds import AccumulationBounds
5
+ from .expr_with_intlimits import ExprWithIntLimits
6
+ from .expr_with_limits import AddWithLimits
7
+ from .gosper import gosper_sum
8
+ from sympy.core.expr import Expr
9
+ from sympy.core.add import Add
10
+ from sympy.core.containers import Tuple
11
+ from sympy.core.function import Derivative, expand
12
+ from sympy.core.mul import Mul
13
+ from sympy.core.numbers import Float, _illegal
14
+ from sympy.core.relational import Eq
15
+ from sympy.core.singleton import S
16
+ from sympy.core.sorting import ordered
17
+ from sympy.core.symbol import Dummy, Wild, Symbol, symbols
18
+ from sympy.functions.combinatorial.factorials import factorial
19
+ from sympy.functions.combinatorial.numbers import bernoulli, harmonic
20
+ from sympy.functions.elementary.exponential import exp, log
21
+ from sympy.functions.elementary.piecewise import Piecewise
22
+ from sympy.functions.elementary.trigonometric import cot, csc
23
+ from sympy.functions.special.hyper import hyper
24
+ from sympy.functions.special.tensor_functions import KroneckerDelta
25
+ from sympy.functions.special.zeta_functions import zeta
26
+ from sympy.integrals.integrals import Integral
27
+ from sympy.logic.boolalg import And
28
+ from sympy.polys.partfrac import apart
29
+ from sympy.polys.polyerrors import PolynomialError, PolificationFailed
30
+ from sympy.polys.polytools import parallel_poly_from_expr, Poly, factor
31
+ from sympy.polys.rationaltools import together
32
+ from sympy.series.limitseq import limit_seq
33
+ from sympy.series.order import O
34
+ from sympy.series.residues import residue
35
+ from sympy.sets.sets import FiniteSet, Interval
36
+ from sympy.utilities.iterables import sift
37
+ import itertools
38
+
39
+
40
+ class Sum(AddWithLimits, ExprWithIntLimits):
41
+ r"""
42
+ Represents unevaluated summation.
43
+
44
+ Explanation
45
+ ===========
46
+
47
+ ``Sum`` represents a finite or infinite series, with the first argument
48
+ being the general form of terms in the series, and the second argument
49
+ being ``(dummy_variable, start, end)``, with ``dummy_variable`` taking
50
+ all integer values from ``start`` through ``end``. In accordance with
51
+ long-standing mathematical convention, the end term is included in the
52
+ summation.
53
+
54
+ Finite sums
55
+ ===========
56
+
57
+ For finite sums (and sums with symbolic limits assumed to be finite) we
58
+ follow the summation convention described by Karr [1], especially
59
+ definition 3 of section 1.4. The sum:
60
+
61
+ .. math::
62
+
63
+ \sum_{m \leq i < n} f(i)
64
+
65
+ has *the obvious meaning* for `m < n`, namely:
66
+
67
+ .. math::
68
+
69
+ \sum_{m \leq i < n} f(i) = f(m) + f(m+1) + \ldots + f(n-2) + f(n-1)
70
+
71
+ with the upper limit value `f(n)` excluded. The sum over an empty set is
72
+ zero if and only if `m = n`:
73
+
74
+ .. math::
75
+
76
+ \sum_{m \leq i < n} f(i) = 0 \quad \mathrm{for} \quad m = n
77
+
78
+ Finally, for all other sums over empty sets we assume the following
79
+ definition:
80
+
81
+ .. math::
82
+
83
+ \sum_{m \leq i < n} f(i) = - \sum_{n \leq i < m} f(i) \quad \mathrm{for} \quad m > n
84
+
85
+ It is important to note that Karr defines all sums with the upper
86
+ limit being exclusive. This is in contrast to the usual mathematical notation,
87
+ but does not affect the summation convention. Indeed we have:
88
+
89
+ .. math::
90
+
91
+ \sum_{m \leq i < n} f(i) = \sum_{i = m}^{n - 1} f(i)
92
+
93
+ where the difference in notation is intentional to emphasize the meaning,
94
+ with limits typeset on the top being inclusive.
95
+
96
+ Examples
97
+ ========
98
+
99
+ >>> from sympy.abc import i, k, m, n, x
100
+ >>> from sympy import Sum, factorial, oo, IndexedBase, Function
101
+ >>> Sum(k, (k, 1, m))
102
+ Sum(k, (k, 1, m))
103
+ >>> Sum(k, (k, 1, m)).doit()
104
+ m**2/2 + m/2
105
+ >>> Sum(k**2, (k, 1, m))
106
+ Sum(k**2, (k, 1, m))
107
+ >>> Sum(k**2, (k, 1, m)).doit()
108
+ m**3/3 + m**2/2 + m/6
109
+ >>> Sum(x**k, (k, 0, oo))
110
+ Sum(x**k, (k, 0, oo))
111
+ >>> Sum(x**k, (k, 0, oo)).doit()
112
+ Piecewise((1/(1 - x), Abs(x) < 1), (Sum(x**k, (k, 0, oo)), True))
113
+ >>> Sum(x**k/factorial(k), (k, 0, oo)).doit()
114
+ exp(x)
115
+
116
+ Here are examples to do summation with symbolic indices. You
117
+ can use either Function of IndexedBase classes:
118
+
119
+ >>> f = Function('f')
120
+ >>> Sum(f(n), (n, 0, 3)).doit()
121
+ f(0) + f(1) + f(2) + f(3)
122
+ >>> Sum(f(n), (n, 0, oo)).doit()
123
+ Sum(f(n), (n, 0, oo))
124
+ >>> f = IndexedBase('f')
125
+ >>> Sum(f[n]**2, (n, 0, 3)).doit()
126
+ f[0]**2 + f[1]**2 + f[2]**2 + f[3]**2
127
+
128
+ An example showing that the symbolic result of a summation is still
129
+ valid for seemingly nonsensical values of the limits. Then the Karr
130
+ convention allows us to give a perfectly valid interpretation to
131
+ those sums by interchanging the limits according to the above rules:
132
+
133
+ >>> S = Sum(i, (i, 1, n)).doit()
134
+ >>> S
135
+ n**2/2 + n/2
136
+ >>> S.subs(n, -4)
137
+ 6
138
+ >>> Sum(i, (i, 1, -4)).doit()
139
+ 6
140
+ >>> Sum(-i, (i, -3, 0)).doit()
141
+ 6
142
+
143
+ An explicit example of the Karr summation convention:
144
+
145
+ >>> S1 = Sum(i**2, (i, m, m+n-1)).doit()
146
+ >>> S1
147
+ m**2*n + m*n**2 - m*n + n**3/3 - n**2/2 + n/6
148
+ >>> S2 = Sum(i**2, (i, m+n, m-1)).doit()
149
+ >>> S2
150
+ -m**2*n - m*n**2 + m*n - n**3/3 + n**2/2 - n/6
151
+ >>> S1 + S2
152
+ 0
153
+ >>> S3 = Sum(i, (i, m, m-1)).doit()
154
+ >>> S3
155
+ 0
156
+
157
+ See Also
158
+ ========
159
+
160
+ summation
161
+ Product, sympy.concrete.products.product
162
+
163
+ References
164
+ ==========
165
+
166
+ .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
167
+ Volume 28 Issue 2, April 1981, Pages 305-350
168
+ https://dl.acm.org/doi/10.1145/322248.322255
169
+ .. [2] https://en.wikipedia.org/wiki/Summation#Capital-sigma_notation
170
+ .. [3] https://en.wikipedia.org/wiki/Empty_sum
171
+ """
172
+
173
+ __slots__ = ()
174
+
175
+ limits: tTuple[tTuple[Symbol, Expr, Expr]]
176
+
177
    def __new__(cls, function, *symbols, **assumptions):
        """Create a Sum; every limit must supply both bounds."""
        obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
        if not hasattr(obj, 'limits'):
            # Construction short-circuited to a plain expression; return it.
            return obj
        if any(len(l) != 3 or None in l for l in obj.limits):
            raise ValueError('Sum requires values for lower and upper bounds.')

        return obj
185
+
186
+ def _eval_is_zero(self):
187
+ # a Sum is only zero if its function is zero or if all terms
188
+ # cancel out. This only answers whether the summand is zero; if
189
+ # not then None is returned since we don't analyze whether all
190
+ # terms cancel out.
191
+ if self.function.is_zero or self.has_empty_sequence:
192
+ return True
193
+
194
+ def _eval_is_extended_real(self):
195
+ if self.has_empty_sequence:
196
+ return True
197
+ return self.function.is_extended_real
198
+
199
+ def _eval_is_positive(self):
200
+ if self.has_finite_limits and self.has_reversed_limits is False:
201
+ return self.function.is_positive
202
+
203
+ def _eval_is_negative(self):
204
+ if self.has_finite_limits and self.has_reversed_limits is False:
205
+ return self.function.is_negative
206
+
207
+ def _eval_is_finite(self):
208
+ if self.has_finite_limits and self.function.is_finite:
209
+ return True
210
+
211
    def doit(self, **hints):
        """Evaluate the summation.

        Definite limits are first rewritten with dummy variables that carry
        matching assumptions; the substitution is undone on the result.
        Matrix summands are dispatched to ``_eval_matrix_sum``; otherwise
        each limit is evaluated with ``eval_sum``, with reversed integer
        ranges normalized by flipping the (shifted) bounds and negating the
        summand, per the Karr convention.
        """
        if hints.get('deep', True):
            f = self.function.doit(**hints)
        else:
            f = self.function

        # first make sure any definite limits have summation
        # variables with matching assumptions
        reps = {}
        for xab in self.limits:
            d = _dummy_with_inherited_properties_concrete(xab)
            if d:
                reps[xab[0]] = d
        if reps:
            # Re-run doit with the assumption-carrying dummies, then map the
            # dummies back to the original symbols.
            undo = {v: k for k, v in reps.items()}
            did = self.xreplace(reps).doit(**hints)
            if isinstance(did, tuple):  # when separate=True
                did = tuple([i.xreplace(undo) for i in did])
            elif did is not None:
                did = did.xreplace(undo)
            else:
                did = self
            return did


        if self.function.is_Matrix:
            expanded = self.expand()
            if self != expanded:
                return expanded.doit()
            return _eval_matrix_sum(self)

        for n, limit in enumerate(self.limits):
            i, a, b = limit
            dif = b - a
            if dif == -1:
                # Any summation over an empty set is zero
                return S.Zero
            if dif.is_integer and dif.is_negative:
                # Reversed range: flip the shifted bounds and negate the
                # summand (Karr convention).
                a, b = b + 1, a - 1
                f = -f

            newf = eval_sum(f, (i, a, b))
            if newf is None:
                if f == self.function:
                    # Last resort: try to recognize a Hurwitz-zeta form.
                    zeta_function = self.eval_zeta_function(f, (i, a, b))
                    if zeta_function is not None:
                        return zeta_function
                    return self
                else:
                    return self.func(f, *self.limits[n:])
            f = newf

        if hints.get('deep', True):
            # eval_sum could return partially unevaluated
            # result with Piecewise. In this case we won't
            # doit() recursively.
            if not isinstance(f, Piecewise):
                return f.doit(**hints)

        return f
271
+
272
    def eval_zeta_function(self, f, limits):
        """
        Check whether the function matches with the zeta function.

        If it matches, then return a `Piecewise` expression because
        zeta function does not converge unless `s > 1` and `q > 0`
        """
        i, a, b = limits
        # Match f against (w*i + y)**(-z) with w, y, z free of the
        # summation variable i.
        w, y, z = Wild('w', exclude=[i]), Wild('y', exclude=[i]), Wild('z', exclude=[i])
        result = f.match((w * i + y) ** (-z))
        if result is not None and b is S.Infinity:
            # Sum((w*i + y)**(-z), (i, a, oo)) == w**(-z) * zeta(z, y/w + a)
            coeff = 1 / result[w] ** result[z]
            s = result[z]
            q = result[y] / result[w] + a
            return Piecewise((coeff * zeta(s, q), And(q > 0, s > 1)), (self, True))
287
+
288
    def _eval_derivative(self, x):
        """
        Differentiate wrt x as long as x is not in the free symbols of any of
        the upper or lower limits.

        Explanation
        ===========

        Sum(a*b*x, (x, 1, a)) can be differentiated wrt x or b but not `a`
        since the value of the sum is discontinuous in `a`. In a case
        involving a limit variable, the unevaluated derivative is returned.
        """

        # diff already confirmed that x is in the free symbols of self, but we
        # don't want to differentiate wrt any free symbol in the upper or lower
        # limits
        # XXX remove this test for free_symbols when the default _eval_derivative is in
        if isinstance(x, Symbol) and x not in self.free_symbols:
            return S.Zero

        # get limits and the function
        f, limits = self.function, list(self.limits)

        limit = limits.pop(-1)

        if limits:  # f is the argument to a Sum
            f = self.func(f, *limits)

        _, a, b = limit
        if x in a.free_symbols or x in b.free_symbols:
            # The sum is discontinuous in x: leave the derivative unevaluated.
            return None
        # Differentiation commutes with the (outermost) summation.
        df = Derivative(f, x, evaluate=True)
        rv = self.func(df, limit)
        return rv
322
+
323
+ def _eval_difference_delta(self, n, step):
324
+ k, _, upper = self.args[-1]
325
+ new_upper = upper.subs(n, n + step)
326
+
327
+ if len(self.args) == 2:
328
+ f = self.args[0]
329
+ else:
330
+ f = self.func(*self.args[:-1])
331
+
332
+ return Sum(f, (k, upper + 1, new_upper)).doit()
333
+
334
    def _eval_simplify(self, **kwargs):
        """Simplify by simplifying interior Sums and recombining terms."""

        function = self.function

        if kwargs.get('deep', True):
            function = function.simplify(**kwargs)

        # split the function into adds
        terms = Add.make_args(expand(function))
        s_t = []  # Sum Terms
        o_t = []  # Other Terms

        for term in terms:
            if term.has(Sum):
                # if there is an embedded sum here
                # it is of the form x * (Sum(whatever))
                # hence we make a Mul out of it, and simplify all interior sum terms
                subterms = Mul.make_args(expand(term))
                out_terms = []
                for subterm in subterms:
                    # go through each term
                    if isinstance(subterm, Sum):
                        # if it's a sum, simplify it
                        out_terms.append(subterm._eval_simplify(**kwargs))
                    else:
                        # otherwise, add it as is
                        out_terms.append(subterm)

                # turn it back into a Mul
                s_t.append(Mul(*out_terms))
            else:
                o_t.append(term)

        # next try to combine any interior sums for further simplification
        from sympy.simplify.simplify import factor_sum, sum_combine
        result = Add(sum_combine(s_t), *o_t)

        return factor_sum(result, limits=self.limits)
372
+
373
+ def is_convergent(self):
374
+ r"""
375
+ Checks for the convergence of a Sum.
376
+
377
+ Explanation
378
+ ===========
379
+
380
+ We divide the study of convergence of infinite sums and products in
381
+ two parts.
382
+
383
+ First Part:
384
+ One part is the question whether all the terms are well defined, i.e.,
385
+ they are finite in a sum and also non-zero in a product. Zero
386
+ is the analogy of (minus) infinity in products as
387
+ :math:`e^{-\infty} = 0`.
388
+
389
+ Second Part:
390
+ The second part is the question of convergence after infinities,
391
+ and zeros in products, have been omitted assuming that their number
392
+ is finite. This means that we only consider the tail of the sum or
393
+ product, starting from some point after which all terms are well
394
+ defined.
395
+
396
+ For example, in a sum of the form:
397
+
398
+ .. math::
399
+
400
+ \sum_{1 \leq i < \infty} \frac{1}{n^2 + an + b}
401
+
402
+ where a and b are numbers. The routine will return true, even if there
403
+ are infinities in the term sequence (at most two). An analogous
404
+ product would be:
405
+
406
+ .. math::
407
+
408
+ \prod_{1 \leq i < \infty} e^{\frac{1}{n^2 + an + b}}
409
+
410
+ This is how convergence is interpreted. It is concerned with what
411
+ happens at the limit. Finding the bad terms is another independent
412
+ matter.
413
+
414
+ Note: It is responsibility of user to see that the sum or product
415
+ is well defined.
416
+
417
+ There are various tests employed to check the convergence like
418
+ divergence test, root test, integral test, alternating series test,
419
+ comparison tests, Dirichlet tests. It returns true if Sum is convergent
420
+ and false if divergent and NotImplementedError if it cannot be checked.
421
+
422
+ References
423
+ ==========
424
+
425
+ .. [1] https://en.wikipedia.org/wiki/Convergence_tests
426
+
427
+ Examples
428
+ ========
429
+
430
+ >>> from sympy import factorial, S, Sum, Symbol, oo
431
+ >>> n = Symbol('n', integer=True)
432
+ >>> Sum(n/(n - 1), (n, 4, 7)).is_convergent()
433
+ True
434
+ >>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent()
435
+ False
436
+ >>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent()
437
+ False
438
+ >>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent()
439
+ True
440
+
441
+ See Also
442
+ ========
443
+
444
+ Sum.is_absolutely_convergent
445
+ sympy.concrete.products.Product.is_convergent
446
+ """
447
+ p, q, r = symbols('p q r', cls=Wild)
448
+
449
+ sym = self.limits[0][0]
450
+ lower_limit = self.limits[0][1]
451
+ upper_limit = self.limits[0][2]
452
+ sequence_term = self.function.simplify()
453
+
454
+ if len(sequence_term.free_symbols) > 1:
455
+ raise NotImplementedError("convergence checking for more than one symbol "
456
+ "containing series is not handled")
457
+
458
+ if lower_limit.is_finite and upper_limit.is_finite:
459
+ return S.true
460
+
461
+ # transform sym -> -sym and swap the upper_limit = S.Infinity
462
+ # and lower_limit = - upper_limit
463
+ if lower_limit is S.NegativeInfinity:
464
+ if upper_limit is S.Infinity:
465
+ return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \
466
+ Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent()
467
+ from sympy.simplify.simplify import simplify
468
+ sequence_term = simplify(sequence_term.xreplace({sym: -sym}))
469
+ lower_limit = -upper_limit
470
+ upper_limit = S.Infinity
471
+
472
+ sym_ = Dummy(sym.name, integer=True, positive=True)
473
+ sequence_term = sequence_term.xreplace({sym: sym_})
474
+ sym = sym_
475
+
476
+ interval = Interval(lower_limit, upper_limit)
477
+
478
+ # Piecewise function handle
479
+ if sequence_term.is_Piecewise:
480
+ for func, cond in sequence_term.args:
481
+ # see if it represents something going to oo
482
+ if cond == True or cond.as_set().sup is S.Infinity:
483
+ s = Sum(func, (sym, lower_limit, upper_limit))
484
+ return s.is_convergent()
485
+ return S.true
486
+
487
+ ### -------- Divergence test ----------- ###
488
+ try:
489
+ lim_val = limit_seq(sequence_term, sym)
490
+ if lim_val is not None and lim_val.is_zero is False:
491
+ return S.false
492
+ except NotImplementedError:
493
+ pass
494
+
495
+ try:
496
+ lim_val_abs = limit_seq(abs(sequence_term), sym)
497
+ if lim_val_abs is not None and lim_val_abs.is_zero is False:
498
+ return S.false
499
+ except NotImplementedError:
500
+ pass
501
+
502
+ order = O(sequence_term, (sym, S.Infinity))
503
+
504
+ ### --------- p-series test (1/n**p) ---------- ###
505
+ p_series_test = order.expr.match(sym**p)
506
+ if p_series_test is not None:
507
+ if p_series_test[p] < -1:
508
+ return S.true
509
+ if p_series_test[p] >= -1:
510
+ return S.false
511
+
512
+ ### ------------- comparison test ------------- ###
513
+ # 1/(n**p*log(n)**q*log(log(n))**r) comparison
514
+ n_log_test = (order.expr.match(1/(sym**p*log(1/sym)**q*log(-log(1/sym))**r)) or
515
+ order.expr.match(1/(sym**p*(-log(1/sym))**q*log(-log(1/sym))**r)))
516
+ if n_log_test is not None:
517
+ if (n_log_test[p] > 1 or
518
+ (n_log_test[p] == 1 and n_log_test[q] > 1) or
519
+ (n_log_test[p] == n_log_test[q] == 1 and n_log_test[r] > 1)):
520
+ return S.true
521
+ return S.false
522
+
523
+ ### ------------- Limit comparison test -----------###
524
+ # (1/n) comparison
525
+ try:
526
+ lim_comp = limit_seq(sym*sequence_term, sym)
527
+ if lim_comp is not None and lim_comp.is_number and lim_comp > 0:
528
+ return S.false
529
+ except NotImplementedError:
530
+ pass
531
+
532
+ ### ----------- ratio test ---------------- ###
533
+ next_sequence_term = sequence_term.xreplace({sym: sym + 1})
534
+ from sympy.simplify.combsimp import combsimp
535
+ from sympy.simplify.powsimp import powsimp
536
+ ratio = combsimp(powsimp(next_sequence_term/sequence_term))
537
+ try:
538
+ lim_ratio = limit_seq(ratio, sym)
539
+ if lim_ratio is not None and lim_ratio.is_number:
540
+ if abs(lim_ratio) > 1:
541
+ return S.false
542
+ if abs(lim_ratio) < 1:
543
+ return S.true
544
+ except NotImplementedError:
545
+ lim_ratio = None
546
+
547
+ ### ---------- Raabe's test -------------- ###
548
+ if lim_ratio == 1: # ratio test inconclusive
549
+ test_val = sym*(sequence_term/
550
+ sequence_term.subs(sym, sym + 1) - 1)
551
+ test_val = test_val.gammasimp()
552
+ try:
553
+ lim_val = limit_seq(test_val, sym)
554
+ if lim_val is not None and lim_val.is_number:
555
+ if lim_val > 1:
556
+ return S.true
557
+ if lim_val < 1:
558
+ return S.false
559
+ except NotImplementedError:
560
+ pass
561
+
562
+ ### ----------- root test ---------------- ###
563
+ # lim = Limit(abs(sequence_term)**(1/sym), sym, S.Infinity)
564
+ try:
565
+ lim_evaluated = limit_seq(abs(sequence_term)**(1/sym), sym)
566
+ if lim_evaluated is not None and lim_evaluated.is_number:
567
+ if lim_evaluated < 1:
568
+ return S.true
569
+ if lim_evaluated > 1:
570
+ return S.false
571
+ except NotImplementedError:
572
+ pass
573
+
574
+ ### ------------- alternating series test ----------- ###
575
+ dict_val = sequence_term.match(S.NegativeOne**(sym + p)*q)
576
+ if not dict_val[p].has(sym) and is_decreasing(dict_val[q], interval):
577
+ return S.true
578
+
579
+ ### ------------- integral test -------------- ###
580
+ check_interval = None
581
+ from sympy.solvers.solveset import solveset
582
+ maxima = solveset(sequence_term.diff(sym), sym, interval)
583
+ if not maxima:
584
+ check_interval = interval
585
+ elif isinstance(maxima, FiniteSet) and maxima.sup.is_number:
586
+ check_interval = Interval(maxima.sup, interval.sup)
587
+ if (check_interval is not None and
588
+ (is_decreasing(sequence_term, check_interval) or
589
+ is_decreasing(-sequence_term, check_interval))):
590
+ integral_val = Integral(
591
+ sequence_term, (sym, lower_limit, upper_limit))
592
+ try:
593
+ integral_val_evaluated = integral_val.doit()
594
+ if integral_val_evaluated.is_number:
595
+ return S(integral_val_evaluated.is_finite)
596
+ except NotImplementedError:
597
+ pass
598
+
599
+ ### ----- Dirichlet and bounded times convergent tests ----- ###
600
+ # TODO
601
+ #
602
+ # Dirichlet_test
603
+ # https://en.wikipedia.org/wiki/Dirichlet%27s_test
604
+ #
605
+ # Bounded times convergent test
606
+ # It is based on comparison theorems for series.
607
+ # In particular, if the general term of a series can
608
+ # be written as a product of two terms a_n and b_n
609
+ # and if a_n is bounded and if Sum(b_n) is absolutely
610
+ # convergent, then the original series Sum(a_n * b_n)
611
+ # is absolutely convergent and so convergent.
612
+ #
613
+ # The following code can grows like 2**n where n is the
614
+ # number of args in order.expr
615
+ # Possibly combined with the potentially slow checks
616
+ # inside the loop, could make this test extremely slow
617
+ # for larger summation expressions.
618
+
619
+ if order.expr.is_Mul:
620
+ args = order.expr.args
621
+ argset = set(args)
622
+
623
+ ### -------------- Dirichlet tests -------------- ###
624
+ m = Dummy('m', integer=True)
625
+ def _dirichlet_test(g_n):
626
+ try:
627
+ ing_val = limit_seq(Sum(g_n, (sym, interval.inf, m)).doit(), m)
628
+ if ing_val is not None and ing_val.is_finite:
629
+ return S.true
630
+ except NotImplementedError:
631
+ pass
632
+
633
+ ### -------- bounded times convergent test ---------###
634
+ def _bounded_convergent_test(g1_n, g2_n):
635
+ try:
636
+ lim_val = limit_seq(g1_n, sym)
637
+ if lim_val is not None and (lim_val.is_finite or (
638
+ isinstance(lim_val, AccumulationBounds)
639
+ and (lim_val.max - lim_val.min).is_finite)):
640
+ if Sum(g2_n, (sym, lower_limit, upper_limit)).is_absolutely_convergent():
641
+ return S.true
642
+ except NotImplementedError:
643
+ pass
644
+
645
+ for n in range(1, len(argset)):
646
+ for a_tuple in itertools.combinations(args, n):
647
+ b_set = argset - set(a_tuple)
648
+ a_n = Mul(*a_tuple)
649
+ b_n = Mul(*b_set)
650
+
651
+ if is_decreasing(a_n, interval):
652
+ dirich = _dirichlet_test(b_n)
653
+ if dirich is not None:
654
+ return dirich
655
+
656
+ bc_test = _bounded_convergent_test(a_n, b_n)
657
+ if bc_test is not None:
658
+ return bc_test
659
+
660
+ _sym = self.limits[0][0]
661
+ sequence_term = sequence_term.xreplace({sym: _sym})
662
+ raise NotImplementedError("The algorithm to find the Sum convergence of %s "
663
+ "is not yet implemented" % (sequence_term))
664
+
665
+ def is_absolutely_convergent(self):
666
+ """
667
+ Checks for the absolute convergence of an infinite series.
668
+
669
+ Same as checking convergence of absolute value of sequence_term of
670
+ an infinite series.
671
+
672
+ References
673
+ ==========
674
+
675
+ .. [1] https://en.wikipedia.org/wiki/Absolute_convergence
676
+
677
+ Examples
678
+ ========
679
+
680
+ >>> from sympy import Sum, Symbol, oo
681
+ >>> n = Symbol('n', integer=True)
682
+ >>> Sum((-1)**n, (n, 1, oo)).is_absolutely_convergent()
683
+ False
684
+ >>> Sum((-1)**n/n**2, (n, 1, oo)).is_absolutely_convergent()
685
+ True
686
+
687
+ See Also
688
+ ========
689
+
690
+ Sum.is_convergent
691
+ """
692
+ return Sum(abs(self.function), self.limits).is_convergent()
693
+
694
    def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True):
        """
        Return an Euler-Maclaurin approximation of self, where m is the
        number of leading terms to sum directly and n is the number of
        terms in the tail.

        With m = n = 0, this is simply the corresponding integral
        plus a first-order endpoint correction.

        Returns (s, e) where s is the Euler-Maclaurin approximation
        and e is the estimated error (taken to be the magnitude of
        the first omitted term in the tail):

        >>> from sympy.abc import k, a, b
        >>> from sympy import Sum
        >>> Sum(1/k, (k, 2, 5)).doit().evalf()
        1.28333333333333
        >>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin()
        >>> s
        -log(2) + 7/20 + log(5)
        >>> from sympy import sstr
        >>> print(sstr((s.evalf(), e.evalf()), full_prec=True))
        (1.26629073187415, 0.0175000000000000)

        The endpoints may be symbolic:

        >>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin()
        >>> s
        -log(a) + log(b) + 1/(2*b) + 1/(2*a)
        >>> e
        Abs(1/(12*b**2) - 1/(12*a**2))

        If the function is a polynomial of degree at most 2n+1, the
        Euler-Maclaurin formula becomes exact (and e = 0 is returned):

        >>> Sum(k, (k, 2, b)).euler_maclaurin()
        (b**2/2 + b/2 - 1, 0)
        >>> Sum(k, (k, 2, b)).doit()
        b**2/2 + b/2 - 1

        With a nonzero eps specified, the summation is ended
        as soon as the remainder term is less than the epsilon.
        """
        m = int(m)
        n = int(n)
        f = self.function
        if len(self.limits) != 1:
            raise ValueError("More than 1 limit")
        i, a, b = self.limits[0]
        if (a > b) == True:
            # Reversed limits: an empty range sums to zero; otherwise
            # flip the range and negate the summand (Karr convention).
            if a - b == 1:
                return S.Zero, S.Zero
            a, b = b + 1, a - 1
            f = -f
        s = S.Zero
        if m:
            if b.is_Integer and a.is_Integer:
                # Never sum more leading terms than the range contains.
                m = min(m, b - a + 1)
            if not eps or f.is_polynomial(i):
                s = Add(*[f.subs(i, a + k) for k in range(m)])
            else:
                # With a tolerance given, stop the head sum early once a
                # term drops below eps (that term bounds the error).
                term = f.subs(i, a)
                if term:
                    test = abs(term.evalf(3)) < eps
                    if test == True:
                        return s, abs(term)
                    elif not (test == False):
                        # a symbolic Relational class, can't go further
                        return term, S.Zero
                s = term
                for k in range(1, m):
                    term = f.subs(i, a + k)
                    if abs(term.evalf(3)) < eps and term != 0:
                        return s, abs(term)
                    s += term
            if b - a + 1 == m:
                # The head consumed the whole (finite) range: exact.
                return s, S.Zero
            a += m
        x = Dummy('x')
        I = Integral(f.subs(i, x), (x, a, b))
        if eval_integral:
            I = I.doit()
        s += I

        def fpoint(expr):
            # Evaluate expr at both endpoints; at b = oo the upper
            # contribution is taken to vanish.
            if b is S.Infinity:
                return expr.subs(i, a), 0
            return expr.subs(i, a), expr.subs(i, b)
        fa, fb = fpoint(f)
        # First-order endpoint (trapezoidal) correction.
        iterm = (fa + fb)/2
        g = f.diff(i)
        # Tail: Bernoulli-number correction terms; the loop runs one step
        # past n so that `term` holds the first omitted term, which is
        # used as the error estimate.
        for k in range(1, n + 2):
            ga, gb = fpoint(g)
            term = bernoulli(2*k)/factorial(2*k)*(gb - ga)
            if k > n:
                break
            if eps and term:
                term_evalf = term.evalf(3)
                if term_evalf is S.NaN:
                    return S.NaN, S.NaN
                if abs(term_evalf) < eps:
                    break
            s += term
            g = g.diff(i, 2, simplify=False)
        return s + iterm, abs(term)
799
+
800
+
801
+ def reverse_order(self, *indices):
802
+ """
803
+ Reverse the order of a limit in a Sum.
804
+
805
+ Explanation
806
+ ===========
807
+
808
+ ``reverse_order(self, *indices)`` reverses some limits in the expression
809
+ ``self`` which can be either a ``Sum`` or a ``Product``. The selectors in
810
+ the argument ``indices`` specify some indices whose limits get reversed.
811
+ These selectors are either variable names or numerical indices counted
812
+ starting from the inner-most limit tuple.
813
+
814
+ Examples
815
+ ========
816
+
817
+ >>> from sympy import Sum
818
+ >>> from sympy.abc import x, y, a, b, c, d
819
+
820
+ >>> Sum(x, (x, 0, 3)).reverse_order(x)
821
+ Sum(-x, (x, 4, -1))
822
+ >>> Sum(x*y, (x, 1, 5), (y, 0, 6)).reverse_order(x, y)
823
+ Sum(x*y, (x, 6, 0), (y, 7, -1))
824
+ >>> Sum(x, (x, a, b)).reverse_order(x)
825
+ Sum(-x, (x, b + 1, a - 1))
826
+ >>> Sum(x, (x, a, b)).reverse_order(0)
827
+ Sum(-x, (x, b + 1, a - 1))
828
+
829
+ While one should prefer variable names when specifying which limits
830
+ to reverse, the index counting notation comes in handy in case there
831
+ are several symbols with the same name.
832
+
833
+ >>> S = Sum(x**2, (x, a, b), (x, c, d))
834
+ >>> S
835
+ Sum(x**2, (x, a, b), (x, c, d))
836
+ >>> S0 = S.reverse_order(0)
837
+ >>> S0
838
+ Sum(-x**2, (x, b + 1, a - 1), (x, c, d))
839
+ >>> S1 = S0.reverse_order(1)
840
+ >>> S1
841
+ Sum(x**2, (x, b + 1, a - 1), (x, d + 1, c - 1))
842
+
843
+ Of course we can mix both notations:
844
+
845
+ >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
846
+ Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
847
+ >>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
848
+ Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
849
+
850
+ See Also
851
+ ========
852
+
853
+ sympy.concrete.expr_with_intlimits.ExprWithIntLimits.index, reorder_limit,
854
+ sympy.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
855
+
856
+ References
857
+ ==========
858
+
859
+ .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
860
+ Volume 28 Issue 2, April 1981, Pages 305-350
861
+ https://dl.acm.org/doi/10.1145/322248.322255
862
+ """
863
+ l_indices = list(indices)
864
+
865
+ for i, indx in enumerate(l_indices):
866
+ if not isinstance(indx, int):
867
+ l_indices[i] = self.index(indx)
868
+
869
+ e = 1
870
+ limits = []
871
+ for i, limit in enumerate(self.limits):
872
+ l = limit
873
+ if i in l_indices:
874
+ e = -e
875
+ l = (limit[0], limit[2] + 1, limit[1] - 1)
876
+ limits.append(l)
877
+
878
+ return Sum(e * self.function, *limits)
879
+
880
+ def _eval_rewrite_as_Product(self, *args, **kwargs):
881
+ from sympy.concrete.products import Product
882
+ if self.function.is_extended_real:
883
+ return log(Product(exp(self.function), *self.limits))
884
+
885
+
886
def summation(f, *symbols, **kwargs):
    r"""
    Compute the summation of f with respect to symbols.

    Explanation
    ===========

    The notation mirrors that used for Integral:
    ``summation(f, (i, a, b))`` computes the sum of f over i running
    from a to b, i.e.,

    ::

                                     b
                                   ____
                                   \   `
        summation(f, (i, a, b)) =   )    f
                                   /___,
                                   i = a

    If the sum cannot be computed, an unevaluated Sum object is
    returned.  Repeated sums can be computed by introducing additional
    symbols tuples::

    Examples
    ========

    >>> from sympy import summation, oo, symbols, log
    >>> i, n, m = symbols('i n m', integer=True)

    >>> summation(2*i - 1, (i, 1, n))
    n**2
    >>> summation(1/2**i, (i, 0, oo))
    2
    >>> summation(1/log(n)**n, (n, 2, oo))
    Sum(log(n)**(-n), (n, 2, oo))
    >>> summation(i, (i, 0, n), (n, 0, m))
    m**3/6 + m**2/2 + m/3

    >>> from sympy.abc import x
    >>> from sympy import factorial
    >>> summation(x**n/factorial(n), (n, 0, oo))
    exp(x)

    See Also
    ========

    Sum
    Product, sympy.concrete.products.product

    """
    unevaluated_sum = Sum(f, *symbols, **kwargs)
    return unevaluated_sum.doit(deep=False)
937
+
938
+
939
def telescopic_direct(L, R, n, limits):
    """
    Return the direct summation of the terms of a telescopic sum.

    Explanation
    ===========

    L is the term with the lower index, R the term with the higher
    index, and n the difference between their indices.  After
    cancellation only the first n evaluations of L and the last n
    evaluations of R survive, so the sum collapses to those terms.

    Examples
    ========

    >>> from sympy.concrete.summations import telescopic_direct
    >>> from sympy.abc import k, a, b
    >>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
    -1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a

    """
    (i, a, b) = limits
    surviving = []
    for offset in range(n):
        surviving.append(L.subs(i, a + offset) + R.subs(i, b - offset))
    return Add(*surviving)
961
+
962
+
963
def telescopic(L, R, limits):
    '''
    Try to perform the summation using the telescopic property.

    Return None if the telescoping shift cannot be established.
    '''
    (i, a, b) = limits
    if L.is_Add or R.is_Add:
        return None

    # We look for an integer shift s with L.subs(i, i + s) == -R.
    # A structural match is attempted first because it handles cases
    # that solve() over-complicates, e.g. solve(cos(k+m)-cos(k), m)
    # gives a messier answer than the obvious m == 0.
    k = Wild("k")
    shift = None
    matched = (-R).match(L.subs(i, i + k))
    if matched and k in matched:
        shift = matched[k]
        if not (shift.is_Integer and L.subs(i, i + shift) + R == 0):
            # spurious match, or the shift is not a valid integer
            shift = None

    # Conversely, solve() covers cases match misses, e.g. determining
    # that 1/(x + m) = 1/(1 - x) when m = 1.
    if shift is None:
        m = Dummy('m')
        try:
            from sympy.solvers.solvers import solve
            candidates = solve(L.subs(i, i + m) + R, m) or []
        except NotImplementedError:
            return None
        candidates = [c for c in candidates if c.is_Integer and
                      (L.subs(i, i + c) + R).expand().is_zero]
        if len(candidates) != 1:
            return None
        shift = candidates[0]

    if shift < 0:
        return telescopic_direct(R, L, abs(shift), (i, a, b))
    elif shift > 0:
        return telescopic_direct(L, R, shift, (i, a, b))
1007
+
1008
+
1009
def eval_sum(f, limits):
    # Top-level evaluator for Sum(f, (i, a, b)).  Tries cheap structural
    # shortcuts first, then direct term-by-term evaluation for short
    # ranges, then symbolic closed forms.  Returns None when nothing
    # applies.
    (i, a, b) = limits
    if f.is_zero:
        return S.Zero
    if i not in f.free_symbols:
        # Summand is constant in i: (b - a + 1) identical terms.
        return f*(b - a + 1)
    if a == b:
        # Single-term range.
        return f.subs(i, a)
    if isinstance(f, Piecewise):
        if not any(i in arg.args[1].free_symbols for arg in f.args):
            # Piecewise conditions do not depend on the dummy summation variable,
            # therefore we can fold:     Sum(Piecewise((e, c), ...), limits)
            #     --> Piecewise((Sum(e, limits), c), ...)
            newargs = []
            for arg in f.args:
                newexpr = eval_sum(arg.expr, limits)
                if newexpr is None:
                    return None
                newargs.append((newexpr, arg.cond))
            return f.func(*newargs)

    if f.has(KroneckerDelta):
        from .delta import deltasummation, _has_simple_delta
        # Factoring inner Sums can expose a simple delta structure.
        f = f.replace(
            lambda x: isinstance(x, Sum),
            lambda x: x.factor()
        )
        if _has_simple_delta(f, limits[0]):
            return deltasummation(f, limits)

    dif = b - a
    definite = dif.is_Integer
    # Doing it directly may be faster if there are very few terms.
    if definite and (dif < 100):
        return eval_sum_direct(f, (i, a, b))
    if isinstance(f, Piecewise):
        # Piecewise with i-dependent conditions has no symbolic path.
        return None
    # Try to do it symbolically. Even when the number of terms is
    # known, this can save time when b-a is big.
    value = eval_sum_symbolic(f.expand(), (i, a, b))
    if value is not None:
        return value
    # Fall back to direct evaluation for definite (finite) ranges.
    if definite:
        return eval_sum_direct(f, (i, a, b))
1054
+
1055
+
1056
def eval_sum_direct(expr, limits):
    """
    Evaluate expression directly, but perform some simple checks first
    to possibly result in a smaller expression and faster execution.

    The checks exploit linearity of summation: constant factors are
    pulled out of products, constant addends are summed in closed form,
    and Add expressions are summed term by term.  The final fallback
    substitutes every value of the (assumed finite) range.
    """
    (i, a, b) = limits

    dif = b - a
    # Linearity
    if expr.is_Mul:
        # Try factor out everything not including i
        without_i, with_i = expr.as_independent(i)
        if without_i != 1:
            s = eval_sum_direct(with_i, (i, a, b))
            if s:
                r = without_i*s
                if r is not S.NaN:
                    return r
        else:
            # Try term by term
            L, R = expr.as_two_terms()

            if not L.has(i):
                sR = eval_sum_direct(R, (i, a, b))
                if sR:
                    return L*sR

            if not R.has(i):
                sL = eval_sum_direct(L, (i, a, b))
                if sL:
                    return sL*R

        # do this whether its an Add or Mul
        # e.g. apart(1/(25*i**2 + 45*i + 14)) and
        # apart(1/((5*i + 2)*(5*i + 7))) ->
        # -1/(5*(5*i + 7)) + 1/(5*(5*i + 2))
        try:
            expr = apart(expr, i)  # see if it becomes an Add
        except PolynomialError:
            pass

    if expr.is_Add:
        # Try factor out everything not including i
        without_i, with_i = expr.as_independent(i)
        if without_i != 0:
            s = eval_sum_direct(with_i, (i, a, b))
            if s:
                # The constant part contributes once per term of the range.
                r = without_i*(dif + 1) + s
                if r is not S.NaN:
                    return r
        else:
            # Try term by term
            L, R = expr.as_two_terms()
            lsum = eval_sum_direct(L, (i, a, b))
            rsum = eval_sum_direct(R, (i, a, b))

            if None not in (lsum, rsum):
                r = lsum + rsum
                if r is not S.NaN:
                    return r

    # Fallback: substitute every index value of the finite range.
    return Add(*[expr.subs(i, a + j) for j in range(dif + 1)])
1118
+
1119
+
1120
def eval_sum_symbolic(f, limits):
    # Attempt a symbolic closed form for Sum(f, (i, a, b)).  Strategies,
    # in order: linearity / term-by-term splitting, telescoping,
    # Faulhaber's formula for powers of i, geometric series, Gosper's
    # algorithm, hypergeometric summation, residue summation, and
    # finally a retry on the factored summand.  Returns None on failure.
    f_orig = f
    (i, a, b) = limits
    if not f.has(i):
        return f*(b - a + 1)

    # Linearity
    if f.is_Mul:
        # Try factor out everything not including i
        without_i, with_i = f.as_independent(i)
        if without_i != 1:
            s = eval_sum_symbolic(with_i, (i, a, b))
            if s:
                r = without_i*s
                if r is not S.NaN:
                    return r
        else:
            # Try term by term
            L, R = f.as_two_terms()

            if not L.has(i):
                sR = eval_sum_symbolic(R, (i, a, b))
                if sR:
                    return L*sR

            if not R.has(i):
                sL = eval_sum_symbolic(L, (i, a, b))
                if sL:
                    return sL*R

        # do this whether its an Add or Mul
        # e.g. apart(1/(25*i**2 + 45*i + 14)) and
        # apart(1/((5*i + 2)*(5*i + 7))) ->
        # -1/(5*(5*i + 7)) + 1/(5*(5*i + 2))
        try:
            f = apart(f, i)
        except PolynomialError:
            pass

    if f.is_Add:
        L, R = f.as_two_terms()
        lrsum = telescopic(L, R, (i, a, b))

        if lrsum:
            return lrsum

        # Try factor out everything not including i
        without_i, with_i = f.as_independent(i)
        if without_i != 0:
            s = eval_sum_symbolic(with_i, (i, a, b))
            if s:
                r = without_i*(b - a + 1) + s
                if r is not S.NaN:
                    return r
        else:
            # Try term by term
            lsum = eval_sum_symbolic(L, (i, a, b))
            rsum = eval_sum_symbolic(R, (i, a, b))

            if None not in (lsum, rsum):
                r = lsum + rsum
                if r is not S.NaN:
                    return r


    # Polynomial terms with Faulhaber's formula
    n = Wild('n')
    result = f.match(i**n)

    if result is not None:
        n = result[n]

        if n.is_Integer:
            if n >= 0:
                # A nonnegative power diverges over a half-infinite range.
                if (b is S.Infinity and a is not S.NegativeInfinity) or \
                   (a is S.NegativeInfinity and b is not S.Infinity):
                    return S.Infinity
                return ((bernoulli(n + 1, b + 1) - bernoulli(n + 1, a))/(n + 1)).expand()
            elif a.is_Integer and a >= 1:
                # Negative powers give (generalized) harmonic numbers.
                if n == -1:
                    return harmonic(b) - harmonic(a - 1)
                else:
                    return harmonic(b, abs(n)) - harmonic(a - 1, abs(n))

    if not (a.has(S.Infinity, S.NegativeInfinity) or
            b.has(S.Infinity, S.NegativeInfinity)):
        # Geometric terms
        c1 = Wild('c1', exclude=[i])
        c2 = Wild('c2', exclude=[i])
        c3 = Wild('c3', exclude=[i])
        wexp = Wild('wexp')

        # Here we first attempt powsimp on f for easier matching with the
        # exponential pattern, and attempt expansion on the exponent for easier
        # matching with the linear pattern.
        e = f.powsimp().match(c1 ** wexp)
        if e is not None:
            e_exp = e.pop(wexp).expand().match(c2*i + c3)
            if e_exp is not None:
                e.update(e_exp)

                p = (c1**c3).subs(e)
                q = (c1**c2).subs(e)
                r = p*(q**a - q**(b + 1))/(1 - q)
                l = p*(b - a + 1)
                # q == 1 degenerates to a constant run; guard with Piecewise.
                return Piecewise((l, Eq(q, S.One)), (r, True))

        r = gosper_sum(f, (i, a, b))

        if isinstance(r, (Mul,Add)):
            # Gosper's result may be invalid where its denominator
            # vanishes; patch those parameter values with a Piecewise.
            from sympy.simplify.radsimp import denom
            from sympy.solvers.solvers import solve
            non_limit = r.free_symbols - Tuple(*limits[1:]).free_symbols
            den = denom(together(r))
            den_sym = non_limit & den.free_symbols
            args = []
            for v in ordered(den_sym):
                try:
                    s = solve(den, v)
                    m = Eq(v, s[0]) if s else S.false
                    if m != False:
                        args.append((Sum(f_orig.subs(*m.args), limits).doit(), m))
                    break
                except NotImplementedError:
                    continue

            args.append((r, True))
            return Piecewise(*args)

        if r not in (None, S.NaN):
            return r

    h = eval_sum_hyper(f_orig, (i, a, b))
    if h is not None:
        return h

    r = eval_sum_residue(f_orig, (i, a, b))
    if r is not None:
        return r

    # Last resort: a factored form may expose structure missed above.
    factored = f_orig.factor()
    if factored != f_orig:
        return eval_sum_symbolic(factored, (i, a, b))
1263
+
1264
+
1265
def _eval_sum_hyper(f, i, a):
    """ Returns (res, cond). Sums from a to oo.

    Expresses Sum(f, (i, a, oo)) as a hypergeometric function by
    extracting the parameters from the term ratio f(i+1)/f(i), then
    hyperexpanding.  Returns None when the summand is not recognized as
    hypergeometric-summable in this form.
    """
    if a != 0:
        # Normalize to a sum starting at 0.
        return _eval_sum_hyper(f.subs(i, i + a), i, 0)

    if f.subs(i, 0) == 0:
        # A vanishing leading term breaks the ratio extraction; either
        # the whole series is zero, or we can drop the first term.
        from sympy.simplify.simplify import simplify
        if simplify(f.subs(i, Dummy('i', integer=True, positive=True))) == 0:
            return S.Zero, True
        return _eval_sum_hyper(f.subs(i, i + 1), i, 0)

    from sympy.simplify.simplify import hypersimp
    hs = hypersimp(f, i)
    if hs is None:
        return None

    if isinstance(hs, Float):
        from sympy.simplify.simplify import nsimplify
        hs = nsimplify(hs)

    from sympy.simplify.combsimp import combsimp
    from sympy.simplify.hyperexpand import hyperexpand
    from sympy.simplify.radsimp import fraction
    # The term ratio hs = f(i+1)/f(i) is a rational function of i; its
    # linear factors give the hypergeometric parameters.
    numer, denom = fraction(factor(hs))
    top, topl = numer.as_coeff_mul(i)
    bot, botl = denom.as_coeff_mul(i)
    ab = [top, bot]
    factors = [topl, botl]
    params = [[], []]
    for k in range(2):
        for fac in factors[k]:
            mul = 1
            if fac.is_Pow:
                mul = fac.exp
                fac = fac.base
                if not mul.is_Integer:
                    return None
            p = Poly(fac, i)
            if p.degree() != 1:
                # Only linear factors map onto hyper() parameters.
                return None
            m, n = p.all_coeffs()
            ab[k] *= m**mul
            params[k] += [n/m]*mul

    # Add "1" to numerator parameters, to account for implicit n! in
    # hypergeometric series.
    ap = params[0] + [1]
    bq = params[1]
    x = ab[0]/ab[1]
    h = hyper(ap, bq, x)
    f = combsimp(f)
    # Scale by the leading term f(0); the hyper object also carries the
    # condition under which the series converges.
    return f.subs(i, 0)*hyperexpand(h), h.convergence_statement
1317
+
1318
+
1319
def eval_sum_hyper(f, i_a_b):
    # Evaluate Sum(f, (i, a, b)) via hypergeometric summation, handling
    # the different combinations of finite/infinite bounds by reduction
    # to sums from a fixed point to oo.  Returns a Piecewise guarded by
    # the convergence condition, a plain value, or None.
    i, a, b = i_a_b

    if f.is_hypergeometric(i) is False:
        return

    if (b - a).is_Integer:
        # We are never going to do better than doing the sum in the obvious way
        return None

    old_sum = Sum(f, (i, a, b))

    if b != S.Infinity:
        if a is S.NegativeInfinity:
            # Sum over (-oo, b]: mirror the index to get a sum to oo.
            res = _eval_sum_hyper(f.subs(i, -i), i, -b)
            if res is not None:
                return Piecewise(res, (old_sum, True))
        else:
            # Finite [a, b]: difference of two tails, Sum(a..oo) - Sum(b+1..oo).
            n_illegal = lambda x: sum(x.count(_) for _ in _illegal)
            had = n_illegal(f)
            # check that no extra illegals are introduced
            res1 = _eval_sum_hyper(f, i, a)
            if res1 is None or n_illegal(res1) > had:
                return
            res2 = _eval_sum_hyper(f, i, b + 1)
            if res2 is None or n_illegal(res2) > had:
                return
            (res1, cond1), (res2, cond2) = res1, res2
            cond = And(cond1, cond2)
            if cond == False:
                return None
            return Piecewise((res1 - res2, cond), (old_sum, True))

    if a is S.NegativeInfinity:
        # Doubly infinite: split at 0 into a mirrored sum over i >= 1
        # plus a sum over i >= 0.
        res1 = _eval_sum_hyper(f.subs(i, -i), i, 1)
        res2 = _eval_sum_hyper(f, i, 0)
        if res1 is None or res2 is None:
            return None
        res1, cond1 = res1
        res2, cond2 = res2
        cond = And(cond1, cond2)
        if cond == False or cond.as_set() == S.EmptySet:
            return None
        return Piecewise((res1 + res2, cond), (old_sum, True))

    # Now b == oo, a != -oo
    res = _eval_sum_hyper(f, i, a)
    if res is not None:
        r, c = res
        if c == False:
            # Divergent series: determine the signed infinity if the
            # terms have a definite sign, otherwise give up.
            if r.is_number:
                f = f.subs(i, Dummy('i', integer=True, positive=True) + a)
                if f.is_positive or f.is_zero:
                    return S.Infinity
                elif f.is_negative:
                    return S.NegativeInfinity
            return None
        return Piecewise(res, (old_sum, True))
1377
+
1378
+
1379
def eval_sum_residue(f, i_a_b):
    r"""Compute the infinite summation with residues

    Notes
    =====

    If $f(n), g(n)$ are polynomials with $\deg(g(n)) - \deg(f(n)) \ge 2$,
    some infinite summations can be computed by the following residue
    evaluations.

    .. math::
        \sum_{n=-\infty, g(n) \ne 0}^{\infty} \frac{f(n)}{g(n)} =
        -\pi \sum_{\alpha|g(\alpha)=0}
        \text{Res}(\cot(\pi x) \frac{f(x)}{g(x)}, \alpha)

    .. math::
        \sum_{n=-\infty, g(n) \ne 0}^{\infty} (-1)^n \frac{f(n)}{g(n)} =
        -\pi \sum_{\alpha|g(\alpha)=0}
        \text{Res}(\csc(\pi x) \frac{f(x)}{g(x)}, \alpha)

    Examples
    ========

    >>> from sympy import Sum, oo, Symbol
    >>> x = Symbol('x')

    Doubly infinite series of rational functions.

    >>> Sum(1 / (x**2 + 1), (x, -oo, oo)).doit()
    pi/tanh(pi)

    Doubly infinite alternating series of rational functions.

    >>> Sum((-1)**x / (x**2 + 1), (x, -oo, oo)).doit()
    pi/sinh(pi)

    Infinite series of even rational functions.

    >>> Sum(1 / (x**2 + 1), (x, 0, oo)).doit()
    1/2 + pi/(2*tanh(pi))

    Infinite series of alternating even rational functions.

    >>> Sum((-1)**x / (x**2 + 1), (x, 0, oo)).doit()
    pi/(2*sinh(pi)) + 1/2

    This also has heuristics to transform an arbitrarily shifted summand
    or an arbitrarily shifted summation range to the canonical problem
    the formula can handle.

    >>> Sum(1 / (x**2 + 2*x + 2), (x, -1, oo)).doit()
    1/2 + pi/(2*tanh(pi))
    >>> Sum(1 / (x**2 + 4*x + 5), (x, -2, oo)).doit()
    1/2 + pi/(2*tanh(pi))
    >>> Sum(1 / (x**2 + 1), (x, 1, oo)).doit()
    -1/2 + pi/(2*tanh(pi))
    >>> Sum(1 / (x**2 + 1), (x, 2, oo)).doit()
    -1 + pi/(2*tanh(pi))

    References
    ==========

    .. [#] http://www.supermath.info/InfiniteSeriesandtheResidueTheorem.pdf

    .. [#] Asmar N.H., Grafakos L. (2018) Residue Theory.
           In: Complex Analysis with Applications.
           Undergraduate Texts in Mathematics. Springer, Cham.
           https://doi.org/10.1007/978-3-319-94063-2_5
    """
    i, a, b = i_a_b

    def is_even_function(numer, denom):
        """Test if the rational function is an even function"""
        # Even/even and odd/odd quotients are both even functions.
        numer_even = all(i % 2 == 0 for (i,) in numer.monoms())
        denom_even = all(i % 2 == 0 for (i,) in denom.monoms())
        numer_odd = all(i % 2 == 1 for (i,) in numer.monoms())
        denom_odd = all(i % 2 == 1 for (i,) in denom.monoms())
        return (numer_even and denom_even) or (numer_odd and denom_odd)

    def match_rational(f, i):
        # Return (numer, denom) as Poly objects, or None if f is not a
        # rational function of i.
        numer, denom = f.as_numer_denom()
        try:
            (numer, denom), opt = parallel_poly_from_expr((numer, denom), i)
        except (PolificationFailed, PolynomialError):
            return None
        return numer, denom

    def get_poles(denom):
        # Partition the (square-free) roots of denom into integer and
        # non-integer poles; None if integrality cannot be decided.
        roots = denom.sqf_part().all_roots()
        roots = sift(roots, lambda x: x.is_integer)
        if None in roots:
            return None
        int_roots, nonint_roots = roots[True], roots[False]
        return int_roots, nonint_roots

    def get_shift(denom):
        # Mean of the roots of denom (by Vieta); shifting by this value
        # centers the denominator at the origin.
        n = denom.degree(i)
        a = denom.coeff_monomial(i**n)
        b = denom.coeff_monomial(i**(n-1))
        shift = - b / a / n
        return shift

    # Need a dummy symbol with no assumptions set for get_residue_factor
    z = Dummy('z')

    def get_residue_factor(numer, denom, alternating):
        # cot for plain series, csc for alternating series (see the
        # formulas in the docstring).
        residue_factor = (numer.as_expr() / denom.as_expr()).subs(i, z)
        if not alternating:
            residue_factor *= cot(S.Pi * z)
        else:
            residue_factor *= csc(S.Pi * z)
        return residue_factor

    # We don't know how to deal with symbolic constants in summand
    if f.free_symbols - {i}:
        return None

    if not (a.is_Integer or a in (S.Infinity, S.NegativeInfinity)):
        return None
    if not (b.is_Integer or b in (S.Infinity, S.NegativeInfinity)):
        return None

    # Quick exit heuristic for the sums which doesn't have infinite range
    if a != S.NegativeInfinity and b != S.Infinity:
        return None

    match = match_rational(f, i)
    if match:
        alternating = False
        numer, denom = match
    else:
        # Retry with the (-1)**i factor stripped: alternating series.
        match = match_rational(f / S.NegativeOne**i, i)
        if match:
            alternating = True
            numer, denom = match
        else:
            return None

    # The residue formulas need deg(denom) - deg(numer) >= 2.
    if denom.degree(i) - numer.degree(i) < 2:
        return None

    if (a, b) == (S.NegativeInfinity, S.Infinity):
        poles = get_poles(denom)
        if poles is None:
            return None
        int_roots, nonint_roots = poles

        if int_roots:
            # An integer pole lies inside the summation range.
            return None

        residue_factor = get_residue_factor(numer, denom, alternating)
        residues = [residue(residue_factor, z, root) for root in nonint_roots]
        return -S.Pi * sum(residues)

    if not (a.is_finite and b is S.Infinity):
        return None

    if not is_even_function(numer, denom):
        # Try shifting summation and check if the summand can be made
        # an even function from the origin.
        # Sum(f(n), (n, a, b)) => Sum(f(n + s), (n, a - s, b - s))
        shift = get_shift(denom)

        if not shift.is_Integer:
            return None
        if shift == 0:
            return None

        numer = numer.shift(shift)
        denom = denom.shift(shift)

        if not is_even_function(numer, denom):
            return None

        if alternating:
            # An odd shift flips the sign of the alternating factor.
            f = S.NegativeOne**i * (S.NegativeOne**shift * numer.as_expr() / denom.as_expr())
        else:
            f = numer.as_expr() / denom.as_expr()
        return eval_sum_residue(f, (i, a-shift, b-shift))

    poles = get_poles(denom)
    if poles is None:
        return None
    int_roots, nonint_roots = poles

    if int_roots:
        int_roots = [int(root) for root in int_roots]
        int_roots_max = max(int_roots)
        int_roots_min = min(int_roots)
        # Integer valued poles must be next to each other
        # and also symmetric from origin (Because the function is even)
        if not len(int_roots) == int_roots_max - int_roots_min + 1:
            return None

        # Check whether the summation indices contain poles
        if a <= max(int_roots):
            return None

    residue_factor = get_residue_factor(numer, denom, alternating)
    residues = [residue(residue_factor, z, root) for root in int_roots + nonint_roots]
    full_sum = -S.Pi * sum(residues)

    if not int_roots:
        # Compute Sum(f, (i, 0, oo)) by adding an extraneous evaluation
        # at the origin (the summand is even, so the doubly infinite sum
        # is twice the one-sided sum minus/plus the origin term).
        half_sum = (full_sum + f.xreplace({i: 0})) / 2

        # Add and subtract extraneous evaluations
        extraneous_neg = [f.xreplace({i: i0}) for i0 in range(int(a), 0)]
        extraneous_pos = [f.xreplace({i: i0}) for i0 in range(0, int(a))]
        result = half_sum + sum(extraneous_neg) - sum(extraneous_pos)

        return result

    # Compute Sum(f, (i, min(poles) + 1, oo))
    half_sum = full_sum / 2

    # Subtract extraneous evaluations
    extraneous = [f.xreplace({i: i0}) for i0 in range(max(int_roots) + 1, int(a))]
    result = half_sum - sum(extraneous)

    return result
1601
+
1602
+
1603
def _eval_matrix_sum(expression):
    """Evaluate a Sum with a matrix summand by direct term-by-term
    summation over the first limit whose range has integer length.

    Returns None when no limit qualifies or direct evaluation fails.
    """
    summand = expression.function
    for index, lower, upper in expression.limits:
        span = upper - lower
        if not span.is_Integer:
            continue
        if (span < 0) == True:
            # Reversed range: flip the bounds and negate the summand.
            lower, upper = upper + 1, lower - 1
            summand = -summand

        evaluated = eval_sum_direct(summand, (index, lower, upper))
        if evaluated is not None:
            return evaluated.doit()
1616
+
1617
+
1618
def _dummy_with_inherited_properties_concrete(limits):
    """
    Return a Dummy symbol that inherits as many assumptions as possible
    from the provided symbol and limits.

    If the symbol already carries every True assumption shared by the
    limits, return None (no better dummy can be built).
    """
    x, a, b = limits
    endpoints = (a, b)

    candidates = ['extended_nonnegative', 'nonnegative',
                  'extended_nonpositive', 'nonpositive',
                  'extended_positive', 'positive',
                  'extended_negative', 'negative',
                  'integer', 'rational', 'finite',
                  'zero', 'real', 'extended_real']

    inherited = {}
    gained = {}
    for name in candidates:
        if x._assumptions.get(name, None):
            # Already declared on the symbol: carry it over unchanged.
            inherited[name] = True
        elif all(getattr(e, 'is_' + name) for e in endpoints):
            # Both endpoints guarantee it, so the dummy may assume it too.
            gained[name] = True

    if not gained:
        return None
    inherited.update(gained)
    return Dummy('d', **inherited)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/matrices/kind.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # sympy.matrices.kind
2
+
3
+ from sympy.core.kind import Kind, _NumberKind, NumberKind
4
+ from sympy.core.mul import Mul
5
+
6
+
7
+ class MatrixKind(Kind):
8
+ """
9
+ Kind for all matrices in SymPy.
10
+
11
+ Basic class for this kind is ``MatrixBase`` and ``MatrixExpr``,
12
+ but any expression representing the matrix can have this.
13
+
14
+ Parameters
15
+ ==========
16
+
17
+ element_kind : Kind
18
+ Kind of the element. Default is
19
+ :class:`sympy.core.kind.NumberKind`,
20
+ which means that the matrix contains only numbers.
21
+
22
+ Examples
23
+ ========
24
+
25
+ Any instance of matrix class has kind ``MatrixKind``:
26
+
27
+ >>> from sympy import MatrixSymbol
28
+ >>> A = MatrixSymbol('A', 2, 2)
29
+ >>> A.kind
30
+ MatrixKind(NumberKind)
31
+
32
+ An expression representing a matrix may not be an instance of
33
+ the Matrix class, but it will have kind ``MatrixKind``:
34
+
35
+ >>> from sympy import MatrixExpr, Integral
36
+ >>> from sympy.abc import x
37
+ >>> intM = Integral(A, x)
38
+ >>> isinstance(intM, MatrixExpr)
39
+ False
40
+ >>> intM.kind
41
+ MatrixKind(NumberKind)
42
+
43
+ Use ``isinstance()`` to check for ``MatrixKind`` without specifying the
44
+ element kind. Use ``is`` to check the kind including the element kind:
45
+
46
+ >>> from sympy import Matrix
47
+ >>> from sympy.core import NumberKind
48
+ >>> from sympy.matrices import MatrixKind
49
+ >>> M = Matrix([1, 2])
50
+ >>> isinstance(M.kind, MatrixKind)
51
+ True
52
+ >>> M.kind is MatrixKind(NumberKind)
53
+ True
54
+
55
+ See Also
56
+ ========
57
+
58
+ sympy.core.kind.NumberKind
59
+ sympy.core.kind.UndefinedKind
60
+ sympy.core.containers.TupleKind
61
+ sympy.sets.sets.SetKind
62
+
63
+ """
64
+ def __new__(cls, element_kind=NumberKind):
65
+ obj = super().__new__(cls, element_kind)
66
+ obj.element_kind = element_kind
67
+ return obj
68
+
69
+ def __repr__(self):
70
+ return "MatrixKind(%s)" % self.element_kind
71
+
72
+
73
+ @Mul._kind_dispatcher.register(_NumberKind, MatrixKind)
74
+ def num_mat_mul(k1, k2):
75
+ """
76
+ Return MatrixKind. The element kind is selected by recursive dispatching.
77
+ Do not need to dispatch in reversed order because KindDispatcher
78
+ searches for this automatically.
79
+ """
80
+ # Deal with Mul._kind_dispatcher's commutativity
81
+ # XXX: this function is called with either k1 or k2 as MatrixKind because
82
+ # the Mul kind dispatcher is commutative. Maybe it shouldn't be. Need to
83
+ # swap the args here because NumberKind does not have an element_kind
84
+ # attribute.
85
+ if not isinstance(k2, MatrixKind):
86
+ k1, k2 = k2, k1
87
+ elemk = Mul._kind_dispatcher(k1, k2.element_kind)
88
+ return MatrixKind(elemk)
89
+
90
+
91
+ @Mul._kind_dispatcher.register(MatrixKind, MatrixKind)
92
+ def mat_mat_mul(k1, k2):
93
+ """
94
+ Return MatrixKind. The element kind is selected by recursive dispatching.
95
+ """
96
+ elemk = Mul._kind_dispatcher(k1.element_kind, k2.element_kind)
97
+ return MatrixKind(elemk)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/ntheory/tests/__pycache__/test_factor_.cpython-311.pyc ADDED
Binary file (51.8 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/plotting/backends/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (224 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/plotting/backends/__pycache__/base_backend.cpython-311.pyc ADDED
Binary file (19.5 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/sympy/plotting/backends/matplotlibbackend/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from sympy.plotting.backends.matplotlibbackend.matplotlib import (
2
+ MatplotlibBackend, _matplotlib_list
3
+ )
4
+
5
+ __all__ = ["MatplotlibBackend", "_matplotlib_list"]