Dorothydu commited on
Commit
92f27bb
·
verified ·
1 Parent(s): 4564ae4

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. 5485-master/demos/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/node_modules/node-pre-gyp/node_modules/semver/semver.min.js.gz +3 -0
  3. 670proj-master/cnn-text-classification/word2vector3.model +3 -0
  4. 8004-master/fluid_en/static/fluid_en/Img_I5.png +3 -0
  5. 8004-master/fluid_en/static/fluid_en/Img_T3.jpeg +3 -0
  6. 8004-master/task_sums/static/task_sums/Img_A4.jpeg +3 -0
  7. 8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/blue.jpg +3 -0
  8. 8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/icon.png +3 -0
  9. 8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/logo_small.png +3 -0
  10. 8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/pattern.gif +3 -0
  11. 8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/pattern.palette.png +3 -0
  12. 8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/pattern.png +3 -0
  13. 8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/really-a-svg.png +3 -0
  14. 8210_A1P3-master/Scripts/python.exe +3 -0
  15. 8210_A1P3-master/Scripts/pythonw.exe +3 -0
  16. 8210_A1P3-master/mfscrm/crm/static/images/cupcakes.jpeg +3 -0
  17. 8210_A1P3-master/mfscrm/crm/static/images/customer.jpg +3 -0
  18. 8210_A1P3-master/mfscrm/crm/static/images/lunch.jpeg +3 -0
  19. 8210_A1P3-master/mfscrm/db.sqlite3 +3 -0
  20. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/color_test.png +3 -0
  21. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/color_train.png +3 -0
  22. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_color_test.png +3 -0
  23. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_color_train.png +3 -0
  24. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_color_test.png +3 -0
  25. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_color_train.png +3 -0
  26. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_test.png +3 -0
  27. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_train.png +3 -0
  28. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_test.png +3 -0
  29. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_train.png +3 -0
  30. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_color_test.png +3 -0
  31. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_color_train.png +3 -0
  32. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_test.png +3 -0
  33. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_train.png +3 -0
  34. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/costs/job_data/classifiers.pkl +3 -0
  35. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/job_data/classifiers.pkl +3 -0
  36. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/plots/convergence.png +3 -0
  37. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/plots/densities.png +3 -0
  38. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/plots/samples.png +3 -0
  39. A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/results.png +3 -0
  40. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_bleu.py +244 -0
  41. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_gdfa.py +57 -0
  42. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm1.py +75 -0
  43. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm2.py +87 -0
  44. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm3.py +105 -0
  45. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm4.py +126 -0
  46. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm5.py +166 -0
  47. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm_model.py +270 -0
  48. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_nist.py +37 -0
  49. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_stack_decoder.py +300 -0
  50. A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/utils.py +42 -0
.gitattributes CHANGED
@@ -137,3 +137,6 @@ A-news-Agrregation-system-master/myvenv/bin/python filter=lfs diff=lfs merge=lfs
137
  670proj-master/app/data/checkpoints/model-3200.meta filter=lfs diff=lfs merge=lfs -text
138
  8210_A1P3-master/Lib/site-packages/reportlab/graphics/_renderPM.cp37-win32.pyd filter=lfs diff=lfs merge=lfs -text
139
  A-news-Agrregation-system-master/myvenv/share/python-wheels/chardet-3.0.4-py2.py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
 
 
 
 
137
  670proj-master/app/data/checkpoints/model-3200.meta filter=lfs diff=lfs merge=lfs -text
138
  8210_A1P3-master/Lib/site-packages/reportlab/graphics/_renderPM.cp37-win32.pyd filter=lfs diff=lfs merge=lfs -text
139
  A-news-Agrregation-system-master/myvenv/share/python-wheels/chardet-3.0.4-py2.py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
140
+ 8210_A1P3-master/Scripts/python.exe filter=lfs diff=lfs merge=lfs -text
141
+ 8210_A1P3-master/Scripts/pythonw.exe filter=lfs diff=lfs merge=lfs -text
142
+ 8210_A1P3-master/mfscrm/db.sqlite3 filter=lfs diff=lfs merge=lfs -text
5485-master/demos/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/node_modules/node-pre-gyp/node_modules/semver/semver.min.js.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76f1f843068cdccd6f9ff5bec03751df0527a334be3eae933fc34be4ce726af8
3
+ size 3776
670proj-master/cnn-text-classification/word2vector3.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:934da0dc35ab57d0be240681c85fd2cc9b8f13c0c2d9013f962dac6dbd684a81
3
+ size 39782407
8004-master/fluid_en/static/fluid_en/Img_I5.png ADDED

Git LFS Details

  • SHA256: 722bfb4d4504141160c9b064205b42296df3f2beea0d2cc98536a347a2f7472c
  • Pointer size: 130 Bytes
  • Size of remote file: 78.8 kB
8004-master/fluid_en/static/fluid_en/Img_T3.jpeg ADDED

Git LFS Details

  • SHA256: 699aa234b1b820ae6d823418665940ecf7fedee08900eb4c8708cfcfe2077358
  • Pointer size: 130 Bytes
  • Size of remote file: 68 kB
8004-master/task_sums/static/task_sums/Img_A4.jpeg ADDED

Git LFS Details

  • SHA256: 628d940adb399fbf14181ce879727c73e784d003554d7a5166c1e45e6e247af0
  • Pointer size: 131 Bytes
  • Size of remote file: 233 kB
8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/blue.jpg ADDED

Git LFS Details

  • SHA256: b33e0f980c9cd5ce68b4547b7aca1a1ca3b0c3f3ef0d667d0bddcd0617a504fd
  • Pointer size: 128 Bytes
  • Size of remote file: 289 Bytes
8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/icon.png ADDED

Git LFS Details

  • SHA256: 8a2f5c14ae0b747ba532975147b91c21e06a1ee19710c90c5551e2a6f1452990
  • Pointer size: 128 Bytes
  • Size of remote file: 814 Bytes
8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/logo_small.png ADDED

Git LFS Details

  • SHA256: f05151b9ba74924b7a5342f48c2a4d8859f8b6c1aadd06c0c1c918bb49540824
  • Pointer size: 129 Bytes
  • Size of remote file: 6.52 kB
8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/pattern.gif ADDED

Git LFS Details

  • SHA256: f2296027e2a3f75a93e4034e89322aab412017919f7a13f9cf1bf82c60109ec4
  • Pointer size: 127 Bytes
  • Size of remote file: 37 Bytes
8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/pattern.palette.png ADDED

Git LFS Details

  • SHA256: 07089fe34959111269d0d4f71d4916f61b0295c11516ad104eb2e0170b43d3d5
  • Pointer size: 128 Bytes
  • Size of remote file: 140 Bytes
8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/pattern.png ADDED

Git LFS Details

  • SHA256: 0394782b76c6e6e711119ade9a6794b807435a83751e7bea4939b69966b81139
  • Pointer size: 127 Bytes
  • Size of remote file: 76 Bytes
8210_A1P3-master/Lib/site-packages/weasyprint/tests/resources/really-a-svg.png ADDED

Git LFS Details

  • SHA256: e5d9647dce3fc4c4daefabd352fcf22d4b694bd83afb215744c3129dbbd9e057
  • Pointer size: 128 Bytes
  • Size of remote file: 203 Bytes
8210_A1P3-master/Scripts/python.exe ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61596ea4e41b7a0fe7b629ab66c28273295ee311f6aa8431119525cb06b4f1e3
3
+ size 415760
8210_A1P3-master/Scripts/pythonw.exe ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:623870d10a0f497b0657deebf91ca19efff1a7d5c0342b3385c6f7fd84cb7a8f
3
+ size 414736
8210_A1P3-master/mfscrm/crm/static/images/cupcakes.jpeg ADDED

Git LFS Details

  • SHA256: d965d475435c0d6514369bad1f2d87d2844ef9505968b490a9a61933466f5100
  • Pointer size: 130 Bytes
  • Size of remote file: 97.1 kB
8210_A1P3-master/mfscrm/crm/static/images/customer.jpg ADDED

Git LFS Details

  • SHA256: cac8eac1c86b8df7c7a703fbc10916bae0f2884da78ef6dcafafe4e3b55fcb34
  • Pointer size: 129 Bytes
  • Size of remote file: 7.58 kB
8210_A1P3-master/mfscrm/crm/static/images/lunch.jpeg ADDED

Git LFS Details

  • SHA256: 6ad55d03f0cc519e1705cb2eff7cc498d2a4cbf5826062d0182cbf49228f5a6a
  • Pointer size: 130 Bytes
  • Size of remote file: 61.2 kB
8210_A1P3-master/mfscrm/db.sqlite3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75b8b9f471a44c5c3a4146f278a00aafbd909ef5ab625c5f603caaadd614c820
3
+ size 151552
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/color_test.png ADDED

Git LFS Details

  • SHA256: fd16f4b356cfb2cf34fb7c0b91ff6af208398b341b8c7df2c4f412f924464617
  • Pointer size: 130 Bytes
  • Size of remote file: 86.9 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/color_train.png ADDED

Git LFS Details

  • SHA256: 9a69ce7639683c7dc80bd27ef73ccd23aea99a369400561c92b6ca4b2ddf14d7
  • Pointer size: 130 Bytes
  • Size of remote file: 83 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_color_test.png ADDED

Git LFS Details

  • SHA256: 88c2a4a25edc81539fe80306e28de2b8a01e6bcf44407a598f8e3ddf2bfced62
  • Pointer size: 130 Bytes
  • Size of remote file: 85.8 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_color_train.png ADDED

Git LFS Details

  • SHA256: 30b418c8277d0b72af4fb4dd7d1d99e96855267347631fa9d4a4360561b6d5e3
  • Pointer size: 130 Bytes
  • Size of remote file: 74.6 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_color_test.png ADDED

Git LFS Details

  • SHA256: dbd4a8ee981c652e12da7cc1388421a1371be1548f8d8fe2d700a053efb19fa6
  • Pointer size: 130 Bytes
  • Size of remote file: 88.3 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_color_train.png ADDED

Git LFS Details

  • SHA256: 4b61644a7b2a8ca87482674cf33595a38c78f2622b20dfacd5115f794a67806d
  • Pointer size: 130 Bytes
  • Size of remote file: 79 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_test.png ADDED

Git LFS Details

  • SHA256: 2e62770903df71bc61337763e7a685b259eba944d9416232a737d9da205dfda5
  • Pointer size: 130 Bytes
  • Size of remote file: 87.6 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_spec_train.png ADDED

Git LFS Details

  • SHA256: 9cbf93fa24c83f1f23368f65eafdb548b3fd9058b1f89e62a2d149a8d0b0af89
  • Pointer size: 130 Bytes
  • Size of remote file: 74.9 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_test.png ADDED

Git LFS Details

  • SHA256: dcc0717c8fe73a87e073a18ef03f9ffa41abccc95705d10c8523dc6d851f575c
  • Pointer size: 130 Bytes
  • Size of remote file: 83.9 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/photo_train.png ADDED

Git LFS Details

  • SHA256: 4bd891a4f8797e61c32c6e04b5bf855c54e22fe44d4379f7164ff18e7853e4ac
  • Pointer size: 130 Bytes
  • Size of remote file: 73.2 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_color_test.png ADDED

Git LFS Details

  • SHA256: 8b244ddd775b351c3a1518321e0edb0ab29d8dff9a50ec6cbdcfbcc7c6ccca67
  • Pointer size: 130 Bytes
  • Size of remote file: 89.2 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_color_train.png ADDED

Git LFS Details

  • SHA256: 8f5b49c9c8cf95605332d6a0bece6576a50e0696eb7ff0489053222c9e969fea
  • Pointer size: 130 Bytes
  • Size of remote file: 80.1 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_test.png ADDED

Git LFS Details

  • SHA256: 05ecf23387eaf8a20dc94e78e007866fad9e8645ce740615d3b736fdb8a10d99
  • Pointer size: 130 Bytes
  • Size of remote file: 86 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rf/plots/spec_train.png ADDED

Git LFS Details

  • SHA256: df106a863ba944473c857bcbf174adeed77e68e87491545b5dd0ebd7f05dd910
  • Pointer size: 130 Bytes
  • Size of remote file: 83.1 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/costs/job_data/classifiers.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78a6b6ff05ce846c2554cb583c7266e80bf9a64fc7846d26816a7a48a61a36cf
3
+ size 6801472
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/job_data/classifiers.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78a6b6ff05ce846c2554cb583c7266e80bf9a64fc7846d26816a7a48a61a36cf
3
+ size 6801472
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/plots/convergence.png ADDED

Git LFS Details

  • SHA256: 077a46dfc5427fde7defc45afe572aa28123d1a7149592ce1a42d6a1dbffdffc
  • Pointer size: 130 Bytes
  • Size of remote file: 97.8 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/plots/densities.png ADDED

Git LFS Details

  • SHA256: c151f2b75b9683f18d2286cbc378bcc172d6d4d7fec4673861dd073d9f47960e
  • Pointer size: 130 Bytes
  • Size of remote file: 76.2 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/plots/samples.png ADDED

Git LFS Details

  • SHA256: 02b95b6672481c9eacb3a5de321b4c6ef95ba4a23cf3101732d21a267f1cc889
  • Pointer size: 131 Bytes
  • Size of remote file: 220 kB
A-Reinforcement-Learning-based-Follow-Up-Framework-master/src/models/rl/player/results.png ADDED

Git LFS Details

  • SHA256: c740f0b46b839c64ad9190ab936fe7dc2256013f6665dfa877523410954dd718
  • Pointer size: 131 Bytes
  • Size of remote file: 103 kB
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_bleu.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for BLEU translation evaluation metric
4
+ """
5
+
6
+ import functools
7
+ import io
8
+ import unittest
9
+
10
+ from nltk.data import find
11
+ from nltk.translate.bleu_score import modified_precision, brevity_penalty, closest_ref_length
12
+ from nltk.translate.bleu_score import sentence_bleu, corpus_bleu, SmoothingFunction
13
+
14
+
15
+ class TestBLEU(unittest.TestCase):
16
+ def test_modified_precision(self):
17
+ """
18
+ Examples from the original BLEU paper
19
+ http://www.aclweb.org/anthology/P02-1040.pdf
20
+ """
21
+ # Example 1: the "the*" example.
22
+ # Reference sentences.
23
+ ref1 = 'the cat is on the mat'.split()
24
+ ref2 = 'there is a cat on the mat'.split()
25
+ # Hypothesis sentence(s).
26
+ hyp1 = 'the the the the the the the'.split()
27
+
28
+ references = [ref1, ref2]
29
+
30
+ # Testing modified unigram precision.
31
+ hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1))
32
+ assert (round(hyp1_unigram_precision, 4) == 0.2857)
33
+ # With assertAlmostEqual at 4 place precision.
34
+ self.assertAlmostEqual(hyp1_unigram_precision, 0.28571428, places=4)
35
+
36
+ # Testing modified bigram precision.
37
+ assert(float(modified_precision(references, hyp1, n=2)) == 0.0)
38
+
39
+
40
+ # Example 2: the "of the" example.
41
+ # Reference sentences
42
+ ref1 = str('It is a guide to action that ensures that the military '
43
+ 'will forever heed Party commands').split()
44
+ ref2 = str('It is the guiding principle which guarantees the military '
45
+ 'forces always being under the command of the Party').split()
46
+ ref3 = str('It is the practical guide for the army always to heed '
47
+ 'the directions of the party').split()
48
+ # Hypothesis sentence(s).
49
+ hyp1 = 'of the'.split()
50
+
51
+ references = [ref1, ref2, ref3]
52
+ # Testing modified unigram precision.
53
+ assert (float(modified_precision(references, hyp1, n=1)) == 1.0)
54
+
55
+ # Testing modified bigram precision.
56
+ assert(float(modified_precision(references, hyp1, n=2)) == 1.0)
57
+
58
+
59
+ # Example 3: Proper MT outputs.
60
+ hyp1 = str('It is a guide to action which ensures that the military '
61
+ 'always obeys the commands of the party').split()
62
+ hyp2 = str('It is to insure the troops forever hearing the activity '
63
+ 'guidebook that party direct').split()
64
+
65
+ references = [ref1, ref2, ref3]
66
+
67
+ # Unigram precision.
68
+ hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1))
69
+ hyp2_unigram_precision = float(modified_precision(references, hyp2, n=1))
70
+ # Test unigram precision with assertAlmostEqual at 4 place precision.
71
+ self.assertAlmostEqual(hyp1_unigram_precision, 0.94444444, places=4)
72
+ self.assertAlmostEqual(hyp2_unigram_precision, 0.57142857, places=4)
73
+ # Test unigram precision with rounding.
74
+ assert (round(hyp1_unigram_precision, 4) == 0.9444)
75
+ assert (round(hyp2_unigram_precision, 4) == 0.5714)
76
+
77
+ # Bigram precision
78
+ hyp1_bigram_precision = float(modified_precision(references, hyp1, n=2))
79
+ hyp2_bigram_precision = float(modified_precision(references, hyp2, n=2))
80
+ # Test bigram precision with assertAlmostEqual at 4 place precision.
81
+ self.assertAlmostEqual(hyp1_bigram_precision, 0.58823529, places=4)
82
+ self.assertAlmostEqual(hyp2_bigram_precision, 0.07692307, places=4)
83
+ # Test bigram precision with rounding.
84
+ assert (round(hyp1_bigram_precision, 4) == 0.5882)
85
+ assert (round(hyp2_bigram_precision, 4) == 0.0769)
86
+
87
+ def test_brevity_penalty(self):
88
+ # Test case from brevity_penalty_closest function in mteval-v13a.pl.
89
+ # Same test cases as in the doctest in nltk.translate.bleu_score.py
90
+ references = [['a'] * 11, ['a'] * 8]
91
+ hypothesis = ['a'] * 7
92
+ hyp_len = len(hypothesis)
93
+ closest_ref_len = closest_ref_length(references, hyp_len)
94
+ self.assertAlmostEqual(brevity_penalty(closest_ref_len, hyp_len), 0.8669, places=4)
95
+
96
+ references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
97
+ hypothesis = ['a'] * 7
98
+ hyp_len = len(hypothesis)
99
+ closest_ref_len = closest_ref_length(references, hyp_len)
100
+ assert brevity_penalty(closest_ref_len, hyp_len) == 1.0
101
+
102
+ def test_zero_matches(self):
103
+ # Test case where there's 0 matches
104
+ references = ['The candidate has no alignment to any of the references'.split()]
105
+ hypothesis = 'John loves Mary'.split()
106
+
107
+ # Test BLEU to nth order of n-grams, where n is len(hypothesis).
108
+ for n in range(1,len(hypothesis)):
109
+ weights = [1.0/n] * n # Uniform weights.
110
+ assert(sentence_bleu(references, hypothesis, weights) == 0)
111
+
112
+ def test_full_matches(self):
113
+ # Test case where there's 100% matches
114
+ references = ['John loves Mary'.split()]
115
+ hypothesis = 'John loves Mary'.split()
116
+
117
+ # Test BLEU to nth order of n-grams, where n is len(hypothesis).
118
+ for n in range(1,len(hypothesis)):
119
+ weights = [1.0/n] * n # Uniform weights.
120
+ assert(sentence_bleu(references, hypothesis, weights) == 1.0)
121
+
122
+ def test_partial_matches_hypothesis_longer_than_reference(self):
123
+ references = ['John loves Mary'.split()]
124
+ hypothesis = 'John loves Mary who loves Mike'.split()
125
+ # Since no 4-grams matches were found the result should be zero
126
+ # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
127
+ self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4)
128
+ # Checks that the warning has been raised because len(reference) < 4.
129
+ try:
130
+ self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
131
+ except AttributeError:
132
+ pass # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
133
+
134
+
135
+ #@unittest.skip("Skipping fringe cases for BLEU.")
136
+ class TestBLEUFringeCases(unittest.TestCase):
137
+
138
+ def test_case_where_n_is_bigger_than_hypothesis_length(self):
139
+ # Test BLEU to nth order of n-grams, where n > len(hypothesis).
140
+ references = ['John loves Mary ?'.split()]
141
+ hypothesis = 'John loves Mary'.split()
142
+ n = len(hypothesis) + 1 #
143
+ weights = [1.0/n] * n # Uniform weights.
144
+ # Since no n-grams matches were found the result should be zero
145
+ # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
146
+ self.assertAlmostEqual(sentence_bleu(references, hypothesis, weights), 0.0, places=4)
147
+ # Checks that the warning has been raised because len(hypothesis) < 4.
148
+ try:
149
+ self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
150
+ except AttributeError:
151
+ pass # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
152
+
153
+ # Test case where n > len(hypothesis) but so is n > len(reference), and
154
+ # it's a special case where reference == hypothesis.
155
+ references = ['John loves Mary'.split()]
156
+ hypothesis = 'John loves Mary'.split()
157
+ # Since no 4-grams matches were found the result should be zero
158
+ # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
159
+ self.assertAlmostEqual(sentence_bleu(references, hypothesis, weights), 0.0, places=4)
160
+
161
+ def test_empty_hypothesis(self):
162
+ # Test case where there's hypothesis is empty.
163
+ references = ['The candidate has no alignment to any of the references'.split()]
164
+ hypothesis = []
165
+ assert(sentence_bleu(references, hypothesis) == 0)
166
+
167
+ def test_empty_references(self):
168
+ # Test case where there's reference is empty.
169
+ references = [[]]
170
+ hypothesis = 'John loves Mary'.split()
171
+ assert(sentence_bleu(references, hypothesis) == 0)
172
+
173
+ def test_empty_references_and_hypothesis(self):
174
+ # Test case where both references and hypothesis is empty.
175
+ references = [[]]
176
+ hypothesis = []
177
+ assert(sentence_bleu(references, hypothesis) == 0)
178
+
179
+ def test_reference_or_hypothesis_shorter_than_fourgrams(self):
180
+ # Tese case where the length of reference or hypothesis
181
+ # is shorter than 4.
182
+ references = ['let it go'.split()]
183
+ hypothesis = 'let go it'.split()
184
+ # Checks that the value the hypothesis and reference returns is 0.0
185
+ # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
186
+ self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4)
187
+ # Checks that the warning has been raised.
188
+ try:
189
+ self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
190
+ except AttributeError:
191
+ pass # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
192
+
193
+ class TestBLEUvsMteval13a(unittest.TestCase):
194
+
195
+ def test_corpus_bleu(self):
196
+ ref_file = find('models/wmt15_eval/ref.ru')
197
+ hyp_file = find('models/wmt15_eval/google.ru')
198
+ mteval_output_file = find('models/wmt15_eval/mteval-13a.output')
199
+
200
+ # Reads the BLEU scores from the `mteval-13a.output` file.
201
+ # The order of the list corresponds to the order of the ngrams.
202
+ with open(mteval_output_file, 'r') as mteval_fin:
203
+ # The numbers are located in the last 2nd line of the file.
204
+ # The first and 2nd item in the list are the score and system names.
205
+ mteval_bleu_scores = map(float, mteval_fin.readlines()[-2].split()[1:-1])
206
+
207
+ with io.open(ref_file, 'r', encoding='utf8') as ref_fin:
208
+ with io.open(hyp_file, 'r', encoding='utf8') as hyp_fin:
209
+ # Whitespace tokenize the file.
210
+ # Note: split() automatically strip().
211
+ hypothesis = list(map(lambda x: x.split(), hyp_fin))
212
+ # Note that the corpus_bleu input is list of list of references.
213
+ references = list(map(lambda x: [x.split()], ref_fin))
214
+ # Without smoothing.
215
+ for i, mteval_bleu in zip(range(1,10), mteval_bleu_scores):
216
+ nltk_bleu = corpus_bleu(references, hypothesis, weights=(1.0/i,)*i)
217
+ # Check that the BLEU scores difference is less than 0.005 .
218
+ # Note: This is an approximate comparison; as much as
219
+ # +/- 0.01 BLEU might be "statistically significant",
220
+ # the actual translation quality might not be.
221
+ assert abs(mteval_bleu - nltk_bleu) < 0.005
222
+
223
+ # With the same smoothing method used in mteval-v13a.pl
224
+ chencherry = SmoothingFunction()
225
+ for i, mteval_bleu in zip(range(1,10), mteval_bleu_scores):
226
+ nltk_bleu = corpus_bleu(references, hypothesis,
227
+ weights=(1.0/i,)*i,
228
+ smoothing_function=chencherry.method3)
229
+ assert abs(mteval_bleu - nltk_bleu) < 0.005
230
+
231
+ class TestBLEUWithBadSentence(unittest.TestCase):
232
+ def test_corpus_bleu_with_bad_sentence(self):
233
+ hyp = "Teo S yb , oe uNb , R , T t , , t Tue Ar saln S , , 5istsi l , 5oe R ulO sae oR R"
234
+ ref = str("Their tasks include changing a pump on the faulty stokehold ."
235
+ "Likewise , two species that are very similar in morphology "
236
+ "were distinguished using genetics .")
237
+ references = [[ref.split()]]
238
+ hypotheses = [hyp.split()]
239
+ try: # Check that the warning is raised since no. of 2-grams < 0.
240
+ with self.assertWarns(UserWarning):
241
+ # Verify that the BLEU output is undesired since no. of 2-grams < 0.
242
+ self.assertAlmostEqual(corpus_bleu(references, hypotheses), 0.0, places=4)
243
+ except AttributeError: # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
244
+ self.assertAlmostEqual(corpus_bleu(references, hypotheses), 0.0, places=4)
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_gdfa.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests GDFA alignments
4
+ """
5
+
6
+ import functools
7
+ import io
8
+ import unittest
9
+
10
+ from nltk.translate.gdfa import grow_diag_final_and
11
+
12
+ class TestGDFA(unittest.TestCase):
13
+ def test_from_eflomal_outputs(self):
14
+ """
15
+ Testing GDFA with first 10 eflomal outputs from issue #1829
16
+ https://github.com/nltk/nltk/issues/1829
17
+ """
18
+ # Input.
19
+ forwards = ['0-0 1-2',
20
+ '0-0 1-1',
21
+ '0-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 7-8 9-9 10-10 9-11 11-12 12-13 13-14',
22
+ '0-0 1-1 1-2 2-3 3-4 4-5 4-6 5-7 6-8 8-9 9-10',
23
+ '0-0 14-1 15-2 16-3 20-5 21-6 22-7 5-8 6-9 7-10 8-11 9-12 10-13 11-14 12-15 13-16 14-17 17-18 18-19 19-20 20-21 23-22 24-23 25-24 26-25 27-27 28-28 29-29 30-30 31-31',
24
+ '0-0 1-1 0-2 2-3',
25
+ '0-0 2-2 4-4',
26
+ '0-0 1-1 2-3 3-4 5-5 7-6 8-7 9-8 10-9 11-10 12-11 13-12 14-13 15-14 16-16 17-17 18-18 19-19 20-20',
27
+ '3-0 4-1 6-2 5-3 6-4 7-5 8-6 9-7 10-8 11-9 16-10 9-12 10-13 12-14',
28
+ '1-0']
29
+ backwards = ['0-0 1-2',
30
+ '0-0 1-1',
31
+ '0-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 9-8 10-10 11-12 12-11 13-13',
32
+ '0-0 1-2 2-3 3-4 4-6 6-8 7-5 8-7 9-8',
33
+ '0-0 1-8 2-9 3-10 4-11 5-12 6-11 8-13 9-14 10-15 11-16 12-17 13-18 14-19 15-20 16-21 17-22 18-23 19-24 20-29 21-30 22-31 23-2 24-3 25-4 26-5 27-5 28-6 29-7 30-28 31-31',
34
+ '0-0 1-1 2-3',
35
+ '0-0 1-1 2-3 4-4',
36
+ '0-0 1-1 2-3 3-4 5-5 7-6 8-7 9-8 10-9 11-10 12-11 13-12 14-13 15-14 16-16 17-17 18-18 19-19 20-16 21-18',
37
+ '0-0 1-1 3-2 4-1 5-3 6-4 7-5 8-6 9-7 10-8 11-9 12-8 13-9 14-8 15-9 16-10',
38
+ '1-0']
39
+ source_lens = [2, 3, 3, 15, 11, 33, 4, 6, 23, 18]
40
+ target_lens = [2, 4, 3, 16, 12, 33, 5, 6, 22, 16]
41
+ # Expected Output.
42
+ expected = [ [(0, 0), (1, 2)],
43
+ [(0, 0), (1, 1)],
44
+ [(0, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (10, 10), (11, 12)],
45
+ [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4), (4, 5), (4, 6), (5, 7), (6, 8), (7, 5), (8, 7), (8, 9), (9, 8), (9, 10)],
46
+ [(0, 0), (1, 8), (2, 9), (3, 10), (4, 11), (5, 8), (6, 9), (6, 11), (7, 10), (8, 11), (31, 31)],
47
+ [(0, 0), (0, 2), (1, 1), (2, 3)],
48
+ [(0, 0), (1, 1), (2, 2), (2, 3), (4, 4)],
49
+ [(0, 0), (1, 1), (2, 3), (3, 4), (5, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (16, 16), (17, 17), (18, 18), (19, 19)],
50
+ [(0, 0), (1, 1), (3, 0), (3, 2), (4, 1), (5, 3), (6, 2), (6, 4), (7, 5), (8, 6), (9, 7), (9, 12), (10, 8), (10, 13), (11, 9), (12, 8), (12, 14), (13, 9), (14, 8), (15, 9), (16, 10)],
51
+ [(1, 0)],
52
+ [(0, 0), (1, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (9, 10), (10, 12), (11, 13), (12, 14), (13, 15)],
53
+ ]
54
+
55
+ # Iterate through all 10 examples and check for expected outputs.
56
+ for fw, bw, src_len, trg_len, expect in zip(forwards, backwards, source_lens, target_lens, expected):
57
+ self.assertListEqual(expect, grow_diag_final_and(src_len, trg_len, fw, bw))
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm1.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for IBM Model 1 training methods
4
+ """
5
+
6
+ import unittest
7
+
8
+ from collections import defaultdict
9
+ from nltk.translate import AlignedSent
10
+ from nltk.translate import IBMModel
11
+ from nltk.translate import IBMModel1
12
+ from nltk.translate.ibm_model import AlignmentInfo
13
+
14
+
15
+ class TestIBMModel1(unittest.TestCase):
16
+ def test_set_uniform_translation_probabilities(self):
17
+ # arrange
18
+ corpus = [
19
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
20
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
21
+ ]
22
+ model1 = IBMModel1(corpus, 0)
23
+
24
+ # act
25
+ model1.set_uniform_probabilities(corpus)
26
+
27
+ # assert
28
+ # expected_prob = 1.0 / (target vocab size + 1)
29
+ self.assertEqual(model1.translation_table['ham']['eier'], 1.0 / 3)
30
+ self.assertEqual(model1.translation_table['eggs'][None], 1.0 / 3)
31
+
32
+ def test_set_uniform_translation_probabilities_of_non_domain_values(self):
33
+ # arrange
34
+ corpus = [
35
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
36
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
37
+ ]
38
+ model1 = IBMModel1(corpus, 0)
39
+
40
+ # act
41
+ model1.set_uniform_probabilities(corpus)
42
+
43
+ # assert
44
+ # examine target words that are not in the training data domain
45
+ self.assertEqual(model1.translation_table['parrot']['eier'],
46
+ IBMModel.MIN_PROB)
47
+
48
+ def test_prob_t_a_given_s(self):
49
+ # arrange
50
+ src_sentence = ["ich", 'esse', 'ja', 'gern', 'räucherschinken']
51
+ trg_sentence = ['i', 'love', 'to', 'eat', 'smoked', 'ham']
52
+ corpus = [AlignedSent(trg_sentence, src_sentence)]
53
+ alignment_info = AlignmentInfo((0, 1, 4, 0, 2, 5, 5),
54
+ [None] + src_sentence,
55
+ ['UNUSED'] + trg_sentence,
56
+ None)
57
+
58
+ translation_table = defaultdict(lambda: defaultdict(float))
59
+ translation_table['i']['ich'] = 0.98
60
+ translation_table['love']['gern'] = 0.98
61
+ translation_table['to'][None] = 0.98
62
+ translation_table['eat']['esse'] = 0.98
63
+ translation_table['smoked']['räucherschinken'] = 0.98
64
+ translation_table['ham']['räucherschinken'] = 0.98
65
+
66
+ model1 = IBMModel1(corpus, 0)
67
+ model1.translation_table = translation_table
68
+
69
+ # act
70
+ probability = model1.prob_t_a_given_s(alignment_info)
71
+
72
+ # assert
73
+ lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
74
+ expected_probability = lexical_translation
75
+ self.assertEqual(round(probability, 4), round(expected_probability, 4))
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm2.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for IBM Model 2 training methods
4
+ """
5
+
6
+ import unittest
7
+
8
+ from collections import defaultdict
9
+ from nltk.translate import AlignedSent
10
+ from nltk.translate import IBMModel
11
+ from nltk.translate import IBMModel2
12
+ from nltk.translate.ibm_model import AlignmentInfo
13
+
14
+
15
+ class TestIBMModel2(unittest.TestCase):
16
+ def test_set_uniform_alignment_probabilities(self):
17
+ # arrange
18
+ corpus = [
19
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
20
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
21
+ ]
22
+ model2 = IBMModel2(corpus, 0)
23
+
24
+ # act
25
+ model2.set_uniform_probabilities(corpus)
26
+
27
+ # assert
28
+ # expected_prob = 1.0 / (length of source sentence + 1)
29
+ self.assertEqual(model2.alignment_table[0][1][3][2], 1.0 / 4)
30
+ self.assertEqual(model2.alignment_table[2][4][2][4], 1.0 / 3)
31
+
32
+ def test_set_uniform_alignment_probabilities_of_non_domain_values(self):
33
+ # arrange
34
+ corpus = [
35
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
36
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
37
+ ]
38
+ model2 = IBMModel2(corpus, 0)
39
+
40
+ # act
41
+ model2.set_uniform_probabilities(corpus)
42
+
43
+ # assert
44
+ # examine i and j values that are not in the training data domain
45
+ self.assertEqual(model2.alignment_table[99][1][3][2], IBMModel.MIN_PROB)
46
+ self.assertEqual(model2.alignment_table[2][99][2][4], IBMModel.MIN_PROB)
47
+
48
+ def test_prob_t_a_given_s(self):
49
+ # arrange
50
+ src_sentence = ["ich", 'esse', 'ja', 'gern', 'räucherschinken']
51
+ trg_sentence = ['i', 'love', 'to', 'eat', 'smoked', 'ham']
52
+ corpus = [AlignedSent(trg_sentence, src_sentence)]
53
+ alignment_info = AlignmentInfo((0, 1, 4, 0, 2, 5, 5),
54
+ [None] + src_sentence,
55
+ ['UNUSED'] + trg_sentence,
56
+ None)
57
+
58
+ translation_table = defaultdict(lambda: defaultdict(float))
59
+ translation_table['i']['ich'] = 0.98
60
+ translation_table['love']['gern'] = 0.98
61
+ translation_table['to'][None] = 0.98
62
+ translation_table['eat']['esse'] = 0.98
63
+ translation_table['smoked']['räucherschinken'] = 0.98
64
+ translation_table['ham']['räucherschinken'] = 0.98
65
+
66
+ alignment_table = defaultdict(
67
+ lambda: defaultdict(lambda: defaultdict(
68
+ lambda: defaultdict(float))))
69
+ alignment_table[0][3][5][6] = 0.97 # None -> to
70
+ alignment_table[1][1][5][6] = 0.97 # ich -> i
71
+ alignment_table[2][4][5][6] = 0.97 # esse -> eat
72
+ alignment_table[4][2][5][6] = 0.97 # gern -> love
73
+ alignment_table[5][5][5][6] = 0.96 # räucherschinken -> smoked
74
+ alignment_table[5][6][5][6] = 0.96 # räucherschinken -> ham
75
+
76
+ model2 = IBMModel2(corpus, 0)
77
+ model2.translation_table = translation_table
78
+ model2.alignment_table = alignment_table
79
+
80
+ # act
81
+ probability = model2.prob_t_a_given_s(alignment_info)
82
+
83
+ # assert
84
+ lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
85
+ alignment = 0.97 * 0.97 * 0.97 * 0.97 * 0.96 * 0.96
86
+ expected_probability = lexical_translation * alignment
87
+ self.assertEqual(round(probability, 4), round(expected_probability, 4))
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm3.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for IBM Model 3 training methods
4
+ """
5
+
6
+ import unittest
7
+
8
+ from collections import defaultdict
9
+ from nltk.translate import AlignedSent
10
+ from nltk.translate import IBMModel
11
+ from nltk.translate import IBMModel3
12
+ from nltk.translate.ibm_model import AlignmentInfo
13
+
14
+
15
+ class TestIBMModel3(unittest.TestCase):
16
+ def test_set_uniform_distortion_probabilities(self):
17
+ # arrange
18
+ corpus = [
19
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
20
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
21
+ ]
22
+ model3 = IBMModel3(corpus, 0)
23
+
24
+ # act
25
+ model3.set_uniform_probabilities(corpus)
26
+
27
+ # assert
28
+ # expected_prob = 1.0 / length of target sentence
29
+ self.assertEqual(model3.distortion_table[1][0][3][2], 1.0 / 2)
30
+ self.assertEqual(model3.distortion_table[4][2][2][4], 1.0 / 4)
31
+
32
+ def test_set_uniform_distortion_probabilities_of_non_domain_values(self):
33
+ # arrange
34
+ corpus = [
35
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
36
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
37
+ ]
38
+ model3 = IBMModel3(corpus, 0)
39
+
40
+ # act
41
+ model3.set_uniform_probabilities(corpus)
42
+
43
+ # assert
44
+ # examine i and j values that are not in the training data domain
45
+ self.assertEqual(model3.distortion_table[0][0][3][2], IBMModel.MIN_PROB)
46
+ self.assertEqual(model3.distortion_table[9][2][2][4], IBMModel.MIN_PROB)
47
+ self.assertEqual(model3.distortion_table[2][9][2][4], IBMModel.MIN_PROB)
48
+
49
+ def test_prob_t_a_given_s(self):
50
+ # arrange
51
+ src_sentence = ["ich", 'esse', 'ja', 'gern', 'räucherschinken']
52
+ trg_sentence = ['i', 'love', 'to', 'eat', 'smoked', 'ham']
53
+ corpus = [AlignedSent(trg_sentence, src_sentence)]
54
+ alignment_info = AlignmentInfo((0, 1, 4, 0, 2, 5, 5),
55
+ [None] + src_sentence,
56
+ ['UNUSED'] + trg_sentence,
57
+ [[3], [1], [4], [], [2], [5, 6]])
58
+
59
+ distortion_table = defaultdict(
60
+ lambda: defaultdict(lambda: defaultdict(
61
+ lambda: defaultdict(float))))
62
+ distortion_table[1][1][5][6] = 0.97 # i -> ich
63
+ distortion_table[2][4][5][6] = 0.97 # love -> gern
64
+ distortion_table[3][0][5][6] = 0.97 # to -> NULL
65
+ distortion_table[4][2][5][6] = 0.97 # eat -> esse
66
+ distortion_table[5][5][5][6] = 0.97 # smoked -> räucherschinken
67
+ distortion_table[6][5][5][6] = 0.97 # ham -> räucherschinken
68
+
69
+ translation_table = defaultdict(lambda: defaultdict(float))
70
+ translation_table['i']['ich'] = 0.98
71
+ translation_table['love']['gern'] = 0.98
72
+ translation_table['to'][None] = 0.98
73
+ translation_table['eat']['esse'] = 0.98
74
+ translation_table['smoked']['räucherschinken'] = 0.98
75
+ translation_table['ham']['räucherschinken'] = 0.98
76
+
77
+ fertility_table = defaultdict(lambda: defaultdict(float))
78
+ fertility_table[1]['ich'] = 0.99
79
+ fertility_table[1]['esse'] = 0.99
80
+ fertility_table[0]['ja'] = 0.99
81
+ fertility_table[1]['gern'] = 0.99
82
+ fertility_table[2]['räucherschinken'] = 0.999
83
+ fertility_table[1][None] = 0.99
84
+
85
+ probabilities = {
86
+ 'p1': 0.167,
87
+ 'translation_table': translation_table,
88
+ 'distortion_table': distortion_table,
89
+ 'fertility_table': fertility_table,
90
+ 'alignment_table': None
91
+ }
92
+
93
+ model3 = IBMModel3(corpus, 0, probabilities)
94
+
95
+ # act
96
+ probability = model3.prob_t_a_given_s(alignment_info)
97
+
98
+ # assert
99
+ null_generation = 5 * pow(0.167, 1) * pow(0.833, 4)
100
+ fertility = 1*0.99 * 1*0.99 * 1*0.99 * 1*0.99 * 2*0.999
101
+ lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
102
+ distortion = 0.97 * 0.97 * 0.97 * 0.97 * 0.97 * 0.97
103
+ expected_probability = (null_generation * fertility *
104
+ lexical_translation * distortion)
105
+ self.assertEqual(round(probability, 4), round(expected_probability, 4))
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm4.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for IBM Model 4 training methods
4
+ """
5
+
6
+ import unittest
7
+
8
+ from collections import defaultdict
9
+ from nltk.translate import AlignedSent
10
+ from nltk.translate import IBMModel
11
+ from nltk.translate import IBMModel4
12
+ from nltk.translate.ibm_model import AlignmentInfo
13
+
14
+
15
+ class TestIBMModel4(unittest.TestCase):
16
+ def test_set_uniform_distortion_probabilities_of_max_displacements(self):
17
+ # arrange
18
+ src_classes = {'schinken': 0, 'eier': 0, 'spam': 1}
19
+ trg_classes = {'ham': 0, 'eggs': 1, 'spam': 2}
20
+ corpus = [
21
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
22
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
23
+ ]
24
+ model4 = IBMModel4(corpus, 0, src_classes, trg_classes)
25
+
26
+ # act
27
+ model4.set_uniform_probabilities(corpus)
28
+
29
+ # assert
30
+ # number of displacement values =
31
+ # 2 *(number of words in longest target sentence - 1)
32
+ expected_prob = 1.0 / (2 * (4 - 1))
33
+
34
+ # examine the boundary values for (displacement, src_class, trg_class)
35
+ self.assertEqual(model4.head_distortion_table[3][0][0], expected_prob)
36
+ self.assertEqual(model4.head_distortion_table[-3][1][2], expected_prob)
37
+ self.assertEqual(model4.non_head_distortion_table[3][0], expected_prob)
38
+ self.assertEqual(model4.non_head_distortion_table[-3][2], expected_prob)
39
+
40
+ def test_set_uniform_distortion_probabilities_of_non_domain_values(self):
41
+ # arrange
42
+ src_classes = {'schinken': 0, 'eier': 0, 'spam': 1}
43
+ trg_classes = {'ham': 0, 'eggs': 1, 'spam': 2}
44
+ corpus = [
45
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
46
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
47
+ ]
48
+ model4 = IBMModel4(corpus, 0, src_classes, trg_classes)
49
+
50
+ # act
51
+ model4.set_uniform_probabilities(corpus)
52
+
53
+ # assert
54
+ # examine displacement values that are not in the training data domain
55
+ self.assertEqual(model4.head_distortion_table[4][0][0],
56
+ IBMModel.MIN_PROB)
57
+ self.assertEqual(model4.head_distortion_table[100][1][2],
58
+ IBMModel.MIN_PROB)
59
+ self.assertEqual(model4.non_head_distortion_table[4][0],
60
+ IBMModel.MIN_PROB)
61
+ self.assertEqual(model4.non_head_distortion_table[100][2],
62
+ IBMModel.MIN_PROB)
63
+
64
+ def test_prob_t_a_given_s(self):
65
+ # arrange
66
+ src_sentence = ["ich", 'esse', 'ja', 'gern', 'räucherschinken']
67
+ trg_sentence = ['i', 'love', 'to', 'eat', 'smoked', 'ham']
68
+ src_classes = {'räucherschinken': 0, 'ja': 1, 'ich': 2, 'esse': 3,
69
+ 'gern': 4}
70
+ trg_classes = {'ham': 0, 'smoked': 1, 'i': 3, 'love': 4, 'to': 2,
71
+ 'eat': 4}
72
+ corpus = [AlignedSent(trg_sentence, src_sentence)]
73
+ alignment_info = AlignmentInfo((0, 1, 4, 0, 2, 5, 5),
74
+ [None] + src_sentence,
75
+ ['UNUSED'] + trg_sentence,
76
+ [[3], [1], [4], [], [2], [5, 6]])
77
+
78
+ head_distortion_table = defaultdict(
79
+ lambda: defaultdict(lambda: defaultdict(float)))
80
+ head_distortion_table[1][None][3] = 0.97 # None, i
81
+ head_distortion_table[3][2][4] = 0.97 # ich, eat
82
+ head_distortion_table[-2][3][4] = 0.97 # esse, love
83
+ head_distortion_table[3][4][1] = 0.97 # gern, smoked
84
+
85
+ non_head_distortion_table = defaultdict(lambda: defaultdict(float))
86
+ non_head_distortion_table[1][0] = 0.96 # ham
87
+
88
+ translation_table = defaultdict(lambda: defaultdict(float))
89
+ translation_table['i']['ich'] = 0.98
90
+ translation_table['love']['gern'] = 0.98
91
+ translation_table['to'][None] = 0.98
92
+ translation_table['eat']['esse'] = 0.98
93
+ translation_table['smoked']['räucherschinken'] = 0.98
94
+ translation_table['ham']['räucherschinken'] = 0.98
95
+
96
+ fertility_table = defaultdict(lambda: defaultdict(float))
97
+ fertility_table[1]['ich'] = 0.99
98
+ fertility_table[1]['esse'] = 0.99
99
+ fertility_table[0]['ja'] = 0.99
100
+ fertility_table[1]['gern'] = 0.99
101
+ fertility_table[2]['räucherschinken'] = 0.999
102
+ fertility_table[1][None] = 0.99
103
+
104
+ probabilities = {
105
+ 'p1': 0.167,
106
+ 'translation_table': translation_table,
107
+ 'head_distortion_table': head_distortion_table,
108
+ 'non_head_distortion_table': non_head_distortion_table,
109
+ 'fertility_table': fertility_table,
110
+ 'alignment_table': None
111
+ }
112
+
113
+ model4 = IBMModel4(corpus, 0, src_classes, trg_classes,
114
+ probabilities)
115
+
116
+ # act
117
+ probability = model4.prob_t_a_given_s(alignment_info)
118
+
119
+ # assert
120
+ null_generation = 5 * pow(0.167, 1) * pow(0.833, 4)
121
+ fertility = 1*0.99 * 1*0.99 * 1*0.99 * 1*0.99 * 2*0.999
122
+ lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
123
+ distortion = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96
124
+ expected_probability = (null_generation * fertility *
125
+ lexical_translation * distortion)
126
+ self.assertEqual(round(probability, 4), round(expected_probability, 4))
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm5.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for IBM Model 5 training methods
4
+ """
5
+
6
+ import unittest
7
+
8
+ from collections import defaultdict
9
+ from nltk.translate import AlignedSent
10
+ from nltk.translate import IBMModel
11
+ from nltk.translate import IBMModel4
12
+ from nltk.translate import IBMModel5
13
+ from nltk.translate.ibm_model import AlignmentInfo
14
+
15
+
16
+ class TestIBMModel5(unittest.TestCase):
17
+ def test_set_uniform_vacancy_probabilities_of_max_displacements(self):
18
+ # arrange
19
+ src_classes = {'schinken': 0, 'eier': 0, 'spam': 1}
20
+ trg_classes = {'ham': 0, 'eggs': 1, 'spam': 2}
21
+ corpus = [
22
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
23
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
24
+ ]
25
+ model5 = IBMModel5(corpus, 0, src_classes, trg_classes)
26
+
27
+ # act
28
+ model5.set_uniform_probabilities(corpus)
29
+
30
+ # assert
31
+ # number of vacancy difference values =
32
+ # 2 * number of words in longest target sentence
33
+ expected_prob = 1.0 / (2 * 4)
34
+
35
+ # examine the boundary values for (dv, max_v, trg_class)
36
+ self.assertEqual(model5.head_vacancy_table[4][4][0], expected_prob)
37
+ self.assertEqual(model5.head_vacancy_table[-3][1][2], expected_prob)
38
+ self.assertEqual(model5.non_head_vacancy_table[4][4][0], expected_prob)
39
+ self.assertEqual(model5.non_head_vacancy_table[-3][1][2], expected_prob)
40
+
41
+ def test_set_uniform_vacancy_probabilities_of_non_domain_values(self):
42
+ # arrange
43
+ src_classes = {'schinken': 0, 'eier': 0, 'spam': 1}
44
+ trg_classes = {'ham': 0, 'eggs': 1, 'spam': 2}
45
+ corpus = [
46
+ AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
47
+ AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
48
+ ]
49
+ model5 = IBMModel5(corpus, 0, src_classes, trg_classes)
50
+
51
+ # act
52
+ model5.set_uniform_probabilities(corpus)
53
+
54
+ # assert
55
+ # examine dv and max_v values that are not in the training data domain
56
+ self.assertEqual(model5.head_vacancy_table[5][4][0],
57
+ IBMModel.MIN_PROB)
58
+ self.assertEqual(model5.head_vacancy_table[-4][1][2],
59
+ IBMModel.MIN_PROB)
60
+ self.assertEqual(model5.head_vacancy_table[4][0][0],
61
+ IBMModel.MIN_PROB)
62
+ self.assertEqual(model5.non_head_vacancy_table[5][4][0],
63
+ IBMModel.MIN_PROB)
64
+ self.assertEqual(model5.non_head_vacancy_table[-4][1][2],
65
+ IBMModel.MIN_PROB)
66
+
67
+ def test_prob_t_a_given_s(self):
68
+ # arrange
69
+ src_sentence = ["ich", 'esse', 'ja', 'gern', 'räucherschinken']
70
+ trg_sentence = ['i', 'love', 'to', 'eat', 'smoked', 'ham']
71
+ src_classes = {'räucherschinken': 0, 'ja': 1, 'ich': 2, 'esse': 3,
72
+ 'gern': 4}
73
+ trg_classes = {'ham': 0, 'smoked': 1, 'i': 3, 'love': 4, 'to': 2,
74
+ 'eat': 4}
75
+ corpus = [AlignedSent(trg_sentence, src_sentence)]
76
+ alignment_info = AlignmentInfo((0, 1, 4, 0, 2, 5, 5),
77
+ [None] + src_sentence,
78
+ ['UNUSED'] + trg_sentence,
79
+ [[3], [1], [4], [], [2], [5, 6]])
80
+
81
+ head_vacancy_table = defaultdict(
82
+ lambda: defaultdict(lambda: defaultdict(float)))
83
+ head_vacancy_table[1 - 0][6][3] = 0.97 # ich -> i
84
+ head_vacancy_table[3 - 0][5][4] = 0.97 # esse -> eat
85
+ head_vacancy_table[1 - 2][4][4] = 0.97 # gern -> love
86
+ head_vacancy_table[2 - 0][2][1] = 0.97 # räucherschinken -> smoked
87
+
88
+ non_head_vacancy_table = defaultdict(
89
+ lambda: defaultdict(lambda: defaultdict(float)))
90
+ non_head_vacancy_table[1 - 0][1][0] = 0.96 # räucherschinken -> ham
91
+
92
+ translation_table = defaultdict(lambda: defaultdict(float))
93
+ translation_table['i']['ich'] = 0.98
94
+ translation_table['love']['gern'] = 0.98
95
+ translation_table['to'][None] = 0.98
96
+ translation_table['eat']['esse'] = 0.98
97
+ translation_table['smoked']['räucherschinken'] = 0.98
98
+ translation_table['ham']['räucherschinken'] = 0.98
99
+
100
+ fertility_table = defaultdict(lambda: defaultdict(float))
101
+ fertility_table[1]['ich'] = 0.99
102
+ fertility_table[1]['esse'] = 0.99
103
+ fertility_table[0]['ja'] = 0.99
104
+ fertility_table[1]['gern'] = 0.99
105
+ fertility_table[2]['räucherschinken'] = 0.999
106
+ fertility_table[1][None] = 0.99
107
+
108
+ probabilities = {
109
+ 'p1': 0.167,
110
+ 'translation_table': translation_table,
111
+ 'fertility_table': fertility_table,
112
+ 'head_vacancy_table': head_vacancy_table,
113
+ 'non_head_vacancy_table': non_head_vacancy_table,
114
+ 'head_distortion_table': None,
115
+ 'non_head_distortion_table': None,
116
+ 'alignment_table': None
117
+ }
118
+
119
+ model5 = IBMModel5(corpus, 0, src_classes, trg_classes,
120
+ probabilities)
121
+
122
+ # act
123
+ probability = model5.prob_t_a_given_s(alignment_info)
124
+
125
+ # assert
126
+ null_generation = 5 * pow(0.167, 1) * pow(0.833, 4)
127
+ fertility = 1*0.99 * 1*0.99 * 1*0.99 * 1*0.99 * 2*0.999
128
+ lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
129
+ vacancy = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96
130
+ expected_probability = (null_generation * fertility *
131
+ lexical_translation * vacancy)
132
+ self.assertEqual(round(probability, 4), round(expected_probability, 4))
133
+
134
+ def test_prune(self):
135
+ # arrange
136
+ alignment_infos = [
137
+ AlignmentInfo((1, 1), None, None, None),
138
+ AlignmentInfo((1, 2), None, None, None),
139
+ AlignmentInfo((2, 1), None, None, None),
140
+ AlignmentInfo((2, 2), None, None, None),
141
+ AlignmentInfo((0, 0), None, None, None)
142
+ ]
143
+ min_factor = IBMModel5.MIN_SCORE_FACTOR
144
+ best_score = 0.9
145
+ scores = {
146
+ (1, 1): min(min_factor * 1.5, 1) * best_score, # above threshold
147
+ (1, 2): best_score,
148
+ (2, 1): min_factor * best_score, # at threshold
149
+ (2, 2): min_factor * best_score * 0.5, # low score
150
+ (0, 0): min(min_factor * 1.1, 1) * 1.2 # above threshold
151
+ }
152
+ corpus = [AlignedSent(['a'], ['b'])]
153
+ original_prob_function = IBMModel4.model4_prob_t_a_given_s
154
+ # mock static method
155
+ IBMModel4.model4_prob_t_a_given_s = staticmethod(
156
+ lambda a, model: scores[a.alignment])
157
+ model5 = IBMModel5(corpus, 0, None, None)
158
+
159
+ # act
160
+ pruned_alignments = model5.prune(alignment_infos)
161
+
162
+ # assert
163
+ self.assertEqual(len(pruned_alignments), 3)
164
+
165
+ # restore static method
166
+ IBMModel4.model4_prob_t_a_given_s = original_prob_function
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_ibm_model.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for common methods of IBM translation models
4
+ """
5
+
6
+ import unittest
7
+
8
+ from collections import defaultdict
9
+ from nltk.translate import AlignedSent
10
+ from nltk.translate import IBMModel
11
+ from nltk.translate.ibm_model import AlignmentInfo
12
+
13
+
14
+ class TestIBMModel(unittest.TestCase):
15
+ __TEST_SRC_SENTENCE = ["j'", 'aime', 'bien', 'jambon']
16
+ __TEST_TRG_SENTENCE = ['i', 'love', 'ham']
17
+
18
+ def test_vocabularies_are_initialized(self):
19
+ parallel_corpora = [
20
+ AlignedSent(['one', 'two', 'three', 'four'],
21
+ ['un', 'deux', 'trois']),
22
+ AlignedSent(['five', 'one', 'six'], ['quatre', 'cinq', 'six']),
23
+ AlignedSent([], ['sept'])
24
+ ]
25
+
26
+ ibm_model = IBMModel(parallel_corpora)
27
+ self.assertEqual(len(ibm_model.src_vocab), 8)
28
+ self.assertEqual(len(ibm_model.trg_vocab), 6)
29
+
30
+ def test_vocabularies_are_initialized_even_with_empty_corpora(self):
31
+ parallel_corpora = []
32
+
33
+ ibm_model = IBMModel(parallel_corpora)
34
+ self.assertEqual(len(ibm_model.src_vocab), 1) # addition of NULL token
35
+ self.assertEqual(len(ibm_model.trg_vocab), 0)
36
+
37
+ def test_best_model2_alignment(self):
38
+ # arrange
39
+ sentence_pair = AlignedSent(
40
+ TestIBMModel.__TEST_TRG_SENTENCE,
41
+ TestIBMModel.__TEST_SRC_SENTENCE)
42
+ # None and 'bien' have zero fertility
43
+ translation_table = {
44
+ 'i': {"j'": 0.9, 'aime': 0.05, 'bien': 0.02, 'jambon': 0.03,
45
+ None: 0},
46
+ 'love': {"j'": 0.05, 'aime': 0.9, 'bien': 0.01, 'jambon': 0.01,
47
+ None: 0.03},
48
+ 'ham': {"j'": 0, 'aime': 0.01, 'bien': 0, 'jambon': 0.99,
49
+ None: 0}
50
+ }
51
+ alignment_table = defaultdict(
52
+ lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
53
+ lambda: 0.2))))
54
+
55
+ ibm_model = IBMModel([])
56
+ ibm_model.translation_table = translation_table
57
+ ibm_model.alignment_table = alignment_table
58
+
59
+ # act
60
+ a_info = ibm_model.best_model2_alignment(sentence_pair)
61
+
62
+ # assert
63
+ self.assertEqual(a_info.alignment[1:], (1, 2, 4)) # 0th element unused
64
+ self.assertEqual(a_info.cepts, [[], [1], [2], [], [3]])
65
+
66
+ def test_best_model2_alignment_does_not_change_pegged_alignment(self):
67
+ # arrange
68
+ sentence_pair = AlignedSent(
69
+ TestIBMModel.__TEST_TRG_SENTENCE,
70
+ TestIBMModel.__TEST_SRC_SENTENCE)
71
+ translation_table = {
72
+ 'i': {"j'": 0.9, 'aime': 0.05, 'bien': 0.02, 'jambon': 0.03,
73
+ None: 0},
74
+ 'love': {"j'": 0.05, 'aime': 0.9, 'bien': 0.01, 'jambon': 0.01,
75
+ None: 0.03},
76
+ 'ham': {"j'": 0, 'aime': 0.01, 'bien': 0, 'jambon': 0.99, None: 0}
77
+ }
78
+ alignment_table = defaultdict(
79
+ lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
80
+ lambda: 0.2))))
81
+
82
+ ibm_model = IBMModel([])
83
+ ibm_model.translation_table = translation_table
84
+ ibm_model.alignment_table = alignment_table
85
+
86
+ # act: force 'love' to be pegged to 'jambon'
87
+ a_info = ibm_model.best_model2_alignment(sentence_pair, 2, 4)
88
+ # assert
89
+ self.assertEqual(a_info.alignment[1:], (1, 4, 4))
90
+ self.assertEqual(a_info.cepts, [[], [1], [], [], [2, 3]])
91
+
92
+ def test_best_model2_alignment_handles_fertile_words(self):
93
+ # arrange
94
+ sentence_pair = AlignedSent(
95
+ ['i', 'really', ',', 'really', 'love', 'ham'],
96
+ TestIBMModel.__TEST_SRC_SENTENCE)
97
+ # 'bien' produces 2 target words: 'really' and another 'really'
98
+ translation_table = {
99
+ 'i': {"j'": 0.9, 'aime': 0.05, 'bien': 0.02, 'jambon': 0.03, None: 0},
100
+ 'really': {"j'": 0, 'aime': 0, 'bien': 0.9, 'jambon': 0.01, None: 0.09},
101
+ ',': {"j'": 0, 'aime': 0, 'bien': 0.3, 'jambon': 0, None: 0.7},
102
+ 'love': {"j'": 0.05, 'aime': 0.9, 'bien': 0.01, 'jambon': 0.01, None: 0.03},
103
+ 'ham': {"j'": 0, 'aime': 0.01, 'bien': 0, 'jambon': 0.99, None: 0}
104
+ }
105
+ alignment_table = defaultdict(
106
+ lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
107
+ lambda: 0.2))))
108
+
109
+ ibm_model = IBMModel([])
110
+ ibm_model.translation_table = translation_table
111
+ ibm_model.alignment_table = alignment_table
112
+
113
+ # act
114
+ a_info = ibm_model.best_model2_alignment(sentence_pair)
115
+
116
+ # assert
117
+ self.assertEqual(a_info.alignment[1:], (1, 3, 0, 3, 2, 4))
118
+ self.assertEqual(a_info.cepts, [[3], [1], [5], [2, 4], [6]])
119
+
120
+ def test_best_model2_alignment_handles_empty_src_sentence(self):
121
+ # arrange
122
+ sentence_pair = AlignedSent(TestIBMModel.__TEST_TRG_SENTENCE, [])
123
+ ibm_model = IBMModel([])
124
+
125
+ # act
126
+ a_info = ibm_model.best_model2_alignment(sentence_pair)
127
+
128
+ # assert
129
+ self.assertEqual(a_info.alignment[1:], (0, 0, 0))
130
+ self.assertEqual(a_info.cepts, [[1, 2, 3]])
131
+
132
+ def test_best_model2_alignment_handles_empty_trg_sentence(self):
133
+ # arrange
134
+ sentence_pair = AlignedSent([], TestIBMModel.__TEST_SRC_SENTENCE)
135
+ ibm_model = IBMModel([])
136
+
137
+ # act
138
+ a_info = ibm_model.best_model2_alignment(sentence_pair)
139
+
140
+ # assert
141
+ self.assertEqual(a_info.alignment[1:], ())
142
+ self.assertEqual(a_info.cepts, [[], [], [], [], []])
143
+
144
+ def test_neighboring_finds_neighbor_alignments(self):
145
+ # arrange
146
+ a_info = AlignmentInfo(
147
+ (0, 3, 2),
148
+ (None, 'des', 'œufs', 'verts'),
149
+ ('UNUSED', 'green', 'eggs'),
150
+ [[], [], [2], [1]]
151
+ )
152
+ ibm_model = IBMModel([])
153
+
154
+ # act
155
+ neighbors = ibm_model.neighboring(a_info)
156
+
157
+ # assert
158
+ neighbor_alignments = set()
159
+ for neighbor in neighbors:
160
+ neighbor_alignments.add(neighbor.alignment)
161
+ expected_alignments = set([
162
+ # moves
163
+ (0, 0, 2), (0, 1, 2), (0, 2, 2),
164
+ (0, 3, 0), (0, 3, 1), (0, 3, 3),
165
+ # swaps
166
+ (0, 2, 3),
167
+ # original alignment
168
+ (0, 3, 2)
169
+ ])
170
+ self.assertEqual(neighbor_alignments, expected_alignments)
171
+
172
+ def test_neighboring_sets_neighbor_alignment_info(self):
173
+ # arrange
174
+ a_info = AlignmentInfo(
175
+ (0, 3, 2),
176
+ (None, 'des', 'œufs', 'verts'),
177
+ ('UNUSED', 'green', 'eggs'),
178
+ [[], [], [2], [1]]
179
+ )
180
+ ibm_model = IBMModel([])
181
+
182
+ # act
183
+ neighbors = ibm_model.neighboring(a_info)
184
+
185
+ # assert: select a few particular alignments
186
+ for neighbor in neighbors:
187
+ if neighbor.alignment == (0, 2, 2):
188
+ moved_alignment = neighbor
189
+ elif neighbor.alignment == (0, 3, 2):
190
+ swapped_alignment = neighbor
191
+
192
+ self.assertEqual(moved_alignment.cepts, [[], [], [1, 2], []])
193
+ self.assertEqual(swapped_alignment.cepts, [[], [], [2], [1]])
194
+
195
+ def test_neighboring_returns_neighbors_with_pegged_alignment(self):
196
+ # arrange
197
+ a_info = AlignmentInfo(
198
+ (0, 3, 2),
199
+ (None, 'des', 'œufs', 'verts'),
200
+ ('UNUSED', 'green', 'eggs'),
201
+ [[], [], [2], [1]]
202
+ )
203
+ ibm_model = IBMModel([])
204
+
205
+ # act: peg 'eggs' to align with 'œufs'
206
+ neighbors = ibm_model.neighboring(a_info, 2)
207
+
208
+ # assert
209
+ neighbor_alignments = set()
210
+ for neighbor in neighbors:
211
+ neighbor_alignments.add(neighbor.alignment)
212
+ expected_alignments = set([
213
+ # moves
214
+ (0, 0, 2), (0, 1, 2), (0, 2, 2),
215
+ # no swaps
216
+ # original alignment
217
+ (0, 3, 2)
218
+ ])
219
+ self.assertEqual(neighbor_alignments, expected_alignments)
220
+
221
+ def test_hillclimb(self):
222
+ # arrange
223
+ initial_alignment = AlignmentInfo((0, 3, 2), None, None, None)
224
+
225
+ def neighboring_mock(a, j):
226
+ if a.alignment == (0, 3, 2):
227
+ return set([
228
+ AlignmentInfo((0, 2, 2), None, None, None),
229
+ AlignmentInfo((0, 1, 1), None, None, None)
230
+ ])
231
+ elif a.alignment == (0, 2, 2):
232
+ return set([
233
+ AlignmentInfo((0, 3, 3), None, None, None),
234
+ AlignmentInfo((0, 4, 4), None, None, None)
235
+ ])
236
+ return set()
237
+
238
+ def prob_t_a_given_s_mock(a):
239
+ prob_values = {
240
+ (0, 3, 2): 0.5,
241
+ (0, 2, 2): 0.6,
242
+ (0, 1, 1): 0.4,
243
+ (0, 3, 3): 0.6,
244
+ (0, 4, 4): 0.7
245
+ }
246
+ return prob_values.get(a.alignment, 0.01)
247
+
248
+ ibm_model = IBMModel([])
249
+ ibm_model.neighboring = neighboring_mock
250
+ ibm_model.prob_t_a_given_s = prob_t_a_given_s_mock
251
+
252
+ # act
253
+ best_alignment = ibm_model.hillclimb(initial_alignment)
254
+
255
+ # assert: hill climbing goes from (0, 3, 2) -> (0, 2, 2) -> (0, 4, 4)
256
+ self.assertEqual(best_alignment.alignment, (0, 4, 4))
257
+
258
+ def test_sample(self):
259
+ # arrange
260
+ sentence_pair = AlignedSent(
261
+ TestIBMModel.__TEST_TRG_SENTENCE,
262
+ TestIBMModel.__TEST_SRC_SENTENCE)
263
+ ibm_model = IBMModel([])
264
+ ibm_model.prob_t_a_given_s = lambda x: 0.001
265
+
266
+ # act
267
+ samples, best_alignment = ibm_model.sample(sentence_pair)
268
+
269
+ # assert
270
+ self.assertEqual(len(samples), 61)
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_nist.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Tests for NIST translation evaluation metric
4
+ """
5
+
6
+ import io
7
+ import unittest
8
+
9
+ from nltk.data import find
10
+ from nltk.translate.nist_score import sentence_nist, corpus_nist
11
+
12
+
13
+ class TestNIST(unittest.TestCase):
14
+ def test_sentence_nist(self):
15
+ ref_file = find('models/wmt15_eval/ref.ru')
16
+ hyp_file = find('models/wmt15_eval/google.ru')
17
+ mteval_output_file = find('models/wmt15_eval/mteval-13a.output')
18
+
19
+ # Reads the NIST scores from the `mteval-13a.output` file.
20
+ # The order of the list corresponds to the order of the ngrams.
21
+ with open(mteval_output_file, 'r') as mteval_fin:
22
+ # The numbers are located in the last 4th line of the file.
23
+ # The first and 2nd item in the list are the score and system names.
24
+ mteval_nist_scores = map(float, mteval_fin.readlines()[-4].split()[1:-1])
25
+
26
+ with io.open(ref_file, 'r', encoding='utf8') as ref_fin:
27
+ with io.open(hyp_file, 'r', encoding='utf8') as hyp_fin:
28
+ # Whitespace tokenize the file.
29
+ # Note: split() automatically strip().
30
+ hypotheses = list(map(lambda x: x.split(), hyp_fin))
31
+ # Note that the corpus_bleu input is list of list of references.
32
+ references = list(map(lambda x: [x.split()], ref_fin))
33
+ # Without smoothing.
34
+ for i, mteval_nist in zip(range(1,10), mteval_nist_scores):
35
+ nltk_nist = corpus_nist(references, hypotheses, i)
36
+ # Check that the NIST scores difference is less than 0.5
37
+ assert abs(mteval_nist - nltk_nist) < 0.05
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/translate/test_stack_decoder.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Natural Language Toolkit: Stack decoder
3
+ #
4
+ # Copyright (C) 2001-2018 NLTK Project
5
+ # Author: Tah Wei Hoon <hoon.tw@gmail.com>
6
+ # URL: <http://nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Tests for stack decoder
11
+ """
12
+
13
+ import unittest
14
+ from collections import defaultdict
15
+ from math import log
16
+ from nltk.translate import PhraseTable
17
+ from nltk.translate import StackDecoder
18
+ from nltk.translate.stack_decoder import _Hypothesis, _Stack
19
+
20
+
21
class TestStackDecoder(unittest.TestCase):
    """Unit tests for the phrase-based StackDecoder."""

    def test_find_all_src_phrases(self):
        # arrange
        phrase_table = TestStackDecoder.create_fake_phrase_table()
        decoder = StackDecoder(phrase_table, None)
        sentence = ('my', 'hovercraft', 'is', 'full', 'of', 'eels')

        # act
        spans_by_start = decoder.find_all_src_phrases(sentence)

        # assert: entry i lists the end positions of table phrases
        # beginning at word i
        self.assertEqual(spans_by_start[0], [2])     # 'my hovercraft'
        self.assertEqual(spans_by_start[1], [2])     # 'hovercraft'
        self.assertEqual(spans_by_start[2], [3])     # 'is'
        self.assertEqual(spans_by_start[3], [5, 6])  # 'full of', 'full of eels'
        self.assertFalse(spans_by_start[4])          # nothing starts with 'of'
        self.assertEqual(spans_by_start[5], [6])     # 'eels'

    def test_distortion_score(self):
        # arrange
        decoder = StackDecoder(None, None)
        decoder.distortion_factor = 0.5
        hypothesis = _Hypothesis()
        hypothesis.src_phrase_span = (3, 5)

        # act
        score = decoder.distortion_score(hypothesis, (8, 10))

        # assert: cost is proportional to the jump from position 5 to 8
        self.assertEqual(score, log(decoder.distortion_factor) * (8 - 5))

    def test_distortion_score_of_first_expansion(self):
        # arrange
        decoder = StackDecoder(None, None)
        decoder.distortion_factor = 0.5
        hypothesis = _Hypothesis()

        # act
        score = decoder.distortion_score(hypothesis, (8, 10))

        # assert: expanding the empty hypothesis has zero distortion cost
        self.assertEqual(score, 0.0)

    def test_compute_future_costs(self):
        # arrange
        phrase_table = TestStackDecoder.create_fake_phrase_table()
        language_model = TestStackDecoder.create_fake_language_model()
        decoder = StackDecoder(phrase_table, language_model)
        sentence = ('my', 'hovercraft', 'is', 'full', 'of', 'eels')

        # act
        future_scores = decoder.compute_future_scores(sentence)

        # assert: a span's future score is its translation log-prob
        # plus its language-model log-prob
        expected_unigram = (
            phrase_table.translations_for(('hovercraft',))[0].log_prob +
            language_model.probability(('hovercraft',)))
        self.assertEqual(future_scores[1][2], expected_unigram)
        expected_bigram = (
            phrase_table.translations_for(('my', 'hovercraft'))[0].log_prob +
            language_model.probability(('my', 'hovercraft')))
        self.assertEqual(future_scores[0][2], expected_bigram)

    def test_compute_future_costs_for_phrases_not_in_phrase_table(self):
        # arrange
        phrase_table = TestStackDecoder.create_fake_phrase_table()
        language_model = TestStackDecoder.create_fake_language_model()
        decoder = StackDecoder(phrase_table, language_model)
        sentence = ('my', 'hovercraft', 'is', 'full', 'of', 'eels')

        # act
        future_scores = decoder.compute_future_scores(sentence)

        # assert: 'hovercraft is' is absent from the phrase table, so its
        # score backs off to the sum of its sub-spans
        self.assertEqual(
            future_scores[1][3],
            future_scores[1][2] + future_scores[2][3])

    def test_future_score(self):
        # arrange: sentence with 8 words; words 2, 3, 4 already translated
        hypothesis = _Hypothesis()
        hypothesis.untranslated_spans = lambda _: [(0, 2), (5, 8)]  # mock
        future_score_table = defaultdict(lambda: defaultdict(float))
        future_score_table[0][2] = 0.4
        future_score_table[5][8] = 0.5
        decoder = StackDecoder(None, None)

        # act
        future_score = decoder.future_score(hypothesis, future_score_table, 8)

        # assert: the total is the sum over all untranslated spans
        self.assertEqual(future_score, 0.4 + 0.5)

    def test_valid_phrases(self):
        # arrange
        hypothesis = _Hypothesis()
        # mock untranslated_spans method
        hypothesis.untranslated_spans = lambda _: [(0, 2), (3, 6)]
        all_phrases_from = [[1, 4], [2], [], [5], [5, 6, 7], [], [7]]

        # act
        phrase_spans = StackDecoder.valid_phrases(all_phrases_from, hypothesis)

        # assert: only spans fully inside an untranslated region survive
        self.assertEqual(phrase_spans, [(0, 1), (1, 2), (3, 5), (4, 5), (4, 6)])

    @staticmethod
    def create_fake_phrase_table():
        # Minimal in-memory phrase table; the target sides are placeholder
        # strings since only spans and probabilities matter here.
        phrase_table = PhraseTable()
        phrase_table.add(('hovercraft',), ('',), 0.8)
        phrase_table.add(('my', 'hovercraft'), ('', ''), 0.7)
        phrase_table.add(('my', 'cheese'), ('', ''), 0.7)
        phrase_table.add(('is',), ('',), 0.8)
        phrase_table.add(('is',), ('',), 0.5)
        phrase_table.add(('full', 'of'), ('', ''), 0.01)
        phrase_table.add(('full', 'of', 'eels'), ('', '', ''), 0.5)
        phrase_table.add(('full', 'of', 'spam'), ('', ''), 0.5)
        phrase_table.add(('eels',), ('',), 0.5)
        phrase_table.add(('spam',), ('',), 0.5)
        return phrase_table

    @staticmethod
    def create_fake_language_model():
        # nltk.model should be used here once it is implemented
        language_prob = defaultdict(lambda: -999.0)
        for word in ('my', 'hovercraft', 'is', 'full', 'of', 'eels'):
            language_prob[(word,)] = log(0.1)
        language_prob[('my', 'hovercraft',)] = log(0.3)
        # Anonymous stand-in object exposing only probability(phrase).
        lm_class = type(
            '', (object,),
            {'probability': lambda _, phrase: language_prob[phrase]})
        return lm_class()
171
+
172
+
173
class TestHypothesis(unittest.TestCase):
    """Tests for the _Hypothesis linked chain used by the stack decoder."""

    def setUp(self):
        # Build a three-link chain: empty root -> middle -> leaf.
        empty_root = _Hypothesis()
        middle = _Hypothesis(
            raw_score=0.5,
            src_phrase_span=(3, 7),
            trg_phrase=('hello', 'world'),
            previous=empty_root,
        )
        self.hypothesis_chain = _Hypothesis(
            raw_score=0.4,
            src_phrase_span=(1, 2),
            trg_phrase=('and', 'goodbye'),
            previous=middle,
        )

    def test_translation_so_far(self):
        # act
        translation = self.hypothesis_chain.translation_so_far()

        # assert: target phrases are concatenated from root to leaf
        self.assertEqual(translation, ['hello', 'world', 'and', 'goodbye'])

    def test_translation_so_far_for_empty_hypothesis(self):
        # an empty hypothesis has produced no target words yet
        self.assertEqual(_Hypothesis().translation_so_far(), [])

    def test_total_translated_words(self):
        # spans (3, 7) and (1, 2) cover 4 + 1 = 5 source words
        self.assertEqual(self.hypothesis_chain.total_translated_words(), 5)

    def test_translated_positions(self):
        # act
        positions = self.hypothesis_chain.translated_positions()

        # assert (order is unspecified, so compare sorted)
        self.assertEqual(sorted(positions), [1, 3, 4, 5, 6])

    def test_untranslated_spans(self):
        # act
        gaps = self.hypothesis_chain.untranslated_spans(10)

        # assert: the gaps around positions {1, 3..6} in a 10-word sentence
        self.assertEqual(gaps, [(0, 1), (2, 3), (7, 10)])

    def test_untranslated_spans_for_empty_hypothesis(self):
        # the whole sentence is untranslated for an empty hypothesis
        self.assertEqual(_Hypothesis().untranslated_spans(10), [(0, 10)])
238
+
239
+
240
class TestStack(unittest.TestCase):
    """Tests for the bounded, beam-pruned _Stack of hypotheses."""

    def test_push_bumps_off_worst_hypothesis_when_stack_is_full(self):
        # arrange: capacity of three
        stack = _Stack(3)
        weakest = _Hypothesis(0.01)

        # act: pushing a fourth hypothesis must evict the weakest one
        for hypothesis in (
                _Hypothesis(0.2), weakest, _Hypothesis(0.1), _Hypothesis(0.3)):
            stack.push(hypothesis)

        # assert
        self.assertFalse(weakest in stack)

    def test_push_removes_hypotheses_that_fall_below_beam_threshold(self):
        # arrange: beam threshold of 0.5
        stack = _Stack(3, 0.5)
        weak = _Hypothesis(0.01)
        weaker = _Hypothesis(0.009)

        # act: a greatly superior hypothesis drops both below the beam
        stack.push(weak)
        stack.push(weaker)
        stack.push(_Hypothesis(0.9))

        # assert
        self.assertFalse(weak in stack)
        self.assertFalse(weaker in stack)

    def test_push_does_not_add_hypothesis_that_falls_below_beam_threshold(self):
        # arrange
        stack = _Stack(3, 0.5)
        weak = _Hypothesis(0.01)

        # act: the superior hypothesis is already present, so the weak
        # one never makes it in
        stack.push(_Hypothesis(0.9))
        stack.push(weak)

        # assert
        self.assertFalse(weak in stack)

    def test_best_returns_the_best_hypothesis(self):
        # arrange
        stack = _Stack(3)
        top = _Hypothesis(0.99)

        # act
        stack.push(_Hypothesis(0.0))
        stack.push(top)
        stack.push(_Hypothesis(0.5))

        # assert
        self.assertEqual(stack.best(), top)

    def test_best_returns_none_when_stack_is_empty(self):
        # an empty stack has no best hypothesis
        self.assertEqual(_Stack(3).best(), None)
A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/nltk/test/unit/utils.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ from __future__ import absolute_import
3
+ from unittest import TestCase
4
+ from functools import wraps
5
+ from nose.plugins.skip import SkipTest
6
+ from nltk.util import py26
7
+
8
def skip(reason):
    """
    Unconditionally skip a test.

    Can decorate either a single test function/method or a whole
    ``TestCase`` subclass.  ``reason`` is the message reported when the
    skip is triggered.
    """
    def decorator(test_item):
        is_test_class = isinstance(test_item, type) and issubclass(test_item, TestCase)

        if is_test_class and py26():
            # Patch all test_ methods to raise a SkipTest exception.
            # This is necessary for Python 2.6 because its unittest
            # doesn't understand __unittest_skip__.
            for meth_name in (m for m in dir(test_item) if m.startswith('test_')):
                patched_method = skip(reason)(getattr(test_item, meth_name))
                setattr(test_item, meth_name, patched_method)

        if not is_test_class:
            # Replace the function with a wrapper that always skips.
            # @wraps already copies __name__ (and friends), so no manual
            # name copy is needed.
            @wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            test_item = skip_wrapper

        # Markers understood by unittest (Python >= 2.7) and nose.
        test_item.__unittest_skip__ = True
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
34
+
35
+
36
def skipIf(condition, reason):
    """
    Skip a test if the condition is true.
    """
    if not condition:
        # Condition not met: leave the test object untouched.
        return lambda obj: obj
    return skip(reason)