ZTWHHH committed on
Commit
8ac7ef1
·
verified ·
1 Parent(s): c021a3a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/py.typed +0 -0
  2. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__init__.py +2 -0
  3. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc +0 -0
  4. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc +0 -0
  5. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc +0 -0
  6. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc +0 -0
  7. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc +0 -0
  8. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc +0 -0
  9. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py +34 -0
  10. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py +19 -0
  11. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_char.py +19 -0
  12. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_intl.py +50 -0
  13. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ja_mecab.py +52 -0
  14. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ko_mecab.py +52 -0
  15. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_none.py +10 -0
  16. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py +38 -0
  17. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py +70 -0
  18. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ter.py +171 -0
  19. evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_zh.py +119 -0
  20. janus/lib/tk8.6/images/pwrdLogo175.gif +3 -0
  21. janus/share/terminfo/M/MtxOrb162 +0 -0
  22. janus/share/terminfo/M/MtxOrb204 +0 -0
  23. janus/share/terminfo/h/h-100 +0 -0
  24. janus/share/terminfo/h/h100 +0 -0
  25. janus/share/terminfo/h/h19-bs +0 -0
  26. janus/share/terminfo/h/h19-u +0 -0
  27. janus/share/terminfo/h/h29a-nkc-bc +0 -0
  28. janus/share/terminfo/h/heath +0 -0
  29. janus/share/terminfo/h/heathkit +0 -0
  30. janus/share/terminfo/h/hft +0 -0
  31. janus/share/terminfo/h/hp+color +0 -0
  32. janus/share/terminfo/h/hp+labels +0 -0
  33. janus/share/terminfo/h/hp+pfk+cr +0 -0
  34. janus/share/terminfo/h/hp150 +0 -0
  35. janus/share/terminfo/h/hp2 +0 -0
  36. janus/share/terminfo/h/hp2382 +0 -0
  37. janus/share/terminfo/h/hp2621-48 +0 -0
  38. janus/share/terminfo/h/hp2621-nl +0 -0
  39. janus/share/terminfo/h/hp2621a +0 -0
  40. janus/share/terminfo/h/hp2621a-a +0 -0
  41. janus/share/terminfo/h/hp2621p-a +0 -0
  42. janus/share/terminfo/h/hp2623 +0 -0
  43. janus/share/terminfo/h/hp2624-10p +0 -0
  44. janus/share/terminfo/h/hp2624a +0 -0
  45. janus/share/terminfo/h/hp2624b +0 -0
  46. janus/share/terminfo/h/hp2624b-10p +0 -0
  47. janus/share/terminfo/h/hp2624b-p +0 -0
  48. janus/share/terminfo/h/hp2626 +0 -0
  49. janus/share/terminfo/h/hp2647a +0 -0
  50. janus/share/terminfo/h/hp2648 +0 -0
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/py.typed ADDED
File without changes
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Base tokenizer to derive from
2
+ from .tokenizer_base import BaseTokenizer # noqa: F401
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc ADDED
Binary file (902 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc ADDED
Binary file (2.27 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc ADDED
Binary file (1.29 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc ADDED
Binary file (2.19 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from .tokenizer_base import BaseTokenizer
3
+ from .tokenizer_re import TokenizerRegexp
4
+
5
+
6
class Tokenizer13a(BaseTokenizer):
    """mteval-v13a-compatible tokenizer as used by WMT.

    Performs a minimal, language-independent normalization and then
    delegates punctuation splitting to `TokenizerRegexp`.
    """

    def __init__(self):
        # Regexp tokenizer applied after the normalization below.
        self._post_tokenizer = TokenizerRegexp()

    def signature(self):
        return '13a'

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        """Tokenizes an input line using a relatively minimal tokenization
        that is however equivalent to mteval-v13a, used by WMT.

        :param line: a segment to tokenize
        :return: the tokenized line
        """
        # Language-independent part: drop <skipped> markers, undo
        # end-of-line hyphenation, fold newlines into spaces.
        for old, new in (('<skipped>', ''), ('-\n', ''), ('\n', ' ')):
            line = line.replace(old, new)

        # Unescape XML entities; the '&' check is a cheap guard so the
        # common no-entity case does only one scan.
        if '&' in line:
            for old, new in (('&quot;', '"'), ('&amp;', '&'),
                             ('&lt;', '<'), ('&gt;', '>')):
                line = line.replace(old, new)

        return self._post_tokenizer(f' {line} ')
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ class BaseTokenizer:
2
+ """A base dummy tokenizer to derive from."""
3
+
4
+ def signature(self):
5
+ """
6
+ Returns a signature for the tokenizer.
7
+
8
+ :return: signature string
9
+ """
10
+ raise NotImplementedError()
11
+
12
+ def __call__(self, line):
13
+ """
14
+ Tokenizes an input line with the tokenizer.
15
+
16
+ :param line: a segment to tokenize
17
+ :return: the tokenized line
18
+ """
19
+ raise NotImplementedError()
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_char.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from .tokenizer_base import BaseTokenizer
3
+
4
+
5
class TokenizerChar(BaseTokenizer):
    """Character-level tokenizer: every character becomes its own token."""

    def __init__(self):
        pass

    def signature(self):
        return 'char'

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        """Tokenizes all the characters in the input line.

        :param line: a segment to tokenize
        :return: the tokenized line
        """
        # Joining the string itself iterates its characters, inserting a
        # single space between every adjacent pair.
        return " ".join(line)
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_intl.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+
3
+ import regex
4
+
5
+ from .tokenizer_base import BaseTokenizer
6
+
7
+
8
class TokenizerV14International(BaseTokenizer):
    """Tokenizes a string following the official BLEU implementation.

    See github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983

    In our case, the input string is expected to be just one line.
    We just tokenize on punctuation and symbols,
    except when a punctuation is preceded and followed by a digit
    (e.g. a comma/dot as a thousand/decimal separator).
    We do not recover escaped forms of punctuations such as &apos; or &gt;
    as these should never appear in MT system outputs (see issue #138)

    Note that a number (e.g., a year) followed by a dot at the end of
    sentence is NOT tokenized, i.e. the dot stays with the number because
    `s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a
    space after each sentence). However, this error is already in the
    original mteval-v14.pl and we want to be consistent with it.
    The error is not present in the non-international version,
    which uses `$norm_text = " $norm_text "`.

    :param line: the input string to tokenize.
    :return: The tokenized string.
    """

    def __init__(self):
        # (compiled pattern, replacement) pairs, applied in this order.
        # The third-party `regex` module is required for the Unicode
        # \p{...} property classes, which stdlib `re` lacks.
        self._re = [
            # Separate out punctuations preceeded by a non-digit
            (regex.compile(r'(\P{N})(\p{P})'), r'\1 \2 '),
            # Separate out punctuations followed by a non-digit
            (regex.compile(r'(\p{P})(\P{N})'), r' \1 \2'),
            # Separate out symbols
            (regex.compile(r'(\p{S})'), r' \1 '),
        ]

    def signature(self):
        return 'intl'

    @lru_cache(maxsize=2**16)
    def __call__(self, line: str) -> str:
        for pattern, replacement in self._re:
            line = pattern.sub(replacement, line)

        # Collapse whitespace runs into single spaces and strip the ends.
        return ' '.join(line.split())
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ja_mecab.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+
3
+ try:
4
+ import MeCab
5
+ import ipadic
6
+ except ImportError:
7
+ # Don't fail until the tokenizer is actually used
8
+ MeCab = None
9
+
10
+ from .tokenizer_base import BaseTokenizer
11
+
12
+ FAIL_MESSAGE = """
13
+ Japanese tokenization requires extra dependencies, but you do not have them installed.
14
+ Please install them like so.
15
+
16
+ pip install sacrebleu[ja]
17
+ """
18
+
19
+
20
class TokenizerJaMecab(BaseTokenizer):
    """Japanese tokenizer backed by the MeCab morphological analyzer.

    Requires the optional `MeCab` and `ipadic` packages
    (``pip install sacrebleu[ja]``).
    """

    def __init__(self):
        # The module-level import is allowed to fail silently; only
        # actually constructing the tokenizer without the deps is an error.
        if MeCab is None:
            raise RuntimeError(FAIL_MESSAGE)
        self.tagger = MeCab.Tagger(ipadic.MECAB_ARGS + " -Owakati")

        d = self.tagger.dictionary_info()
        # make sure the dictionary is IPA
        assert d.size == 392126, \
            "Please make sure to use the IPA dictionary for MeCab"
        # This asserts that no user dictionary has been loaded
        assert d.next is None

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        """
        Tokenizes a Japanese input line using the MeCab morphological analyzer.

        :param line: a segment to tokenize
        :return: the tokenized line
        """
        # With -Owakati, parse() directly returns the space-separated
        # segmentation of the input.
        return self.tagger.parse(line.strip()).strip()

    def signature(self):
        """
        Returns the MeCab parameters.

        :return: signature string (e.g. 'ja-mecab-0.996-IPA')
        """
        return 'ja-mecab-' + self.tagger.version() + "-IPA"
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ko_mecab.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+
3
+ try:
4
+ import mecab_ko as MeCab
5
+ import mecab_ko_dic
6
+ except ImportError:
7
+ # Don't fail until the tokenizer is actually used
8
+ MeCab = None
9
+
10
+ from .tokenizer_base import BaseTokenizer
11
+
12
+ FAIL_MESSAGE = """
13
+ Korean tokenization requires extra dependencies, but you do not have them installed.
14
+ Please install them like so.
15
+
16
+ pip install sacrebleu[ko]
17
+ """
18
+
19
+
20
class TokenizerKoMecab(BaseTokenizer):
    """Korean tokenizer backed by the MeCab-ko morphological analyzer.

    Requires the optional `mecab_ko` and `mecab_ko_dic` packages
    (``pip install sacrebleu[ko]``).
    """

    def __init__(self):
        # The module-level import is allowed to fail silently; only
        # actually constructing the tokenizer without the deps is an error.
        if MeCab is None:
            raise RuntimeError(FAIL_MESSAGE)
        self.tagger = MeCab.Tagger(mecab_ko_dic.MECAB_ARGS + " -Owakati")

        d = self.tagger.dictionary_info()
        # make sure the dictionary is mecab-ko-dic
        assert d.size == 811795, \
            "Please make sure to use the mecab-ko-dic for MeCab-ko"
        # This asserts that no user dictionary has been loaded
        assert d.next is None

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        """
        Tokenizes a Korean input line using the MeCab-ko morphological analyzer.

        :param line: a segment to tokenize
        :return: the tokenized line
        """
        # With -Owakati, parse() directly returns the space-separated
        # segmentation of the input.
        return self.tagger.parse(line.strip()).strip()

    def signature(self):
        """
        Returns the MeCab-ko parameters.

        :return: signature string (e.g. 'ko-mecab-0.996/ko-0.9.2-KO')
        """
        return 'ko-mecab-' + self.tagger.version() + "-KO"
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_none.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from .tokenizer_base import BaseTokenizer
2
+
3
class NoneTokenizer(BaseTokenizer):
    """Don't apply any tokenization. Not recommended!."""

    def signature(self):
        return 'none'

    def __call__(self, line):
        # Identity transform: the line is returned untouched.
        return line
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ import re
3
+
4
+ from .tokenizer_base import BaseTokenizer
5
+
6
+
7
class TokenizerRegexp(BaseTokenizer):
    """Regex-based post-tokenizer shared by the `13a` and `zh` tokenizers."""

    def signature(self):
        return 're'

    def __init__(self):
        # (compiled pattern, replacement) pairs, applied in this order.
        self._re = [
            # language-dependent part (assuming Western languages)
            (re.compile(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])'), r' \1 '),
            # tokenize period and comma unless preceded by a digit
            (re.compile(r'([^0-9])([\.,])'), r'\1 \2 '),
            # tokenize period and comma unless followed by a digit
            (re.compile(r'([\.,])([^0-9])'), r' \1 \2'),
            # tokenize dash when preceded by a digit
            (re.compile(r'([0-9])(-)'), r'\1 \2 '),
            # one space only between words
            # NOTE: Doing this in Python (below) is faster
            # (re.compile(r'\s+'), r' '),
        ]

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        """Common post-processing tokenizer for `13a` and `zh` tokenizers.

        :param line: a segment to tokenize
        :return: the tokenized line
        """
        for pattern, replacement in self._re:
            line = pattern.sub(replacement, line)

        # no leading or trailing spaces, single space within words
        return ' '.join(line.split())
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import os
4
+ import logging
5
+
6
+ from functools import lru_cache
7
+ from ..utils import SACREBLEU_DIR, download_file
8
+ from .tokenizer_base import BaseTokenizer
9
+
10
+ sacrelogger = logging.getLogger('sacrebleu')
11
+
12
+
13
# Mapping: tokenizer key -> SPM model download URL and signature name.
# Consumed by TokenizerSPM.__init__; the model file is cached locally
# after the first download.
SPM_MODELS = {
    # "spm" is a deprecated alias of "flores101" (a warning is logged).
    "spm": {
        "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model",
        "signature": "flores101",
    },
    # same as the default of "spm"
    "flores101": {
        "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model",
        "signature": "flores101",
    },
    "flores200": {
        "url": "https://tinyurl.com/flores200sacrebleuspm",
        "signature": "flores200",
    },
}
28
+
29
class TokenizerSPM(BaseTokenizer):
    """SentencePiece-based tokenizer (FLORES-101 / FLORES-200 models).

    The SPM model file is downloaded on first use and cached under
    ``SACREBLEU_DIR/models``.
    """

    def signature(self):
        return self.name

    def __init__(self, key="spm"):
        """
        :param key: which `SPM_MODELS` entry to load ("spm", "flores101" or
            "flores200"); "spm" is a deprecated alias of "flores101".
        :raises ImportError: if the `sentencepiece` package is missing.
        """
        self.name = SPM_MODELS[key]["signature"]

        if key == "spm":
            # Logger.warn is a deprecated alias of Logger.warning
            sacrelogger.warning("Tokenizer 'spm' has been changed to 'flores101', and may be removed in the future.")

        try:
            import sentencepiece as spm
        except (ImportError, ModuleNotFoundError):
            raise ImportError(
                '\n\nPlease install the sentencepiece library for SPM tokenization:'
                '\n\n pip install sentencepiece '
            )
        self.sp = spm.SentencePieceProcessor()

        # Look the URL up by `key` consistently (the original mixed `key`
        # and `self.name`, which would KeyError for any entry whose
        # signature is not itself a table key).
        url = SPM_MODELS[key]["url"]
        model_path = os.path.join(SACREBLEU_DIR, "models", os.path.basename(url))
        if not os.path.exists(model_path):
            download_file(url, model_path)
        self.sp.Load(model_path)

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        """Tokenizes the input line into SentencePiece pieces.

        :param line: a segment to tokenize
        :return: the tokenized line (pieces joined by spaces)
        """
        return " ".join(self.sp.EncodeAsPieces(line))
62
+
63
+
64
class Flores200Tokenizer(TokenizerSPM):
    """Convenience subclass pinning the FLORES-200 SPM model."""

    def __init__(self):
        super().__init__("flores200")
67
+
68
class Flores101Tokenizer(TokenizerSPM):
    """Convenience subclass pinning the FLORES-101 SPM model."""

    def __init__(self):
        super().__init__("flores101")
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ter.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 Memsource
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import re
17
+ from functools import lru_cache
18
+
19
+ from .tokenizer_base import BaseTokenizer
20
+
21
+
22
+ def _normalize_general_and_western(sent: str) -> str:
23
+ # language-independent (general) part
24
+
25
+ # strip end-of-line hyphenation and join lines
26
+ sent = re.sub(r"\n-", "", sent)
27
+
28
+ # join lines
29
+ sent = re.sub(r"\n", " ", sent)
30
+
31
+ # handle XML escaped symbols
32
+ sent = re.sub(r"&quot;", "\"", sent)
33
+ sent = re.sub(r"&amp;", "&", sent)
34
+ sent = re.sub(r"&lt;", "<", sent)
35
+ sent = re.sub(r"&gt;", ">", sent)
36
+
37
+ # language-dependent (Western) part
38
+ sent = f" {sent} "
39
+
40
+ # tokenize punctuation
41
+ sent = re.sub(r"([{-~[-` -&(-+:-@/])", r" \1 ", sent)
42
+
43
+ # handle possesives
44
+ sent = re.sub(r"'s ", r" 's ", sent)
45
+ sent = re.sub(r"'s$", r" 's", sent)
46
+
47
+ # tokenize period and comma unless preceded by a digit
48
+ sent = re.sub(r"([^0-9])([\.,])", r"\1 \2 ", sent)
49
+
50
+ # tokenize period and comma unless followed by a digit
51
+ sent = re.sub(r"([\.,])([^0-9])", r" \1 \2", sent)
52
+
53
+ # tokenize dash when preceded by a digit
54
+ sent = re.sub(r"([0-9])(-)", r"\1 \2 ", sent)
55
+
56
+ return sent
57
+
58
+
59
def _normalize_asian(sent: str) -> str:
    """Split CJK ideographs down to the character level (Tercom's rules).

    :param sent: input segment
    :return: segment with CJK characters space-separated
    """
    # Split Chinese chars and Japanese kanji down to character level:
    # 4E00—9FFF CJK Unified Ideographs
    # 3400—4DBF CJK Unified Ideographs Extension A
    # 31C0—31EF CJK Strokes
    # 2E80—2EFF CJK Radicals Supplement
    # 3300—33FF CJK Compatibility
    # F900—FAFF CJK Compatibility Ideographs
    # FE30—FE4F CJK Compatibility Forms
    # 3200—32FF Enclosed CJK Letters and Months
    # NOTE(review): the last class below ends at \u3f22, wider than the
    # \u32ff its comment implies — looks unintended, but kept verbatim for
    # score parity with the original implementation.
    for pattern in (
            r"([\u4e00-\u9fff\u3400-\u4dbf])",
            r"([\u31c0-\u31ef\u2e80-\u2eff])",
            r"([\u3300-\u33ff\uf900-\ufaff\ufe30-\ufe4f])",
            r"([\u3200-\u3f22])",
    ):
        sent = re.sub(pattern, r" \1 ", sent)

    # Split Hiragana, Katakana, and KatakanaPhoneticExtensions
    # only when adjacent to something else
    # 3040—309F Hiragana
    # 30A0—30FF Katakana
    # 31F0—31FF Katakana Phonetic Extensions
    # (Patterns kept byte-identical to the original; NOTE(review): the
    # `^` anchors inside the alternations look suspicious — confirm
    # against src/ter/core/Normalizer.java upstream.)
    for block in (r"\u3040-\u309f", r"\u30a0-\u30ff", r"\u31f0-\u31ff"):
        sent = re.sub(
            rf"(^|^[{block}])([{block}]+)(?=$|^[{block}])",
            r"\1 \2 ", sent)

    sent = re.sub(TercomTokenizer.ASIAN_PUNCT, r" \1 ", sent)
    sent = re.sub(TercomTokenizer.FULL_WIDTH_PUNCT, r" \1 ", sent)
    return sent
97
+
98
+
99
+ def _remove_punct(sent: str) -> str:
100
+ return re.sub(r"[\.,\?:;!\"\(\)]", "", sent)
101
+
102
+
103
def _remove_asian_punct(sent: str) -> str:
    """Delete CJK and full-width punctuation characters entirely."""
    for pattern in (TercomTokenizer.ASIAN_PUNCT,
                    TercomTokenizer.FULL_WIDTH_PUNCT):
        sent = re.sub(pattern, r"", sent)
    return sent
107
+
108
+
109
class TercomTokenizer(BaseTokenizer):
    """Re-implementation of Tercom Tokenizer in Python 3.

    See src/ter/core/Normalizer.java in https://github.com/jhclark/tercom

    Note that Python doesn't support named Unicode blocks so the mapping for
    relevant blocks was taken from here:

        https://unicode-table.com/en/blocks/
    """
    ASIAN_PUNCT = r"([\u3001\u3002\u3008-\u3011\u3014-\u301f\uff61-\uff65\u30fb])"
    FULL_WIDTH_PUNCT = r"([\uff0e\uff0c\uff1f\uff1a\uff1b\uff01\uff02\uff08\uff09])"

    def __init__(self,
                 normalized: bool = False,
                 no_punct: bool = False,
                 asian_support: bool = False,
                 case_sensitive: bool = False):
        """Initialize the tokenizer.

        :param normalized: Enable character normalization: strip newlines,
            recover XML-escaped characters, and tokenize punctuation.  When
            `asian_support` is also enabled, Asian (CJK) sequences are
            additionally split down to the character level.
        :param no_punct: Remove punctuation.  When `asian_support` is also
            enabled, CJK and full-width punctuation markers are removed too.
        :param asian_support: Enable special treatment of Asian characters;
            only has an effect together with `normalized` and/or `no_punct`.
        :param case_sensitive: Enable case sensitivity, i.e. do not
            lowercase the input.
        """
        self._normalized = normalized
        self._no_punct = no_punct
        self._asian_support = asian_support
        self._case_sensitive = case_sensitive

    # Although the cache is shared across different instances, same sentence
    # queries do not return invalid returns across different instances since
    # `self` becomes part of the query as well.
    @lru_cache(maxsize=2**16)
    def __call__(self, sent: str) -> str:
        if not sent:
            return ""

        if not self._case_sensitive:
            sent = sent.lower()

        # Assemble the processing pipeline implied by the flags, then apply
        # the steps left to right (same order as the original branching).
        steps = []
        if self._normalized:
            steps.append(_normalize_general_and_western)
            if self._asian_support:
                steps.append(_normalize_asian)
        if self._no_punct:
            steps.append(_remove_punct)
            if self._asian_support:
                steps.append(_remove_asian_punct)

        for step in steps:
            sent = step(sent)

        # Strip extra whitespaces
        return ' '.join(sent.split())

    def signature(self):
        return 'tercom'
evalkit_eagle/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_zh.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License"). You may not
4
+ # use this file except in compliance with the License. A copy of the License
5
+ # is located at
6
+ #
7
+ # http://aws.amazon.com/apache2.0/
8
+ #
9
+ # or in the "license" file accompanying this file. This file is distributed on
10
+ # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11
+ # express or implied. See the License for the specific language governing
12
+ # permissions and limitations under the License.
13
+
14
+ ##############
15
+
16
+ # MIT License
17
+ # Copyright (c) 2017 - Shujian Huang <huangsj@nju.edu.cn>
18
+
19
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
20
+ # of this software and associated documentation files (the "Software"), to deal
21
+ # in the Software without restriction, including without limitation the rights
22
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
23
+ # copies of the Software, and to permit persons to whom the Software is
24
+ # furnished to do so, subject to the following conditions:
25
+
26
+ # The above copyright notice and this permission notice shall be included in
27
+ # all copies or substantial portions of the Software.
28
+
29
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
34
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35
+ # SOFTWARE.
36
+
37
+ # Author: Shujian Huang huangsj@nju.edu.cn
38
+
39
+
40
+ from functools import lru_cache
41
+
42
+ from .tokenizer_base import BaseTokenizer
43
+ from .tokenizer_re import TokenizerRegexp
44
+
45
# Inclusive (start, end) ranges treated as "Chinese characters" by
# TokenizerZh._is_chinese_char; membership is tested with plain string
# comparison `start <= uchar <= end`.
# NOTE(review): Python's \uXXXX escape consumes exactly four hex digits,
# so u'\u20000' is really the two-character string '\u2000' + '0' (same
# for the other "(UTF16)" entries).  With single-character inputs the
# lexicographic comparison still runs, but it does not match the
# astral-plane ranges the comments describe — confirm against upstream.
_UCODE_RANGES = [
    (u'\u3400', u'\u4db5'),  # CJK Unified Ideographs Extension A, release 3.0
    (u'\u4e00', u'\u9fa5'),  # CJK Unified Ideographs, release 1.1
    (u'\u9fa6', u'\u9fbb'),  # CJK Unified Ideographs, release 4.1
    (u'\uf900', u'\ufa2d'),  # CJK Compatibility Ideographs, release 1.1
    (u'\ufa30', u'\ufa6a'),  # CJK Compatibility Ideographs, release 3.2
    (u'\ufa70', u'\ufad9'),  # CJK Compatibility Ideographs, release 4.1
    (u'\u20000', u'\u2a6d6'),  # (UTF16) CJK Unified Ideographs Extension B, release 3.1
    (u'\u2f800', u'\u2fa1d'),  # (UTF16) CJK Compatibility Supplement, release 3.1
    (u'\uff00', u'\uffef'),  # Full width ASCII, full width of English punctuation,
                             # half width Katakana, half wide half width kana, Korean alphabet
    (u'\u2e80', u'\u2eff'),  # CJK Radicals Supplement
    (u'\u3000', u'\u303f'),  # CJK punctuation mark
    (u'\u31c0', u'\u31ef'),  # CJK stroke
    (u'\u2f00', u'\u2fdf'),  # Kangxi Radicals
    (u'\u2ff0', u'\u2fff'),  # Chinese character structure
    (u'\u3100', u'\u312f'),  # Phonetic symbols
    (u'\u31a0', u'\u31bf'),  # Phonetic symbols (Taiwanese and Hakka expansion)
    (u'\ufe10', u'\ufe1f'),
    (u'\ufe30', u'\ufe4f'),
    (u'\u2600', u'\u26ff'),
    (u'\u2700', u'\u27bf'),
    (u'\u3200', u'\u32ff'),
    (u'\u3300', u'\u33ff'),
]
70
+
71
+
72
class TokenizerZh(BaseTokenizer):
    """Chinese tokenizer: splits Chinese characters apart, then runs the
    shared regexp post-tokenizer over the result.

    Author: Shujian Huang huangsj@nju.edu.cn
    """

    def __init__(self):
        self._post_tokenizer = TokenizerRegexp()

    def signature(self):
        return 'zh'

    @staticmethod
    @lru_cache(maxsize=2**16)
    def _is_chinese_char(uchar):
        """
        :param uchar: input char in unicode
        :return: whether the input char is a Chinese character.
        """
        return any(start <= uchar <= end for start, end in _UCODE_RANGES)

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        """The tokenization of Chinese text in this script contains two
        steps: separate each Chinese characters (by utf-8 encoding); tokenize
        the non Chinese part (following the `13a` i.e. mteval tokenizer).

        :param line: input sentence
        :return: tokenized sentence
        """
        # TODO: the below code could probably be replaced with the following:
        # @ozan: Gives slightly different scores, need to investigate
        # import regex
        # line = regex.sub(r'(\p{Han})', r' \1 ', line)
        pieces = []
        for ch in line.strip():
            # Pad Chinese characters with spaces; pass everything else
            # through unchanged.
            pieces.append(f" {ch} " if self._is_chinese_char(ch) else ch)

        return self._post_tokenizer("".join(pieces))
janus/lib/tk8.6/images/pwrdLogo175.gif ADDED

Git LFS Details

  • SHA256: 62866e95501c436b329a15432355743c6efd64a37cfb65bcece465ab63ecf240
  • Pointer size: 129 Bytes
  • Size of remote file: 2.98 kB
janus/share/terminfo/M/MtxOrb162 ADDED
Binary file (193 Bytes). View file
 
janus/share/terminfo/M/MtxOrb204 ADDED
Binary file (193 Bytes). View file
 
janus/share/terminfo/h/h-100 ADDED
Binary file (707 Bytes). View file
 
janus/share/terminfo/h/h100 ADDED
Binary file (707 Bytes). View file
 
janus/share/terminfo/h/h19-bs ADDED
Binary file (609 Bytes). View file
 
janus/share/terminfo/h/h19-u ADDED
Binary file (599 Bytes). View file
 
janus/share/terminfo/h/h29a-nkc-bc ADDED
Binary file (1.69 kB). View file
 
janus/share/terminfo/h/heath ADDED
Binary file (627 Bytes). View file
 
janus/share/terminfo/h/heathkit ADDED
Binary file (627 Bytes). View file
 
janus/share/terminfo/h/hft ADDED
Binary file (1.81 kB). View file
 
janus/share/terminfo/h/hp+color ADDED
Binary file (965 Bytes). View file
 
janus/share/terminfo/h/hp+labels ADDED
Binary file (489 Bytes). View file
 
janus/share/terminfo/h/hp+pfk+cr ADDED
Binary file (230 Bytes). View file
 
janus/share/terminfo/h/hp150 ADDED
Binary file (1.22 kB). View file
 
janus/share/terminfo/h/hp2 ADDED
Binary file (1.37 kB). View file
 
janus/share/terminfo/h/hp2382 ADDED
Binary file (1.12 kB). View file
 
janus/share/terminfo/h/hp2621-48 ADDED
Binary file (577 Bytes). View file
 
janus/share/terminfo/h/hp2621-nl ADDED
Binary file (570 Bytes). View file
 
janus/share/terminfo/h/hp2621a ADDED
Binary file (622 Bytes). View file
 
janus/share/terminfo/h/hp2621a-a ADDED
Binary file (592 Bytes). View file
 
janus/share/terminfo/h/hp2621p-a ADDED
Binary file (598 Bytes). View file
 
janus/share/terminfo/h/hp2623 ADDED
Binary file (1.2 kB). View file
 
janus/share/terminfo/h/hp2624-10p ADDED
Binary file (1.29 kB). View file
 
janus/share/terminfo/h/hp2624a ADDED
Binary file (1.27 kB). View file
 
janus/share/terminfo/h/hp2624b ADDED
Binary file (1.27 kB). View file
 
janus/share/terminfo/h/hp2624b-10p ADDED
Binary file (1.29 kB). View file
 
janus/share/terminfo/h/hp2624b-p ADDED
Binary file (1.3 kB). View file
 
janus/share/terminfo/h/hp2626 ADDED
Binary file (1.23 kB). View file
 
janus/share/terminfo/h/hp2647a ADDED
Binary file (738 Bytes). View file
 
janus/share/terminfo/h/hp2648 ADDED
Binary file (731 Bytes). View file