Current progress on evaluation module. See readme & documentation for usage.

#3
evaluation/README.md ADDED
@@ -0,0 +1,35 @@
1
+ # Evaluation
2
+ This module contains utilities and scripts for empirical, numerical evaluation of model outputs. Much of the code in this module is guided by the methodology described in [this paper](https://staff.aist.go.jp/m.goto/PAPER/TIEICE202309watanabe.pdf) (Watanabe, 2023).
3
+
4
+ ### Data formatting
5
+ Evaluation can be run on an existing JSON database of outputs using the [evaluate_model.py](evaluate_model.py) script. The database must contain a list of song containers in the following format (a minimal sketch of producing such a file follows the schema):
6
+ ```json
7
+ [
8
+ { // container for song 1
9
+ "id": (int) ...,
10
+ "prompt": (str) ..., //optional
11
+ "model_response": (str) ...,
12
+ "target_response": (str) ... //optional
13
+ },
14
+ { // container for song 2
15
+ "id": (int) ..., // unique from song 1
16
+ "prompt": (str) ..., //optional
17
+ "model_response": (str) ...,
18
+ "target_response": (str) ... //optional
19
+ },
20
+ .
21
+ .
22
+ .
23
+ ]
24
+ ```
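+
+ For example, a database in this format could be produced with a short script along the following lines (a sketch; the file name and field values are placeholders):
+ ```python
+ import json
+
+ songs = [
+     {
+         "id": 0,
+         "prompt": "Write a verse about the sea",  # optional
+         "model_response": "[verse]\nthe waves roll in\nthe tide rolls out",
+         "target_response": "[verse]\nupon the shore\nwe wait once more",  # optional
+     },
+ ]
+
+ with open("songs_database.json", "w") as f:
+     json.dump(songs, f, indent=2)
+ ```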
25
+
26
+ ### Quick start
27
+ Run the following command to quick-start an evaluation:
28
+ ```bash
29
+ py evaluation/evaluate_model.py <path_to_your_database>
30
+ ```
31
+
32
+ To run a specific subset of measures, pass them to the `--measures` argument:
33
+ ```bash
34
+ py evaluation/evaluate_model.py <path_to_your_database> --measures diversity meter syllable
35
+ ```
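+
+ Scores for each measure are printed to stdout. With writeback enabled (the default), the scores are also written back into the database file: each song container gains `pred_scores` and `target_scores` entries, roughly of the form:
+ ```json
+ {
+     "id": (int) ...,
+     "prompt": (str) ...,
+     "model_response": (str) ...,
+     "target_response": (str) ...,
+     "pred_scores": { "diversity": (float) ..., "meter": (float) ..., "syllable": (float) ... },
+     "target_scores": { "diversity": (float) ..., "meter": (float) ..., "syllable": (float) ... }
+ }
+ ```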
evaluation/encode_lines.py ADDED
@@ -0,0 +1,101 @@
1
+ import syllable_analysis as sylco
2
+ import meter_analysis as metco
3
+ import re, unidecode
4
+
5
+
6
+ def prep_encoding(text):
7
+ """
8
+ Cleans text by stripping surrounding whitespace and replacing nonstandard
+ characters with their closest ASCII counterparts.
10
+
11
+ Parameters
12
+ ----------
13
+ text : str
14
+ any text
15
+
16
+ Returns
17
+ -------
18
+ str
19
+ cleaned text
20
+ """
21
+ text = unidecode.unidecode(text).strip()
22
+
23
+ if not re.search(r"\S", text):
24
+ text = ""
25
+ return text
26
+
27
+
28
+ def encode_line_meter_count(line, to_stdout=False):
29
+ """
30
+ Encodes a song line (line of text) into a line of digit words representing
31
+ the stress of each word in the line.
32
+
33
+ Ex:
34
+ what so proudly we hailed at the twilight's last gleaming
35
+ -> 1 1 10 1 1 1 0 12 1 10
36
+
37
+ Parameters
38
+ ----------
39
+ line : str
40
+ string of words (line)
41
+ to_stdout : bool, optional
42
+ whether to print to stdout, by default False
43
+
44
+ Returns
45
+ -------
46
+ str
47
+ string of stress encodings (digits)
48
+ """
49
+ line = prep_encoding(line)
50
+
51
+ if line == "":
52
+ if to_stdout:
53
+ print(line)
54
+ return ""
55
+
56
+ line_stress_list = metco.get_line_meter(line)
57
+ out = " ".join(line_stress_list)
58
+
59
+ if to_stdout:
60
+ print(out)
61
+ return out
62
+
63
+
64
+ def encode_line_syllable_count(line, to_stdout=False):
65
+ """
66
+ Encodes a song line (line of text) into a line of digits representing
67
+ the number of syllables in each word.
68
+ Ex:
69
+ the quick brown fox jumps over the lazy dog
70
+ -> 1 1 1 1 1 1 1 2 1
71
+
72
+ Parameters
73
+ ----------
74
+ line : str
75
+ string of words (line)
76
+ to_stdout : bool, optional
77
+ whether to print to stdout, by default False
78
+
79
+ Returns
80
+ -------
81
+ str
+ string of syllable counts, one count per word
83
+ """
84
+ line = prep_encoding(line)
85
+
86
+ if line == "":
87
+ if to_stdout:
88
+ print(line)
89
+ return line
90
+
91
+ words = re.findall(r"\b\w+\b", line)
92
+ syllable_counts = [sylco.count_syllables(word) for word in words]
93
+
94
+ out = " ".join(map(str, syllable_counts))
95
+
96
+ if to_stdout:
97
+ if len(syllable_counts) > 0:
98
+ out += " " * (30 - len(out))
99
+ out += f": {sum(syllable_counts)}"
100
+ print(out)
101
+ return out
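+
+
+ if __name__ == "__main__":
+     # Illustrative sketch only: encode the docstring example line as per-word
+     # syllable counts and as stress digits (requires nltk's cmudict via
+     # meter_analysis and the Syllables.txt dictionary via syllable_analysis).
+     sample = "what so proudly we hailed at the twilight's last gleaming"
+     encode_line_syllable_count(sample, to_stdout=True)
+     encode_line_meter_count(sample, to_stdout=True)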
evaluation/evaluate_model.py ADDED
@@ -0,0 +1,106 @@
1
+ from score_acculumator import ScoreAccumulator
2
+ import argparse, json, os
3
+
4
+
5
+ def main(filepath, measures, writeback=False, outpath=""):
6
+ """
7
+ Evaluate a model using specified measures given a filepath to a database of songs.
8
+
9
+ Database must be formatted as follows:
10
+ [
11
+ #dict 1 for song 1
12
+ {
13
+ "id": (int) ...,
14
+ "prompt": (str) ..., #optional
15
+ "model_response": (str) ...,
16
+ "target_response": (str) ... #optional
17
+ },
18
+ #dict 2 for song 2
19
+ {
20
+ " "
21
+ },
22
+ .
23
+ .
24
+ .
25
+ ]
26
+
27
+ Parameters
28
+ ----------
29
+ filepath : str
30
+ path to database .json file
31
+ measures : list
32
+ list of measures to evaluate model outputs, from {'diversity','meter','syllable'}
33
+ writeback : bool, optional
34
+ Whether to write evaluation scores back into the input file (True) or to outpath (False), by default False
35
+ outpath : str, optional
36
+ path to output .json file if writeback is False, by default ""
37
+
38
+ Raises
39
+ ------
40
+ FileNotFoundError
+ if filepath does not point to an existing .json database
+ ValueError
+ if writeback is False and no output path is given
41
+ """
42
+ # read file
43
+ if os.path.exists(filepath):
44
+ with open(filepath, "r") as f:
45
+ database = json.load(f)
46
+ else:
47
+ raise FileNotFoundError(f"No such file exists: {filepath}")
48
+
49
+ if writeback:
+ outpath = filepath
+ elif not outpath:
+ raise ValueError("An output path must be provided when writeback is disabled")
54
+
55
+ # evaluate for measures
56
+ accumulator = ScoreAccumulator(
57
+ measures=measures,
58
+ require_prompt="prompt" in database[0],
59
+ require_target="target_response" in database[0],
60
+ )
61
+ accumulator.score_all_songs(database)
62
+
63
+ # print total scores
64
+ for measure in measures:
65
+ pred_score = accumulator.get_total_pred_score(measure)
66
+ target_score = accumulator.get_total_target_score(measure)
67
+ print(f"Score: pred {pred_score:2f}, target {target_score:2f} : ({measure})")
68
+
69
+ # save evaluation
70
+ out_database = []
71
+ for id, song_dict in accumulator._database.items():
72
+ song_dict["id"] = int(id)
73
+ out_database.append(song_dict)
74
+
75
+ with open(filepath, "w") as f:
76
+ f.write(json.dumps(out_database, indent=2, separators=[",", ":"]))
77
+
78
+
79
+ if __name__ == "__main__":
80
+ parser = argparse.ArgumentParser(
81
+ description="Evaluate a model given a filepath to a database of songs."
82
+ )
83
+ parser.add_argument(
84
+ "filepath", type=str, help="The path to the file to be processed"
85
+ )
86
+ parser.add_argument(
87
+ "--measures",
88
+ default=["diversity", "meter", "syllable"],
89
+ nargs="+",
90
+ help="List of measures to evaluate. From {'diversity','meter','syllable'}",
91
+ )
92
+ parser.add_argument(
93
+ "--writeback",
94
+ type=bool,
95
+ default=True,
96
+ help="Write evaluation scores back to the same dict or create a new one",
97
+ )
98
+ parser.add_argument(
99
+ "--output",
100
+ default="",
101
+ type=str,
102
+ help="The path to the write output scores if writeback is false",
103
+ )
104
+
105
+ args = parser.parse_args()
106
+ main(args.filepath, args.measures, args.writeback, args.output)
evaluation/meter_analysis.py ADDED
@@ -0,0 +1,47 @@
1
+ import nltk
2
+ import re
3
+ import syllable_analysis as sylco
4
+ import string
5
+
6
+ nltk.download("cmudict")
7
+ WORDS = nltk.corpus.cmudict.dict()
8
+
9
+
10
+ def get_stress(word):
11
+ syllables = WORDS.get(word.lower())
12
+ if syllables:
13
+ stresses = "".join(
14
+ [phoneme[-1] for phoneme in syllables[0] if phoneme[-1].isdigit()]
15
+ )
16
+ return stresses
17
+ else:
18
+ return "Word not found in CMU Pronouncing Dictionary"
19
+
20
+
21
+ def get_line_meter(line):
22
+ """Credit to https://stackoverflow.com/questions/9666838/"""
23
+
24
+ # Clean punctuation
25
+ exclude = set(string.punctuation)
26
+ exclude.remove("-")
27
+ line = "".join(ch for ch in line if ch not in exclude)
28
+
29
+ # stem words
30
+ words = re.split(r"[\s-]+", line)
31
+ stemmer = nltk.stem.SnowballStemmer("english")
32
+ stemmed_words = []
33
+ for word in words:
34
+ if word[-3:] == "ing" or word[-2:] == "ly":
35
+ stemmed_words.append(word)
36
+ else:
37
+ stemmed_words.append(stemmer.stem(word))
38
+
39
+ # get meter
40
+ line_stress = []
41
+ for word in stemmed_words:
42
+ if word not in WORDS:
43
+ line_stress.append("0" * sylco.count_syllables(word))
44
+ else:
45
+ line_stress.append(get_stress(word))
46
+
47
+ return map(str, line_stress)
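+
+
+ if __name__ == "__main__":
+     # Illustrative sketch only: stress digits for a short line, e.g.
+     # "the quick brown fox" is expected to give something like ['0', '1', '1', '1'].
+     print(list(get_line_meter("the quick brown fox")))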
evaluation/score_acculumator.py ADDED
@@ -0,0 +1,318 @@
1
+ from collections import defaultdict, Counter
2
+ import copy, pprint, re
3
+ import itertools
4
+ from tqdm import tqdm
5
+
6
+ import scoring_metrics as metrics
7
+ import text_processing_utils as text_utils
8
+
9
+
10
+ class ScoreAccumulator(object):
11
+ def __init__(
12
+ self,
13
+ measures=["diversity"],
14
+ default_matching="",
15
+ require_prompt=False,
16
+ require_target=True,
17
+ verbose=True,
18
+ ):
19
+ """
20
+ Score accumulation object for measuring model inference performance
21
+ on a dataset of songs.
22
+
23
+ Capable of measuring:
24
+ Lexical diversity : "diversity"
25
+ Meter consistency : "meter"
26
+ Syllabic consistency : "syllable"
27
+
28
+ Parameters
29
+ ----------
30
+ measures : list, optional
31
+ list of measures to calculate for each song, by default ["diversity"]
32
+ default_matching : str
33
+ string defining the default stanza or line matching for comparison
34
+ require_prompt : bool, optional
35
+ whether to require the model prompt for each song, by default False
36
+ require_target : bool, optional
37
+ whether to require a target response for each song, by default True
38
+ """
39
+ self.measures = measures
40
+ self.default_matching = default_matching
41
+ self.require_prompt = require_prompt
42
+ self.require_target = require_target
43
+ self.verbose = verbose
44
+
45
+ self.__total_score = Counter({measure: 0 for measure in self.measures})
46
+ self.__total_target_score = Counter({measure: 0 for measure in self.measures})
47
+ self._database = {}
48
+
49
+ self.measure_functions = {
50
+ "diversity": metrics.measure_lex_div,
51
+ "meter": metrics.measure_meter,
52
+ "syllable": metrics.measure_syllable,
53
+ }
54
+
55
+ supported_meas = {"diversity", "meter", "syllable"}
56
+ for measure in self.measures:
57
+ if measure not in {"diversity", "meter", "syllable"}:
58
+ raise NotImplementedError(f"Only measures {supported_meas} supported.")
59
+
60
+ def get_total_pred_score(self, measure):
61
+ """
62
+ Return current score totals of model predictions for a given
63
+ measure
64
+
65
+ Parameters
66
+ ----------
67
+ measure : str
68
+ measure to get score for
69
+
70
+ Returns
71
+ -------
72
+ float
73
+ average (by song) cumulative score
74
+ """
75
+ measure_score = self.__total_score[measure]
76
+ return measure_score / len(self._database)
77
+
78
+ def get_total_target_score(self, measure):
79
+ """
80
+ Return current score total of target responses for a given
81
+ measure
82
+
83
+ Parameters
84
+ ----------
85
+ measure : str
86
+ measure to get score for
87
+
88
+ Returns
89
+ -------
90
+ float
91
+ average (by song) cumulative score
92
+ """
93
+ measure_score = self.__total_target_score[measure]
94
+ return measure_score / len(self._database)
95
+
96
+ def get_ids(self):
97
+ return list(self._database.keys())
98
+
99
+ def get_prompts(self, ids=[]):
100
+ if ids:
101
+ return [self._database[id]["prompt"] for id in ids]
102
+ return [self._database[songid]["prompt"] for songid in self.get_ids]
103
+
104
+ def get_preds(self, ids=[]):
105
+ if ids:
106
+ return [self._database[id]["model_response"] for id in ids]
107
+ return [self._database[songid]["model_response"] for songid in self.get_ids]
108
+
109
+ def get_targets(self, ids=[]):
110
+ if ids:
111
+ return [self._database[id]["target_response"] for id in ids]
112
+ return [self._database[songid]["target_response"] for songid in self.get_ids]
113
+
114
+ def get_pred_scores(self, ids=[]):
115
+ if ids:
116
+ return [self._database[id]["pred_scores"] for id in ids]
117
+ return [self._database[songid]["pred_scores"] for songid in self.get_ids]
118
+
119
+ def get_target_scores(self, ids=[]):
120
+ if ids:
121
+ return [self._database[id]["target_scores"] for id in ids]
122
+ return [self._database[songid]["target_scores"] for songid in self.get_ids]
123
+
124
+ def score_all_songs(self, song_database):
125
+ """
126
+ Scores all songs in a song database (a list of song dictionaries) and adds
127
+ them to the accumulator's database.
128
+
129
+ Parameters
130
+ ----------
131
+ song_database : list
132
+ list of song dictionaries containing {id, prompt, model_response, and
133
+ target_response} keys
134
+ """
135
+ # TODO: add safety checks
136
+
137
+ for song_dict in tqdm(song_database, position=0):
138
+ self.score_song(song_dict)
139
+
140
+ def score_song(self, song_dict):
141
+ """
142
+ Adds song to accumulator and scores song.
143
+ Song dicts are formatted as follows:
144
+ {
145
+ id: unique id number,
146
+ prompt: prompt string, #if required
147
+ model_response: response from model,
148
+ target_response: ground truth text #if required
149
+ }
150
+
151
+ Parameters
152
+ ----------
153
+ song_dict : dict
154
+ dict containing model predictions and targets for song
155
+ """
156
+ song_dict = copy.deepcopy(song_dict)
157
+ try:
158
+ id = song_dict["id"]
159
+ pred = song_dict["model_response"]
160
+ if self.require_prompt:
+ song_dict["prompt"]  # presence check only; raises KeyError if missing
+ # fall back to an empty target so scoring still runs when targets are optional
+ target = song_dict["target_response"] if self.require_target else ""
164
+ except Exception as e:
165
+ raise AssertionError(f"Provided song dict formatted incorrectly: {e}")
166
+
167
+ assert id not in self._database, f"Multiple entries found for unique id: {id}"
168
+
169
+ scores = {
170
+ "pred_scores": {measure: 0 for measure in self.measures},
171
+ "target_scores": {measure: 0 for measure in self.measures},
172
+ }
173
+ pred_stanzas, pred_matching = self.get_stanzas(pred, get_matching=True)
174
+ target_stanzas, target_matching = self.get_stanzas(target, get_matching=True)
175
+ for measure in (
176
+ tqdm(self.measures, position=1, leave=0) if self.verbose else self.measures
177
+ ):
178
+ scores["pred_scores"][measure] = self.measure_stanzas(
179
+ pred_stanzas,
180
+ self.measure_functions[measure],
181
+ matching=pred_matching if measure != "diversity" else "",
182
+ )
183
+ scores["target_scores"][measure] = self.measure_stanzas(
184
+ target_stanzas,
185
+ self.measure_functions[measure],
186
+ matching=target_matching if measure != "diversity" else "",
187
+ )
188
+
189
+ self.__total_score += Counter(scores["pred_scores"])
190
+ self.__total_target_score += Counter(scores["target_scores"])
191
+
192
+ song_dict.pop("id")
193
+ song_dict.update(scores)
194
+ self._database[id] = song_dict
195
+
196
+ def get_stanzas(self, song_text, get_matching=False):
197
+ """
198
+ Given the raw text of a song, gets the stanza paragraphs according to
199
+ their in-text label as {hook, verse, chorus, bridge, intro, outro}
200
+
201
+ Parameters
202
+ ----------
203
+ song_text : str
204
+ raw song text
205
+ get_matching : bool, optional,
206
+ whether to return a matching
207
+
208
+ Returns
209
+ -------
210
+ list
+ stanzas as lists of lines, stripped of their in-text stanza identifiers
+ str, optional
+ the matching string, returned only when get_matching is True
212
+ """
213
+ stanza_pairs = text_utils.get_stanzas(song_text)
214
+
215
+ matching_map = {}
216
+ matching_str = ""
217
+ match_idx = 0 # NOTE: using ints prohibits > 10 keywords per song
218
+
219
+ stanzas = []
220
+ for i, (kword, stanza) in enumerate(stanza_pairs):
221
+ assert match_idx < 10, "Stanza keywords are too complex, please simplify"
222
+ stanzas.append(
223
+ [n for n in stanza.split("\n") if len(re.sub(r"\s+", "", n)) > 0]
224
+ )
225
+
226
+ if i == 0:
227
+ matching_map[kword] = str(match_idx)
228
+ matching_str = str(match_idx)
229
+ match_idx += 1
230
+ elif kword in matching_map:
231
+ matching_str += "-" + matching_map[kword]
232
+ else:
233
+ matching_map[kword] = str(match_idx)
234
+ matching_str += "-" + matching_map[kword]
235
+ match_idx += 1
236
+
237
+ if get_matching:
238
+ return stanzas, matching_str
239
+ else:
240
+ return stanzas
241
+
242
+ def build_matches(self, stanzas, matching=""):
243
+ """
244
+ Builds matchings between stanzas given a matching string. If no matching
245
+ string provided, matchings are just the stanzas themselves.
246
+
247
+ Parameters
248
+ ----------
249
+ stanzas : list
250
+ list of input strings (stanzas) for different stanzas in a song
251
+ matching : str, optional
252
+ string defining the stanza or line matching for comparison, by default ""
253
+
254
+ Returns
255
+ -------
256
+ list
257
+ list of "matched" stanza pairings
258
+ """
259
+ if matching == "":
260
+ return [(s,) for s in stanzas]
261
+ matching = matching.split("-")
262
+ assert len(matching) == len(stanzas), "Incorrect matching to stanzas"
263
+
264
+ matches_dict = defaultdict(lambda: [])
265
+ pairings = []
266
+
267
+ # map stanzas to their matching char
268
+ for i, p in enumerate(stanzas):
269
+ matches_dict[matching[i]].append(p)
270
+
271
+ # build pairings
272
+ for matches in matches_dict.values():
273
+ for p1, p2 in itertools.combinations(matches, 2):
274
+ pairings.append([p1, p2])
275
+
276
+ return pairings
277
+
278
+ def measure_stanzas(self, stanzas, measure_function, matching=""):
279
+ """
280
+ Computes and returns a measure of a list of stanzas (string segments made up of numerous
281
+ lines). If measure is a consistency score, a comparison stanza is required.
282
+
283
+ Comparison metrics will be computed along the cartesian product of stanzas with matching
284
+ keys.
285
+
286
+ For example, given:
287
+ stanzas = ["line1", "line2", "line3", "line4"]
288
+ matching = "a-b-a-b"
289
+ -> Comparisons computed between [("line1", "line3"), ("line2", "line4")]
290
+
291
+ Parameters
292
+ ----------
293
+ stanzas : list
294
+ list of input strings (stanzas) for different stanzas in a song
295
+ measure_function : function
296
+ callable function that takes in a pairing and returns a float
297
+ matching : str
298
+ string defining the stanza or line matching for comparison
299
+
300
+ Returns
301
+ -------
302
+ float
303
+ averaged (across measurings) measure score
304
+ """
305
+ pairings = self.build_matches(stanzas, matching)
306
+
307
+ if len(pairings) == 0:
308
+ # NOTE: dangerous, since if no pairings the song structure could be fine..
309
+ return 0
310
+
311
+ score = 0
312
+
313
+ for pairing in pairings:
314
+ score += measure_function(*pairing)
315
+
316
+ final_score = score / len(pairings)
317
+
318
+ return final_score
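+
+
+ if __name__ == "__main__":
+     # Illustrative sketch only (hypothetical song text); assumes the optional
+     # scoring dependencies (lexical_diversity, etc.) are installed.
+     acc = ScoreAccumulator(measures=["diversity"], require_target=False)
+     acc.score_song(
+         {
+             "id": 0,
+             "model_response": "[verse]\nhello world\nhello again\n\n[chorus]\nla la la",
+         }
+     )
+     print(acc.get_pred_scores(ids=[0]))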
evaluation/scoring_metrics.py ADDED
@@ -0,0 +1,234 @@
1
+ import re
2
+ import math
3
+ import unidecode
4
+ import copy
5
+ from pprint import pp
6
+
7
+ from lexical_diversity import lex_div as ld
8
+ from dtaidistance import dtw
9
+ from fastdtw import fastdtw
10
+ import numpy as np
11
+
12
+ import encode_lines as encode
13
+
14
+
15
+ def measure_lex_div(p1, mode="mtld"):
16
+ """
17
+ Measure of lexical diversity of a set of lines
18
+
19
+ Parameters
20
+ ----------
21
+ p1 : list(str)
22
+ list of lines of paragraph or song as strings
23
+ mode : str, optional
24
+ lexical diversity metric, by default "mtld"
25
+
26
+ Returns
27
+ -------
28
+ float
29
+ lexical diversity score
30
+ """
31
+ lines = " ".join(p1)
32
+ flem_tokens = ld.flemmatize(unidecode.unidecode(lines))
33
+ if mode == "avg_ttr":
34
+ lex_div = ld.ttr(flem_tokens) / len(flem_tokens)
35
+ elif mode == "mtld":
36
+ lex_div = ld.mtld(flem_tokens)
37
+ elif mode == "avg_mtld":
38
+ lex_div = ld.mtld_ma_wrap(flem_tokens)
39
+ return lex_div
40
+
41
+
42
+ def measure_meter(p1, p2):
43
+ """
44
+ Measure: meter consistency
45
+
46
+ Meter consistency between two paragraphs is measured as the Levenshtein
+ edit distance between their concatenated stress encodings: 0 means the
+ stress patterns match exactly, and larger values indicate greater
+ divergence (lower is more consistent).
51
+
52
+ Parameters
53
+ ----------
54
+ p1 : list
55
+ paragraph as a list of line strings
56
+ p2 : list
57
+ comparison paragraph as a list of line strings
58
+
59
+ Returns
60
+ -------
61
+ float
62
+ score of meter consistency
63
+ """
64
+ # encode into syllabic stress indicators
65
+ p1 = [encode.encode_line_meter_count(line) for line in p1]
66
+ p2 = [encode.encode_line_meter_count(line) for line in p2]
67
+
68
+ p1_string = "".join(line_stress for line_stress in p1).replace(" ", "").strip()
69
+ p2_string = "".join(line_stress for line_stress in p2).replace(" ", "").strip()
70
+
71
+ p1_string = re.sub(r"\s+", "", p1_string, flags=re.UNICODE)
72
+ p2_string = re.sub(r"\s+", "", p2_string, flags=re.UNICODE)
73
+
74
+ edit_dist = levenshteinDistance(p1_string, p2_string)
75
+ return edit_dist # min(100, 100 / (edit_dist + 2e-7)) # math.exp(-0.1 * avg_edit_distance)
76
+
77
+
78
+ def measure_syllable(p1: list, p2: list):
79
+ """
80
+ Measure: syllable consistency
81
+
82
+ We perform an altered version of the syllable DTW described in the methods
+ section of https://staff.aist.go.jp/m.goto/PAPER/TIEICE202309watanabe.pdf.
+ Recognizing that entire lines are sometimes out of "sync" between paragraphs,
+ we first find the line offset that minimizes the DTW between the two
+ paragraphs, then accumulate the absolute per-line difference in syllable
+ counts at that offset.
+
+ A score of 0 means the matched lines have identical syllable counts; larger
+ values indicate greater divergence (lower is more consistent).
93
+
94
+ Parameters
95
+ ----------
96
+ p1 : list
97
+ paragraph as a list of line strings
98
+ p2 : list
99
+ comparison paragraph as a list of line strings
100
+
101
+ Returns
102
+ -------
103
+ float
104
+ syllabic consistency score (sum of per-line syllable differences at the best offset; lower is better)
105
+ """
106
+ score = 0
107
+
108
+ # encode p1 and p2 into syllable counts
109
+ enc_fn = encode.encode_line_syllable_count
110
+
111
+ def encode_paragraph(par):
112
+ out = []
113
+ for line in par:
114
+ encoded_line = enc_fn(line).split(" ")
115
+ encoded_line = [word for word in encoded_line if word]
116
+ if len(encoded_line) == 0:
117
+ out.append([0])
118
+ else:
119
+ out.append(list(map(int, encoded_line)))
120
+ return out
121
+
122
+ p1, p2 = encode_paragraph(p1), encode_paragraph(p2)
123
+
124
+ # find and shift/crop p1 and p2 to the best matching offset
125
+ _, p1_c, p2_c = min_dtw_offset(p1, p2, return_cropped=True, use_short_window=True)
126
+
127
+ for i in range(len(p1_c)):
128
+ score += abs(sum(p1_c[i]) - sum(p2_c[i]))
129
+
130
+ return score # min(100, 100 / (score + 2e-7)) # math.exp(-0.1 * score)
131
+
132
+
133
+ def min_dtw_offset(p1, p2, return_cropped=True, use_short_window=True):
134
+ """
135
+ Use a sliding window (of lines) to find the line index offset which minimizes
136
+ the syllabic DTW between two paragraphs.
137
+
138
141
+ Parameters
142
+ ----------
143
+ p1 : list
144
+ list of lists of song lines encoded as syllable counts
145
+ p2 : list
146
+ list of song lines encoded as syllable counts
147
+ return_cropped : bool, optional
148
+ whether to return the cropped min window for p1 and p2 or just the offset, by
149
+ default True
150
+ use_short_window : bool, optional
151
+ whether to compare at smaller or larger paragraphs length, by default true
152
+ Note: if False, p1 and p2 will not be the same length
153
+
154
+ Returns
155
+ -------
156
+ int
157
+ dtw minimizing offset value of smaller signal in larger signal
158
+ list
159
+ (p1) cropped or uncropped p1, squared with zeros
160
+ list
161
+ (p2) cropped or uncropped p2, squared with zeros
162
+ """
163
+ switched = False
164
+ if len(p1) < len(p2):
165
+ switched = True
166
+ p1, p2 = p2, p1
167
+
168
+ # crop or pad to same number of lines
169
+ win_length = len(p2) if use_short_window else len(p1)
170
+
171
+ # square by padding line lengths with zeros
172
+ sig1, sig2 = copy.deepcopy(p1), copy.deepcopy(p2)
173
+ max_val = max(max(len(l) for l in sig1), max(len(l) for l in sig2))
174
+ for i in range(len(p1)):
175
+ sig1[i] += [0] * (max_val - len(sig1[i]))
176
+ for i in range(len(p2)):
177
+ sig2[i] += [0] * (max_val - len(sig2[i]))
178
+ sig1, sig2 = np.sum(np.array(sig1), axis=1), np.sum(np.array(sig2), axis=1)
179
+
180
+ # compute dtw with a sliding window to find the best offset
181
+ sig1, sig2 = np.pad(sig1, (win_length, win_length)), sig2.astype(np.double)
182
+ min_idx, min_error = 0, np.inf
183
+ for j in range(sig1.shape[0] - win_length):
184
+ sig1_win = sig1[j : j + win_length].astype(np.double)  # window of win_length lines starting at offset j
185
+
186
+ error = dtw.distance_fast(sig1_win, sig2, inner_dist="euclidean")
187
+
188
+ if error < min_error:
189
+ min_error = error
190
+ min_idx = j
191
+
192
+ # crop to window
193
+ if return_cropped:
194
+ p1 = [[0]] * win_length + p1 + [[0]] * win_length
195
+ p1 = p1[min_idx : min_idx + win_length]
196
+
197
+ if switched:
198
+ p1, p2 = p2, p1
199
+
200
+ return min_idx - win_length, p1, p2
201
+
202
+
203
+ def levenshteinDistance(s1, s2):
204
+ """
205
+ DP edit distance implementation
206
+ credit to https://stackoverflow.com/questions/2460177
207
+
208
+ Parameters
209
+ ----------
210
+ s1 : str
211
+ first string to compare
212
+ s2 : str
213
+ second string to compare
214
+
215
+ Returns
216
+ -------
217
+ int
218
+ edit distance between two strings (absolute)
219
+ """
220
+ if len(s1) > len(s2):
221
+ s1, s2 = s2, s1
222
+
223
+ distances = range(len(s1) + 1)
224
+ for i2, c2 in enumerate(s2):
225
+ distances_ = [i2 + 1]
226
+ for i1, c1 in enumerate(s1):
227
+ if c1 == c2:
228
+ distances_.append(distances[i1])
229
+ else:
230
+ distances_.append(
231
+ 1 + min((distances[i1], distances[i1 + 1], distances_[-1]))
232
+ )
233
+ distances = distances_
234
+ return distances[-1]
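+
+
+ if __name__ == "__main__":
+     # Illustrative sketch only, with two hypothetical stanzas (lists of lines).
+     # Both pairs of lines carry the same syllable counts, so measure_syllable
+     # is expected to return 0; measure_meter returns the stress edit distance.
+     # Requires Syllables.txt (for syllable counts) and nltk's cmudict.
+     verse_a = ["the quick brown fox", "jumps over the lazy dog"]
+     verse_b = ["a slow green frog", "hops under the sleepy log"]
+     print(measure_syllable(verse_a, verse_b))
+     print(measure_meter(verse_a, verse_b))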
evaluation/syllable_analysis.py ADDED
@@ -0,0 +1,207 @@
1
+ import re
2
+ from num2words import num2words
3
+
4
+ SYL_DICT_PATH = "Syllables.txt"
5
+ SYL_DICT = None
6
+
7
+
8
+ def count_syllables(word):
9
+ global SYL_DICT
10
+ if SYL_DICT is None:
11
+ SYL_DICT = get_syllables_dict(SYL_DICT_PATH)
12
+
13
+ if word in SYL_DICT:
14
+ return SYL_DICT.get(word)["num_syllables"]
15
+ else:
16
+ return sylco(word)
17
+
18
+
19
+ def get_syllables_dict(path):
20
+ with open(path, "r") as f:
21
+ lines = f.readlines()
22
+
23
+ duplicates = {}
24
+ syllable_dict = {}
25
+ for line in lines:
26
+ word, syllables = line.split("=")
27
+ syllables = [s for s in re.split("[^a-zA-Z]+", syllables) if len(s) > 0]
28
+
29
+ if word in syllable_dict:
30
+ duplicates[word] = 1
31
+ else:
32
+ syllable_dict[word] = {
33
+ "num_syllables": len(syllables),
34
+ "suffixes": syllables,
35
+ }
36
+
37
+ return syllable_dict
38
+
39
+
40
+ def replace_numbers_with_words(text):
41
+ def replace_match(match):
42
+ number = int(match.group())
43
+ return num2words(number)
44
+
45
+ return re.sub(r"\b\d+\b", replace_match, text)
46
+
47
+
48
+ def sylco(word):
49
+ """Credit to https://stackoverflow.com/questions/46759492/"""
50
+ word = word.lower()
51
+ word = replace_numbers_with_words(word)
52
+
53
+ # exception_add are words that need extra syllables
54
+ # exception_del are words that need less syllables
55
+
56
+ exception_add = ["serious", "crucial"]
57
+ exception_del = ["fortunately", "unfortunately"]
58
+
59
+ co_one = [
60
+ "cool",
61
+ "coach",
62
+ "coat",
63
+ "coal",
64
+ "count",
65
+ "coin",
66
+ "coarse",
67
+ "coup",
68
+ "coif",
69
+ "cook",
70
+ "coign",
71
+ "coiffe",
72
+ "coof",
73
+ "court",
74
+ ]
75
+ co_two = ["coapt", "coed", "coinci"]
76
+
77
+ pre_one = ["preach"]
78
+
79
+ syls = 0 # added syllable number
80
+ disc = 0 # discarded syllable number
81
+
82
+ # 1) if letters < 3 : return 1
83
+ if len(word) <= 3:
84
+ syls = 1
85
+ return syls
86
+
87
+ # 2) if doesn't end with "ted" or "tes" or "ses" or "ied" or "ies", discard "es" and "ed" at the end.
88
+ # if it has only 1 vowel or 1 set of consecutive vowels, discard. (like "speed", "fled" etc.)
89
+
90
+ if word[-2:] == "es" or word[-2:] == "ed":
91
+ doubleAndtripple_1 = len(re.findall(r"[eaoui][eaoui]", word))
92
+ if doubleAndtripple_1 > 1 or len(re.findall(r"[eaoui][^eaoui]", word)) > 1:
93
+ if (
94
+ word[-3:] == "ted"
95
+ or word[-3:] == "tes"
96
+ or word[-3:] == "ses"
97
+ or word[-3:] == "ied"
98
+ or word[-3:] == "ies"
99
+ ):
100
+ pass
101
+ else:
102
+ disc += 1
103
+
104
+ # 3) discard trailing "e", except where ending is "le"
105
+
106
+ le_except = [
107
+ "whole",
108
+ "mobile",
109
+ "pole",
110
+ "male",
111
+ "female",
112
+ "hale",
113
+ "pale",
114
+ "tale",
115
+ "sale",
116
+ "aisle",
117
+ "whale",
118
+ "while",
119
+ ]
120
+
121
+ if word[-1:] == "e":
122
+ if word[-2:] == "le" and word not in le_except:
123
+ pass
124
+
125
+ else:
126
+ disc += 1
127
+
128
+ # 4) check if consecutive vowels exists, triplets or pairs, count them as one.
129
+
130
+ doubleAndtripple = len(re.findall(r"[eaoui][eaoui]", word))
131
+ tripple = len(re.findall(r"[eaoui][eaoui][eaoui]", word))
132
+ disc += doubleAndtripple + tripple
133
+
134
+ # 5) count remaining vowels in word.
135
+ numVowels = len(re.findall(r"[eaoui]", word))
136
+
137
+ # 6) add one if starts with "mc"
138
+ if word[:2] == "mc":
139
+ syls += 1
140
+
141
+ # 7) add one if ends with "y" but is not surrouned by vowel
142
+ if word[-1:] == "y" and word[-2] not in "aeoui":
143
+ syls += 1
144
+
145
+ # 8) add one if "y" is surrounded by non-vowels and is not in the last word.
146
+
147
+ for i, j in enumerate(word):
148
+ if j == "y":
149
+ if (i != 0) and (i != len(word) - 1):
150
+ if word[i - 1] not in "aeoui" and word[i + 1] not in "aeoui":
151
+ syls += 1
152
+
153
+ # 9) if starts with "tri-" or "bi-" and is followed by a vowel, add one.
154
+
155
+ if word[:3] == "tri" and word[3] in "aeoui":
156
+ syls += 1
157
+
158
+ if word[:2] == "bi" and word[2] in "aeoui":
159
+ syls += 1
160
+
161
+ # 10) if ends with "-ian", should be counted as two syllables, except for "-tian" and "-cian"
162
+
163
+ if word[-3:] == "ian":
164
+ # and (word[-4:] != "cian" or word[-4:] != "tian") :
165
+ if word[-4:] == "cian" or word[-4:] == "tian":
166
+ pass
167
+ else:
168
+ syls += 1
169
+
170
+ # 11) if starts with "co-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
171
+
172
+ if word[:2] == "co" and word[2] in "eaoui":
173
+ if word[:4] in co_two or word[:5] in co_two or word[:6] in co_two:
174
+ syls += 1
175
+ elif word[:4] in co_one or word[:5] in co_one or word[:6] in co_one:
176
+ pass
177
+ else:
178
+ syls += 1
179
+
180
+ # 12) if starts with "pre-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
181
+
182
+ if word[:3] == "pre" and word[3] in "eaoui":
183
+ if word[:6] in pre_one:
184
+ pass
185
+ else:
186
+ syls += 1
187
+
188
+ # 13) check for "-n't" and cross match with dictionary to add syllable.
189
+
190
+ negative = ["doesn't", "isn't", "shouldn't", "couldn't", "wouldn't"]
191
+
192
+ if word[-3:] == "n't":
193
+ if word in negative:
194
+ syls += 1
195
+ else:
196
+ pass
197
+
198
+ # 14) Handling the exceptional words.
199
+
200
+ if word in exception_del:
201
+ disc += 1
202
+
203
+ if word in exception_add:
204
+ syls += 1
205
+
206
+ # calculate the output
207
+ return numVowels - disc + syls
evaluation/text_processing_utils.py ADDED
@@ -0,0 +1,124 @@
1
+ import re
2
+ from unidecode import unidecode
3
+
4
+ STANZA_KEYWORDS = {
5
+ "pre-hook",
6
+ "post-hook",
7
+ "pre-drop",
8
+ "pre-chorus",
9
+ "post-chorus",
10
+ "pre-coro",
11
+ "post-coro",
12
+ "breakdown",
13
+ "drop",
14
+ "hook",
15
+ "verse",
16
+ "chorus",
17
+ "bridge",
18
+ "intro",
19
+ "outro",
20
+ "refrain",
21
+ "guitar solo",
22
+ "solo",
23
+ "letra de",
24
+ "instrumental",
25
+ "verso",
26
+ "coro",
27
+ "couplet",
28
+ "pont",
29
+ "ponte",
30
+ "interlude",
31
+ "part",
32
+ "refrão",
33
+ }
34
+
35
+
36
+ def get_kword(delin):
37
+ """Gets kword readable string from matched delineator"""
38
+ delin = delin.split(":")[0]
39
+ delin = re.sub(r"\d+", "", delin)
40
+ return delin.strip()
41
+
42
+
43
+ def clean_song(text):
44
+ """
45
+ Custom rules for "cleaning" the song data to disambiguate stanza
46
+ delineators
47
+
48
+ Parameters
49
+ ----------
50
+ text : str
51
+ raw song data
52
+
53
+ Returns
54
+ -------
55
+ str
56
+ cleaned song data
57
+ """
58
+ text = unidecode(text).lower()
59
+
60
+ # Replace all "[?]", "[chuckles]", "[laughs]", "[Mumbling]" with "nan"
61
+ text = re.sub(r"\[\?\]|\[chuckles\]|\[laughs\]|\[Mumbling\]", "nan", text)
62
+
63
+ # Replace all "]:" with "]\n"
64
+ text = re.sub(r"\]:", "]\n", text)
65
+
66
+ # Replace all "[X]" with "nan" where X is any number of "." characters
67
+ text = re.sub(r"\[\.*?\]", "nan", text)
68
+
69
+ # For any remaining bracketed texts replace with kword readable string and add a newline
70
+ def replace_bracketed(match):
71
+ kword = get_kword(match.group(1))
72
+ return f"\n[{kword}]\n"
73
+
74
+ text = re.sub(r"\[([\s\S]*?)\]", replace_bracketed, text)
75
+
76
+ return text
77
+
78
+
79
+ def get_stanzas(text):
80
+ """
81
+ Process song as raw text to return a list of stanza - keyword pairings.
82
+ If keyword match is unidentified, pairs with entire match rather than just the
83
+ known keyword
84
+
85
+ Parameters
86
+ ----------
87
+ text : str
88
+ raw song text
89
+
90
+ Returns
91
+ -------
92
+ list(tuple)
93
+ list of tuple (keyword, stanza_text) pairings
94
+ """
95
+ stanzas = []
96
+ text = clean_song(text)
97
+
98
+ # Find all identifiers inside brackets
99
+ matches = re.findall(r"\[([\s\S]*?)\]", text)
100
+ split_text = re.split(r"\[(?:[\s\S]*?)\]", text)[1:]
101
+
102
+ # pair text in stanzas with existing keyword or new match
103
+ for i, match in enumerate(matches):
104
+ matched_with_kword = False
105
+
106
+ for keyword in STANZA_KEYWORDS:
107
+ if match.startswith(keyword):
108
+ stanzas.append((keyword, split_text[i]))
109
+ matched_with_kword = True
110
+ break
111
+
112
+ if not matched_with_kword:
113
+ stanzas.append((match, split_text[i]))
114
+
115
+ # remove empty stanzas
116
+ stanzas = [(keyword, stanza) for keyword, stanza in stanzas if stanza.strip()]
117
+
118
+ return stanzas
119
+
120
+
121
+ def find_surrounding_chars(text, pattern, before=50, after=50):
122
+ """Helpful testing utility"""
123
+ regex_pattern = f".{{0,{before}}}{pattern}.{{0,{after}}}"
124
+ return re.findall(regex_pattern, text)
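+
+
+ if __name__ == "__main__":
+     # Illustrative sketch only, on a tiny hypothetical song text: prints
+     # (keyword, stanza_text) pairs such as ('verse', ...) and ('chorus', ...).
+     song = "[Verse 1]\nhello world\n\n[Chorus]\nla la la\n"
+     print(get_stanzas(song))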