Upload 10 files
Browse files- 1_batch_xml2abc.py +54 -0
- 2_data_preprocess.py +181 -0
- 3_batch_abc2xml.py +56 -0
- abc2xml (1).py +0 -0
- abc2xml (2).py +0 -0
- abc2xml.py +0 -0
- data.py +136 -0
- train-gen (1).py +325 -0
- train-gen.py +374 -0
- xml2abc.py +1609 -0
1_batch_xml2abc.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ORI_FOLDER = "" # Replace with the path to your folder containing XML (.xml, .mxl, .musicxml) files
|
| 2 |
+
DES_FOLDER = "" # The script will convert the musicxml files and output standard abc notation files to this folder
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import math
|
| 6 |
+
import random
|
| 7 |
+
import subprocess
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
from multiprocessing import Pool
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def convert_xml2abc(file_list):
    """Convert a list of MusicXML-family files to ABC notation via xml2abc.py.

    Each file is converted by running ``xml2abc.py`` in a subprocess and
    capturing its stdout. Successful conversions are written to DES_FOLDER
    as ``<basename>.abc``; failures (empty output or a raised exception)
    are appended to ``logs/xml2abc_error_log.txt``.

    Args:
        file_list: iterable of paths to ``.xml`` / ``.mxl`` / ``.musicxml`` files.
    """
    cmd = 'python xml2abc.py -d 8 -c 6 -x '
    # Create the destination folder once, not on every loop iteration.
    os.makedirs(DES_FOLDER, exist_ok=True)
    for file in tqdm(file_list):
        filename = os.path.basename(file)
        try:
            # NOTE(review): shell=True with manual quoting breaks on file
            # names containing a double quote; an argument list with
            # shell=False would be safer. Kept as a string command for
            # compatibility with the sibling abc2xml script.
            p = subprocess.Popen(cmd + '"' + file + '"', stdout=subprocess.PIPE, shell=True)
            output = p.communicate()[0].decode('utf-8')

            if output == '':
                # Empty stdout means xml2abc produced nothing; log and move on.
                with open("logs/xml2abc_error_log.txt", "a", encoding="utf-8") as f:
                    f.write(file + '\n')
                continue

            out_path = os.path.join(DES_FOLDER, filename.rsplit('.', 1)[0] + '.abc')
            with open(out_path, 'w', encoding='utf-8') as f:
                f.write(output)
        except Exception as e:
            with open("logs/xml2abc_error_log.txt", "a", encoding="utf-8") as f:
                f.write(file + ' ' + str(e) + '\n')
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
if __name__ == '__main__':
    os.makedirs("logs", exist_ok=True)

    # Collect every MusicXML-family file under ORI_FOLDER, recursively,
    # normalizing path separators to '/'.
    file_list = []
    for root, dirs, files in os.walk(os.path.abspath(ORI_FOLDER)):
        for file in files:
            if file.endswith((".mxl", ".xml", ".musicxml")):
                file_list.append(os.path.join(root, file).replace("\\", "/"))

    # Shuffle so each worker receives a roughly even mix of file sizes,
    # then deal the files round-robin: one sub-list per CPU core.
    # (The unused `num_files` local from the original has been removed.)
    random.shuffle(file_list)
    num_processes = os.cpu_count()
    file_lists = [file_list[i::num_processes] for i in range(num_processes)]

    # The context manager terminates worker processes on exit.
    with Pool(processes=num_processes) as pool:
        pool.map(convert_xml2abc, file_lists)
|
2_data_preprocess.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ORI_FOLDER = '' # Replace with the path to your folder containing standard ABC notation files
|
| 2 |
+
INTERLEAVED_FOLDER = '' # Output interleaved ABC notation files to this folder
|
| 3 |
+
AUGMENTED_FOLDER = '' # Output key-augmented and rest-omitted ABC notation files to this folder
|
| 4 |
+
EVAL_SPLIT = 0.1 # The ratio of eval data
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
import json
|
| 9 |
+
import shutil
|
| 10 |
+
import random
|
| 11 |
+
from tqdm import tqdm
|
| 12 |
+
from abctoolkit.utils import (
|
| 13 |
+
remove_information_field,
|
| 14 |
+
remove_bar_no_annotations,
|
| 15 |
+
Quote_re,
|
| 16 |
+
Barlines,
|
| 17 |
+
extract_metadata_and_parts,
|
| 18 |
+
extract_global_and_local_metadata,
|
| 19 |
+
extract_barline_and_bartext_dict)
|
| 20 |
+
from abctoolkit.convert import unidecode_abc_lines
|
| 21 |
+
from abctoolkit.rotate import rotate_abc
|
| 22 |
+
from abctoolkit.check import check_alignment_unrotated
|
| 23 |
+
from abctoolkit.transpose import Key2index, transpose_an_abc_text
|
| 24 |
+
|
| 25 |
+
# Create all output folders up front: one flat folder for the interleaved
# scores, plus one sub-folder per target key under AUGMENTED_FOLDER for the
# key-transposed, rest-reduced copies.
os.makedirs(INTERLEAVED_FOLDER, exist_ok=True)
os.makedirs(AUGMENTED_FOLDER, exist_ok=True)
for key in Key2index.keys():
    key_folder = os.path.join(AUGMENTED_FOLDER, key)
    os.makedirs(key_folder, exist_ok=True)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def abc_preprocess_pipeline(abc_path):
    """Clean, validate and key-augment one standard ABC notation file.

    Pipeline: drop blank lines -> transliterate to ASCII -> strip
    identifying/irrelevant header fields -> drop bar-number annotations ->
    scrub malformed quoted text -> verify bar alignment -> write an
    interleaved (rotated) copy to INTERLEAVED_FOLDER -> for every key in
    Key2index, write a transposed, rest-reduced copy to
    AUGMENTED_FOLDER/<key>/.

    Returns:
        (abc_name, ori_key): base name of the file and its original key.

    Raises:
        Exception: when voices disagree on bar count, or any abctoolkit
        parsing/check step fails. Callers treat this as "skip this file".
    """

    with open(abc_path, 'r', encoding='utf-8') as f:
        abc_lines = f.readlines()

    # delete blank lines
    abc_lines = [line for line in abc_lines if line.strip() != '']

    # unidecode: transliterate non-ASCII characters to ASCII equivalents
    abc_lines = unidecode_abc_lines(abc_lines)

    # clean information field: remove titles, composers, lyrics, MIDI hints, etc.
    abc_lines = remove_information_field(abc_lines=abc_lines, info_fields=['X:', 'T:', 'C:', 'W:', 'w:', 'Z:', '%%MIDI'])

    # delete bar number annotations
    abc_lines = remove_bar_no_annotations(abc_lines)

    # delete escaped quotes (\") from music lines; header lines ("X:", "K:",
    # ...) and comment lines ("%") are left untouched
    for i, line in enumerate(abc_lines):
        if re.search(r'^[A-Za-z]:', line) or line.startswith('%'):
            continue
        else:
            if r'\"' in line:
                abc_lines[i] = abc_lines[i].replace(r'\"', '')

    # delete text annotations with quotes that contain barline symbols
    # (these would corrupt the bar-splitting step below)
    for i, line in enumerate(abc_lines):
        quote_contents = re.findall(Quote_re, line)
        for quote_content in quote_contents:
            for barline in Barlines:
                if barline in quote_content:
                    line = line.replace(quote_content, '')
                    abc_lines[i] = line

    # check bar alignment: every voice must contain the same number of bars
    try:
        _, bar_no_equal_flag, _ = check_alignment_unrotated(abc_lines)
        if not bar_no_equal_flag:
            print(abc_path, 'Unequal bar number')
            raise Exception
    except:
        # NOTE(review): the bare except also converts any abctoolkit parse
        # error into a generic Exception; callers only catch-and-skip, so
        # the original error detail is lost here.
        raise Exception

    # deal with text annotations: remove too long text annotations; remove consecutive non-alphabet/number characters
    for i, line in enumerate(abc_lines):
        quote_matches = re.findall(r'"[^"]*"', line)
        for match in quote_matches:
            if match == '""':
                line = line.replace(match, '')
            # '^'/'_' right after the opening quote mark a text annotation
            if match[1] in ['^', '_']:
                sub_string = match
                # collapse runs of the same non-alphanumeric character to one
                pattern = r'([^a-zA-Z0-9])\1+'
                sub_string = re.sub(pattern, r'\1', sub_string)
                if len(sub_string) <= 40:
                    line = line.replace(match, sub_string)
                else:
                    line = line.replace(match, '')
        abc_lines[i] = line

    abc_name = os.path.splitext(os.path.split(abc_path)[-1])[0]

    # transpose: read the original key from the global metadata
    # ('none' is normalized to C)
    metadata_lines, part_text_dict = extract_metadata_and_parts(abc_lines)
    global_metadata_dict, local_metadata_dict = extract_global_and_local_metadata(metadata_lines)
    if global_metadata_dict['K'][0] == 'none':
        global_metadata_dict['K'][0] = 'C'
    ori_key = global_metadata_dict['K'][0]

    # write the interleaved (rotated) representation in the original key
    interleaved_abc = rotate_abc(abc_lines)
    interleaved_path = os.path.join(INTERLEAVED_FOLDER, abc_name + '.abc')
    with open(interleaved_path, 'w') as w:
        w.writelines(interleaved_abc)

    # one augmented copy per target key
    for key in Key2index.keys():
        transposed_abc_text = transpose_an_abc_text(abc_lines, key)
        transposed_abc_lines = transposed_abc_text.split('\n')
        transposed_abc_lines = list(filter(None, transposed_abc_lines))
        transposed_abc_lines = [line + '\n' for line in transposed_abc_lines]

        # rest reduction: per bar, keep only voices that play something
        # other than rest symbols (z/x/Z/X)
        metadata_lines, prefix_dict, left_barline_dict, bar_text_dict, right_barline_dict = \
            extract_barline_and_bartext_dict(transposed_abc_lines)
        reduced_abc_lines = metadata_lines
        for i in range(len(bar_text_dict['V:1'])):
            line = ''
            for symbol in prefix_dict.keys():
                valid_flag = False
                for char in bar_text_dict[symbol][i]:
                    # "valid" bar: contains any note letter that is not a rest
                    if char.isalpha() and not char in ['Z', 'z', 'X', 'x']:
                        valid_flag = True
                        break
                if valid_flag:
                    if i == 0:
                        # the first bar keeps its voice prefix and left barline
                        part_patch = '[' + symbol + ']' + prefix_dict[symbol] + left_barline_dict[symbol][0] + bar_text_dict[symbol][0] + right_barline_dict[symbol][0]
                    else:
                        part_patch = '[' + symbol + ']' + bar_text_dict[symbol][i] + right_barline_dict[symbol][i]
                    line += part_patch
            line += '\n'
            reduced_abc_lines.append(line)

        reduced_abc_name = abc_name + '_' + key
        reduced_abc_path = os.path.join(AUGMENTED_FOLDER, key, reduced_abc_name + '.abc')

        with open(reduced_abc_path, 'w', encoding='utf-8') as w:
            w.writelines(reduced_abc_lines)

    return abc_name, ori_key
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
if __name__ == '__main__':

    # Pre-process every ABC file in ORI_FOLDER, collecting an index entry
    # (augmented path + original key) for each file that survives.
    data = []
    file_list = os.listdir(ORI_FOLDER)
    for file in tqdm(file_list):
        ori_abc_path = os.path.join(ORI_FOLDER, file)
        try:
            abc_name, ori_key = abc_preprocess_pipeline(ori_abc_path)
        except Exception:
            # `except Exception` rather than a bare `except:` so Ctrl-C /
            # SystemExit still abort the whole run instead of being
            # swallowed per-file.
            print(ori_abc_path, 'failed to pre-process.')
            continue

        data.append({
            'path': os.path.join(AUGMENTED_FOLDER, abc_name),
            'key': ori_key
        })

    # Shuffle before splitting so train/eval draw from the same distribution.
    random.shuffle(data)
    eval_data = data[: int(EVAL_SPLIT * len(data))]
    train_data = data[int(EVAL_SPLIT * len(data)):]

    data_index_path = AUGMENTED_FOLDER + '.jsonl'
    eval_index_path = AUGMENTED_FOLDER + '_eval.jsonl'
    train_index_path = AUGMENTED_FOLDER + '_train.jsonl'

    # Write each split as JSONL: one JSON object per line.
    for index_path, split in ((data_index_path, data),
                              (eval_index_path, eval_data),
                              (train_index_path, train_data)):
        with open(index_path, 'w', encoding='utf-8') as w:
            for d in split:
                w.write(json.dumps(d) + '\n')
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
|
3_batch_abc2xml.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ORI_FOLDER = "" # Replace with the path to your folder containing standard/interleaved abc files
|
| 2 |
+
DES_FOLDER = "" # The script will convert the abc files and output musicxml files to this folder
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import math
|
| 6 |
+
import random
|
| 7 |
+
import subprocess
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
from multiprocessing import Pool
|
| 10 |
+
|
| 11 |
+
def convert_abc2xml(file_list):
    """Convert a list of ABC files to MusicXML via abc2xml.py.

    Each file is converted by running ``abc2xml.py`` in a subprocess and
    capturing its stdout. Successful conversions are written to DES_FOLDER
    as ``<basename>.xml``; failures (empty output or a raised exception)
    are appended to ``logs/abc2xml_error_log.txt``.

    Args:
        file_list: iterable of paths to ``.abc`` files.
    """
    cmd = 'python abc2xml.py '
    # Create the destination folder once, not on every loop iteration.
    os.makedirs(DES_FOLDER, exist_ok=True)
    for file in tqdm(file_list):
        # os.path.basename instead of the original manual '/' split.
        filename = os.path.basename(file)
        try:
            # NOTE(review): shell=True with manual quoting breaks on file
            # names containing a double quote; kept for consistency with
            # the xml2abc batch script.
            p = subprocess.Popen(cmd + '"' + file + '"', stdout=subprocess.PIPE, shell=True)
            output = p.communicate()[0].decode('utf-8')

            if output == '':
                # Empty stdout means abc2xml produced nothing; log and move on.
                with open("logs/abc2xml_error_log.txt", "a", encoding="utf-8") as f:
                    f.write(file + '\n')
                continue

            output_path = os.path.join(DES_FOLDER, ".".join(filename.split(".")[:-1]) + ".xml")
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(output)
        except Exception as e:
            # (The dead `pass` after this write in the original was removed.)
            with open("logs/abc2xml_error_log.txt", "a", encoding="utf-8") as f:
                f.write(file + ' ' + str(e) + '\n')
|
| 34 |
+
|
| 35 |
+
if __name__ == '__main__':
    os.makedirs("logs", exist_ok=True)

    # Collect every .abc file under ORI_FOLDER, recursively, with
    # normalized '/' separators.
    file_list = []
    for root, dirs, files in os.walk(ORI_FOLDER):
        for file in files:
            if not file.endswith(".abc"):
                continue
            file_list.append(os.path.join(root, file).replace("\\", "/"))

    # Shuffle, then split into one contiguous chunk per CPU core.
    random.shuffle(file_list)
    num_processes = os.cpu_count()  # hoisted: was re-queried four times
    file_lists = []
    for i in range(num_processes):
        start_idx = int(math.floor(i * len(file_list) / num_processes))
        end_idx = int(math.floor((i + 1) * len(file_list) / num_processes))
        file_lists.append(file_list[start_idx:end_idx])

    # Use Pool as a context manager so workers are terminated on exit
    # (the original Pool was never closed or joined).
    with Pool(processes=num_processes) as pool:
        pool.map(convert_abc2xml, file_lists)
|
abc2xml (1).py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
abc2xml (2).py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
abc2xml.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gt_feature_folder = '../clamp2/feature/schubert_interleaved'
|
| 2 |
+
output_feature_folder = '../clamp2/feature/weights_notagen_schubert-RL2_beta_0.1_lambda_10_p_size_16_p_length_1024_p_layers_20_h_size_1280_lr_1e-06_k_9_p_0.9_temp_1.2'
|
| 3 |
+
output_original_abc_folder = '../output/original/weights_notagen_schubert-RL2_beta_0.1_lambda_10_p_size_16_p_length_1024_p_layers_20_h_size_1280_lr_1e-06_k_9_p_0.9_temp_1.2'
|
| 4 |
+
output_interleaved_abc_folder = '../output/interleaved/weights_notagen_schubert-RL2_beta_0.1_lambda_10_p_size_16_p_length_1024_p_layers_20_h_size_1280_lr_1e-06_k_9_p_0.9_temp_1.2'
|
| 5 |
+
data_index_path = 'schubert_RL3.json'
|
| 6 |
+
data_select_portion = 0.1
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import json
|
| 11 |
+
import random
|
| 12 |
+
import numpy as np
|
| 13 |
+
from config import *
|
| 14 |
+
from abctoolkit.check import check_alignment_rotated, check_alignment_unrotated
|
| 15 |
+
from abctoolkit.rotate import unrotate_abc
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def load_npy_files(folder_path_list):
    """Load the first row (index 0) of every ``.npy`` file in the list.

    Paths that do not end in ``.npy`` are silently skipped.

    Args:
        folder_path_list: iterable of file paths.

    Returns:
        list of numpy arrays, one per loaded file.
    """
    return [np.load(path)[0] for path in folder_path_list if path.endswith('.npy')]
|
| 29 |
+
|
| 30 |
+
def average_npy(npy_list):
    """Element-wise mean across a list of equally-shaped numpy arrays."""
    stacked = np.asarray(npy_list)
    return stacked.mean(axis=0)
|
| 35 |
+
|
| 36 |
+
def cosine_similarity(vec1, vec2):
    """Cosine similarity between two 1-D numpy arrays.

    Returns 0.0 when either vector has zero norm. (The original divided
    unconditionally, producing NaN for zero vectors; NaN compares False
    everywhere and silently poisons the downstream similarity ranking.)
    """
    norm_vec1 = np.linalg.norm(vec1)
    norm_vec2 = np.linalg.norm(vec2)
    if norm_vec1 == 0 or norm_vec2 == 0:
        return 0.0

    dot_product = np.dot(vec1, vec2)
    return dot_product / (norm_vec1 * norm_vec2)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def generate_preference_dict():
    """Build a preference (chosen/rejected) index from feature similarities.

    Ranks every generated sheet by cosine similarity of its feature vector
    to the averaged ground-truth feature. The top `data_select_portion`
    that also pass alignment, header-grouping and plagiarism filters become
    'chosen'; the bottom `data_select_portion` of the ranking become
    'rejected'. The result is written as JSON to `data_index_path`.
    """

    # Average feature vector over the ground-truth corpus.
    gt_feature_paths = []
    for gt_feature_file in os.listdir(gt_feature_folder):
        gt_feature_paths.append(os.path.join(gt_feature_folder, gt_feature_file))
    gt_features = load_npy_files(gt_feature_paths)
    gt_avg_feature = average_npy(gt_features)

    # Similarity of each generated sheet to the ground-truth average,
    # keyed by file name without its 4-char extension ('.npy').
    output_feature_sim_dict = {}
    for file in os.listdir(output_feature_folder):
        output_feature_path = os.path.join(output_feature_folder, file)
        output_feature = np.load(output_feature_path)[0]
        sim = cosine_similarity(gt_avg_feature, output_feature)
        output_feature_sim_dict[file[:-4]] = sim

    # Number of sheets to select on each side of the ranking.
    threshold = int(len(output_feature_sim_dict) * data_select_portion)
    sorted_output_files = sorted(output_feature_sim_dict.keys(), key=lambda item: output_feature_sim_dict[item], reverse=True)

    # Walk down the ranking until `threshold` sheets pass all checks.
    chosen_index = 0
    i = 0
    chosen_abc_paths = []
    while chosen_index < threshold and i < len(sorted_output_files):

        chosen_flag = True

        file = sorted_output_files[i]
        output_interleaved_abc_path = os.path.join(output_interleaved_abc_folder, file + '.abc')

        with open(output_interleaved_abc_path, 'r') as f:
            abc_lines = f.readlines()

        # check aligment: bars must agree across voices after unrotating;
        # any parse failure also disqualifies the sheet
        try:
            abc_lines_unrotated = unrotate_abc(abc_lines)
            barline_equal_flag, bar_no_equal_flag, bar_dur_equal_flag = check_alignment_unrotated(abc_lines_unrotated)
            if not (barline_equal_flag and bar_no_equal_flag and bar_dur_equal_flag):
                raise Exception
        except:
            chosen_flag = False

        # check header: sheets where staves for the same instrument are not grouped together are excluded from the chosen set.
        appeared_inst = set()
        last_inst = ''
        for line in abc_lines:
            if line.startswith('V:') and 'nm=' in line:
                match = re.search(r'nm="([^"]+)"', line)
                if match:
                    inst = match.group(1)
                    # seeing an instrument again after a different one means
                    # its staves are not contiguous
                    if inst != last_inst and inst in appeared_inst:
                        chosen_flag = False
                        break
                    else:
                        last_inst = inst
                        appeared_inst.add(inst)

        # check plagiarism: sheets with sim > 0.95 are excluded
        # NOTE(review): every ground-truth feature file is reloaded from disk
        # for every candidate — O(candidates x gt_files) np.load calls;
        # caching gt features (already loaded above) would speed this up.
        output_feature_path = os.path.join(output_feature_folder, file + '.npy')
        output_feature = np.load(output_feature_path)[0]
        for gt_feature_file in os.listdir(gt_feature_folder):
            gt_feature_path = os.path.join(gt_feature_folder, gt_feature_file)
            gt_feature = np.load(gt_feature_path)[0]
            sim = cosine_similarity(output_feature, gt_feature)
            if sim > 0.95:
                chosen_flag = False
                break

        if chosen_flag:
            original_abc_path = os.path.join(output_original_abc_folder, file + '.abc')
            chosen_abc_paths.append(original_abc_path)
            chosen_index += 1
        else:
            print(file, 'skipped')

        i += 1

    # Bottom `threshold` of the ranking form the rejected side.
    # NOTE(review): chosen and rejected can overlap if many top-ranked
    # candidates are filtered out and the scan runs deep — worth confirming
    # this is intended.
    rejected_abc_paths = [os.path.join(output_original_abc_folder, file + '.abc') for file in sorted_output_files[-threshold:]]
    preference_dict = {'chosen': chosen_abc_paths, 'rejected': rejected_abc_paths}

    with open(data_index_path, 'w') as w:
        json.dump(preference_dict, w, indent=4)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
if __name__ == '__main__':

    # Build and write the chosen/rejected preference index.
    generate_preference_dict()
|
| 135 |
+
|
| 136 |
+
|
train-gen (1).py
ADDED
|
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import gc
|
| 3 |
+
import time
|
| 4 |
+
import math
|
| 5 |
+
import json
|
| 6 |
+
import wandb
|
| 7 |
+
import torch
|
| 8 |
+
import random
|
| 9 |
+
import numpy as np
|
| 10 |
+
from utils import *
|
| 11 |
+
from config import *
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
from copy import deepcopy
|
| 14 |
+
from torch.cuda.amp import autocast, GradScaler
|
| 15 |
+
from torch.utils.data import Dataset, DataLoader
|
| 16 |
+
from transformers import GPT2Config, LlamaConfig, get_scheduler, get_constant_schedule_with_warmup
|
| 17 |
+
import torch.distributed as dist
|
| 18 |
+
from torch.nn.parallel import DistributedDataParallel as DDP
|
| 19 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 20 |
+
|
| 21 |
+
# Set up distributed training.
# Rank/world-size come from torchrun-style environment variables; when they
# are absent we fall back to a single process.
world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else 0
local_rank = int(os.environ['LOCAL_RANK']) if 'LOCAL_RANK' in os.environ else 0

if world_size > 1:
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)
    # NOTE(review): the trailing `if world_size > 1 else None` is redundant —
    # this line is already inside `if world_size > 1`.
    dist.init_process_group(backend='nccl') if world_size > 1 else None
else:
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Set random seed, offset by rank so per-process shuffles differ while
# remaining reproducible.
seed = 0 + global_rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

batch_size = BATCH_SIZE

# Tokenizer turning ABC text into fixed-size patches (defined in utils).
patchilizer = Patchilizer()

# Patch-level (encoder) transformer config.
# NOTE(review): vocab_size=1 presumably because patch embeddings are fed in
# directly rather than looked up by token id — confirm against the model.
patch_config = GPT2Config(num_hidden_layers=PATCH_NUM_LAYERS,
                          max_length=PATCH_LENGTH,
                          max_position_embeddings=PATCH_LENGTH,
                          n_embd=HIDDEN_SIZE,
                          num_attention_heads=HIDDEN_SIZE//64,
                          vocab_size=1)
# Character-level (decoder) config; vocab_size=128 matches the ASCII range.
char_config = GPT2Config(num_hidden_layers=CHAR_NUM_LAYERS,
                         max_length=PATCH_SIZE+1,
                         max_position_embeddings=PATCH_SIZE+1,
                         hidden_size=HIDDEN_SIZE,
                         num_attention_heads=HIDDEN_SIZE//64,
                         vocab_size=128)

model = NotaGenLMHeadModel(encoder_config=patch_config, decoder_config=char_config)

model = model.to(device)

# print parameter number
print("Parameter Number: "+str(sum(p.numel() for p in model.parameters() if p.requires_grad)))

if world_size > 1:
    model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

# AMP gradient scaler for mixed-precision training.
scaler = GradScaler()
is_autocast = True
optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def clear_unused_tensors():
    """Best-effort release of stray CUDA tensors between training steps.

    Detaches and drops references to CUDA tensors that belong to neither the
    model parameters nor the optimizer state, then forces a GC pass and
    empties the CUDA cache.

    Fix: the original referenced ``weakref`` without importing it anywhere,
    so the resulting NameError was swallowed by the bare ``except`` and the
    function silently did nothing. The import is now local and the except
    clause is narrowed to ``Exception``.
    """
    import weakref  # not in this module's top-level imports; needed below

    gc.disable()  # keep GC from mutating object lists while we scan them
    try:
        # Tensor ids owned by the model (unwrap the DDP wrapper if present).
        if hasattr(model, "module"):
            model_tensors = {id(p) for p in model.module.parameters()}
        else:
            model_tensors = {id(p) for p in model.parameters()}

        # Tensor ids held in optimizer state (e.g. Adam moment buffers).
        optimizer_tensors = {
            id(state)
            for state_dict in optimizer.state.values()
            for state in state_dict.values()
            if isinstance(state, torch.Tensor)  # only tensors matter
        }

        # All CUDA tensors currently tracked by the garbage collector.
        tensors = [obj for obj in gc.get_objects() if isinstance(obj, torch.Tensor) and obj.is_cuda]

        # Weak references avoid keeping candidates alive during iteration.
        tensor_refs = [weakref.ref(tensor) for tensor in tensors]

        for tensor_ref in tensor_refs:
            tensor = tensor_ref()  # dereference; None if already collected
            if tensor is not None and id(tensor) not in model_tensors and id(tensor) not in optimizer_tensors:
                tensor.detach_()  # drop autograd graph edges
                del tensor  # release our reference
    except Exception:
        # Best-effort cleanup: never let a failure here abort training.
        pass
    finally:
        gc.enable()  # re-enable garbage collection
        gc.collect()  # force a collection pass
        torch.cuda.empty_cache()  # return freed blocks to the CUDA driver
|
| 110 |
+
|
| 111 |
+
def collate_batch(input_batches):
    """Pad a list of (patches, masks) pairs into two batched tensors on `device`.

    Sequences are right-padded with 0 to the longest sequence in the batch.
    """
    patches, masks = zip(*input_batches)
    padded_patches = torch.nn.utils.rnn.pad_sequence(patches, batch_first=True, padding_value=0)
    padded_masks = torch.nn.utils.rnn.pad_sequence(masks, batch_first=True, padding_value=0)
    return padded_patches.to(device), padded_masks.to(device)
|
| 118 |
+
|
| 119 |
+
def split_into_minibatches(input_patches, input_masks, minibatch_size):
    """Slice the paired sequences into consecutive chunks of `minibatch_size`.

    Returns a list of (patches_chunk, masks_chunk) tuples; the last chunk may
    be shorter when the length is not a multiple of `minibatch_size`.
    """
    return [
        (input_patches[start:start + minibatch_size], input_masks[start:start + minibatch_size])
        for start in range(0, len(input_patches), minibatch_size)
    ]
|
| 127 |
+
|
| 128 |
+
class NotaGenDataset(Dataset):
    """Map-style dataset of key-augmented ABC pieces.

    Each entry of ``filenames`` is a dict whose ``'path'`` points at the
    augmented-data location for one piece. A random target key is sampled on
    every access, so the same index yields a different transposition across
    epochs (a form of on-the-fly data augmentation).
    """

    def __init__(self, filenames):
        # List of index entries loaded from the JSONL file (dicts with 'path').
        self.filenames = filenames

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):

        filepath = self.filenames[idx]['path']

        # Sample a random target key; the preprocessing step wrote the
        # transposed file as <parent>/<key>/<name>_<key>.abc.
        key = random.choice(['C#', 'F#', 'B', 'E', 'A', 'D', 'G', 'C', 'F', 'Bb', 'Eb', 'Ab', 'Db', 'Gb', 'Cb'])

        folder = os.path.dirname(filepath)
        name = os.path.split(filepath)[-1]
        des_filepath = os.path.join(folder, key, name + '_' + key + '.abc')

        with open(des_filepath, 'r', encoding='utf-8') as f:
            abc_text = f.read()

        # Tokenize the ABC text; presumably encode_train returns a sequence
        # of patch token ids — confirm against the Patchilizer implementation.
        file_bytes = patchilizer.encode_train(abc_text)
        # Attention mask is all-ones here; padding happens later in
        # collate_batch.
        file_masks = [1] * len(file_bytes)

        file_bytes = torch.tensor(file_bytes, dtype=torch.long)
        file_masks = torch.tensor(file_masks, dtype=torch.long)

        return file_bytes, file_masks
|
| 155 |
+
|
| 156 |
+
def process_one_batch(batch):
    """Forward one (patches, masks) minibatch and return its loss.

    Under multi-GPU training the per-rank losses are summed onto rank 0,
    divided by world_size, and broadcast back so every rank ends up logging
    the same averaged value.
    """
    input_patches, input_masks = batch
    loss = model(input_patches, input_masks).loss

    # Reduce the loss on GPU 0
    if world_size > 1:
        loss = loss.unsqueeze(0)
        # NOTE(review): dist.reduce only guarantees the sum on dst=0; the
        # division runs on all ranks but the subsequent broadcast from rank 0
        # overwrites local values, so the end state is consistent.
        dist.reduce(loss, dst=0)
        loss = loss / world_size
        dist.broadcast(loss, src=0)

    return loss
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
# do one epoch for training
|
| 171 |
+
def train_epoch(epoch):
    """Run one training epoch; return the mean accumulated loss per batch.

    Each batch is split into ACCUMULATION_STEPS minibatches whose scaled
    losses are backpropagated individually (gradient accumulation) before a
    single AMP-scaled optimizer step.
    """
    tqdm_train_set = tqdm(train_set)
    total_train_loss = 0
    iter_idx = 1
    model.train()
    # Global step counter for wandb, continuing from previous epochs.
    train_steps = (epoch-1)*len(train_set)

    for batch in tqdm_train_set:
        minibatches = split_into_minibatches(batch[0], batch[1], BATCH_SIZE//ACCUMULATION_STEPS)
        for minibatch in minibatches:
            with autocast():
                # Divide so the summed gradients equal the full-batch gradient.
                loss = process_one_batch(minibatch) / ACCUMULATION_STEPS
            scaler.scale(loss).backward()
            total_train_loss += loss.item()
        # One optimizer step per full batch (after all minibatch backwards).
        scaler.step(optimizer)
        scaler.update()

        lr_scheduler.step()
        model.zero_grad(set_to_none=True)
        tqdm_train_set.set_postfix({str(global_rank)+'_train_loss': total_train_loss / iter_idx})
        train_steps += 1

        # Log the training loss to wandb
        if global_rank==0 and WANDB_LOGGING:
            wandb.log({"train_loss": total_train_loss / iter_idx}, step=train_steps)

        iter_idx += 1
        # Periodic best-effort cleanup of stray CUDA tensors.
        if iter_idx % 1000 == 0:
            clear_unused_tensors()

    return total_train_loss / (iter_idx-1)
|
| 202 |
+
|
| 203 |
+
# do one epoch for eval
def eval_epoch():
    """Run one evaluation pass over the eval set and return the mean loss."""
    progress = tqdm(eval_set)
    running_loss = 0
    iter_idx = 1
    model.eval()

    # Evaluate data for one epoch
    for batch in progress:
        minibatches = split_into_minibatches(
            batch[0], batch[1], BATCH_SIZE // ACCUMULATION_STEPS)
        for minibatch in minibatches:
            with torch.no_grad():
                loss = process_one_batch(minibatch) / ACCUMULATION_STEPS
            running_loss += loss.item()
        progress.set_postfix({str(global_rank) + '_eval_loss': running_loss / iter_idx})
        iter_idx += 1
    return running_loss / (iter_idx - 1)
|
| 221 |
+
|
| 222 |
+
# train and eval
if __name__ == "__main__":

    # Initialize wandb
    if WANDB_LOGGING and global_rank == 0:
        wandb.login(key=WANDB_KEY)
        wandb.init(project="notagen", name=WANDB_NAME)

    # Load data: one JSON record per line (path + key metadata).
    with open(DATA_TRAIN_INDEX_PATH, "r", encoding="utf-8") as f:
        print("Loading Data...")
        train_files = [json.loads(line) for line in f]

    with open(DATA_EVAL_INDEX_PATH, "r", encoding="utf-8") as f:
        print("Loading Data...")
        eval_files = [json.loads(line) for line in f]

    train_batch_nums = len(train_files) // batch_size
    eval_batch_nums = len(eval_files) // batch_size

    random.shuffle(train_files)
    random.shuffle(eval_files)

    # Drop the trailing partial batch so every batch is full.
    train_files = train_files[:train_batch_nums * batch_size]
    eval_files = eval_files[:eval_batch_nums * batch_size]

    train_set = NotaGenDataset(train_files)
    eval_set = NotaGenDataset(eval_files)

    # FIX: shard by the global rank.  The original passed local_rank, which
    # duplicates data across nodes in multi-node runs.
    train_sampler = DistributedSampler(train_set, num_replicas=world_size, rank=global_rank)
    eval_sampler = DistributedSampler(eval_set, num_replicas=world_size, rank=global_rank)

    train_set = DataLoader(train_set, batch_size=batch_size, collate_fn=collate_batch,
                           sampler=train_sampler, shuffle=(train_sampler is None))
    # FIX: the eval loader checked train_sampler instead of eval_sampler.
    eval_set = DataLoader(eval_set, batch_size=batch_size, collate_fn=collate_batch,
                          sampler=eval_sampler, shuffle=(eval_sampler is None))

    model = model.to(device)
    # FIX: create the optimizer BEFORE the scheduler.  The original built the
    # scheduler around the module-level optimizer and then rebound `optimizer`
    # to a fresh AdamW, so warmup stepped an optimizer that was never used and
    # the live optimizer trained at a constant, un-warmed learning rate.
    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)
    lr_scheduler = get_constant_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=1000)

    if LOAD_FROM_CHECKPOINT and os.path.exists(WEIGHTS_PATH):
        # Load the checkpoint on CPU first to avoid a transient GPU spike.
        checkpoint = torch.load(WEIGHTS_PATH, map_location='cpu')

        # Load into a CPU clone, then copy the state back into the live model.
        if torch.cuda.device_count() > 1:
            # DDP-wrapped model: go through model.module.
            cpu_model = deepcopy(model.module)
            cpu_model.load_state_dict(checkpoint['model'])
            model.module.load_state_dict(cpu_model.state_dict())
        else:
            cpu_model = deepcopy(model)
            cpu_model.load_state_dict(checkpoint['model'])
            model.load_state_dict(cpu_model.state_dict())
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_sched'])
        pre_epoch = checkpoint['epoch']
        best_epoch = checkpoint['best_epoch']
        min_eval_loss = checkpoint['min_eval_loss']
        print("Successfully Loaded Checkpoint from Epoch %d" % pre_epoch)
        checkpoint = None  # release checkpoint memory before training

    else:
        pre_epoch = 0
        best_epoch = 0
        min_eval_loss = 100

    for epoch in range(1 + pre_epoch, NUM_EPOCHS + 1):
        train_sampler.set_epoch(epoch)
        eval_sampler.set_epoch(epoch)
        print('-' * 21 + "Epoch " + str(epoch) + '-' * 21)
        train_loss = train_epoch(epoch)
        eval_loss = eval_epoch()
        if global_rank == 0:
            with open(LOGS_PATH, 'a') as f:
                f.write("Epoch " + str(epoch) + "\ntrain_loss: " + str(train_loss)
                        + "\neval_loss: " + str(eval_loss)
                        + "\ntime: " + time.asctime(time.localtime(time.time())) + "\n\n")
            # Save only when the eval loss improves.
            if eval_loss < min_eval_loss:
                best_epoch = epoch
                min_eval_loss = eval_loss
                checkpoint = {
                    'model': model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_sched': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'best_epoch': best_epoch,
                    'min_eval_loss': min_eval_loss
                }
                torch.save(checkpoint, WEIGHTS_PATH)

        if world_size > 1:
            dist.barrier()

    if global_rank == 0:
        print("Best Eval Epoch : " + str(best_epoch))
        print("Min Eval Loss : " + str(min_eval_loss))
|
| 325 |
+
|
train-gen.py
ADDED
|
@@ -0,0 +1,374 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import gc
import time
import math
import json
import wandb
import torch
import random
import numpy as np
from abctoolkit.transpose import Key2index, Key2Mode
from utils import *
from config import *
from tqdm import tqdm
from copy import deepcopy
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import Dataset, DataLoader
from transformers import GPT2Config, LlamaConfig, get_scheduler, get_constant_schedule_with_warmup
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler

# Reverse lookup tables.  Indices 1 and 11 are excluded from Index2Key because
# their enharmonic spelling (Db/C#, B/Cb) is sampled at transposition time.
Index2Key = {index: key for key, index in Key2index.items() if index not in [1, 11]}
Mode2Key = {mode: key for key, mode_list in Key2Mode.items() for mode in mode_list}

# Distributed setup: fall back to single-process defaults when the torchrun
# environment variables are absent.
world_size = int(os.environ.get('WORLD_SIZE', '1'))
global_rank = int(os.environ.get('RANK', '0'))
local_rank = int(os.environ.get('LOCAL_RANK', '0'))

if world_size > 1:
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)
    dist.init_process_group(backend='nccl')
else:
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Deterministic, per-rank seeding.
seed = 0 + global_rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

batch_size = BATCH_SIZE

patchilizer = Patchilizer()

# Patch-level encoder config (one embedding per patch, hence vocab_size=1).
patch_config = GPT2Config(
    num_hidden_layers=PATCH_NUM_LAYERS,
    max_length=PATCH_LENGTH,
    max_position_embeddings=PATCH_LENGTH,
    n_embd=HIDDEN_SIZE,
    num_attention_heads=HIDDEN_SIZE // 64,
    vocab_size=1,
)
# Character-level decoder config (byte vocabulary).
char_config = GPT2Config(
    num_hidden_layers=CHAR_NUM_LAYERS,
    max_length=PATCH_SIZE + 1,
    max_position_embeddings=PATCH_SIZE + 1,
    hidden_size=HIDDEN_SIZE,
    num_attention_heads=HIDDEN_SIZE // 64,
    vocab_size=128,
)

model = NotaGenLMHeadModel(encoder_config=patch_config, decoder_config=char_config)
model = model.to(device)

# print parameter number
print("Parameter Number: " + str(sum(p.numel() for p in model.parameters() if p.requires_grad)))

if world_size > 1:
    model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

scaler = GradScaler()
is_autocast = True
optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def clear_unused_tensors():
    """Best-effort release of CUDA tensors no longer owned by the model or
    the optimizer state.

    This is an opportunistic memory sweep run periodically during training;
    failures are logged-by-omission (swallowed) so they never interrupt the
    training loop.
    """
    # FIX: `weakref` was never imported anywhere in this file, so the original
    # body raised NameError on every call and the bare `except: pass` silently
    # swallowed it — the sweep freed nothing.
    import weakref

    gc.disable()  # Temporarily disable garbage collection while scanning
    try:
        # Ids of tensors owned by the model parameters.
        if hasattr(model, "module"):
            model_tensors = {id(p) for p in model.module.parameters()}
        else:
            model_tensors = {id(p) for p in model.parameters()}

        # Ids of tensors held in the optimizer state.
        optimizer_tensors = {
            id(state)
            for state_dict in optimizer.state.values()
            for state in state_dict.values()
            if isinstance(state, torch.Tensor)  # Ensure only tensors are considered
        }

        # All CUDA tensors currently tracked by the garbage collector.
        tensors = [obj for obj in gc.get_objects() if isinstance(obj, torch.Tensor) and obj.is_cuda]

        # Weak references avoid keeping candidates alive while scanning.
        tensor_refs = [weakref.ref(tensor) for tensor in tensors]

        for tensor_ref in tensor_refs:
            tensor = tensor_ref()  # Dereference the weak reference
            if tensor is not None and id(tensor) not in model_tensors and id(tensor) not in optimizer_tensors:
                tensor.detach_()  # Detach from the computation graph
                del tensor  # Drop our strong reference
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any other error keeps the sweep silent.
        pass

    finally:
        gc.enable()  # Re-enable garbage collection
        gc.collect()  # Force a collection pass
        torch.cuda.empty_cache()  # Return cached CUDA memory to the driver
|
| 114 |
+
|
| 115 |
+
def collate_batch(input_batches):
    """Pad a list of (patches, masks) samples into batch tensors on `device`.

    Both sequences are right-padded with zeros to the longest sample in the
    batch.
    """
    patch_seqs, mask_seqs = zip(*input_batches)
    padded_patches = torch.nn.utils.rnn.pad_sequence(
        patch_seqs, batch_first=True, padding_value=0)
    padded_masks = torch.nn.utils.rnn.pad_sequence(
        mask_seqs, batch_first=True, padding_value=0)

    return padded_patches.to(device), padded_masks.to(device)
|
| 122 |
+
|
| 123 |
+
def split_into_minibatches(input_patches, input_masks, minibatch_size):
    """Slice parallel patch/mask sequences into fixed-size minibatch pairs.

    The final pair may be shorter when the length is not a multiple of
    ``minibatch_size``.
    """
    pairs = []
    total = len(input_patches)
    start = 0
    while start < total:
        stop = start + minibatch_size
        pairs.append((input_patches[start:stop], input_masks[start:stop]))
        start = stop
    return pairs
|
| 131 |
+
|
| 132 |
+
class NotaGenDataset(Dataset):
    """Dataset over ABC-notation files, each served in a randomly drawn
    transposed key within +/-3 semitones of the original."""

    def __init__(self, filenames):
        self.filenames = filenames

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        entry = self.filenames[idx]
        filepath = entry['path']
        ori_key = Mode2Key[entry['key']]

        # Sample a destination key index from a triangular probability
        # distribution centred on the original key (weights 1..4..1 / 16).
        ori_key_index = Key2index[ori_key]
        available_index = [(ori_key_index + offset) % 12 for offset in range(-3, 4)]
        index_prob = [1/16, 2/16, 3/16, 4/16, 3/16, 2/16, 1/16]
        index_prob_range = [0] + [sum(index_prob[:i + 1]) for i in range(len(index_prob))]
        random_number = random.random()
        for i in range(len(index_prob_range) - 1):
            if index_prob_range[i] <= random_number < index_prob_range[i + 1]:
                des_key_index = available_index[i]
                # Ambiguous pitch classes get a randomly sampled enharmonic
                # spelling; the rest come from the fixed lookup table.
                if des_key_index == 1:
                    des_key = 'Db' if random.random() < 0.8 else 'C#'
                elif des_key_index == 11:
                    des_key = 'B' if random.random() < 0.8 else 'Cb'
                elif des_key_index == 6:
                    des_key = 'F#' if random.random() < 0.5 else 'Gb'
                else:
                    des_key = Index2Key[des_key_index]

        # Transposed variants live in a per-key subfolder next to the source
        # file, named "<name>_<key>.abc".
        folder = os.path.dirname(filepath)
        name = os.path.split(filepath)[-1]
        des_filepath = os.path.join(folder, des_key, name + '_' + des_key + '.abc')

        with open(des_filepath, 'r', encoding='utf-8') as f:
            abc_text = f.read()

        file_bytes = patchilizer.encode_train(abc_text)
        file_masks = [1] * len(file_bytes)

        file_bytes = torch.tensor(file_bytes, dtype=torch.long)
        file_masks = torch.tensor(file_masks, dtype=torch.long)

        return file_bytes, file_masks
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def process_one_batch(batch):
    """Forward one (patches, masks) pair through the model and return its loss.

    Under distributed training the per-rank losses are summed onto rank 0,
    scaled to an average, and broadcast back so every rank holds the same
    scalar.
    """
    patches, masks = batch
    loss = model(patches, masks).loss

    if world_size > 1:
        # Average across ranks: gather on rank 0, rescale, then share.
        loss = loss.unsqueeze(0)
        dist.reduce(loss, dst=0)
        loss = loss / world_size
        dist.broadcast(loss, src=0)

    return loss
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
# do one epoch for training
def train_epoch(epoch):
    """Train the model for one epoch and return the mean training loss.

    Gradient accumulation: each DataLoader batch is split into
    ``ACCUMULATION_STEPS`` minibatches whose scaled losses are backpropagated
    before a single optimizer step.
    """
    progress = tqdm(train_set)
    running_loss = 0
    iter_idx = 1
    model.train()
    train_steps = (epoch - 1) * len(train_set)

    for batch in progress:
        minibatches = split_into_minibatches(
            batch[0], batch[1], BATCH_SIZE // ACCUMULATION_STEPS)
        for minibatch in minibatches:
            with autocast():
                loss = process_one_batch(minibatch) / ACCUMULATION_STEPS
            scaler.scale(loss).backward()
            running_loss += loss.item()
        scaler.step(optimizer)
        scaler.update()

        lr_scheduler.step()
        model.zero_grad(set_to_none=True)
        progress.set_postfix({str(global_rank) + '_train_loss': running_loss / iter_idx})
        train_steps += 1

        # Log the training loss to wandb
        if global_rank == 0 and WANDB_LOGGING:
            wandb.log({"train_loss": running_loss / iter_idx}, step=train_steps)

        iter_idx += 1
        # Periodic opportunistic CUDA memory sweep.
        if iter_idx % 1000 == 0:
            clear_unused_tensors()

    return running_loss / (iter_idx - 1)
|
| 224 |
+
|
| 225 |
+
# do one epoch for eval
def eval_epoch():
    """Run one evaluation pass over the eval set and return the mean loss."""
    progress = tqdm(eval_set)
    running_loss = 0
    iter_idx = 1
    model.eval()

    # Evaluate data for one epoch
    for batch in progress:
        minibatches = split_into_minibatches(
            batch[0], batch[1], BATCH_SIZE // ACCUMULATION_STEPS)
        for minibatch in minibatches:
            with torch.no_grad():
                loss = process_one_batch(minibatch) / ACCUMULATION_STEPS
            running_loss += loss.item()
        progress.set_postfix({str(global_rank) + '_eval_loss': running_loss / iter_idx})
        iter_idx += 1
    return running_loss / (iter_idx - 1)
|
| 243 |
+
|
| 244 |
+
# train and eval
if __name__ == "__main__":

    # Initialize wandb
    if WANDB_LOGGING and global_rank == 0:
        wandb.login(key=WANDB_KEY)
        wandb.init(project="notagen", name=WANDB_NAME)

    # Load data: one JSON record per line (path + key metadata).
    with open(DATA_TRAIN_INDEX_PATH, "r", encoding="utf-8") as f:
        print("Loading Data...")
        train_files = [json.loads(line) for line in f]

    with open(DATA_EVAL_INDEX_PATH, "r", encoding="utf-8") as f:
        print("Loading Data...")
        eval_files = [json.loads(line) for line in f]

    # Fall back to an automatic split when no eval index was provided.
    if len(eval_files) == 0:
        train_files, eval_files = split_data(train_files)

    train_batch_nums = len(train_files) // batch_size
    eval_batch_nums = len(eval_files) // batch_size

    random.shuffle(train_files)
    random.shuffle(eval_files)

    # Drop the trailing partial batch so every batch is full.
    train_files = train_files[:train_batch_nums * batch_size]
    eval_files = eval_files[:eval_batch_nums * batch_size]

    train_set = NotaGenDataset(train_files)
    eval_set = NotaGenDataset(eval_files)

    # FIX: shard by the global rank.  The original passed local_rank, which
    # duplicates data across nodes in multi-node runs.
    train_sampler = DistributedSampler(train_set, num_replicas=world_size, rank=global_rank)
    eval_sampler = DistributedSampler(eval_set, num_replicas=world_size, rank=global_rank)

    train_set = DataLoader(train_set, batch_size=batch_size, collate_fn=collate_batch,
                           sampler=train_sampler, shuffle=(train_sampler is None))
    # FIX: the eval loader checked train_sampler instead of eval_sampler.
    eval_set = DataLoader(eval_set, batch_size=batch_size, collate_fn=collate_batch,
                          sampler=eval_sampler, shuffle=(eval_sampler is None))

    model = model.to(device)
    # FIX: create the optimizer BEFORE the scheduler.  The original built the
    # scheduler around the module-level optimizer and then rebound `optimizer`
    # to a fresh AdamW, so warmup stepped an optimizer that was never used and
    # the live optimizer trained at a constant, un-warmed learning rate.
    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)
    lr_scheduler = get_constant_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=1000)

    if not LOAD_FROM_CHECKPOINT:
        # Fine-tuning path: initialise weights from the pre-trained model but
        # start optimizer/scheduler/epoch counters from scratch.
        if os.path.exists(PRETRAINED_PATH):
            # Load the pre-trained checkpoint on CPU to avoid a GPU spike.
            checkpoint = torch.load(PRETRAINED_PATH, map_location='cpu')

            # Load into a CPU clone, then copy the state into the live model.
            if torch.cuda.device_count() > 1:
                # DDP-wrapped model: go through model.module.
                cpu_model = deepcopy(model.module)
                cpu_model.load_state_dict(checkpoint['model'])
                model.module.load_state_dict(cpu_model.state_dict())
            else:
                cpu_model = deepcopy(model)
                cpu_model.load_state_dict(checkpoint['model'])
                model.load_state_dict(cpu_model.state_dict())

            print(f"Successfully Loaded Pretrained Checkpoint at Epoch {checkpoint['epoch']} with Loss {checkpoint['min_eval_loss']}")

            pre_epoch = 0
            best_epoch = 0
            min_eval_loss = 100
        else:
            raise Exception('Pre-trained Checkpoint not found. Please check your pre-trained ckpt path.')

    else:
        # Resume path: restore model, optimizer, scheduler and bookkeeping.
        if os.path.exists(WEIGHTS_PATH):
            # Load checkpoint to CPU first to avoid a transient GPU spike.
            checkpoint = torch.load(WEIGHTS_PATH, map_location='cpu')

            if torch.cuda.device_count() > 1:
                # DDP-wrapped model: go through model.module.
                cpu_model = deepcopy(model.module)
                cpu_model.load_state_dict(checkpoint['model'])
                model.module.load_state_dict(cpu_model.state_dict())
            else:
                cpu_model = deepcopy(model)
                cpu_model.load_state_dict(checkpoint['model'])
                model.load_state_dict(cpu_model.state_dict())
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_sched'])
            pre_epoch = checkpoint['epoch']
            best_epoch = checkpoint['best_epoch']
            min_eval_loss = checkpoint['min_eval_loss']
            print("Successfully Loaded Checkpoint from Epoch %d" % pre_epoch)
            checkpoint = None  # release checkpoint memory before training

        else:
            raise Exception('Checkpoint not found to continue training. Please check your parameter settings.')

    for epoch in range(1 + pre_epoch, NUM_EPOCHS + 1):
        train_sampler.set_epoch(epoch)
        eval_sampler.set_epoch(epoch)
        print('-' * 21 + "Epoch " + str(epoch) + '-' * 21)
        train_loss = train_epoch(epoch)
        eval_loss = eval_epoch()
        if global_rank == 0:
            with open(LOGS_PATH, 'a') as f:
                f.write("Epoch " + str(epoch) + "\ntrain_loss: " + str(train_loss)
                        + "\neval_loss: " + str(eval_loss)
                        + "\ntime: " + time.asctime(time.localtime(time.time())) + "\n\n")
            # Save only when the eval loss improves.
            if eval_loss < min_eval_loss:
                best_epoch = epoch
                min_eval_loss = eval_loss
                checkpoint = {
                    'model': model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_sched': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'best_epoch': best_epoch,
                    'min_eval_loss': min_eval_loss
                }
                torch.save(checkpoint, WEIGHTS_PATH)

        if world_size > 1:
            dist.barrier()

    if global_rank == 0:
        print("Best Eval Epoch : " + str(best_epoch))
        print("Min Eval Loss : " + str(min_eval_loss))
|
xml2abc.py
ADDED
|
@@ -0,0 +1,1609 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# coding=latin-1
|
| 3 |
+
'''
|
| 4 |
+
Copyright (C) 2012-2018: W.G. Vree
|
| 5 |
+
Contributions: M. Tarenskeen, N. Liberg, Paul Villiger, Janus Meuris, Larry Myerscough,
|
| 6 |
+
Dick Jackson, Jan Wybren de Jong, Mark Zealey.
|
| 7 |
+
|
| 8 |
+
This program is free software; you can redistribute it and/or modify it under the terms of the
|
| 9 |
+
Lesser GNU General Public License as published by the Free Software Foundation;
|
| 10 |
+
|
| 11 |
+
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
|
| 12 |
+
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
| 13 |
+
See the Lesser GNU General Public License for more details. <http://www.gnu.org/licenses/lgpl.html>.
|
| 14 |
+
'''
|
| 15 |
+
|
| 16 |
+
'''Small revisions made for NotaGen to improve the success rate of conversion.'''
|
| 17 |
+
|
| 18 |
+
try: import xml.etree.cElementTree as E
|
| 19 |
+
except: import xml.etree.ElementTree as E
|
| 20 |
+
import os, sys, types, re, math
|
| 21 |
+
|
| 22 |
+
VERSION = 143  # version of the original xml2abc this file is based on

# Python 2/3 compatibility aliases: pick the type objects and the largest
# integer constant from whichever runtime we are on.
python3 = sys.version_info.major > 2
if python3:
    tupletype = tuple
    listtype = list
    max_int = sys.maxsize
else:
    tupletype = types.TupleType
    listtype = types.ListType
    max_int = sys.maxint
|
| 33 |
+
|
| 34 |
+
# Translation of MusicXML <notations> sub-elements to abc decorations.
# Keys are paths relative to <notations>; values are the abc decoration text.
# NOTE(review): 'articulations/staccatissimo' appears twice; the later entry
# wins, but both map to '!wedge!' so the duplicate is harmless.
note_ornamentation_map = {  # for notations/, modified from EasyABC
    'ornaments/trill-mark': 'T',
    'ornaments/mordent': 'M',
    'ornaments/inverted-mordent': 'P',
    'ornaments/turn': '!turn!',
    'ornaments/inverted-turn': '!invertedturn!',
    'technical/up-bow': 'u',
    'technical/down-bow': 'v',
    'technical/harmonic': '!open!',
    'technical/open-string': '!open!',
    'technical/stopped': '!plus!',
    'technical/snap-pizzicato': '!snap!',
    'technical/thumb-position': '!thumb!',
    'articulations/accent': '!>!',
    'articulations/strong-accent':'!^!',
    'articulations/staccato': '.',
    'articulations/staccatissimo':'!wedge!',
    'articulations/scoop': '!slide!',
    'fermata': '!fermata!',
    'arpeggiate': '!arpeggio!',
    'articulations/tenuto': '!tenuto!',
    'articulations/staccatissimo':'!wedge!', # not sure whether this is the right translation
    'articulations/spiccato': '!wedge!', # not sure whether this is the right translation
    'articulations/breath-mark': '!breath!', # this may need to be tested to make sure it appears on the right side of the note
    'articulations/detached-legato': '!tenuto!.',
}
|
| 60 |
+
|
| 61 |
+
# Translation of MusicXML dynamics marks to abc decorations.
dynamics_map = { # for direction/direction-type/dynamics/
    'p': '!p!',
    'pp': '!pp!',
    'ppp': '!ppp!',
    'pppp': '!pppp!',
    'f': '!f!',
    'ff': '!ff!',
    'fff': '!fff!',
    'ffff': '!ffff!',
    'mp': '!mp!',
    'mf': '!mf!',
    'sfz': '!sfz!',
}
|
| 74 |
+
|
| 75 |
+
# SVG definitions injected into the abc output for percussion note heads
# (x, normal, circle-x, triangle, square, diamond, each with -/+ variants).
# NOTE(review): the <text ...></text> elements appear empty in this copy;
# upstream xml2abc embeds special glyph characters between those tags which
# may have been stripped by the encoding of this view -- verify against the
# original before relying on this string.
percSvg = '''%%beginsvg
<defs>
<text id="x" x="-3" y="0"></text>
<text id="x-" x="-3" y="0"></text>
<text id="x+" x="-3" y="0"></text>
<text id="normal" x="-3.7" y="0"></text>
<text id="normal-" x="-3.7" y="0"></text>
<text id="normal+" x="-3.7" y="0"></text>
<g id="circle-x"><text x="-3" y="0"></text><circle r="4" class="stroke"></circle></g>
<g id="circle-x-"><text x="-3" y="0"></text><circle r="4" class="stroke"></circle></g>
<path id="triangle" d="m-4 -3.2l4 6.4 4 -6.4z" class="stroke" style="stroke-width:1.4"></path>
<path id="triangle-" d="m-4 -3.2l4 6.4 4 -6.4z" class="stroke" style="stroke-width:1.4"></path>
<path id="triangle+" d="m-4 -3.2l4 6.4 4 -6.4z" class="stroke" style="fill:#000"></path>
<path id="square" d="m-3.5 3l0 -6.2 7.2 0 0 6.2z" class="stroke" style="stroke-width:1.4"></path>
<path id="square-" d="m-3.5 3l0 -6.2 7.2 0 0 6.2z" class="stroke" style="stroke-width:1.4"></path>
<path id="square+" d="m-3.5 3l0 -6.2 7.2 0 0 6.2z" class="stroke" style="fill:#000"></path>
<path id="diamond" d="m0 -3l4.2 3.2 -4.2 3.2 -4.2 -3.2z" class="stroke" style="stroke-width:1.4"></path>
<path id="diamond-" d="m0 -3l4.2 3.2 -4.2 3.2 -4.2 -3.2z" class="stroke" style="stroke-width:1.4"></path>
<path id="diamond+" d="m0 -3l4.2 3.2 -4.2 3.2 -4.2 -3.2z" class="stroke" style="fill:#000"></path>
</defs>
%%endsvg'''

# SVG preamble for tablature output: a small sans-serif font class plus two
# white rectangles used to clear the staff behind one/two digit labels.
tabSvg = '''%%beginsvg
<style type="text/css">
.bf {font-family:sans-serif; font-size:7px}
</style>
<defs>
<rect id="clr" x="-3" y="-1" width="6" height="5" fill="white"></rect>
<rect id="clr2" x="-3" y="-1" width="11" height="5" fill="white"></rect>'''

# Templates for one- and two-character tablature note-head glyphs; the two
# %s slots receive the same label (used as id suffix and as displayed text).
kopSvg = '<g id="kop%s" class="bf"><use xlink:href="#clr"></use><text x="-2" y="3">%s</text></g>\n'
kopSvg2 = '<g id="kop%s" class="bf"><use xlink:href="#clr2"></use><text x="-2" y="3">%s</text></g>\n'
|
| 107 |
+
|
| 108 |
+
def info (s, warn=1):
    """Write message s to stderr, prefixed with '-- ' unless warn is false."""
    prefix = '-- ' if warn else ''
    sys.stderr.write (prefix + s + '\n')
|
| 109 |
+
|
| 110 |
+
#-------------------
|
| 111 |
+
# data abstractions
|
| 112 |
+
#-------------------
|
| 113 |
+
class Measure:
    """Per-measure state of one part while parsing: metre, divisions and barlines."""
    def __init__ (s, p):
        s.reset ()
        s.ixp = p       # part number
        s.ixm = 0       # measure number
        s.mdur = 0      # measure duration (nominal metre value in divisions)
        s.divs = 0      # number of divisions per 1/4
        s.mtr = 4,4     # meter

    def reset (s):      # reset each measure
        """Clear the fields that are only valid within a single measure."""
        s.attr = ''     # measure signatures, tempo
        s.lline = ''    # left barline, but only holds ':' at start of repeat, otherwise empty
        s.rline = '|'   # right barline
        s.lnum = ''     # (left) volta number
|
| 127 |
+
|
| 128 |
+
class Note:
    """One note, rest or chord under construction, with its abc decorations and timing."""
    def __init__ (s, dur=0, n=None):
        s.tijd = 0      # the time in XML division units
        s.dur = dur     # duration of a note in XML divisions
        s.fact = None   # time modification for tuplet notes (num, div)
        s.tup = ['']    # start(s) and/or stop(s) of tuplet
        s.tupabc = ''   # abc tuplet string to issue before note
        s.beam = 0      # 1 = beamed
        s.grace = 0     # 1 = grace note
        s.before = []   # abc string that goes before the note/chord
        s.after = ''    # the same after the note/chord
        s.ns = n and [n] or []  # notes in the chord
        s.lyrs = {}     # {number -> syllable}
        s.tab = None    # (string number, fret number)
        s.ntdec = ''    # !string!, !courtesy!
|
| 143 |
+
|
| 144 |
+
class Elem:
    """Any non-note item in a voice (barline, signature, direction) with its time."""
    def __init__ (s, string):
        s.tijd = 0      # the time in XML division units
        s.str = string  # any abc string that is not a note
|
| 148 |
+
|
| 149 |
+
class Counter:
    """Per-voice statistics for one part.

    Three counters are kept per xml voice number: 'note' (real notes),
    'nopr' (skipped non-printable notes) and 'nopt' (notes without pitch).
    """
    def inc (s, key, voice): s.counters [key][voice] = s.counters [key].get (voice, 0) + 1
    def clear (s, vnums):   # reset all counters
        # one zeroed entry per xml voice id, shared shape for all three keys
        tups = list( zip (vnums.keys (), len (vnums) * [0]))
        s.counters = {'note': dict (tups), 'nopr': dict (tups), 'nopt': dict (tups)}
    def getv (s, key, voice): return s.counters[key][voice]
    def prcnt (s, ip):      # print summary of all non zero counters
        for iv in s.counters ['note']:
            if s.getv ('nopr', iv) != 0:
                info ( 'part %d, voice %d has %d skipped non printable notes' % (ip, iv, s.getv ('nopr', iv)))
            if s.getv ('nopt', iv) != 0:
                info ( 'part %d, voice %d has %d notes without pitch' % (ip, iv, s.getv ('nopt', iv)))
            if s.getv ('note', iv) == 0:    # no real notes counted in this voice
                info ( 'part %d, skipped empty voice %d' % (ip, iv))
|
| 163 |
+
|
| 164 |
+
class Music:
    """Accumulates the parsed musical content of one part.

    Notes and other elements are appended per xml voice while a measure is
    read; addBar closes a measure (barlines, lyrics, broken rhythms) and
    outVoices renders all collected measures of a part through abcOut.
    """
    def __init__(s, options):
        s.tijd = 0          # the current time
        s.maxtime = 0       # maximum time in a measure
        s.gMaten = []       # [voices,.. for all measures in a part]
        s.gLyrics = []      # [{num: (abc_lyric_string, melis)},.. for all measures in a part]
        s.vnums = {}        # all used voice id's in a part (xml voice id's == numbers)
        s.cnt = Counter ()  # global counter object
        s.vceCnt = 1        # the global voice count over all parts
        s.lastnote = None   # the last real note record inserted in s.voices
        s.bpl = options.b   # the max number of bars per line when writing abc
        s.cpl = options.n   # the number of chars per line when writing abc
        s.repbra = 0        # true if volta is used somewhere
        s.nvlt = options.v  # no volta on higher voice numbers
        s.jscript = options.j   # compatibility with javascript version

    def initVoices (s, newPart=0):
        """Reset the per-measure voice buffers (and the counters when a new part starts)."""
        s.vtimes, s.voices, s.lyrics = {}, {}, {}
        for v in s.vnums:
            s.vtimes [v] = 0    # {voice: the end time of the last item in each voice}
            s.voices [v] = []   # {voice: [Note|Elem, ..]}
            s.lyrics [v] = []   # {voice: [{num: syl}, ..]}
        if newPart: s.cnt.clear (s.vnums)   # clear counters once per part

    def incTime (s, dt):
        """Advance the current time by dt divisions, clamping at 0 and tracking the maximum."""
        s.tijd += dt
        if s.tijd < 0: s.tijd = 0   # erroneous <backup> element
        if s.tijd > s.maxtime: s.maxtime = s.tijd

    def appendElemCv (s, voices, elem):
        for v in voices:
            s.appendElem (v, elem)  # insert element in all voices

    def insertElem (s, v, elem):    # insert at the start of voice v in the current measure
        obj = Elem (elem)
        obj.tijd = 0    # because voice is sorted later
        s.voices [v].insert (0, obj)

    def appendObj (s, v, obj, dur):
        """Append obj (Note or Elem) to voice v at the current time and advance by dur."""
        obj.tijd = s.tijd
        s.voices [v].append (obj)
        s.incTime (dur)
        if s.tijd > s.vtimes[v]: s.vtimes[v] = s.tijd   # don't update for inserted earlier items

    def appendElem (s, v, elem, tel=0):
        s.appendObj (v, Elem (elem), 0)
        if tel: s.cnt.inc ('note', v)   # count number of certain elements in each voice (in addition to notes)

    def appendElemT (s, v, elem, tijd):     # insert element at specified time
        obj = Elem (elem)
        obj.tijd = tijd
        s.voices [v].append (obj)

    def appendNote (s, v, note, noot):
        """Append a Note to voice v; noot is its abc pitch string ('z'/'x' for rests)."""
        note.ns.append (note.ntdec + noot)
        s.appendObj (v, note, int (note.dur))
        s.lastnote = note   # remember last note/rest for later modifications (chord, grace)
        if noot != 'z' and noot != 'x':     # real notes and grace notes
            s.cnt.inc ('note', v)           # count number of real notes in each voice
            if not note.grace:              # for every real note
                s.lyrics[v].append (note.lyrs)  # even when it has no lyrics

    def getLastRec (s, voice):
        if s.gMaten: return s.gMaten[-1][voice][-1]     # the last record in the last measure
        return None     # no previous records in the first measure

    def getLastMelis (s, voice, num):   # get melisma of last measure
        if s.gLyrics:
            lyrdict = s.gLyrics[-1][voice]  # the previous lyrics dict in this voice
            if num in lyrdict: return lyrdict[num][1]   # lyrdict = num -> (lyric string, melisma)
        return 0    # no previous lyrics in voice or line number

    def addChord (s, note, noot):   # careful: we assume that chord notes follow immediately
        for d in note.before:       # put all decorations before chord
            if d not in s.lastnote.before:
                s.lastnote.before += [d]
        s.lastnote.ns.append (note.ntdec + noot)

    def addBar (s, lbrk, m):    # linebreak, measure data
        """Close the current measure: fix barlines/voltas, sort each voice by time,
        build the lyric lines and detect broken rhythms, then reset for the next measure."""
        if m.mdur and s.maxtime > m.mdur: info ('measure %d in part %d longer than metre' % (m.ixm+1, m.ixp+1))
        s.tijd = s.maxtime  # the time of the bar lines inserted here
        for v in s.vnums:
            if m.lline or m.lnum:   # if left barline or left volta number
                p = s.getLastRec (v)    # get the previous barline record
                if p:   # in measure 1 no previous measure is available
                    x = p.str   # p.str is the ABC barline string
                    if m.lline:     # append begin of repeat, m.lline == ':'
                        x = (x + m.lline).replace (':|:','::').replace ('||','|')
                    if s.nvlt == 3:     # add volta number only to lowest voice in part 0
                        if m.ixp + v == min (s.vnums): x += m.lnum
                    elif m.lnum:    # new behaviour with I:repbra 0
                        x += m.lnum     # add volta number(s) or text to all voices
                        s.repbra = 1    # signal occurrence of a volta
                    p.str = x   # modify previous right barline
                elif m.lline:   # begin of new part and left repeat bar is required
                    s.insertElem (v, '|:')
            if lbrk:
                p = s.getLastRec (v)    # get the previous barline record
                if p: p.str += lbrk     # insert linebreak char after the barlines+volta
            if m.attr:  # insert signatures at front of buffer
                s.insertElem (v, '%s' % m.attr)
            s.appendElem (v, ' %s' % m.rline)   # insert current barline record at time maxtime
            s.voices[v] = sortMeasure (s.voices[v], m)  # make all times consistent
            lyrs = s.lyrics[v]  # [{number: syllable}, .. for all notes]
            lyrdict = {}        # {number: (abc_lyric_string, melis)} for this voice
            nums = [num for d in lyrs for num in d.keys ()]     # the lyrics numbers in this measure
            maxNums = max (nums + [0])  # the highest lyrics number in this measure
            for i in range (maxNums, 0, -1):
                xs = [syldict.get (i, '') for syldict in lyrs]  # collect the syllabi with number i
                melis = s.getLastMelis (v, i)   # get melisma from last measure
                lyrdict [i] = abcLyr (xs, melis)
            s.lyrics[v] = lyrdict   # {number: (abc_lyric_string, melis)} for this measure
            mkBroken (s.voices[v])
        s.gMaten.append (s.voices)
        s.gLyrics.append (s.lyrics)
        s.tijd = s.maxtime = 0
        s.initVoices ()

    def outVoices (s, divs, ip, isSib):     # output all voices of part ip
        """Render every non-empty voice of part ip through abcOut; returns the map
        from xml voice number to global abc voice number."""
        vvmap = {}  # xml voice number -> abc voice number (one part)
        vnum_keys = list (s.vnums.keys ())
        if s.jscript or isSib: vnum_keys.sort ()
        lvc = min (vnum_keys or [1])    # lowest xml voice number of this part
        for iv in vnum_keys:
            if s.cnt.getv ('note', iv) == 0:    # no real notes counted in this voice
                continue    # skip empty voices
            if abcOut.denL: unitL = abcOut.denL     # take the unit length from the -d option
            else: unitL = compUnitLength (iv, s.gMaten, divs)   # compute the best unit length for this voice
            abcOut.cmpL.append (unitL)  # remember for header output
            vn, vl = [], {}     # for voice iv: collect all notes to vn and all lyric lines to vl
            for im in range (len (s.gMaten)):
                measure = s.gMaten [im][iv]
                vn.append (outVoice (measure, divs [im], im, ip, unitL))
                checkMelismas (s.gLyrics, s.gMaten, im, iv)
                for n, (lyrstr, melis) in s.gLyrics [im][iv].items ():
                    if n in vl:
                        while len (vl[n]) < im: vl[n].append ('')   # fill in skipped measures
                        vl[n].append (lyrstr)
                    else:
                        vl[n] = im * [''] + [lyrstr]    # must skip im measures
            for n, lyrs in vl.items ():     # fill up possibly empty lyric measures at the end
                mis = len (vn) - len (lyrs)
                lyrs += mis * ['']
            abcOut.add ('V:%d' % s.vceCnt)
            if s.repbra:
                if s.nvlt == 1 and s.vceCnt > 1: abcOut.add ('I:repbra 0')  # only volta on first voice
                if s.nvlt == 2 and iv > lvc: abcOut.add ('I:repbra 0')      # only volta on first voice of each part
            if s.cpl > 0: s.bpl = 0     # option -n (max chars per line) overrules -b (max bars per line)
            elif s.bpl == 0: s.cpl = 100    # the default: 100 chars per line
            bn = 0  # count bars
            while vn:   # while still measures available
                ib = 1
                chunk = vn [0]
                while ib < len (vn):
                    if s.cpl > 0 and len (chunk) + len (vn [ib]) >= s.cpl: break    # line full (number of chars)
                    if s.bpl > 0 and ib >= s.bpl: break     # line full (number of bars)
                    chunk += vn [ib]
                    ib += 1
                bn += ib
                abcOut.add (chunk + ' %%%d' % bn)   # line with bar number
                del vn[:ib]     # chop ib bars
                lyrlines = sorted (vl.items ())     # order the numbered lyric lines for output
                for n, lyrs in lyrlines:
                    abcOut.add ('w: ' + '|'.join (lyrs[:ib]) + '|')
                    del lyrs[:ib]
            vvmap [iv] = s.vceCnt   # xml voice number -> abc voice number
            s.vceCnt += 1   # count voices over all parts
        s.gMaten = []   # reset the following instance vars for each part
        s.gLyrics = []
        s.cnt.prcnt (ip+1)  # print summary of skipped items in this part
        return vvmap
|
| 335 |
+
|
| 336 |
+
class ABCoutput:
    """Collects the abc output lines of one tune and writes them to file or stdout.

    mkHeader prepends the tune header (X:, page format, %%score, L:, M:, K:,
    V: fields plus %%MIDI / percmap / tablature material) to the collected body.
    """
    pagekeys = 'scale,pageheight,pagewidth,leftmargin,rightmargin,topmargin,botmargin'.split (',')
    def __init__ (s, fnmext, pad, X, options):
        s.fnmext = fnmext
        s.outlist = []      # list of ABC strings
        s.title = 'T:Title'
        s.key = 'none'
        s.clefs = {}        # clefs for all abc-voices
        s.mtr = 'none'
        s.tempo = 0         # 0 -> no tempo field
        s.tempo_units = (1,4)   # note type of tempo direction
        s.pad = pad         # the output path or none
        s.X = X + 1         # the abc tune number
        s.denL = options.d  # denominator of the unit length (L:) from -d option
        s.volpan = int (options.m)  # 0 -> no %%MIDI, 1 -> only program, 2 -> all %%MIDI
        s.cmpL = []         # computed optimal unit length for all voices
        s.jscript = options.j   # compatibility with javascript version
        s.tstep = options.t     # translate percmap to voicemap
        s.stemless = 0          # use U:s=!stemless!
        s.shiftStem = options.s     # shift note heads 3 units left
        if pad:
            _, base_name = os.path.split (fnmext)
            s.outfile = open (os.path.join (pad, base_name), 'w', encoding='utf-8')
        else: s.outfile = sys.stdout
        if s.jscript: s.X = 1   # always X:1 in javascript version
        s.pageFmt = {}
        for k in s.pagekeys: s.pageFmt [k] = None
        if len (options.p) == 7:    # all seven page format values must be given
            for k, v in zip (s.pagekeys, options.p):
                try: s.pageFmt [k] = float (v)
                # NOTE(review): '%' formatting seems intended here; as written the
                # tuple (k, v) is passed as the 'warn' argument and the message is
                # printed unformatted -- verify against upstream xml2abc.
                except: info ('illegal float %s for %s', (k, v)); continue

    def add (s, str):
        s.outlist.append (str + '\n')   # collect all ABC output

    def mkHeader (s, stfmap, partlist, midimap, vmpdct, koppen):    # stfmap = [parts], part = [staves], stave = [voices]
        """Build the abc tune header and prepend it to s.outlist."""
        accVce, accStf, staffs = [], [], stfmap[:]  # staffs is consumed
        for x in partlist:  # collect partnames into accVce and staff groups into accStf
            try: prgroupelem (x, ('', ''), '', stfmap, accVce, accStf)
            except: info ('lousy musicxml: error in part-list')
        staves = ' '.join (accStf)
        clfnms = {}
        for part, (partname, partabbrv) in zip (staffs, accVce):
            if not part: continue   # skip empty part
            firstVoice = part[0][0]     # the first voice number in this part
            nm = partname.replace ('\n','\\n').replace ('.:','.').strip (':')
            snm = partabbrv.replace ('\n','\\n').replace ('.:','.').strip (':')
            clfnms [firstVoice] = (nm and 'nm="%s"' % nm or '') + (snm and ' snm="%s"' % snm or '')
        hd = ['X:%d\n%s\n' % (s.X, s.title)]
        for i, k in enumerate (s.pagekeys):
            if s.jscript and k in ['pageheight','topmargin', 'botmargin']: continue
            if s.pageFmt [k] != None: hd.append ('%%%%%s %.2f%s\n' % (k, s.pageFmt [k], i > 0 and 'cm' or ''))
        if staves and len (accStf) > 1: hd.append ('%%score ' + staves + '\n')
        tempo = s.tempo and 'Q:%d/%d=%s\n' % (s.tempo_units [0], s.tempo_units [1], s.tempo) or ''  # default no tempo field
        d = {}  # determine the most frequently occurring unit length over all voices
        for x in s.cmpL: d[x] = d.get (x, 0) + 1
        if s.jscript: defLs = sorted (d.items (), key=lambda x: (-x[1], x[0]))  # when tie (1) sort on key (0)
        else: defLs = sorted (d.items (), key=lambda x: -x[1])
        defL = s.denL and s.denL or defLs [0][0]    # override default unit length with -d option
        hd.append ('L:1/%d\n%sM:%s\n' % (defL, tempo, s.mtr))
        hd.append ('K:%s\n' % s.key)
        if s.stemless: hd.append ('U:s=!stemless!\n')
        vxs = sorted (vmpdct.keys ())
        for vx in vxs: hd.extend (vmpdct [vx])
        s.dojef = 0     # translate percmap to voicemap
        for vnum, clef in s.clefs.items ():
            ch, prg, vol, pan = midimap [vnum-1][:4]
            dmap = midimap [vnum - 1][4:]   # map of abc percussion notes to midi notes
            if dmap and 'perc' not in clef: clef = (clef + ' map=perc').strip ();
            hd.append ('V:%d %s %s\n' % (vnum, clef, clfnms.get (vnum, '')))
            if vnum in vmpdct:
                hd.append ('%%%%voicemap tab%d\n' % vnum)
                hd.append ('K:none\nM:none\n%%clef none\n%%staffscale 1.6\n%%flatbeams true\n%%stemdir down\n')
            if 'perc' in clef: hd.append ('K:none\n');  # no key for a perc voice
            if s.volpan > 1:    # option -m 2 -> output all recognized midi commands when needed and present in xml
                if ch > 0 and ch != vnum: hd.append ('%%%%MIDI channel %d\n' % ch)
                if prg > 0: hd.append ('%%%%MIDI program %d\n' % (prg - 1))
                if vol >= 0: hd.append ('%%%%MIDI control 7 %.0f\n' % vol)  # volume == 0 is possible ...
                if pan >= 0: hd.append ('%%%%MIDI control 10 %.0f\n' % pan)
            elif s.volpan > 0:  # default -> only output midi program command when present in xml
                if dmap and ch > 0: hd.append ('%%%%MIDI channel %d\n' % ch)    # also channel if percussion part
                if prg > 0: hd.append ('%%%%MIDI program %d\n' % (prg - 1))
            for abcNote, step, midiNote, notehead in dmap:
                if not notehead: notehead = 'normal'
                if abcMid (abcNote) != midiNote or abcNote != step:
                    if s.volpan > 0: hd.append ('%%%%MIDI drummap %s %s\n' % (abcNote, midiNote))
                    hd.append ('I:percmap %s %s %s %s\n' % (abcNote, step, midiNote, notehead))
                    s.dojef = s.tstep
            if defL != s.cmpL [vnum-1]:     # only if computed unit length different from header
                hd.append ('L:1/%d\n' % s.cmpL [vnum-1])
        s.outlist = hd + s.outlist
        if koppen:  # output SVG stuff needed for tablature
            k1 = kopSvg.replace ('-2','-5') if s.shiftStem else kopSvg  # shift note heads 3 units left
            k2 = kopSvg2.replace ('-2','-5') if s.shiftStem else kopSvg2
            tb = tabSvg.replace ('-3','-6') if s.shiftStem else tabSvg
            ks = sorted (koppen.keys ())    # javascript compatibility
            ks = [k2 % (k, k) if len (k) == 2 else k1 % (k, k) for k in ks]
            # NOTE(review): under Python 3 map() returns an iterator, so the
            # concatenation 'tbs + ks' below raises TypeError whenever koppen is
            # non-empty -- this likely needs list(...) around the map; verify.
            tbs = map (lambda x: x.strip () + '\n', tb.splitlines ())   # javascript compatibility
            s.outlist = tbs + ks + ['</defs>\n%%endsvg\n'] + s.outlist

    def writeall (s):   # determine the required encoding of the entire ABC output
        """Join all collected lines and write them to the output file or stdout."""
        str = ''.join (s.outlist)
        # print(str)
        if s.dojef: str = perc2map (str)
        # NOTE(review): both branches are identical; the py2 branch presumably
        # once encoded the string and was flattened in a later revision.
        if python3: s.outfile.write (str)
        else: s.outfile.write (str)
        if s.pad: s.outfile.close ()    # close each file with -o option
        else: s.outfile.write ('\n')    # add empty line between tunes on stdout
        info ('%s written with %d voices' % (s.fnmext, len (s.clefs)), warn=0)
|
| 445 |
+
|
| 446 |
+
#----------------
|
| 447 |
+
# functions
|
| 448 |
+
#----------------
|
| 449 |
+
def abcLyr (xs, melis):
    """Translate a list of per-note syllables into one abc lyric (w:) string.

    xs holds, for every note of the measure, either a syllable or ''.
    melis is true while a melisma from the previous measure is still open.
    Returns (abc_lyric_string, melis) with the melisma state at measure end.
    """
    if not ''.join (xs): return '', 0       # measure has no lyrics at all
    out = []
    for syl in xs:
        if not syl:                         # note without a syllable:
            out.append ('_' if melis else '*')  # extend melisma or skip the note
        elif syl.endswith ('_') and not syl.endswith ('\\_'):
            out.append (syl.replace ('_', ''))  # strip marker(s) and open a melisma
            melis = 1                       # subsequent empty slots become '_'
        else:
            out.append (syl)                # a plain syllable closes any melisma
            melis = 0
    return ' '.join (out), melis
|
| 462 |
+
|
| 463 |
+
def simplify (a, b):
    """Reduce the fraction a/b by the greatest common divisor of a and b."""
    num, den = a, b
    while den:                      # Euclid's algorithm; gcd ends up in num
        num, den = den, num % den
    return a // num, b // num
|
| 467 |
+
|
| 468 |
+
def abcdur (nx, divs, uL):  # convert an musicXML duration d to abc units with L:1/uL
    """Return the abc duration suffix for note nx.

    nx.dur is in xml divisions, divs is the number of divisions per quarter
    note and uL the denominator of the abc unit length (L:1/uL).  nx.fact,
    when set, holds the tuplet time modification (num, den).
    """
    if nx.dur == 0: return ''   # when called for elements without duration
    num, den = simplify (uL * nx.dur, divs * 4)     # L=1/8 -> uL = 8 units
    if nx.fact:     # apply tuplet time modification
        numfac, denfac = nx.fact
        num, den = simplify (num * numfac, den * denfac)
    if den > 64:    # limit the denominator to a maximum of 64
        x = float (num) / den; n = math.floor (x);  # when just above an integer n
        if x - n < 0.1 * x: num, den = n, 1;        # round to n
        num64 = 64. * num / den + 1.0e-15   # to get Python2 behaviour of round
        num, den = simplify (int (round (num64)), 64)
    # encode num/den in abc shorthand: '' == 1, '/' == 1/2, '/8', '3', '3/2' ...
    if num == 1:
        if den == 1: dabc = ''
        elif den == 2: dabc = '/'
        else: dabc = '/%d' % den
    elif den == 1: dabc = '%d' % num
    else: dabc = '%d/%d' % (num, den)
    return dabc
|
| 486 |
+
|
| 487 |
+
def abcMid (note):
    """Return the midi pitch number of an abc note string, or -1 when none found."""
    m = re.search (r"([_^]*)([A-Ga-g])([',]*)", note)
    if m is None:
        return -1
    accs, name, octs = m.groups ()
    semitones = dict (zip ('CDEFGAB', [0, 2, 4, 5, 7, 9, 11]))
    pitch = 60 + semitones [name.upper ()]      # upper case = octave starting at middle C
    if name.islower ():                         # lower case = one octave higher
        pitch += 12
    if accs:                                    # each '^' raises, each '_' lowers a semitone
        pitch += len (accs) if accs [0] == '^' else -len (accs)
    if octs:                                    # each "'" up an octave, each ',' down
        pitch += 12 * len (octs) if octs [0] == "'" else -12 * len (octs)
    return pitch
|
| 496 |
+
|
| 497 |
+
def staffStep (ptc, o, clef, tstep):
    """Turn pitch step ptc (C..B) with octave o into an abc note string,
    applying the diatonic transpositions implied by the clef (skipped for
    the bass clef when tstep is set)."""
    shift = 0
    if 'stafflines=1' in clef: shift += 4   # one line staff: E (xml) -> B (abc)
    if not tstep and clef.startswith ('bass'): shift += 12  # transpose bass -> treble (C3 -> A4)
    if shift:   # diatonic transposition == addition modulo 7
        steps = ['C', 'D', 'E', 'F', 'G', 'A', 'B']
        i = steps.index (ptc) + shift
        ptc = steps [i % 7]
        o += i // 7
    if o > 4: ptc = ptc.lower ()            # octave 5 and higher: lower case
    if o > 5: ptc += (o - 5) * "'"          # octave 6+: apostrophes
    if o < 4: ptc += (4 - o) * ","          # octave 3 and lower: commas
    return ptc
|
| 509 |
+
|
| 510 |
+
def setKey (fifths, mode):
    """Map a circle-of-fifths count and a MusicXML mode name to an abc key.

    Returns (key_string, msralts) where msralts maps each altered step
    (F C G D A E B) to +1 (sharpened) or -1 (flattened) by the signature.
    """
    tonics = 'Fb Cb Gb Db Ab Eb Bb F C G D A E B F# C# G# D# A# E# B#'.split ()
    offsets = {'maj':8, 'ion':8, 'm':11, 'min':11, 'aeo':11, 'mix':9, 'dor':10, 'phr':12, 'lyd':7, 'loc':13, 'non':8}
    mode = mode.lower ()[:3]        # only the first three chars, case insensitive
    off = offsets [mode]
    key = tonics [off + fifths]
    if off != 8:                    # major/ionian keys carry no mode suffix
        key += mode
    steps = ['F','C','G','D','A','E','B']
    if fifths >= 0:                 # sharps are added in fifths order from F
        msralts = {st: 1 for st in steps [:fifths]}
    else:                           # flats are added backwards from B
        msralts = {st: -1 for st in steps [fifths:]}
    return key, msralts
|
| 519 |
+
|
| 520 |
+
def insTup (ix, notes, fact):   # read one nested tuplet
    """Scan one (possibly nested) tuplet starting at notes[ix] and prepend the
    abc tuplet prefix '(n' or '(p:q:r' to its first note.

    fact is the xml time-modification (num, den) of the enclosing level.
    Returns (ix, tupcnt) with ix on the last note of the tuplet.
    """
    tupcnt = 0
    nx = notes [ix]
    if 'start' in nx.tup:
        nx.tup.remove ('start')     # do recursive calls when starts remain
    tix = ix                        # index of first tuplet note
    fn, fd = fact                   # xml time-mod of the higher level
    fnum, fden = nx.fact            # xml time-mod of the current level
    tupfact = fnum//fn, fden//fd    # abc time mod of this level
    while ix < len (notes):
        nx = notes [ix]
        if isinstance (nx, Elem) or nx.grace:
            ix += 1                 # skip all non tuplet elements
            continue
        if 'start' in nx.tup:       # more nested tuplets to start
            ix, tupcntR = insTup (ix, notes, tupfact)   # ix is on the stop note!
            tupcnt += tupcntR
        elif nx.fact:
            tupcnt += 1             # count tuplet elements
        if 'stop' in nx.tup:
            nx.tup.remove ('stop')
            break
        if not nx.fact:             # stop on first non tuplet note
            # NOTE(review): lastix is only bound after one full iteration; a
            # malformed tuplet whose very first note has no time-mod would hit
            # an UnboundLocalError here -- presumably unreachable for input
            # that carries a 'start', but worth confirming.
            ix = lastix             # back to last tuplet note
            break
        lastix = ix
        ix += 1
    # put abc tuplet notation before the recursive ones
    tup = (tupfact[0], tupfact[1], tupcnt)
    if tup == (3, 2, 3): tupPrefix = '(3'   # common triplet shorthand
    else: tupPrefix = '(%d:%d:%d' % tup
    notes [tix].tupabc = tupPrefix + notes [tix].tupabc
    return ix, tupcnt   # ix is on the last tuplet note
|
| 553 |
+
|
| 554 |
+
def mkBroken (vs):  # introduce broken rhythms (vs: one voice, one measure)
    """Translate adjacent 1:3 / 3:1 duration pairs into abc broken-rhythm
    notation ('<' resp. '>') on the first note of the pair.
    Only non-tuplet notes with a real duration inside a beam qualify."""
    notes = [obj for obj in vs if isinstance (obj, Note)]
    k = 0
    last = len (notes) - 1
    while k < last:
        left, right = notes [k], notes [k + 1]      # scan all adjacent pairs
        # skip pairs with tuplet members, zero durations or outside a beam
        eligible = not left.fact and not right.fact and left.dur > 0 and right.beam
        if eligible and left.dur * 3 == right.dur:      # short-long pair -> '<'
            right.dur = (2 * right.dur) // 3
            left.dur = left.dur * 2
            left.after = '<' + left.after
            k += 1                                  # do not chain broken rhythms
        elif eligible and right.dur * 3 == left.dur:    # long-short pair -> '>'
            left.dur = (2 * left.dur) // 3
            right.dur = right.dur * 2
            left.after = '>' + left.after
            k += 1                                  # do not chain broken rhythms
        k += 1
|
| 572 |
+
|
| 573 |
+
def outVoice (measure, divs, im, ip, unitL):    # note/elem objects of one measure in one voice
    """Translate the note/Elem objects of one measure in one voice into an abc
    string.  divs = xml divisions of this measure, unitL = abc unit length.
    im/ip identify measure/part (kept for symmetry with the callers)."""
    ix = 0
    while ix < len (measure):                   # set all (nested) tuplet annotations
        nx = measure [ix]
        if isinstance (nx, Note) and nx.fact and not nx.grace:
            ix, tupcnt = insTup (ix, measure, (1, 1))   # read one tuplet, insert annotation(s)
        ix += 1
    vs = []
    for nx in measure:
        if isinstance (nx, Note):
            durstr = abcdur (nx, divs, unitL)   # xml -> abc duration string
            chord = len (nx.ns) > 1
            cns = [nt[:-1] for nt in nx.ns if nt.endswith ('-')]    # tied chord notes
            tie = ''
            if chord and len (cns) == len (nx.ns):  # all chord notes tied
                nx.ns = cns                     # chord notes without tie
                tie = '-'                       # one tie for whole chord
            s = nx.tupabc + ''.join (nx.before)
            if chord: s += '['
            for nt in nx.ns: s += nt
            if chord: s += ']' + tie
            if s.endswith ('-'): s, tie = s[:-1], '-'   # split off tie
            s += durstr + tie                   # and put it back again
            s += nx.after
            nospace = nx.beam                   # beamed notes are written without separating space
        else:
            if isinstance (nx.str, listtype): nx.str = nx.str [0]   # unresolved repeat: use first element
            s = nx.str
            nospace = 1
        if nospace: vs.append (s)
        else: vs.append (' ' + s)
    vs = ''.join (vs)                           # ad hoc: remove multiple pedal directions
    while vs.find ('!ped!!ped!') >= 0: vs = vs.replace ('!ped!!ped!','!ped!')
    while vs.find ('!ped-up!!ped-up!') >= 0: vs = vs.replace ('!ped-up!!ped-up!','!ped-up!')
    while vs.find ('!8va(!!8va)!') >= 0: vs = vs.replace ('!8va(!!8va)!','')    # remove empty ottava's
    return vs
|
| 609 |
+
|
| 610 |
+
def sortMeasure (voice, m):
    """Sort the elements of one voice of measure m on time and make them
    strictly sequential: fill holes with invisible rests, resolve overlapping
    notes (shorten rests, merge into chords, or discard), and copy beam info
    into rests between beamed notes.  Returns the new sequential list."""
    voice.sort (key=lambda o: o.tijd)           # sort on time
    time = 0
    v = []
    rs = []                                     # holds rests in between notes
    for i, nx in enumerate (voice):             # establish sequentiality
        if nx.tijd > time and chkbug (nx.tijd - time, m):
            v.append (Note (nx.tijd - time, 'x'))   # fill hole with invisble rest
            rs.append (len (v) - 1)
        if isinstance (nx, Elem):
            if nx.tijd < time: nx.tijd = time   # shift elems without duration to where they fit
            v.append (nx)
            time = nx.tijd
            continue
        if nx.tijd < time:                      # overlapping element
            if nx.ns[0] == 'z': continue        # discard overlapping rest
            if v[-1].tijd <= nx.tijd:           # we can do something
                if v[-1].ns[0] == 'z':          # shorten rest
                    v[-1].dur = nx.tijd - v[-1].tijd
                    if v[-1].dur == 0: del v[-1]    # nothing left
                    info ('overlap in part %d, measure %d: rest shortened' % (m.ixp+1, m.ixm+1))
                else:                           # make a chord of overlap
                    v[-1].ns += nx.ns
                    info ('overlap in part %d, measure %d: added chord' % (m.ixp+1, m.ixm+1))
                nx.dur = (nx.tijd + nx.dur) - time  # the remains
                if nx.dur <= 0: continue        # nothing left
                nx.tijd = time                  # append remains
            else:                               # give up
                info ('overlapping notes in one voice! part %d, measure %d, note %s discarded' % (m.ixp+1, m.ixm+1, isinstance (nx, Note) and nx.ns or nx.str))
                continue
        v.append (nx)
        if isinstance (nx, Note):
            if nx.ns [0] in 'zx':
                rs.append (len (v) - 1)         # remember rests between notes
            elif len (rs):
                if nx.beam and not nx.grace:    # copy beam into rests
                    for j in rs: v[j].beam = nx.beam
                rs = []                         # clear rests on each note
        time = nx.tijd + nx.dur
    # when a measure contains no elements and no forwards -> no incTime -> s.maxtime = 0 -> right barline
    # is inserted at time == 0 (in addbar) and is only element in the voice when sortMeasure is called
    if time == 0: info ('empty measure in part %d, measure %d, it should contain at least a rest to advance the time!' % (m.ixp+1, m.ixm+1))
    return v
|
| 653 |
+
|
| 654 |
+
def getPartlist (ps):   # correct part-list (from buggy xml-software)
    """Return a sanitized copy of the xml part-list: insert a stop for every
    part-group that is started twice or never closed, and drop stops that
    have no matching start."""
    fixed = []                                  # the corrected part-list
    open_groups = []                            # stack of opened part-group numbers
    for el in list (ps):
        if el.tag != 'part-group':
            fixed.append (el)
            continue
        num, gtype = el.get ('number'), el.get ('type')
        if gtype == 'start':
            if num in open_groups:              # missing stop: insert one first
                fixed.append (E.Element ('part-group', number = num, type = 'stop'))
                fixed.append (el)
            else:                               # normal start
                fixed.append (el)
                open_groups.append (num)
        elif num in open_groups:                # normal stop
            open_groups.remove (num)
            fixed.append (el)
        # else: double stop -> skip it
    for num in reversed (open_groups):          # fill missing stops at the end
        fixed.append (E.Element ('part-group', number = num, type = 'stop'))
    return fixed
|
| 676 |
+
|
| 677 |
+
def parseParts (xs, d, e):  # -> [elems on current level], rest of xs
    """Recursively build the nested part/group structure from the (corrected)
    part-list xs.  d maps group number -> group data, e is the stack of open
    group numbers.  Returns ([elements on the current level], remaining xs)."""
    if not xs: return [],[]
    x = xs.pop (0)
    if x.tag == 'part-group':
        num, type = x.get ('number'), x.get ('type')
        if type == 'start':     # go one level deeper
            s = [x.findtext (n, '') for n in ['group-symbol','group-barline','group-name','group-abbreviation']]
            d [num] = s         # remember groupdata by group number
            e.append (num)      # make stack of open group numbers
            elemsnext, rest1 = parseParts (xs, d, e)    # parse one level deeper to next stop
            elems, rest2 = parseParts (rest1, d, e)     # parse the rest on this level
            return [elemsnext] + elems, rest2
        else:                   # stop: close level and return group-data
            nums = e.pop ()     # last open group number in stack order
            if xs and xs[0].get ('type') == 'stop':     # two consequetive stops
                if num != nums:                         # in the wrong order (tempory solution)
                    d[nums], d[num] = d[num], d[nums]   # exchange values (only works for two stops!!!)
            sym = d[num]        # retrieve an return groupdata as last element of the group
            return [sym], xs
    else:
        elems, rest = parseParts (xs, d, e)     # parse remaining elements on current level
        name = x.findtext ('part-name',''), x.findtext ('part-abbreviation','')
        return [name] + elems, rest
|
| 700 |
+
|
| 701 |
+
def bracePart (part):   # put a brace on multistaff part and group voices
    """Build the %%score fragment for one part: voices of a multi-voice stave
    are grouped in (), staves are separated by '|', and a multi-stave part is
    wrapped in {}.  part = [[voice numbers] per stave]."""
    if not part:
        return []                           # empty part in the score
    staves = []
    for voice_ids in part:
        if len (voice_ids) == 1:            # stave with one voice
            staves.append ('%s' % voice_ids[0])
        else:                               # stave with multiple voices
            staves.append ('(')
            staves.extend ('%s' % vid for vid in voice_ids)
            staves.append (')')
        staves.append ('|')
    staves.pop ()                           # no barline at the end
    if len (part) > 1:
        return ['{'] + staves + ['}']       # brace around a multi-stave part
    return staves
|
| 714 |
+
|
| 715 |
+
def prgroupelem (x, gnm, bar, pmap, accVce, accStf):    # collect partnames (accVce) and %%score map (accStf)
    """Process one element of the parsed part tree: a part (name tuple), a
    degenerate one-part group, or a nested group (delegated to prgrouplist).
    Appends display names to accVce and %%score tokens to accStf."""
    if type (x) == tupletype:   # partname-tuple = (part-name, part-abbrev)
        y = pmap.pop (0)
        if gnm[0]: x = [n1 + ':' + n2 for n1, n2 in zip (gnm, x)]   # put group-name before part-name
        accVce.append (x)
        accStf.extend (bracePart (y))
    elif len (x) == 2 and type (x[0]) == tupletype: # misuse of group just to add extra name to stave
        y = pmap.pop (0)
        nms = [n1 + ':' + n2 for n1, n2 in zip (x[0], x[1][2:])]    # x[0] = partname-tuple, x[1][2:] = groupname-tuple
        accVce.append (nms)
        accStf.extend (bracePart (y))
    else:
        prgrouplist (x, bar, pmap, accVce, accStf)
|
| 728 |
+
|
| 729 |
+
def prgrouplist (x, pbar, pmap, accVce, accStf):    # collect partnames, scoremap for a part-group
    """Emit the %%score tokens for a part-group: opening bracket/brace, the
    members (via prgroupelem), optional continued barlines, closing symbol."""
    sym, bar, gnm, gabbr = x[-1]    # bracket symbol, continue barline, group-name-tuple
    bar = bar == 'yes' or pbar      # pbar -> the parent has bar
    accStf.append (sym == 'brace' and '{' or '[')
    for z in x[:-1]:
        prgroupelem (z, (gnm, gabbr), bar, pmap, accVce, accStf)
        if bar: accStf.append ('|')
    if bar: del accStf [-1]         # remove last one before close
    accStf.append (sym == 'brace' and '}' or ']')
|
| 738 |
+
|
| 739 |
+
def compUnitLength (iv, maten, divs):   # compute optimal unit length
    """Pick the abc unit length (1/4, 1/8 or 1/16) that yields the shortest
    total duration-string length for voice iv over all measures (maten);
    divs[im] holds the xml divisions of measure im."""
    best, best_len = 0, max_int
    for unit in (4, 8, 16):             # try 1/4, 1/8 and 1/16
        # total length of all abc duration strings in this voice for this unit
        total = sum (
            len (abcdur (e, divs [im], unit))
            for im, m in enumerate (maten)
            for e in m [iv]
            if not isinstance (e, Elem) and e.dur != 0)
        if total < best_len:            # remember the smallest
            best, best_len = unit, total
    return best
|
| 749 |
+
|
| 750 |
+
def doSyllable (syl):
    """Convert one xml <lyric> element to an abc w:-line syllable: escape
    '_', '-' and spaces, append '-' for continued syllables and '_' for an
    extend (melisma) marker."""
    parts = []
    for child in syl:
        if child.tag == 'elision':
            parts.append ('~')
        elif child.tag == 'text':       # escape - and space characters
            raw = child.text or ''
            parts.append (raw.replace ('_', '\\_').replace ('-', '\\-').replace (' ', '~'))
    txt = ''.join (parts)
    if not txt:
        return txt
    if syl.findtext ('syllabic') in ('begin', 'middle'):
        txt += '-'                      # syllable continues on the next note
    if syl.find ('extend') is not None:
        txt += '_'                      # melisma marker
    return txt
|
| 760 |
+
|
| 761 |
+
def checkMelismas (lyrics, maten, im, iv):
    """When the previous measure of voice iv ends in a melisma but the current
    measure lacks lyrics for that verse number, synthesize a melisma line of
    underscores for the current measure."""
    if im == 0:
        return                              # no previous measure to continue from
    measure_notes = maten [im][iv]          # notes of the current measure
    cur = lyrics [im][iv]                   # lyrics dict of current measure
    prev = lyrics [im - 1][iv]              # lyrics dict of previous measure
    for verse, (lyrstr, melis) in prev.items ():    # all lyric numbers in the previous measure
        if melis and verse not in cur:      # melisma required, but no lyrics present -> make one!
            filler = getMelisma (measure_notes)
            if filler:
                cur [verse] = (filler, 0)   # set melisma as the verse-th lyrics of this measure
|
| 771 |
+
def getMelisma (maat):  # get melisma from notes in maat
    """Return a lyrics string of underscores, one per real (non-grace) note in
    maat, stopping at the first rest."""
    marks = []
    for obj in maat:
        if not isinstance (obj, Note):
            continue                    # skip Elem's
        if obj.grace:
            continue                    # skip grace notes
        if obj.ns [0] in 'zx':
            break                       # stop on first rest
        marks.append ('_')              # every note gets an underscore
    return ' '.join (marks)
|
| 779 |
+
|
| 780 |
+
def perc2map (abcIn):
    """Translate I:percmap lines in abcIn into %%map/%%voicemap directives,
    prepending the percSvg boilerplate (module-level string) and moving any
    %%MIDI lines to just after their voice definition.
    Bug fix: the original used map(), which is a lazy iterator in Python 3 and
    has neither += nor append; build a real list instead."""
    fillmap = {'diamond':1, 'triangle':1, 'square':1, 'normal':1}
    abc = [x.strip () for x in percSvg.splitlines ()]   # was map(): not a list in Python 3
    id = 'default'
    maps = {'default': []}
    dmaps = {'default': []}
    r1 = re.compile (r'V:\s*(\S+)')     # extracts the voice id from a V: line
    ls = abcIn.splitlines ()
    for x in ls:                        # pass 1: collect percmap and MIDI lines per voice
        if 'I:percmap' in x:
            noot, step, midi, kop = map (lambda x: x.strip (), x.split ()[1:])
            if kop in fillmap: kop = kop + '+' + ',' + kop
            x = '%%%%map perc%s %s print=%s midi=%s heads=%s' % (id, noot, step, midi, kop)
            maps [id].append (x)
        if '%%MIDI' in x: dmaps [id].append (x)
        if 'V:' in x:
            r = r1.match (x)
            if r:
                id = r.group (1)
                if id not in maps: maps [id] = []; dmaps [id] = []
    ids = sorted (maps.keys ())
    for id in ids: abc += maps [id]     # emit all %%map definitions first
    id = 'default'
    for x in ls:                        # pass 2: copy lines, dropping the translated ones
        if 'I:percmap' in x: continue
        if '%%MIDI' in x: continue
        if 'V:' in x or 'K:' in x:
            r = r1.match (x)
            if r: id = r.group (1)
            abc.append (x)
            # flush the voice's MIDI settings right after its first V:/K: line
            if id in dmaps and len (dmaps [id]) > 0: abc.extend (dmaps [id]); del dmaps [id]
            if 'perc' in x and 'map=' not in x: x += ' map=perc'
            if 'map=perc' in x and len (maps [id]) > 0: abc.append ('%%voicemap perc' + id)
            if 'map=off' in x: abc.append ('%%voicemap')
        else:
            abc.append (x)
    return '\n'.join (abc) + '\n'
|
| 817 |
+
|
| 818 |
+
def addoct (ptc, o):    # xml staff step, xml octave number
    """Map an xml step letter + octave number to an abc pitch without
    accidental: octave 4 -> 'C', 5 -> 'c', higher adds apostrophes, lower
    adds commas."""
    pitch = ptc.lower () if o > 4 else ptc
    if o > 5:
        pitch += "'" * (o - 5)      # raise with apostrophes
    elif o < 4:
        pitch += "," * (4 - o)      # lower with commas
    return pitch                    # abc pitch == abc note without accidental
|
| 824 |
+
|
| 825 |
+
def chkbug (dt, m):
    """Return 1 when duration dt is plausible (longer than a 1/64 note of
    measure m), otherwise log a MuseScore-bug warning and return 0."""
    ok = dt > m.divs / 16           # a real duration exceeds a 1/64 note
    if ok:
        return 1
    info ('MuseScore bug: incorrect duration, smaller then 1/64! in measure %d, part %d' % (m.ixm, m.ixp))
    return 0
|
| 829 |
+
|
| 830 |
+
#----------------
|
| 831 |
+
# parser
|
| 832 |
+
#----------------
|
| 833 |
+
class Parser:
    """Walk the MusicXML tree and feed the Music/ABC output abstractions.
    (Methods follow; only class-level constants are defined here.)"""
    # 3 alternative notations of the same note for tablature mapping
    note_alts = [
        [x.strip () for x in '=C, ^C, =D, ^D, =E, =F, ^F, =G, ^G, =A, ^A, =B'.split (',')],
        [x.strip () for x in '^B, _D,^^C, _E, _F, ^E, _G,^^F, _A,^^G, _B, _C'.split (',')],
        [x.strip () for x in '__D,^^B,__E,__F,^^D,__G,^^E,__A,_/A,__B,__C,^^A'.split (',')] ]
    # semitone number within the octave for each step letter
    step_map = {'C':0,'D':2,'E':4,'F':5,'G':7,'A':9,'B':11}
|
| 839 |
+
def __init__ (s, options):
    """Initialise all parser state from the command line options
    (unfold repeats, number of chars per line, credit filter level, volta option, ...)."""
    s.slurBuf = {}      # dict of open slurs keyed by slur number
    s.dirStk = {}       # {direction-type + number -> (type, voice | time)} dict for proper closing
    s.ingrace = 0       # marks a sequence of grace notes
    s.msc = Music (options)     # global music data abstraction
    s.unfold = options.u        # turn unfolding repeats on
    s.ctf = options.c           # credit text filter level
    s.gStfMap = []      # [[abc voice numbers] for all parts]
    s.midiMap = []      # midi-settings for each abc voice, in order
    s.drumInst = {}     # inst_id -> midi pitch for channel 10 notes
    s.drumNotes = {}    # (xml voice, abc note) -> (midi note, note head)
    s.instMid = []      # [{inst id -> midi-settings} for all parts]
    s.midDflt = [-1,-1,-1,-91]  # default midi settings for channel, program, volume, panning
    s.msralts = {}      # xml-notenames (without octave) with accidentals from the key
    s.curalts = {}      # abc-notenames (with voice number) with passing accidentals
    s.stfMap = {}       # xml staff number -> [xml voice number]
    s.vce2stf = {}      # xml voice number -> allocated staff number
    s.clefMap = {}      # xml staff number -> abc clef (for header only)
    s.curClef = {}      # xml staff number -> current abc clef
    s.stemDir = {}      # xml voice number -> current stem direction
    s.clefOct = {}      # xml staff number -> current clef-octave-change
    s.curStf = {}       # xml voice number -> current xml staff number
    s.nolbrk = options.x;       # generate no linebreaks ($)
    s.jscript = options.j       # compatibility with javascript version
    s.ornaments = sorted (note_ornamentation_map.items ())
    s.doPageFmt = len (options.p) == 1  # translate xml page format
    s.tstep = options.t         # clef determines step on staff (percussion)
    s.dirtov1 = options.v1      # all directions to first voice of staff
    s.ped = options.ped         # render pedal directions
    s.wstems = options.stm      # translate stem elements
    s.pedVce = None     # voice for pedal directions
    s.repeat_str = {}   # staff number -> [measure number, repeat-text]
    s.tabVceMap = {}    # abc voice num -> [%%map ...] for tab voices
    s.koppen = {}       # noteheads needed for %%map
|
| 874 |
+
|
| 875 |
+
def matchSlur (s, type2, n, v2, note2, grace, stopgrace):   # match slur number n in voice v2, add abc code to before/after
    """Match an xml slur start/stop pair by number n; when both ends are found
    in the same voice, wrap the spanned notes in abc '(' ... ')'."""
    if type2 not in ['start', 'stop']: return   # slur type continue has no abc equivalent
    if n == None: n = '1'
    if n in s.slurBuf:
        type1, v1, note1, grace1 = s.slurBuf [n]
        if type2 != type1:          # slur complete, now check the voice
            if v2 == v1:            # begins and ends in the same voice: keep it
                if type1 == 'start' and (not grace1 or not stopgrace):  # normal slur: start before stop and no grace slur
                    note1.before = ['('] + note1.before     # keep left-right order!
                    note2.after += ')'
            # no else: don't bother with reversed stave spanning slurs
            del s.slurBuf [n]       # slur finished, remove from stack
        else:                       # double definition, keep the last
            info ('double slur numbers %s-%s in part %d, measure %d, voice %d note %s, first discarded' % (type2, n, s.msr.ixp+1, s.msr.ixm+1, v2, note2.ns))
            s.slurBuf [n] = (type2, v2, note2, grace)
    else:                           # unmatched slur, put in dict
        s.slurBuf [n] = (type2, v2, note2, grace)
|
| 892 |
+
|
| 893 |
+
def doNotations (s, note, nttn, isTab):
    """Translate an xml <notations> element (nttn) into abc decorations on
    note: ornaments, tremolos, fingering, string/fret (tablature), wavy
    lines (trills) and glissando/slide spanners."""
    for key, val in s.ornaments:
        if nttn.find (key) != None: note.before += [val]    # just concat all ornaments
    trem = nttn.find ('ornaments/tremolo')
    if trem != None:
        type = trem.get ('type')
        if type == 'single':
            note.before.insert (0, '!%s!' % (int (trem.text) * '/'))
        else:
            note.fact = None        # no time modification in ABC
            if s.tstep:             # abc2svg version
                if type == 'stop': note.before.insert (0, '!trem%s!' % trem.text);
            else:                   # abc2xml version
                if type == 'start': note.before.insert (0, '!%s-!' % (int (trem.text) * '/'));
    fingering = nttn.findall ('technical/fingering')
    for finger in fingering:        # handle multiple finger annotations
        if not isTab: note.before += ['!%s!' % finger.text] # fingering goes before chord (addChord)
    snaar = nttn.find ('technical/string')
    if snaar != None and isTab:
        if s.tstep:
            fret = nttn.find ('technical/fret')
            if fret != None: note.tab = (snaar.text, fret.text)
        else:
            deco = '!%s!' % snaar.text  # no double string decos (bug in musescore)
            if deco not in note.ntdec: note.ntdec += deco
    wvln = nttn.find ('ornaments/wavy-line')
    if wvln != None:
        if wvln.get ('type') == 'start': note.before = ['!trill(!'] + note.before   # keep left-right order!
        elif wvln.get ('type') == 'stop': note.before = ['!trill)!'] + note.before
    glis = nttn.find ('glissando')
    if glis == None: glis = nttn.find ('slide')     # treat slide as glissando
    if glis != None:
        lt = '~' if glis.get ('line-type') =='wavy' else '-'
        if glis.get ('type') == 'start': note.before = ['!%s(!' % lt] + note.before # keep left-right order!
        elif glis.get ('type') == 'stop': note.before = ['!%s)!' % lt] + note.before
|
| 928 |
+
|
| 929 |
+
def tabnote (s, alt, ptc, oct, v, ntrec):
    """Allocate a unique abc spelling for a tablature note so that the same
    (string, fret) pair always maps to the same abc note in voice v.  Tries up
    to 4 enharmonic spellings; the allocation is recorded in s.tabmap."""
    p = s.step_map [ptc] + int (alt or '0')     # p in -2 .. 13
    if p > 11: oct += 1     # octave correction
    if p < 0: oct -= 1
    p = p % 12              # remap p into 0..11
    snaar_nw, fret_nw = ntrec.tab   # the computed/annotated allocation of nt
    for i in range (4):     # support same note on 4 strings
        na = s.note_alts [i % 3] [p]    # get alternative representation of same note
        o = oct
        if na in ['^B', '^^B']: o -= 1  # because in adjacent octave
        if na in ['_C', '__C']: o += 1
        if '/' in na or i == 3: o = 9   # emergency notation for 4th string case
        nt = addoct (na, o)
        snaar, fret = s.tabmap.get ((v, nt), ('', ''))  # the current allocation of nt
        if not snaar: break             # note not yet allocated
        if snaar_nw == snaar: return nt # use present allocation
        if i == 3:          # new allocaion needed but none is free
            fmt = 'rejected: voice %d note %3s string %s fret %2s remains: string %s fret %s'
            info (fmt % (v, nt, snaar_nw, fret_nw, snaar, fret), 1)
            ntrec.tab = (snaar, fret)
    s.tabmap [v, nt] = ntrec.tab    # for tablature map (voice, note) -> (string, fret)
    return nt               # ABC code always in key C (with midi pitch alterations)
|
| 951 |
+
|
| 952 |
+
def ntAbc (s, ptc, oct, note, v, ntrec, isTab):     # pitch, octave -> abc notation
    """Translate an xml pitch (step ptc, octave oct, plus the note's
    accidental/alter children) into an abc note for voice v, tracking passing
    accidentals per measure in s.curalts.  Tablature notes are delegated to
    tabnote."""
    acc2alt = {
        'double-flat': -2,
        'flat-flat': -2,
        'flat': -1,
        'natural-flat': -1,
        'natural': 0,
        'sharp': 1,
        'natural-sharp': 1,
        'sharp-sharp': 2,
        'double-sharp': 2
    }
    oct += s.clefOct.get (s.curStf [v], 0)  # minus clef-octave-change value
    acc = note.findtext ('accidental')      # should be the notated accidental
    alt = note.findtext ('pitch/alter')     # pitch alteration (midi)
    if ntrec.tab: return s.tabnote (alt, ptc, oct, v, ntrec)    # implies s.tstep is true (options.t was given)
    elif isTab and s.tstep:
        nt = ['__','_','','^','^^'][int (alt or '0') + 2] + addoct (ptc, oct)
        info ('no string notation found for note %s in voice %d' % (nt, v), 1)
    p = addoct (ptc, oct)
    if alt == None and s.msralts.get (ptc, 0): alt = 0      # no alt but key implies alt -> natural!!
    if alt == None and (p, v) in s.curalts: alt = 0         # no alt but previous note had one -> natural!!
    if acc == None and alt == None: return p                # no acc, no alt
    elif acc != None:
        alt = acc2alt [acc]         # acc takes precedence over the pitch here!
    else:                           # now see if we really must add an accidental
        alt = int (float (alt))
        if (p, v) in s.curalts:     # the note in this voice has been altered before
            if alt == s.curalts [(p, v)]: return p          # alteration still the same
        elif alt == s.msralts.get (ptc, 0): return p        # alteration implied by the key
        tieElms = note.findall ('tie') + note.findall ('notations/tied')    # in xml we have separate notated ties and playback ties
        if 'stop' in [e.get ('type') for e in tieElms]: return p    # don't alter tied notes
        info ('accidental %d added in part %d, measure %d, voice %d note %s' % (alt, s.msr.ixp+1, s.msr.ixm+1, v+1, p))
    s.curalts [(p, v)] = alt
    p = ['__','_','=','^','^^'][alt+2] + p  # and finally ... prepend the accidental
    return p
|
| 988 |
+
|
| 989 |
+
def doNote (s, n):  # parse a musicXML note tag
    """Parse one xml <note> element: grace handling, tuplets, duration,
    pitch/rest translation, percussion mapping, ties, lyrics, stems and staff
    changes; finally append the result to the music abstraction s.msc."""
    note = Note ()
    v = int (n.findtext ('voice', '1'))
    if s.isSib: v += 100 * int (n.findtext ('staff', '1'))  # repair bug in Sibelius
    chord = n.find ('chord') != None
    p = n.findtext ('pitch/step') or n.findtext ('unpitched/display-step')
    o = n.findtext ('pitch/octave') or n.findtext ('unpitched/display-octave')
    r = n.find ('rest')
    numer = n.findtext ('time-modification/actual-notes')
    if numer:
        denom = n.findtext ('time-modification/normal-notes')
        note.fact = (int (numer), int (denom))
    note.tup = [x.get ('type') for x in n.findall ('notations/tuplet')]
    dur = n.findtext ('duration')
    grc = n.find ('grace')
    note.grace = grc != None
    note.before, note.after = [], ''    # strings with ABC stuff that goes before or after a note/chord
    if note.grace and not s.ingrace:    # open a grace sequence
        s.ingrace = 1
        note.before = ['{']
        if grc.get ('slash') == 'yes': note.before += ['/']     # acciaccatura
    stopgrace = not note.grace and s.ingrace
    if stopgrace:                       # close the grace sequence
        s.ingrace = 0
        s.msc.lastnote.after += '}'     # close grace on lastenote.after
    if dur == None or note.grace: dur = 0
    if r == None and n.get ('print-object') == 'no':
        if chord: return
        r = 1   # turn invisible notes (that advance the time) into invisible rests
    note.dur = int (dur)
    if r == None and (not p or not o):  # not a rest and no pitch
        s.msc.cnt.inc ('nopt', v)       # count unpitched notes
        o, p = 5,'E'                    # make it an E5 ??
    isTab = s.curClef and s.curClef.get (s.curStf [v], '').startswith ('tab')
    nttn = n.find ('notations')         # add ornaments
    if nttn != None: s.doNotations (note, nttn, isTab)
    e = n.find ('stem') if r == None else None  # no !stemless! before rest
    if e != None and e.text == 'none' and (not isTab or v in s.hasStems or s.tstep):
        note.before += ['!stemless!']; abcOut.stemless = 0;     # mark note as stemless
    e = n.find ('accidental')
    if e != None and e.get ('parentheses') == 'yes': note.ntdec += '!courtesy!'
    if r != None: noot = 'x' if n.get ('print-object') == 'no' or isTab else 'z'
    else: noot = s.ntAbc (p, int (o), n, v, note, isTab)
    if n.find ('unpitched') != None:
        clef = s.curClef [s.curStf [v]]     # the current clef for this voice
        step = staffStep (p, int (o), clef, s.tstep)    # (clef independent) step value of note on the staff
        instr = n.find ('instrument')
        instId = instr.get ('id') if instr != None else 'dummyId'
        midi = s.drumInst.get (instId, abcMid (noot))
        nh = n.findtext ('notehead', '').replace (' ','-')  # replace spaces in xml notehead names for percmap
        if nh == 'x': noot = '^' + noot.replace ('^','').replace ('_','')
        if nh in ['circle-x','diamond','triangle']: noot = '_' + noot.replace ('^','').replace ('_','')
        if nh and n.find ('notehead').get ('filled','') == 'yes': nh += '+'
        if nh and n.find ('notehead').get ('filled','') == 'no': nh += '-'
        s.drumNotes [(v, noot)] = (step, midi, nh)  # keep data for percussion map
    tieElms = n.findall ('tie') + n.findall ('notations/tied')  # in xml we have separate notated ties and playback ties
    if 'start' in [e.get ('type') for e in tieElms]:    # n can have stop and start tie
        noot = noot + '-'
    note.beam = sum ([1 for b in n.findall('beam') if b.text in ['continue', 'end']]) + int (note.grace)
    lyrlast = 0; rsib = re.compile (r'^.*verse')
    for e in n.findall ('lyric'):
        lyrnum = int (rsib.sub ('', e.get ('number', '1')))     # also do Sibelius numbers
        if lyrnum == 0: lyrnum = lyrlast + 1    # and correct Sibelius bugs
        else: lyrlast = lyrnum
        note.lyrs [lyrnum] = doSyllable (e)
    stemdir = n.findtext ('stem')
    if s.wstems and (stemdir == 'up' or stemdir == 'down'):
        if stemdir != s.stemDir.get (v, ''):
            s.stemDir [v] = stemdir
            s.msc.appendElem (v, '[I:stemdir %s]' % stemdir)
    if chord: s.msc.addChord (note, noot)
    else:
        xmlstaff = int (n.findtext ('staff', '1'))
        if s.curStf [v] != xmlstaff:        # the note should go to another staff
            dstaff = xmlstaff - s.curStf [v]    # relative new staff number
            s.curStf [v] = xmlstaff         # remember the new staff for this voice
            s.msc.appendElem (v, '[I:staff %+d]' % dstaff)  # insert a move before the note
        s.msc.appendNote (v, note, noot)
    for slur in n.findall ('notations/slur'):   # s.msc.lastnote points to the last real note/chord inserted above
        s.matchSlur (slur.get ('type'), slur.get ('number'), v, s.msc.lastnote, note.grace, stopgrace)  # match slur definitions
|
| 1070 |
+
|
| 1071 |
+
def doAttr (s, e):  # parse a musicXML attribute tag
    """Parse an xml <attributes> element: divisions, key, meter, measure-style
    repeats, transposition and clefs (with staff-details for tablature)."""
    teken = {'C1':'alto1','C2':'alto2','C3':'alto','C4':'tenor','F4':'bass','F3':'bass3','G2':'treble','TAB':'tab','percussion':'perc'}
    dvstxt = e.findtext ('divisions')
    if dvstxt: s.msr.divs = int (dvstxt)
    steps = int (e.findtext ('transpose/chromatic', '0'))   # for transposing instrument
    fifths = e.findtext ('key/fifths')
    first = s.msc.tijd == 0 and s.msr.ixm == 0  # first attributes in first measure
    if fifths:
        key, s.msralts = setKey (int (fifths), e.findtext ('key/mode','major'))
        if first and not steps and abcOut.key == 'none':
            abcOut.key = key    # first measure -> header, if not transposing instrument or percussion part!
        elif key != abcOut.key or not first:
            s.msr.attr += '[K:%s]' % key    # otherwise -> voice
    beats = e.findtext ('time/beats')
    if beats:
        unit = e.findtext ('time/beat-type')
        mtr = beats + '/' + unit
        if first: abcOut.mtr = mtr          # first measure -> header
        else: s.msr.attr += '[M:%s]' % mtr  # otherwise -> voice
        s.msr.mtr = int (beats), int (unit)
    s.msr.mdur = (s.msr.divs * s.msr.mtr[0] * 4) // s.msr.mtr[1]    # duration of measure in xml-divisions
    for ms in e.findall('measure-style'):
        n = int (ms.get ('number', '1'))    # staff number
        voices = s.stfMap [n]               # all voices of staff n
        for mr in ms.findall('measure-repeat'):
            ty = mr.get('type')
            if ty == 'start':   # remember start measure number and text voor each staff
                s.repeat_str [n] = [s.msr.ixm, mr.text]
                for v in voices:    # insert repeat into all voices, value will be overwritten at stop
                    s.msc.insertElem (v, s.repeat_str [n])
            elif ty == 'stop':  # calculate repeat measure count for this staff n
                start_ix, text_ = s.repeat_str [n]
                repeat_count = s.msr.ixm - start_ix
                if text_:
                    mid_str = "%s " % text_
                    repeat_count /= int (text_)     # NOTE(review): '/' yields a float in Python 3; '%d' below still truncates, but '//' would be cleaner — confirm
                else:
                    mid_str = ""    # overwrite repeat with final string
                s.repeat_str [n][0] = '[I:repeat %s%d]' % (mid_str, repeat_count)
                del s.repeat_str [n]    # remove closed repeats
    toct = e.findtext ('transpose/octave-change', '')
    if toct: steps += 12 * int (toct)       # extra transposition of toct octaves
    for clef in e.findall ('clef'):         # a part can have multiple staves
        n = int (clef.get ('number', '1'))  # local staff number for this clef
        sgn = clef.findtext ('sign')
        line = clef.findtext ('line', '') if sgn not in ['percussion','TAB'] else ''
        cs = teken.get (sgn + line, '')
        oct = clef.findtext ('clef-octave-change', '') or '0'
        if oct: cs += {-2:'-15', -1:'-8', 1:'+8', 2:'+15'}.get (int (oct), '')
        s.clefOct [n] = -int (oct);         # xml playback pitch -> abc notation pitch
        if steps: cs += ' transpose=' + str (steps)
        stfdtl = e.find ('staff-details')
        if stfdtl and int (stfdtl.get ('number', '1')) == n:
            lines = stfdtl.findtext ('staff-lines')
            if lines:
                lns= '|||' if lines == '3' and sgn == 'TAB' else lines
                cs += ' stafflines=%s' % lns
                s.stafflines = int (lines)  # remember for tab staves
            strings = stfdtl.findall ('staff-tuning')
            if strings:
                tuning = [st.findtext ('tuning-step') + st.findtext ('tuning-octave') for st in strings]
                cs += ' strings=%s' % ','.join (tuning)
            capo = stfdtl.findtext ('capo')
            if capo: cs += ' capo=%s' % capo
        s.curClef [n] = cs      # keep track of current clef (for percmap)
        if first: s.clefMap [n] = cs    # clef goes to header (where it is mapped to voices)
        else:
            voices = s.stfMap[n]    # clef change to all voices of staff n
            for v in voices:
                if n != s.curStf [v]:   # voice is not at its home staff n
                    dstaff = n - s.curStf [v]
                    s.curStf [v] = n    # reset current staff at start of measure to home position
                    s.msc.appendElem (v, '[I:staff %+d]' % dstaff)
                s.msc.appendElem (v, '[K:%s]' % cs)
|
| 1145 |
+
|
| 1146 |
+
def findVoice (s, i, es):
    """Determine the staff and voice that the direction at es[i] belongs to.

    Returns (staff number, voice for the direction, first voice of the staff).
    By default a direction is attached to the voice of the next note in the
    measure; with the --v1 option it always goes to the first voice of the
    staff the direction belongs to.
    """
    # fix: use the string default '1', consistent with every other findtext
    # call in this file (the old int default 1 relied on int (1) being a no-op)
    stfnum = int (es[i].findtext ('staff', '1'))    # directions belong to a staff
    vs = s.stfMap [stfnum]                  # voices in this staff
    v1 = vs [0] if vs else 1                # directions to first voice of staff
    if s.dirtov1: return stfnum, v1, v1     # option --v1
    for e in es [i+1:]:                     # or to the voice of the next note
        if e.tag == 'note':
            v = int (e.findtext ('voice', '1'))
            if s.isSib: v += 100 * int (e.findtext ('staff', '1'))  # repair bug in Sibelius
            stf = s.vce2stf [v]             # use our own staff allocation
            return stf, v, v1               # voice of next note, first voice of staff
        if e.tag == 'backup': break         # notes before the backup are unrelated
    return stfnum, v1, v1                   # no note found, fall back to v1
def doDirection (s, e, i, es):  # parse a musicXML direction tag
    """Translate direction element e (the i-th element of measure list es) to abc.

    Handles <sound> attributes (midi program/channel, tempo, D.C./D.S. jumps),
    metronome marks, wedges, octave shifts, pedal marks, dynamics, coda/segno
    and free text annotations.  Output is appended to voices via s.msc.
    """
    def addDirection (x, vs, tijd, stfnum):
        # emit abc code x to voice(s) vs; when tijd is given, insert at that time
        if not x: return
        vs = s.stfMap [stfnum] if '!8v' in x else [vs]  # ottava's go to all voices of staff
        for v in vs:
            if tijd != None:    # insert at time of encounter
                s.msc.appendElemT (v, x.replace ('(',')').replace ('ped','ped-up'), tijd)
            else:
                s.msc.appendElem (v, x)
    def startStop (dtype, vs, stfnum=1):
        # pair start/stop directions of type dtype (reads element t from enclosing scope);
        # code is only emitted once both ends of the pair have been seen
        typmap = {'down':'!8va(!', 'up':'!8vb(!', 'crescendo':'!<(!', 'diminuendo':'!>(!', 'start':'!ped!'}
        type = t.get ('type', '')
        k = dtype + t.get ('number', '1')   # key to match the closing direction
        if type in typmap:                  # opening the direction
            x = typmap [type]
            if k in s.dirStk:               # closing direction already encountered
                stype, tijd = s.dirStk [k]; del s.dirStk [k]
                if stype == 'stop':
                    addDirection (x, vs, tijd, stfnum)
                else:
                    info ('%s direction %s has no stop in part %d, measure %d, voice %d' % (dtype, stype, s.msr.ixp+1, s.msr.ixm+1, vs+1))
                    s.dirStk [k] = ((type , vs))    # remember voice and type for closing
            else:
                s.dirStk [k] = ((type , vs))        # remember voice and type for closing
        elif type == 'stop':
            if k in s.dirStk:               # matching open direction found
                type, vs = s.dirStk [k]; del s.dirStk [k]   # into the same voice
                if type == 'stop':
                    info ('%s direction %s has double stop in part %d, measure %d, voice %d' % (dtype, type, s.msr.ixp+1, s.msr.ixm+1, vs+1))
                    x = ''
                else:
                    x = typmap [type].replace ('(',')').replace ('ped','ped-up')
            else:                           # closing direction found before opening
                s.dirStk [k] = ('stop', s.msc.tijd)
                x = ''                      # delay code generation until opening found
        elif type in ['continue', 'resume', 'discontinue', 'change']:
            # intermediate start/stop states ('continue', 'resume', ...) have no
            # abc equivalent; skip them silently
            # info('Ignoring unsupported direction type: %s' % type)
            x = ''
        else: raise ValueError ('wrong direction type')
        addDirection (x, vs, None, stfnum)
    tempo, wrdstxt = None, ''
    plcmnt = e.get ('placement')
    stf, vs, v1 = s.findVoice (i, es)
    jmp = ''    # for jump sound elements: dacapo, dalsegno and family
    jmps = [('dacapo','D.C.'),('dalsegno','D.S.'),('tocoda','dacoda'),('fine','fine'),('coda','O'),('segno','S')]
    t = e.find ('sound')    # there are many possible attributes for sound
    if t != None:
        minst = t.find ('midi-instrument')
        if minst:
            prg = t.findtext ('midi-instrument/midi-program')
            chn = t.findtext ('midi-instrument/midi-channel')
            vids = [v for v, id in s.vceInst.items () if id == minst.get ('id')]
            if vids: vs = vids [0]  # direction for the indentified voice, not the staff
            parm, inst = ('program', str (int (prg) - 1)) if prg else ('channel', chn)
            if inst and abcOut.volpan > 0: s.msc.appendElem (vs, '[I:MIDI= %s %s]' % (parm, inst))
        tempo = t.get ('tempo') # look for tempo attribute
        if tempo:
            tempo = '%.0f' % float (tempo)  # hope it is a number and insert in voice 1
            tempo_units = (1,4) # always 1/4 for sound elements!
        for r, v in jmps:
            if t.get (r, ''): jmp = v; break
    dirtypes = e.findall ('direction-type')
    for dirtyp in dirtypes:
        units = { 'whole': (1,1), 'half': (1,2), 'quarter': (1,4), 'eighth': (1,8) }
        metr = dirtyp.find ('metronome')
        if metr != None:
            t = metr.findtext ('beat-unit', '')
            if t in units: tempo_units = units [t]
            else: tempo_units = units ['quarter']
            if metr.find ('beat-unit-dot') != None:
                tempo_units = simplify (tempo_units [0] * 3, tempo_units [1] * 2)   # dotted unit
            # NOTE(review): debugtext is never read afterwards -- leftover debug aid
            debugtext = metr.findtext ('per-minute')
            tmpro = None
            if metr.findtext ('per-minute'):
                tmpro = re.search ('[.\d]+', metr.findtext ('per-minute'))  # look for a number
            if tmpro: tempo = tmpro.group ()    # overwrites the value set by the sound element of this direction
        t = dirtyp.find ('wedge')
        if t != None: startStop ('wedge', vs)
        allwrds = dirtyp.findall ('words')  # insert text annotations
        if not allwrds: allwrds = dirtyp.findall ('rehearsal')  # treat rehearsal mark as text annotation
        for wrds in allwrds:
            if jmp: # ignore the words when a jump sound element is present in this direction
                s.msc.appendElem (vs, '!%s!' % jmp , 1) # to voice
                break
            plc = plcmnt == 'below' and '_' or '^'
            if float (wrds.get ('default-y', '0')) < 0: plc = '_'
            wrdstxt += (wrds.text or '').replace ('"','\\"').replace ('\n', '\\n')
        wrdstxt = wrdstxt.strip ()
        for key, val in dynamics_map.items ():
            if dirtyp.find ('dynamics/' + key) != None:
                s.msc.appendElem (vs, val, 1)   # to voice
        if dirtyp.find ('coda') != None: s.msc.appendElem (vs, 'O', 1)
        if dirtyp.find ('segno') != None: s.msc.appendElem (vs, 'S', 1)
        t = dirtyp.find ('octave-shift')
        if t != None: startStop ('octave-shift', vs, stf)   # assume size == 8 for the time being
        t = dirtyp.find ('pedal')
        if t != None and s.ped:
            if not s.pedVce: s.pedVce = vs  # all pedal directions go to one voice
            startStop ('pedal', s.pedVce)
        if dirtyp.findtext ('other-direction') == 'diatonic fretting': s.diafret = 1;
    if tempo:
        tempo = '%.0f' % float (tempo)  # hope it is a number and insert in voice 1
        if s.msc.tijd == 0 and s.msr.ixm == 0:  # first measure -> header
            abcOut.tempo = tempo
            abcOut.tempo_units = tempo_units
        else:
            s.msc.appendElem (v1, '[Q:%d/%d=%s]' % (tempo_units [0], tempo_units [1], tempo))   # otherwise -> 1st voice
    if wrdstxt: s.msc.appendElem (vs, '"%s%s"' % (plc, wrdstxt), 1) # to voice, but after tempo
def doHarmony (s, e, i, es):    # translate a musicXML harmony element
    """Append an abc chord symbol (e.g. "Abm7/G") for harmony element e
    to the voice of the next note in the measure."""
    _, voice, _ = s.findVoice (i, es)
    triads   = {'major':'', 'minor':'m', 'augmented':'+', 'diminished':'dim', 'dominant':'7', 'half-diminished':'m7b5'}
    prefixes = {'major':'maj', 'dominant':'', 'minor':'m', 'diminished':'dim', 'augmented':'+', 'suspended':'sus'}
    suffixes = {'second':'2', 'fourth':'4', 'seventh':'7', 'sixth':'6', 'ninth':'9', '11th':'11', '13th':'13'}
    accs     = {'1':'#', '0':'', '-1':'b'}
    root = e.findtext ('root/root-step', '')
    alt = accs.get (e.findtext ('root/root-alter'), '')
    sus = ''
    kind = e.findtext ('kind', '')
    if kind in triads:
        kind = triads [kind]
    elif '-' in kind:                       # xml chord names: <triad name>-<modification>
        triad, mod = kind.split ('-')
        kind = prefixes.get (triad, '') + suffixes.get (mod, '')
        if kind.startswith ('sus'):         # the sus-suffix belongs at the end of the symbol
            kind, sus = '', kind
    elif kind == 'none':
        kind = e.find ('kind').get ('text', '')
    for deg in e.findall ('degree'):        # chord alterations
        kind += accs.get (deg.findtext ('degree-alter'), '') + deg.findtext ('degree-value', '')
    for old, new in (('79', '9'), ('713', '13'), ('maj6', '6')):    # normalize common spellings
        kind = kind.replace (old, new)
    bass = e.findtext ('bass/bass-step', '') + accs.get (e.findtext ('bass/bass-alter'), '')
    chord = '"%s%s%s%s%s"' % (root, alt, kind, sus, bass and '/' + bass)
    s.msc.appendElem (voice, chord, 1)
def doBarline (s, e):
    """Translate a barline element.

    Returns a repeat code: 0 = no repeat, 1 = begin repeat, 2 = end repeat.
    Unless repeats are being unfolded, also updates the current measure's
    left/right barline style and volta number in s.msr.
    """
    rep = e.find ('repeat')
    if rep != None: rep = rep.get ('direction')
    if s.unfold:        # unfold repeat, don't translate barlines
        return rep and (rep == 'forward' and 1 or 2) or 0
    loc = e.get ('location', 'right')   # right is the default
    if loc == 'right':  # only change style for the right side
        style = e.findtext ('bar-style')
        if style == 'light-light': s.msr.rline = '||'
        elif style == 'light-heavy': s.msr.rline = '|]'
    if rep != None:     # repeat found
        if rep == 'forward': s.msr.lline = ':'
        else: s.msr.rline = ':|'    # override barline style
    end = e.find ('ending')
    if end != None:
        if end.get ('type') == 'start':
            n = end.get ('number', '1').replace ('.','').replace (' ','')
            # fix: catch only ValueError from int () instead of a bare except
            try: list (map (int, n.split (',')))    # should be a list of integers
            except ValueError: n = '"%s"' % n.strip ()  # illegal musicXML: keep text as quoted volta label
            s.msr.lnum = n  # assume a start is always at the beginning of a measure
        elif s.msr.rline == '|':    # stop and discontinue the same in ABC ?
            s.msr.rline = '||'      # to stop on a normal barline use || in ABC ?
    return 0
def doPrint (s, e):     # print element: new system/page -> abc line break
    """Return '$' (an abc line break) when e requests a new system or a new
    page, unless line breaks are suppressed; otherwise return None."""
    wants_break = 'yes' in (e.get ('new-system'), e.get ('new-page'))
    if wants_break and not s.nolbrk:
        return '$'
def doPartList (s, e):  # translate the start/stop-event-based xml-partlist into proper tree
    """Collect per-part midi settings and build the nested part list.

    Side effects: appends an {instrument-id -> midi settings} dict to
    s.instMid for every score-part and records percussion pitches in
    s.drumInst.  Returns the nested partlist used for the abc header.
    """
    for sp in e.findall ('part-list/score-part'):
        midi = {}
        for m in sp.findall ('midi-instrument'):
            # channel, program, volume, pan -- with defaults from s.midDflt
            x = [m.findtext (p, s.midDflt [i]) for i,p in enumerate (['midi-channel','midi-program','volume','pan'])]
            pan = float (x[3])
            if pan >= -90 and pan <= 90: # would be better to map behind-pannings
                pan = (float (x[3]) + 90) / 180 * 127 # xml between -90 and +90 -> midi 0..127
            midi [m.get ('id')] = [int (x[0]), int (x[1]), float (x[2]) * 1.27, pan] # volume 100 -> midi 127
            up = m.findtext ('midi-unpitched')
            if up: s.drumInst [m.get ('id')] = int (up) - 1 # store midi-pitch for channel 10 notes
        s.instMid.append (midi)
    ps = e.find ('part-list')               # partlist = [groupelem]
    xs = getPartlist (ps)                   # groupelem = partname | grouplist
    partlist, _ = parseParts (xs, {}, [])   # grouplist = [groupelem, ..., groupdata]
    return partlist # groupdata = [group-symbol, group-barline, group-name, group-abbrev]
def mkTitle (s, e):
    """Collect title, movement title, creators, rights and credit texts and
    format them into the abc header fields T:/C:/Z: (stored on abcOut)."""
    def filterCredits (y): # y == filter level, higher filters less
        cs = []
        for x in credits: # skip redundant credit lines
            if y < 6 and (x in title or x in mvttl): continue # sure skip
            if y < 5 and (x in composer or x in lyricist): continue # almost sure skip
            if y < 4 and ((title and title in x) or (mvttl and mvttl in x)): continue # may skip too much
            if y < 3 and ([1 for c in composer if c in x] or [1 for c in lyricist if c in x]): continue # skips too much
            if y < 2 and re.match (r'^[\d\W]*$', x): continue # line only contains numbers and punctuation
            cs.append (x)
        if y == 0 and (title + mvttl): cs = '' # default: only credit when no title set
        return cs
    title = e.findtext ('work/work-title', '').strip ()
    mvttl = e.findtext ('movement-title', '').strip ()
    composer, lyricist, credits = [], [], []
    for creator in e.findall ('identification/creator'):
        if creator.text:
            if creator.get ('type') == 'composer':
                composer += [line.strip () for line in creator.text.split ('\n')]
            elif creator.get ('type') in ('lyricist', 'transcriber'):
                lyricist += [line.strip () for line in creator.text.split ('\n')]
    for rights in e.findall ('identification/rights'):  # rights go to the Z: field too
        if rights.text:
            lyricist += [line.strip () for line in rights.text.split ('\n')]
    for credit in e.findall('credit'):
        # NOTE: the generator variable e has its own scope and does not clobber parameter e
        cs = ''.join (e.text or '' for e in credit.findall('credit-words'))
        credits += [re.sub (r'\s*[\r\n]\s*', ' ', cs)]  # joined lines of one credit element
    credits = filterCredits (s.ctf)     # s.ctf == credit text filter level (-c option)
    if title: title = 'T:%s\n' % title.replace ('\n', '\nT:')
    if mvttl: title += 'T:%s\n' % mvttl.replace ('\n', '\nT:')
    if credits: title += '\n'.join (['T:%s' % c for c in credits]) + '\n'
    if composer: title += '\n'.join (['C:%s' % c for c in composer]) + '\n'
    if lyricist: title += '\n'.join (['Z:%s' % c for c in lyricist]) + '\n'
    if title: abcOut.title = title[:-1]     # strip the trailing newline
    s.isSib = 'Sibelius' in (e.findtext ('identification/encoding/software') or '')
    if s.isSib: info ('Sibelius MusicXMl is unreliable')
def doDefaults (s, e):
    """Translate the <defaults> scaling and page layout into abc page-format
    settings on abcOut (only when the -p option was given)."""
    if not s.doPageFmt: return  # return if -pf option absent
    d = e.find ('defaults')
    if d == None: return
    mils = d.findtext ('scaling/millimeters')   # mils == staff height (mm)
    tenths = d.findtext ('scaling/tenths')      # staff height in tenths
    if not mils or not tenths: return
    xmlScale = float (mils) / float (tenths) / 10   # tenths -> mm
    space = 10 * xmlScale       # space between staff lines == 10 tenths
    abcScale = space / 0.2117   # 0.2117 cm = 6pt = space between staff lines for scale = 1.0 in abcm2ps
    abcOut.pageFmt ['scale'] = abcScale
    eks = 2 * ['page-layout/'] + 4 * ['page-layout/page-margins/']
    eks = [a+b for a,b in zip (eks, 'page-height,page-width,left-margin,right-margin,top-margin,bottom-margin'.split (','))]
    for i in range (6):
        v = d.findtext (eks [i])
        k = abcOut.pagekeys [i+1]   # pagekeys [0] == scale already done, skip it
        if not abcOut.pageFmt [k] and v:
            # fix: the message was never formatted -- a comma passed the tuple as
            # info's second (warn) argument; use %-formatting and catch only ValueError
            try: abcOut.pageFmt [k] = float (v) * xmlScale  # -> cm
            except ValueError: info ('illegal value %s for xml element %s' % (v, eks [i])); continue # just skip illegal values
def locStaffMap (s, part, maten):   # map voice to staff with majority voting
    """Allocate every xml voice of this part to the staff where most of its
    notes occur.  Fills s.stfMap, s.vce2stf, s.curStf, s.vceInst, s.hasStems
    and s.msc.vnums."""
    staff_counts = {}       # {voice -> {staff -> number of notes}}
    s.vceInst = {}          # {voice -> instrument id} for this part
    s.msc.vnums = {}        # all voice id's seen in this part
    s.hasStems = {}         # voices with at least one stemmed note (for tab key)
    s.stfMap, s.clefMap = {}, {}    # staff -> [voices], staff -> clef
    for note in part.findall ('measure/note'):  # count staff allocations for all notes
        v = int (note.findtext ('voice', '1'))
        if s.isSib:
            v += 100 * int (note.findtext ('staff', '1'))   # repair bug in Sibelius
        s.msc.vnums [v] = 1
        stf = int (note.findtext ('staff', '1'))
        s.stfMap [stf] = []
        counts = staff_counts.setdefault (v, {})
        counts [stf] = counts.get (stf, 0) + 1  # ++ number of allocations for this staff
        inst = note.find ('instrument')
        if inst != None:
            s.vceInst [v] = inst.get ('id')
        stem = note.findtext ('stem')
        if note.find ('rest') == None and stem != 'none':
            s.hasStems [v] = 1      # XML voice v has at least one stem
    voices = list (staff_counts)
    if s.jscript or s.isSib:
        voices.sort ()              # deterministic voice order for compatibility
    for v in voices:                # choose the staff with most allocations per voice
        winner = max ((n, sn) for sn, n in staff_counts [v].items ()) [1]
        s.stfMap [winner].append (v)
        s.vce2stf [v] = winner      # reverse map
        s.curStf [v] = winner       # current staff of XML voice v
def addStaffMap (s, vvmap): # vvmap: xml voice number -> global abc voice number
    """Translate this part's staff/voice layout to abc voice numbers and
    assign a clef to every abc voice; appends the result to s.gStfMap."""
    part = []   # default: brace on staffs of one part
    for stf, voices in sorted (s.stfMap.items ()): # s.stfMap has xml staff and voice numbers
        locmap = [vvmap [iv] for iv in voices if iv in vvmap]
        nostem = [(iv not in s.hasStems) for iv in voices if iv in vvmap] # same order as locmap
        if locmap: # abc voice number of staff stf
            part.append (locmap)
            clef = s.clefMap.get (stf, 'treble') # {xml staff number -> clef}
            for i, iv in enumerate (locmap):
                clef_attr = ''
                if clef.startswith ('tab'): # tab clefs may need extra attributes
                    if nostem [i] and 'nostems' not in clef: clef_attr = ' nostems'
                    if s.diafret and 'diafret' not in clef: clef_attr += ' diafret' # for all voices in the part
                abcOut.clefs [iv] = clef + clef_attr # add nostems when all notes of voice had no stem
    s.gStfMap.append (part)
def addMidiMap (s, ip, vvmap): # map abc voices to midi settings
    """Attach midi settings (channel, program, volume, pan plus percussion
    note mappings) to the abc voices of part ip and generate %%map lines
    for tablature voices."""
    instr = s.instMid [ip] # get the midi settings for this part
    if instr.values (): defInstr = list(instr.values ())[0] # default settings = first instrument
    else: defInstr = s.midDflt # no instruments defined
    xs = []
    for v, vabc in vvmap.items (): # xml voice num, abc voice num
        ks = sorted (s.drumNotes.items ())
        ds = [(nt, step, midi, head) for (vd, nt), (step, midi, head) in ks if v == vd] # map perc notes
        id = s.vceInst.get (v, '') # get the instrument-id for part with multiple instruments
        if id in instr: # id is defined as midi-instrument in part-list
            xs.append ((vabc, instr [id] + ds)) # get midi settings for id
        else: xs.append ((vabc, defInstr + ds)) # only one instrument for this part
    xs.sort () # put abc voices in order
    s.midiMap.extend ([midi for v, midi in xs])
    # open-string notes per staff line (low to high) for tab print positions
    snaarmap = ['E','G','B','d', 'f', 'a', "c'", "e'"]
    # chromatic fret number -> diatonic fret name (for diatonic fretting)
    diamap = '0,1-,1,1+,2,3,3,4,4,5,6,6+,7,8-,8,8+,9,10,10,11,11,12,13,13+,14'.split (',')
    for k in sorted (s.tabmap.keys ()): # add %%map's for all tab voices
        v, noot = k;
        snaar, fret = s.tabmap [k];
        if s.diafret: fret = diamap [int (fret)]
        vabc = vvmap [v]
        snaar = s.stafflines - int (snaar) # string number counted from the top line
        xs = s.tabVceMap.get (vabc, [])
        xs.append ('%%%%map tab%d %s print=%s heads=kop%s\n' % (vabc, noot, snaarmap [snaar], fret))
        s.tabVceMap [vabc] = xs
        s.koppen [fret] = 1 # collect noteheads for SVG defs
def parse (s, fobj):
    """Parse a complete MusicXML document from file object fobj and write
    the abc translation via the global abcOut.

    Iterates over all parts; per part it resets the per-part state, walks
    the measures (optionally unfolding simple repeats) and dispatches every
    measure child element to the matching do* handler.
    """
    vvmapAll = {}   # collect xml->abc voice maps (vvmap) of all parts
    e = E.parse (fobj)
    s.mkTitle (e)
    s.doDefaults (e)
    partlist = s.doPartList (e)
    parts = e.findall ('part')
    for ip, p in enumerate (parts):
        maten = p.findall ('measure')
        s.locStaffMap (p, maten)    # {voice -> staff} for this part
        s.drumNotes = {}    # (xml voice, abc note) -> (midi note, note head)
        s.clefOct = {}      # xml staff number -> current clef-octave-change
        s.curClef = {}      # xml staff number -> current abc clef
        s.stemDir = {}      # xml voice number -> current stem direction
        s.tabmap = {}       # (xml voice, abc note) -> (string, fret)
        s.diafret = 0       # use diatonic fretting
        s.stafflines = 5
        s.msc.initVoices (newPart = 1)  # create all voices
        aantalHerhaald = 0  # keep track of number of repetitions
        herhaalMaat = 0     # target measure of the repetition
        divisions = []      # current value of <divisions> for each measure
        s.msr = Measure (ip)    # various measure data
        while s.msr.ixm < len (maten):
            # fix: removed leftover debug code (if ip == 31 and s.msr.ixm == 405: print(''))
            maat = maten [s.msr.ixm]
            herhaal, lbrk = 0, ''
            s.msr.reset ()
            s.curalts = {}  # passing accidentals are reset each measure
            es = list (maat)
            for i, e in enumerate (es):
                if e.tag == 'note': s.doNote (e)
                elif e.tag == 'attributes': s.doAttr (e)
                elif e.tag == 'direction':
                    s.doDirection (e, i, es)
                elif e.tag == 'sound': s.doDirection (maat, i, es)  # sound element directly in measure!
                elif e.tag == 'harmony': s.doHarmony (e, i, es)
                elif e.tag == 'barline':
                    herhaal = s.doBarline (e)
                elif e.tag == 'backup':
                    dt = int (e.findtext ('duration'))
                    if chkbug (dt, s.msr): s.msc.incTime (-dt)
                elif e.tag == 'forward':
                    dt = int (e.findtext ('duration'))
                    if chkbug (dt, s.msr): s.msc.incTime (dt)
                elif e.tag == 'print': lbrk = s.doPrint (e)
            s.msc.addBar (lbrk, s.msr)
            divisions.append (s.msr.divs)
            if herhaal == 1:            # begin repeat: remember the target measure
                herhaalMaat = s.msr.ixm
                s.msr.ixm += 1
            elif herhaal == 2:          # end repeat
                if aantalHerhaald < 1:  # jump back once
                    s.msr.ixm = herhaalMaat
                    aantalHerhaald += 1
                else:
                    aantalHerhaald = 0  # reset
                    s.msr.ixm += 1      # just continue
            else: s.msr.ixm += 1        # on to the next measure
        for rv in s.repeat_str.values ():   # close hanging measure-repeats without stop
            rv [0] = '[I:repeat %s %d]' % (rv [1], 1)
        vvmap = s.msc.outVoices (divisions, ip, s.isSib)
        s.addStaffMap (vvmap)   # update global staff map
        s.addMidiMap (ip, vvmap)
        vvmapAll.update (vvmap)
    if vvmapAll:    # skip output if no part has any notes
        abcOut.mkHeader (s.gStfMap, partlist, s.midiMap, s.tabVceMap, s.koppen)
        abcOut.writeall ()
    else: info ('nothing written, %s has no notes ...' % abcOut.fnmext)
| 1542 |
+
#----------------
|
| 1543 |
+
# Main Program
|
| 1544 |
+
#----------------
|
| 1545 |
+
if __name__ == '__main__':
|
| 1546 |
+
from optparse import OptionParser
|
| 1547 |
+
from glob import glob
|
| 1548 |
+
from zipfile import ZipFile
|
| 1549 |
+
ustr = '%prog [-h] [-u] [-m] [-c C] [-d D] [-n CPL] [-b BPL] [-o DIR] [-v V]\n'
|
| 1550 |
+
ustr += '[-x] [-p PFMT] [-t] [-s] [-i] [--v1] [--noped] [--stems] <file1> [<file2> ...]'
|
| 1551 |
+
parser = OptionParser (usage=ustr, version=str(VERSION))
|
| 1552 |
+
parser.add_option ("-u", action="store_true", help="unfold simple repeats")
|
| 1553 |
+
parser.add_option ("-m", action="store", help="0 -> no %%MIDI, 1 -> minimal %%MIDI, 2-> all %%MIDI", default=0)
|
| 1554 |
+
parser.add_option ("-c", action="store", type="int", help="set credit text filter to C", default=0, metavar='C')
|
| 1555 |
+
parser.add_option ("-d", action="store", type="int", help="set L:1/D", default=0, metavar='D') # ??????????????L
|
| 1556 |
+
parser.add_option ("-n", action="store", type="int", help="CPL: max number of characters per line (default 100)", default=0, metavar='CPL')
|
| 1557 |
+
parser.add_option ("-b", action="store", type="int", help="BPL: max number of bars per line", default=0, metavar='BPL')
|
| 1558 |
+
parser.add_option ("-o", action="store", help="store abc files in DIR", default='', metavar='DIR')
|
| 1559 |
+
parser.add_option ("-v", action="store", type="int", help="set volta typesetting behaviour to V", default=0, metavar='V')
|
| 1560 |
+
parser.add_option ("-x", action="store_true", help="output no line breaks")
|
| 1561 |
+
parser.add_option ("-p", action="store", help="pageformat PFMT (cm) = scale, pageheight, pagewidth, leftmargin, rightmargin, topmargin, botmargin", default='', metavar='PFMT')
|
| 1562 |
+
parser.add_option ("-j", action="store_true", help="switch for compatibility with javascript version")
|
| 1563 |
+
parser.add_option ("-t", action="store_true", help="translate perc- and tab-staff to ABC code with %%map, %%voicemap")
|
| 1564 |
+
parser.add_option ("-s", action="store_true", help="shift node heads 3 units left in a tab staff")
|
| 1565 |
+
parser.add_option ("--v1", action="store_true", help="start-stop directions allways to first voice of staff")
|
| 1566 |
+
parser.add_option ("--noped", action="store_false", help="skip all pedal directions", dest='ped', default=True)
|
| 1567 |
+
parser.add_option ("--stems", action="store_true", help="translate stem directions", dest='stm', default=False)
|
| 1568 |
+
parser.add_option ("-i", action="store_true", help="read xml file from standard input")
|
| 1569 |
+
options, args = parser.parse_args ()
|
| 1570 |
+
if options.n < 0: parser.error ('only values >= 0')
|
| 1571 |
+
if options.b < 0: parser.error ('only values >= 0')
|
| 1572 |
+
if options.d and options.d not in [2**n for n in range (10)]:
|
| 1573 |
+
parser.error ('D should be on of %s' % ','.join ([str(2**n) for n in range (10)]))
|
| 1574 |
+
options.p = options.p and options.p.split (',') or [] # ==> [] | [string]
|
| 1575 |
+
if len (args) == 0 and not options.i: parser.error ('no input file given')
|
| 1576 |
+
pad = options.o
|
| 1577 |
+
if pad:
|
| 1578 |
+
if not os.path.exists (pad): os.mkdir (pad)
|
| 1579 |
+
if not os.path.isdir (pad): parser.error ('%s is not a directory' % pad)
|
| 1580 |
+
fnmext_list = []
|
| 1581 |
+
for i in args: fnmext_list += glob (i)
|
| 1582 |
+
if options.i: fnmext_list = ['stdin.xml']
|
| 1583 |
+
if not fnmext_list: parser.error ('none of the input files exist')
|
| 1584 |
+
for X, fnmext in enumerate (fnmext_list):
|
| 1585 |
+
fnm, ext = os.path.splitext (fnmext)
|
| 1586 |
+
if ext.lower () not in ('.xml','.mxl','.musicxml'):
|
| 1587 |
+
info ('skipped input file %s, it should have extension .xml or .mxl' % fnmext)
|
| 1588 |
+
continue
|
| 1589 |
+
if os.path.isdir (fnmext):
|
| 1590 |
+
info ('skipped directory %s. Only files are accepted' % fnmext)
|
| 1591 |
+
continue
|
| 1592 |
+
if fnmext == 'stdin.xml':
|
| 1593 |
+
fobj = sys.stdin
|
| 1594 |
+
elif ext.lower () == '.mxl': # extract .xml file from .mxl file
|
| 1595 |
+
z = ZipFile(fnmext)
|
| 1596 |
+
for n in z.namelist(): # assume there is always an xml file in a mxl archive !!
|
| 1597 |
+
if (n[:4] != 'META') and (n[-4:].lower() == '.xml'):
|
| 1598 |
+
fobj = z.open (n)
|
| 1599 |
+
break # assume only one MusicXML file per archive
|
| 1600 |
+
else:
|
| 1601 |
+
fobj = open (fnmext, 'rb') # open regular xml file
|
| 1602 |
+
|
| 1603 |
+
abcOut = ABCoutput (fnm + '.abc', pad, X, options) # create global ABC output object
|
| 1604 |
+
psr = Parser (options) # xml parser
|
| 1605 |
+
try:
|
| 1606 |
+
psr.parse (fobj) # parse file fobj and write abc to <fnm>.abc
|
| 1607 |
+
except:
|
| 1608 |
+
etype, value, traceback = sys.exc_info () # works in python 2 & 3
|
| 1609 |
+
info ('** %s occurred: %s in %s' % (etype, value, fnmext), 0)
|