projectlosangeles committed on
Commit
b37c643
·
verified ·
1 Parent(s): 5e34c85

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -16
app.py CHANGED
@@ -1,6 +1,6 @@
1
- #============================================================================================
2
  # https://huggingface.co/spaces/projectlosangeles/Orpheus-Pitches-Inpainter
3
- #============================================================================================
4
 
5
  print('=' * 70)
6
  print('Orpheus Pitches Inpainter Gradio App')
@@ -56,9 +56,8 @@ print('=' * 70)
56
 
57
  #==================================================================================
58
 
59
- MODEL_CHECKPOINT = 'Orpheus_Bridge_Music_Transformer_Trained_Model_19571_steps_0.9396_loss_0.7365_acc.pth'
60
-
61
- SOUDFONT_PATH = 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2'
62
 
63
  #==================================================================================
64
 
@@ -71,18 +70,19 @@ dtype = 'bfloat16'
71
  ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
72
  ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)
73
 
74
- SEQ_LEN = 1668
75
  PAD_IDX = 18819
76
 
77
- model = TransformerWrapper(num_tokens = PAD_IDX+1,
78
- max_seq_len = SEQ_LEN,
79
- attn_layers = Decoder(dim = 2048,
80
- depth = 8,
81
- heads = 32,
82
- rotary_pos_emb = True,
83
- attn_flash = True
84
- )
85
- )
 
86
 
87
  model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX)
88
 
@@ -329,7 +329,7 @@ def Generate_Music_Bridge(input_midi,
329
 
330
 
331
  audio = midi_to_colab_audio(new_fn,
332
- soundfont_path=SOUDFONT_PATH,
333
  sample_rate=16000,
334
  volume_scale=10,
335
  output_for_gradio=True
 
1
+ #==========================================================================
2
  # https://huggingface.co/spaces/projectlosangeles/Orpheus-Pitches-Inpainter
3
+ #==========================================================================
4
 
5
  print('=' * 70)
6
  print('Orpheus Pitches Inpainter Gradio App')
 
56
 
57
  #==================================================================================
58
 
59
+ MODEL_CHECKPOINT = 'Orpheus_Music_Transformer_Trained_Model_128497_steps_0.6934_loss_0.7927_acc.pth'
60
+ SOUNDFONT_PATH = 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2'
 
61
 
62
  #==================================================================================
63
 
 
70
  ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
71
  ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)
72
 
73
+ SEQ_LEN = 8192
74
  PAD_IDX = 18819
75
 
76
+ model = TransformerWrapper(num_tokens=PAD_IDX + 1,
77
+ max_seq_len=SEQ_LEN,
78
+ attn_layers=Decoder(
79
+ dim=2048,
80
+ depth=8,
81
+ heads=32,
82
+ rotary_pos_emb=True,
83
+ attn_flash=True
84
+ )
85
+ )
86
 
87
  model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX)
88
 
 
329
 
330
 
331
  audio = midi_to_colab_audio(new_fn,
332
+ soundfont_path=SOUNDFONT_PATH,
333
  sample_rate=16000,
334
  volume_scale=10,
335
  output_for_gradio=True