lllindsey0615 committed on
Commit
2e151e1
·
1 Parent(s): 5be5387

try to debug

Browse files
Files changed (1) hide show
  1. app.py +21 -52
app.py CHANGED
@@ -8,63 +8,34 @@ from anticipation.tokenize import extract_instruments
8
  import torch
9
  from pyharp import *
10
 
11
- # === Define AMT Model Paths ===
12
  SMALL_MODEL = "stanford-crfm/music-small-800k"
13
  MEDIUM_MODEL = "stanford-crfm/music-medium-800k"
14
  LARGE_MODEL = "stanford-crfm/music-large-800k"
15
 
16
- # === Define PyHARP Model Card ===
17
  model_card = ModelCard(
18
  name="Anticipatory Music Transformer",
19
- description="Using Anticipatory Music Transformer (AMT) to generate accompaniment for a given MIDI file.",
20
  author="John Thickstun, David Hall, Chris Donahue, Percy Liang",
21
  tags=["midi", "generation", "accompaniment"],
22
  midi_in=True,
23
  midi_out=True
24
  )
25
 
26
- # === Load AMT Model Efficiently ===
27
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
28
- current_model = None
29
- current_model_choice = None # Track currently loaded model
30
-
31
  def load_amt_model(model_choice):
32
- """Loads the selected AMT model only if it changes."""
33
- global current_model, current_model_choice
34
- if current_model is not None and model_choice == current_model_choice:
35
- return current_model # Use already loaded model
36
-
37
- print(f"Loading AMT Model: {model_choice}")
38
- current_model = AutoModelForCausalLM.from_pretrained(model_choice).to(device)
39
- current_model_choice = model_choice
40
- return current_model
41
 
42
- # === Generate Accompaniment Using ZeroGPU ===
43
- @spaces.GPU # Enables ZeroGPU acceleration on Hugging Face Spaces
44
  def generate_accompaniment(midi_file, model_choice, selected_midi_program):
45
  """Generates accompaniment for the entire MIDI input, conditioned on the first 5 seconds."""
46
-
47
- # Load the correct AMT model
48
  model = load_amt_model(model_choice)
49
-
50
- # Validate MIDI file
51
- if not midi_file or not hasattr(midi_file, 'name'):
52
- raise ValueError("Invalid MIDI file input")
53
-
54
- # Convert MIDI to events
55
  events = midi_to_events(midi_file.name)
56
  total_time = round(ops.max_time(events, seconds=True))
57
-
58
- # Extract the melody track
59
- events, melody = extract_instruments(events, [selected_midi_program])
60
-
61
- if not melody:
62
- raise ValueError("No melody detected for the selected MIDI program!")
63
-
64
- # Use first 5 seconds as history
65
  history = ops.clip(events, 0, 5, clip_duration=False)
66
-
67
- # Generate accompaniment
68
  accompaniment = generate(
69
  model,
70
  5, # Start generating at 5s
@@ -75,24 +46,23 @@ def generate_accompaniment(midi_file, model_choice, selected_midi_program):
75
  debug=False
76
  )
77
 
78
- # Combine accompaniment with melody
79
  output_events = ops.clip(ops.combine(accompaniment, melody), 0, total_time, clip_duration=True)
80
-
81
- # Convert to MIDI
82
- output_midi = "generated_accompaniment.mid"
83
  mid = events_to_midi(output_events)
84
  mid.save(output_midi)
85
 
86
  return output_midi
87
 
88
- # === PyHARP Process Function ===
89
  def process_fn(input_midi, model_choice, selected_midi_program):
90
  """Processes the input and runs AMT to generate accompaniment for the full MIDI file."""
91
- print(f"Model selected: {model_choice}")
92
  output_midi = generate_accompaniment(input_midi, model_choice, selected_midi_program)
93
  return output_midi
94
 
95
- # === Build PyHARP Interface ===
 
96
  with gr.Blocks() as demo:
97
  # Dropdown for selecting AMT model
98
  model_dropdown = gr.Dropdown(
@@ -104,7 +74,7 @@ with gr.Blocks() as demo:
104
  # MIDI file upload
105
  midi_file_input = gr.File(label="Upload MIDI File", file_types=[".mid"])
106
 
107
- # MIDI Program Selection Slider (default to 53)
108
  selected_midi_program = gr.Slider(
109
  0, 127, step=1, value=53, label="Select Melody Instrument (MIDI Program Number)"
110
  )
@@ -115,13 +85,12 @@ with gr.Blocks() as demo:
115
  # Output MIDI File
116
  output_midi = gr.File(label="Generated Accompaniment (MIDI)")
117
 
118
- # Connect PyHARP to Gradio
119
- app = build_endpoint(
120
- model_card=model_card,
121
- components=[model_dropdown, selected_midi_program],
122
- process_fn=process_fn
123
  )
124
 
125
- # Launch PyHARP App
126
  demo.queue()
127
- demo.launch(show_error=True)
 
8
  import torch
9
  from pyharp import *
10
 
 
11
  SMALL_MODEL = "stanford-crfm/music-small-800k"
12
  MEDIUM_MODEL = "stanford-crfm/music-medium-800k"
13
  LARGE_MODEL = "stanford-crfm/music-large-800k"
14
 
 
15
  model_card = ModelCard(
16
  name="Anticipatory Music Transformer",
17
+ description="Using Anticipatory Music Transformer (AMT) to generate accompaniment for a given MIDI file with selected melody.",
18
  author="John Thickstun, David Hall, Chris Donahue, Percy Liang",
19
  tags=["midi", "generation", "accompaniment"],
20
  midi_in=True,
21
  midi_out=True
22
  )
23
 
 
 
 
 
 
24
def load_amt_model(model_choice):
    """Instantiate the AMT checkpoint named by ``model_choice``.

    Loads the causal-LM weights from the Hugging Face Hub and moves them
    to CUDA when a GPU is visible, otherwise to the CPU.

    NOTE(review): a fresh model is loaded on every call — no caching.
    Presumably this is intentional for ZeroGPU debugging ("try to debug"
    commit); confirm before re-introducing a module-level cache.
    """
    target = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return AutoModelForCausalLM.from_pretrained(model_choice).to(target)
 
 
 
 
 
 
28
 
29
+ # Generate Accompaniment Using ZeroGPU
30
+ @spaces.GPU
31
  def generate_accompaniment(midi_file, model_choice, selected_midi_program):
32
  """Generates accompaniment for the entire MIDI input, conditioned on the first 5 seconds."""
 
 
33
  model = load_amt_model(model_choice)
 
 
 
 
 
 
34
  events = midi_to_events(midi_file.name)
35
  total_time = round(ops.max_time(events, seconds=True))
36
+ events, melody = extract_instruments(events, [selected_midi_program])
 
 
 
 
 
 
 
37
  history = ops.clip(events, 0, 5, clip_duration=False)
38
+ # Generate accompaniment **for the rest of the MIDI duration**
 
39
  accompaniment = generate(
40
  model,
41
  5, # Start generating at 5s
 
46
  debug=False
47
  )
48
 
49
+ # Combine the accompaniment with the melody
50
  output_events = ops.clip(ops.combine(accompaniment, melody), 0, total_time, clip_duration=True)
51
+ # Convert back to MIDI
52
+ output_midi = "generated_accompaniment.mid"
 
53
  mid = events_to_midi(output_events)
54
  mid.save(output_midi)
55
 
56
  return output_midi
57
 
58
# PyHARP process function: thin adapter between the Gradio event wiring
# and the ZeroGPU-decorated generation routine.
def process_fn(input_midi, model_choice, selected_midi_program):
    """Processes the input and runs AMT to generate accompaniment for the full MIDI file."""
    return generate_accompaniment(input_midi, model_choice, selected_midi_program)
63
 
64
+
65
+ # Build Gradio UI
66
  with gr.Blocks() as demo:
67
  # Dropdown for selecting AMT model
68
  model_dropdown = gr.Dropdown(
 
74
  # MIDI file upload
75
  midi_file_input = gr.File(label="Upload MIDI File", file_types=[".mid"])
76
 
77
+ # Static slider (default to 53)
78
  selected_midi_program = gr.Slider(
79
  0, 127, step=1, value=53, label="Select Melody Instrument (MIDI Program Number)"
80
  )
 
85
  # Output MIDI File
86
  output_midi = gr.File(label="Generated Accompaniment (MIDI)")
87
 
88
+ # Connect Gradio Components
89
+ generate_button.click(
90
+ process_fn,
91
+ inputs=[midi_file_input, model_dropdown, selected_midi_program],
92
+ outputs=[output_midi]
93
  )
94
 
 
95
  demo.queue()
96
+ demo.launch(show_error=True)