File size: 8,905 Bytes
07bbd8c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
import gradio as gr
import pretty_midi
import numpy as np
import tempfile
import os
import scipy
from scipy import signal
import librosa
import io
import base64
from pathlib import Path

class HumanizeBot:
    """Adds human-performance imperfections (timing, duration, velocity,
    and optional swing) to MIDI data loaded with pretty_midi."""

    def __init__(self):
        # Per-instrument-family randomization profiles:
        #   timing_var   - std dev (seconds) of gaussian onset shifts
        #   velocity_var - max magnitude of uniform velocity shifts
        #   swing_factor - strength of the off-beat delay used for swing
        self.groove_profiles = {
            "drums": {"timing_var": 0.02, "velocity_var": 15, "swing_factor": 0.1},
            "melody": {"timing_var": 0.01, "velocity_var": 10, "swing_factor": 0.05},
            "bass": {"timing_var": 0.015, "velocity_var": 12, "swing_factor": 0.07},
            "chords": {"timing_var": 0.008, "velocity_var": 8, "swing_factor": 0.03},
            "other": {"timing_var": 0.01, "velocity_var": 10, "swing_factor": 0.05}
        }

    def classify_instrument(self, instrument):
        """Map a pretty_midi Instrument to a groove-profile key.

        Drum tracks are detected via the instrument's is_drum flag; pitched
        instruments are bucketed by General MIDI program number ranges.
        """
        if instrument.is_drum:
            return "drums"
        if 32 <= instrument.program <= 39:  # GM bass family
            return "bass"
        if 0 <= instrument.program <= 7:  # GM piano family
            return "chords"
        if 40 <= instrument.program <= 55:  # strings / orchestral
            return "chords"
        # Synth leads/pads (80-104) and everything unclassified read as melody.
        return "melody"

    def apply_swing(self, notes, swing_factor, tempo):
        """Delay off-beat notes slightly to create a swing/groove feel.

        Mutates the notes in place and returns the same list of objects.
        """
        swung_notes = []
        for note in notes:
            # Position within the current beat, in [0, 1).
            beat_position = (note.start * tempo / 60) % 1
            if 0.25 < beat_position < 0.75:  # off-beat: push slightly later
                note.start += 0.01 * swing_factor
                note.end += 0.01 * swing_factor
            swung_notes.append(note)
        return swung_notes

    def humanize_midi(self, midi_file, intensity=0.7, style="organic", add_swing=True):
        """Humanize a MIDI file and write the result to a temp file.

        Args:
            midi_file: a filesystem path string, or an object exposing a
                .name attribute (file wrappers from older Gradio versions).
            intensity: 0-1 scale for how strong all variations are.
            style: velocity-shaping style ("organic", "groovy", "gentle").
            add_swing: apply swing to drum and bass parts.

        Returns:
            (output_path, status_message); output_path is None on failure.
        """
        try:
            # gr.File(type="filepath") passes a plain path string; fall back
            # to .name for file-like wrappers from older Gradio versions.
            midi_path = midi_file if isinstance(midi_file, str) else midi_file.name
            midi_data = pretty_midi.PrettyMIDI(midi_path)
            tempo = midi_data.estimate_tempo()

            # Process each instrument with its family-specific profile.
            for instrument in midi_data.instruments:
                inst_type = self.classify_instrument(instrument)
                profile = self.groove_profiles[inst_type]

                # Swing only the rhythm-section parts.
                if add_swing and inst_type in ["drums", "bass"]:
                    instrument.notes = self.apply_swing(
                        instrument.notes,
                        profile["swing_factor"] * intensity,
                        tempo
                    )

                for note in instrument.notes:
                    # Randomize onset; clamp so notes never start before 0.
                    timing_shift = np.random.normal(0, profile["timing_var"] * intensity)
                    note.start = max(0, note.start + timing_shift)

                    # Randomize duration (drums keep their original length);
                    # enforce a 50 ms floor so notes stay audible.
                    if not instrument.is_drum:
                        duration_shift = np.random.normal(0, profile["timing_var"] * 0.5 * intensity)
                        note.end = max(note.start + 0.05, note.end + duration_shift)

                    # Randomize velocity. The "+ 1" makes the upper bound
                    # inclusive: np.random.randint's high is exclusive, which
                    # biased the original shift downward. Result clamped to
                    # a usable MIDI range [20, 127].
                    vel_pattern = self.get_velocity_pattern(note, instrument, style)
                    vel_shift = np.random.randint(-profile["velocity_var"], profile["velocity_var"] + 1)
                    new_velocity = note.velocity + int(vel_shift * intensity * vel_pattern)
                    note.velocity = max(20, min(127, new_velocity))

            # tempfile.mktemp is deprecated and race-prone; mkstemp creates
            # the file atomically. Close the fd and let pretty_midi reopen
            # the path for writing.
            fd, output_path = tempfile.mkstemp(suffix='_humanized.mid')
            os.close(fd)
            midi_data.write(output_path)
            return output_path, "βœ… Humanization successful! File is ready for download."

        except Exception as e:
            return None, f"❌ Error processing file: {str(e)}"

    def get_velocity_pattern(self, note, instrument, style):
        """Return a velocity-shift multiplier for the given style."""
        if style == "organic":
            return 1.0
        if style == "groovy":
            # Crude beat detection assuming 2 beats per second (120 BPM):
            # accent notes landing near a beat boundary, soften the rest.
            beat_position = (note.start * 2) % 1
            return 1.2 if beat_position < 0.1 else 0.9
        if style == "gentle":
            return 0.8
        return 1.0

def create_audio_preview(midi_path):
    """Render a MIDI file to raw audio for an in-browser preview.

    Args:
        midi_path: path to a MIDI file on disk.

    Returns:
        (sample_rate, samples) on success — Gradio's Audio component
        expects this (int, float32 ndarray) tuple — or (None, None)
        when the file cannot be loaded or synthesized.
    """
    try:
        midi_data = pretty_midi.PrettyMIDI(midi_path)
        # pretty_midi's built-in synthesis; no external soundfont required.
        audio_data = midi_data.synthesize()
        return 44100, audio_data.astype(np.float32)
    except Exception:
        # A bare `except:` would also trap KeyboardInterrupt/SystemExit;
        # catch Exception and degrade to "no preview" instead.
        return None, None

def process_files(files, intensity, style, add_swing):
    """Humanize every uploaded MIDI file and build a preview for the first success.

    Returns (output_paths, audio_preview, status_message) for the Gradio
    outputs; the first two are None when nothing was processed.
    """
    if not files:
        return None, None, "Please upload MIDI files to begin."

    humanizer = HumanizeBot()
    outputs = []
    previews = []

    for upload in files:
        result_path, _message = humanizer.humanize_midi(upload, intensity, style, add_swing)
        if not result_path:
            continue
        outputs.append(result_path)

        # Synthesize an audio preview for each successful conversion.
        rate, samples = create_audio_preview(result_path)
        if samples is not None:
            previews.append((rate, samples))

    if not outputs:
        return None, None, "❌ No files were processed successfully."

    first_preview = previews[0] if previews else None
    return outputs, first_preview, f"βœ… Successfully processed {len(outputs)} files!"

# Create the Gradio interface: two-column layout with upload/settings on the
# left and downloads/preview/status on the right.
with gr.Blocks(theme=gr.themes.Soft(), title="HumanizeBot") as demo:
    gr.Markdown("""
    # 🎡 HumanizeBot
    **Remove AI traces from your music and make it sound human-made!**
    
    Upload MIDI files from AI music generators to apply natural humanization: subtle timing variations, velocity changes, and musical feel.
    """)
    
    with gr.Row():
        # Left column: file upload and humanization controls.
        with gr.Column(scale=1):
            gr.Markdown("### πŸ“ Upload & Settings")
            
            # type="filepath" delivers plain path strings to process_files.
            file_input = gr.File(
                file_count="multiple",
                file_types=[".mid", ".midi"],
                label="Upload MIDI Files",
                type="filepath"
            )
            
            # Scales all timing/velocity variation in HumanizeBot profiles.
            intensity = gr.Slider(
                0.1, 1.0, 
                value=0.7, 
                label="🎚️ Humanization Intensity",
                info="Low = subtle, High = very human"
            )
            
            # Selects the velocity-shaping style (see get_velocity_pattern).
            style = gr.Radio(
                ["organic", "groovy", "gentle"], 
                value="organic", 
                label="🎸 Humanization Style",
                info="Organic = natural, Groovy = rhythmic, Gentle = subtle"
            )
            
            # Toggles off-beat swing for drum and bass parts.
            add_swing = gr.Checkbox(
                value=True,
                label="πŸ”„ Add Swing/Groove",
                info="Add rhythmic push and pull"
            )
            
            process_btn = gr.Button(
                "✨ Humanize My Music!",
                variant="primary",
                size="lg"
            )
        
        # Right column: processed-file downloads, audio preview, and status.
        with gr.Column(scale=1):
            gr.Markdown("### πŸ“₯ Download Results")
            
            file_output = gr.File(
                file_count="multiple",
                label="Download Humanized MIDI Files"
            )
            
            # Only the first processed file gets a preview (see process_files).
            audio_output = gr.Audio(
                label="Audio Preview (First File)",
                interactive=False
            )
            
            status = gr.Textbox(
                label="Status",
                interactive=False,
                max_lines=3
            )
    
    # Collapsible examples/tips section.
    with gr.Accordion("🎯 Examples & Tips", open=False):
        gr.Markdown("""
        **Best used with:**
        - AI-generated MIDI from Soundraw, AIVA, MuseNet, etc.
        - Robotic-sounding drum patterns
        - Static piano or synth sequences
        
        **How it works:**
        - Adds subtle timing variations (like a human player)
        - Adjusts velocity (note strength) dynamically
        - Can add swing/groove for rhythmic parts
        - Preserves the original musical content
        
        **Pro tip:** Start with intensity 0.7 for balanced results!
        """)
    
    # Wire the button to the processing function; outputs map 1:1 to the
    # (files, audio, status) tuple returned by process_files.
    process_btn.click(
        fn=process_files,
        inputs=[file_input, intensity, style, add_swing],
        outputs=[file_output, audio_output, status]
    )
    
    gr.Markdown("""
    ---
    *Built with ❀️ using Gradio and PrettyMIDI. Works best with MIDI files from AI music generators.*
    """)

# Launch the app when run as a script (debug=True enables error traces).
if __name__ == "__main__":
    demo.launch(debug=True)