kshahnathwani committed on
Commit
ba350e0
·
verified ·
1 Parent(s): 6ac306a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -70
app.py CHANGED
@@ -1,83 +1,41 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
- fancy_css = """
5
- #main-container {
6
- background-color: #f0f0f0;
7
- font-family: 'Arial', sans-serif;
8
- }
9
- .gradio-container {
10
- max-width: 700px;
11
- margin: 0 auto;
12
- padding: 20px;
13
- background: white;
14
- box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
15
- border-radius: 10px;
16
- }
17
- .gr-button {
18
- background-color: #4CAF50;
19
- color: white;
20
- border: none;
21
- border-radius: 5px;
22
- padding: 10px 20px;
23
- cursor: pointer;
24
- transition: background-color 0.3s ease;
25
- }
26
- .gr-button:hover {
27
- background-color: #45a049;
28
- }
29
- .gr-chat {
30
- font-size: 16px;
31
- }
32
- #title {
33
- text-align: center;
34
- font-size: 2em;
35
- margin-bottom: 20px;
36
- color: #333;
37
- }
38
- """
39
 
40
  CHORD_SYSTEM_PROMPT = """You are a music theory expert specialized in chord identification.
41
  Given a list of notes (like "C E G" or "D F# A C"), identify the chord name.
42
  Always respond with the chord name and a short explanation of the intervals.
43
  """
44
 
45
- def respond(
46
- message,
47
- history: list[dict[str, str]],
48
- system_message,
49
- max_tokens,
50
- temperature,
51
- top_p,
52
- hf_token: gr.OAuthToken,
53
- ):
54
- if hf_token is None or not getattr(hf_token, "token", None):
55
- yield "⚠️ Please log in with your Hugging Face account first."
56
- return
57
 
58
- client = InferenceClient(
59
- token=hf_token.token,
60
- model="google/flan-t5-small", # change if you want to call your own model repo
61
- )
62
 
63
- # Build structured messages → turn into a prompt
64
- messages = [{"role": "system", "content": system_message}]
65
- messages.extend(history)
66
- messages.append({"role": "user", "content": f"What chord is {message}?"})
67
 
68
- prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages]) + "\nAnswer:"
 
 
 
 
 
 
 
 
69
 
70
- response = ""
71
- for chunk in client.text_generation(
72
- prompt,
73
- max_new_tokens=max_tokens,
74
- temperature=temperature,
75
- top_p=top_p,
76
- stream=True,
77
- ):
78
- response += chunk
79
- yield response.strip()
80
 
 
 
81
 
82
  chatbot = gr.ChatInterface(
83
  fn=respond,
@@ -85,15 +43,13 @@ chatbot = gr.ChatInterface(
85
  gr.Textbox(value=CHORD_SYSTEM_PROMPT, label="System message"),
86
  gr.Slider(minimum=1, maximum=512, value=128, step=1, label="Max new tokens"),
87
  gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
88
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
89
  ],
90
  type="messages",
91
  )
92
 
93
  with gr.Blocks(css=fancy_css) as demo:
94
- with gr.Row():
95
- gr.Markdown("<h1 id='title'>🎶 Chord Bot (API-based) 🎶</h1>")
96
- gr.LoginButton()
97
  chatbot.render()
98
 
99
  if __name__ == "__main__":
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import os
4
 
5
# NOTE(review): this commit replaced the previous CSS rules with a literal
# "..." placeholder string. gr.Blocks below still receives css=fancy_css,
# so the app now ships with no real styling — confirm this was intentional.
fancy_css = """..."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
# System prompt for the chord-identification bot. This is a runtime string
# (it is prepended to every prompt sent to the model), so its wording is
# part of the app's behavior — edit deliberately, not cosmetically.
CHORD_SYSTEM_PROMPT = """You are a music theory expert specialized in chord identification.
Given a list of notes (like "C E G" or "D F# A C"), identify the chord name.
Always respond with the chord name and a short explanation of the intervals.
"""
11
 
12
# Token is read once at import time from the environment (Space secrets).
# It may be None; respond() checks for that and reports it to the user
# instead of failing here.
HF_TOKEN = os.environ.get("HF_TOKEN")
# Single shared client for all requests. NOTE(review): constructed even when
# HF_TOKEN is None — presumably InferenceClient defers auth errors until a
# request is made; confirm against huggingface_hub docs.
client = InferenceClient(token=HF_TOKEN, model="google/flan-t5-small")
 
 
 
 
 
 
 
 
 
 
14
 
15
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a chord-identification answer from the hosted model.

    Args:
        message: Latest user input (e.g. the notes to identify).
        history: Prior chat turns as ``{"role", "content"}`` dicts, as
            supplied by ``gr.ChatInterface(type="messages")``.
        system_message: System prompt placed at the top of the prompt.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        The accumulated response text, growing as chunks stream in, or a
        user-facing warning string when the token/model is misconfigured.
    """
    if HF_TOKEN is None:
        yield "⚠️ No HF_TOKEN found. Please add it in your Space secrets."
        return

    try:
        # BUG FIX: the previous version dropped `history` entirely, so the
        # bot had no memory of earlier turns. Fold prior messages back into
        # the prompt. With an empty history this reproduces the old prompt
        # (f"{system_message}\nUser: {message}\nAnswer:") exactly.
        lines = [system_message]
        for turn in history or []:
            role = turn.get("role", "user")
            content = turn.get("content", "")
            lines.append(f"{role.capitalize()}: {content}")
        lines.append(f"User: {message}")
        prompt = "\n".join(lines) + "\nAnswer:"

        response = ""
        for chunk in client.text_generation(
            prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        ):
            response += chunk
            yield response.strip()

        # An empty stream usually means a wrong model ID or a bad token.
        if not response.strip():
            yield "⚠️ No response from model (check model ID or token)."

    except Exception as e:  # surface API errors in the chat instead of crashing
        yield f"⚠️ Error: {str(e)}"
39
 
40
  chatbot = gr.ChatInterface(
41
  fn=respond,
 
43
  gr.Textbox(value=CHORD_SYSTEM_PROMPT, label="System message"),
44
  gr.Slider(minimum=1, maximum=512, value=128, step=1, label="Max new tokens"),
45
  gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
46
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
47
  ],
48
  type="messages",
49
  )
50
 
51
# Top-level UI layout: styled container, title banner, then the pre-built
# ChatInterface rendered inside the Blocks context.
with gr.Blocks(css=fancy_css) as demo:
    gr.Markdown("<h1 id='title'>🎶 Chord Bot (API-based) 🎶</h1>")
    chatbot.render()
54
 
55
  if __name__ == "__main__":