kshahnathwani committed on
Commit
972e191
·
verified ·
1 Parent(s): 4a77b40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -16
app.py CHANGED
@@ -3,6 +3,7 @@ from huggingface_hub import InferenceClient
3
  import os
4
  import traceback
5
 
 
6
  fancy_css = """
7
  #main-container {
8
  background-color: #f0f0f0;
@@ -39,15 +40,16 @@ fancy_css = """
39
  }
40
  """
41
 
 
42
  CHORD_SYSTEM_PROMPT = """You are a music theory expert specialized in chord identification.
43
  Given a list of notes (like "C E G" or "D F# A C"), identify the chord name.
44
  Always respond with the chord name and a short explanation of the intervals.
45
  """
46
 
47
- # Read token from Space secrets
48
  HF_TOKEN = os.environ.get("HF_TOKEN")
49
 
50
- # Create inference client
51
  client = InferenceClient(token=HF_TOKEN)
52
 
53
  def respond(message, history, system_message, max_tokens, temperature, top_p):
@@ -55,23 +57,25 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
55
  return "⚠️ No HF_TOKEN found. Please add it in your Space secrets."
56
 
57
  try:
 
58
  prompt = f"{system_message}\nUser: {message}\nAnswer:"
59
 
60
- # Non-streaming inference call
61
- output = client.text_generation(
62
- prompt,
63
- model="gpt2", # ✅ small model, works with Inference API
64
- max_new_tokens=max_tokens,
65
- temperature=temperature,
66
- top_p=top_p,
67
- stream=False, # 🔑 disable streaming
 
 
 
68
  )
69
 
70
- # Extract text
71
- if hasattr(output, "generated_text"):
72
- response = output.generated_text
73
- elif isinstance(output, dict) and "generated_text" in output:
74
- response = output["generated_text"]
75
  else:
76
  response = str(output)
77
 
@@ -82,6 +86,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
82
  return f"⚠️ Error: {str(e)}\n\nTraceback:\n{tb}"
83
 
84
 
 
85
  chatbot = gr.ChatInterface(
86
  fn=respond,
87
  additional_inputs=[
@@ -93,8 +98,9 @@ chatbot = gr.ChatInterface(
93
  type="messages",
94
  )
95
 
 
96
  with gr.Blocks(css=fancy_css) as demo:
97
- gr.Markdown("<h1 id='title'>🎶 Chord Bot (API-based, GPT-2) 🎶</h1>")
98
  chatbot.render()
99
 
100
  if __name__ == "__main__":
 
3
  import os
4
  import traceback
5
 
6
+ # Styling
7
  fancy_css = """
8
  #main-container {
9
  background-color: #f0f0f0;
 
40
  }
41
  """
42
 
43
+ # System prompt specialized for chord bot
44
  CHORD_SYSTEM_PROMPT = """You are a music theory expert specialized in chord identification.
45
  Given a list of notes (like "C E G" or "D F# A C"), identify the chord name.
46
  Always respond with the chord name and a short explanation of the intervals.
47
  """
48
 
49
+ # Hugging Face API token (in Space secrets)
50
  HF_TOKEN = os.environ.get("HF_TOKEN")
51
 
52
+ # Inference client
53
  client = InferenceClient(token=HF_TOKEN)
54
 
55
  def respond(message, history, system_message, max_tokens, temperature, top_p):
 
57
  return "⚠️ No HF_TOKEN found. Please add it in your Space secrets."
58
 
59
  try:
60
+ # Build a simple prompt
61
  prompt = f"{system_message}\nUser: {message}\nAnswer:"
62
 
63
+ # Raw POST to Hugging Face Inference API
64
+ output = client.post_json(
65
+ "https://api-inference.huggingface.co/models/gpt2", # ✅ change model here if needed
66
+ {
67
+ "inputs": prompt,
68
+ "parameters": {
69
+ "max_new_tokens": max_tokens,
70
+ "temperature": temperature,
71
+ "top_p": top_p,
72
+ },
73
+ },
74
  )
75
 
76
+ # Parse response
77
+ if isinstance(output, list) and len(output) > 0 and "generated_text" in output[0]:
78
+ response = output[0]["generated_text"]
 
 
79
  else:
80
  response = str(output)
81
 
 
86
  return f"⚠️ Error: {str(e)}\n\nTraceback:\n{tb}"
87
 
88
 
89
+ # Gradio ChatInterface
90
  chatbot = gr.ChatInterface(
91
  fn=respond,
92
  additional_inputs=[
 
98
  type="messages",
99
  )
100
 
101
+ # Layout
102
  with gr.Blocks(css=fancy_css) as demo:
103
+ gr.Markdown("<h1 id='title'>🎶 Chord Bot (API-based) 🎶</h1>")
104
  chatbot.render()
105
 
106
  if __name__ == "__main__":