Update app.py
app.py CHANGED
@@ -17,7 +17,7 @@ model = AutoModelForCausalLM.from_pretrained(
 model.eval()  # Set the model to evaluation mode
 
 # Initialize the inference client (if needed for other API-based tasks)
-client = InferenceClient(provider="together",token=access_token)
+client = InferenceClient(provider="together", token=access_token)
 
 def conversation_predict(input_text):
     """Generate a response for single-turn input using the model."""
@@ -50,31 +50,18 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    # response
-
-
-    # for message_chunk in client.chat_completion(
-    #     model = "google/gemma-2b-it",
-    #     messages=messages,
-    #     max_tokens=max_tokens,
-    #     stream=True,
-    #     temperature=temperature,
-    #     top_p=top_p,
-    # ):
-    #     token = message_chunk["choices"][0]["delta"].get("content", "")
-    #     response += token
-    #     yield response
-
-    response = client.chat.completions.create(
-        model = "google/gemma-2b-it",
+    # Get the complete response at once (no streaming)
+    response = client.chat_completion(
+        model="google/gemma-2b-it",
         messages=messages,
        max_tokens=max_tokens,
         stream=False,
         temperature=temperature,
         top_p=top_p,
     )
-
-    return response
+
+    # Extract and return the full response
+    return response["choices"][0]["message"]["content"]
 
 # Create a Gradio ChatInterface demo
 demo = gr.ChatInterface(
@@ -94,4 +81,4 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
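In short, the commit normalizes the spacing in the `InferenceClient(...)` constructor, deletes the commented-out streaming loop, and swaps the OpenAI-style `client.chat.completions.create(...)` call for `huggingface_hub`'s `InferenceClient.chat_completion(...)`, returning the extracted message text instead of the raw response object. A minimal sketch of the new call pattern, assuming a valid Hugging Face token and placeholder generation parameters (the real app takes these from `access_token` and its Gradio sliders):

from huggingface_hub import InferenceClient

# Placeholder token and parameter values; app.py supplies access_token and
# the values wired to its Gradio UI instead.
client = InferenceClient(provider="together", token="hf_...")

messages = [{"role": "user", "content": "Hello!"}]

response = client.chat_completion(
    model="google/gemma-2b-it",
    messages=messages,
    max_tokens=256,
    stream=False,  # one complete ChatCompletionOutput, not a chunk iterator
    temperature=0.7,
    top_p=0.95,
)

# The output type supports both dict-style indexing (as used in the diff)
# and attribute access.
print(response["choices"][0]["message"]["content"])
print(response.choices[0].message.content)

Because `respond()` now returns a single string instead of yielding partial ones, `gr.ChatInterface` shows the reply in one piece; Gradio accepts both plain and generator chat functions, so the switch from `yield` to `return` is safe.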
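For contrast, the streaming path that the removed comments gestured at would look roughly like the sketch below: with `stream=True`, `chat_completion` yields chunks whose `delta` fields carry incremental tokens, and a generator-style handler lets `gr.ChatInterface` render the reply as it grows. This is an illustrative reconstruction, not code from the commit:

def respond_streaming(message, history):
    # Hypothetical streaming counterpart of respond(); the name and the
    # fixed max_tokens value are illustrative only.
    messages = [{"role": "user", "content": message}]
    response = ""
    for chunk in client.chat_completion(
        model="google/gemma-2b-it",
        messages=messages,
        max_tokens=512,
        stream=True,  # yields ChatCompletionStreamOutput chunks
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response  # Gradio re-renders the partial reply on each yield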