# Uploaded via huggingface_hub by harvesthealth (commit c2ebec8, verified)
import os
from openai import OpenAI
# The base URL for the OpenAI-compatible Blablador API
BLABLADOR_API_BASE = "https://api.helmholtz-blablador.fz-juelich.de/v1"
def get_llm_response_stream(messages, model_name):
    """
    Stream a chat completion from the Blablador API.

    Args:
        messages (list): OpenAI-style message dicts ({"role": ..., "content": ...}).
        model_name (str): Model alias to query (e.g. 'alias-large', 'alias-fast').

    Yields:
        str: Incremental chunks of the assistant's reply as they arrive, or a
            single error string if the API key is missing or the request fails.
    """
    key = os.environ.get("BLABLADOR_API_KEY")
    # Fail fast with a user-visible message instead of raising from deep
    # inside the OpenAI client when the key is absent.
    if not key:
        yield "Error: BLABLADOR_API_KEY is not set in the environment."
        return
    try:
        client = OpenAI(api_key=key, base_url=BLABLADOR_API_BASE)
        stream = client.chat.completions.create(
            model=model_name,
            messages=messages,
            stream=True,
        )
        for event in stream:
            piece = event.choices[0].delta.content
            # Delta content may be None on keep-alive/终止 markers is not assumed;
            # we simply skip falsy chunks.
            if piece:
                yield piece
    except Exception as exc:
        # Surface any transport/API failure to the caller as a stream item.
        yield f"Error communicating with LLM: {exc}"
if __name__ == '__main__':
    # Manual smoke test for the streaming client.
    # Requires BLABLADOR_API_KEY to be set in the environment.
    print("--- Testing LLM Client Stream ---")
    demo_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me a short story about a robot who discovers music."},
    ]
    print("Streaming response:")
    pieces = []
    for piece in get_llm_response_stream(messages=demo_messages, model_name="alias-large"):
        print(piece, end="", flush=True)
        pieces.append(piece)
    assembled = "".join(pieces)
    print("\n\n--- End of Stream ---")
    print(f"Final assembled story length: {len(assembled)} characters.")