# Spaces: Sleeping (Hugging Face Spaces status banner captured with the file)
| from huggingface_hub import InferenceClient | |
| import re | |
| import os | |
# Hugging Face API token read from the environment. os.getenv returns None
# when the secret is unset; wrapping it in an f-string would coerce a missing
# secret into the literal string "None" and send a bogus token downstream.
api_key = os.getenv("ImagiGen_HF_secret")
def clean_generated_text(text):
    """Normalize model output for display.

    Strips markdown-style asterisks, removes characters outside a small
    ASCII whitelist (alphanumerics plus common punctuation), and collapses
    runs of whitespace into single spaces.
    """
    without_asterisks = re.sub(r"\*+", "", text)
    # Keep only alphanumerics, spaces, and common punctuation.
    whitelisted = re.sub(r'[^a-zA-Z0-9 .,!?\'"-]', "", without_asterisks)
    # Collapse any whitespace runs and trim both ends.
    return re.sub(r"\s+", " ", whitelisted).strip()
def generate_prompt_response(api_key, model_name, user_message, max_tokens=1000):
    """Stream a chat completion from the HF Inference API and return cleaned text.

    Parameters
    ----------
    api_key : str
        Hugging Face API token.
    model_name : str
        Model repo id, e.g. "Qwen/Qwen2.5-72B-Instruct".
    user_message : str
        The user's prompt, sent as a single user-role message.
    max_tokens : int
        Generation cap passed to the API (default 1000).
    """
    client = InferenceClient(api_key=api_key)
    messages = [{"role": "user", "content": user_message}]
    # Request a streamed completion so chunks arrive incrementally.
    stream = client.chat.completions.create(
        model=model_name, messages=messages, max_tokens=max_tokens, stream=True
    )
    # Accumulate chunk texts in a list and join once (avoids quadratic str +=).
    # Guard against chunks with no choices and against deltas whose content is
    # None (e.g. the final stop chunk), which would raise TypeError on `str + None`.
    parts = []
    for chunk in stream:
        if chunk.choices:
            piece = chunk.choices[0].delta.content
            if piece:
                parts.append(piece)
    return clean_generated_text("".join(parts))
def Qwen_72b(user_input):
    """Send *user_input* to Qwen2.5-72B-Instruct and return the cleaned reply."""
    reply = generate_prompt_response(
        api_key, "Qwen/Qwen2.5-72B-Instruct", user_message=user_input
    )
    return clean_generated_text(reply)
def Mixtral(user_input):
    """Send *user_input* to Mixtral-8x7B-Instruct-v0.1 and return the cleaned reply."""
    reply = generate_prompt_response(
        api_key, "mistralai/Mixtral-8x7B-Instruct-v0.1", user_message=user_input
    )
    return clean_generated_text(reply)
def microsoft_phi(user_input):
    """Send *user_input* to Phi-3-mini-4k-instruct and return the cleaned reply."""
    reply = generate_prompt_response(
        api_key, "microsoft/Phi-3-mini-4k-instruct", user_message=user_input
    )
    return clean_generated_text(reply)