import streamlit as st
from ibm_watsonx_ai import APIClient, Credentials
from ibm_watsonx_ai.foundation_models import ModelInference
from io import BytesIO
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib import colors
from reportlab.lib.enums import TA_LEFT, TA_RIGHT
from datetime import datetime
import regex
import os


def setup_watsonxai_client(
    api_key: str, project_id: str, url: str = "https://eu-de.ml.cloud.ibm.com"
):
    """Set up a watsonx.ai Python SDK client from an API key and project ID."""
    wx_credentials = Credentials(url=url, api_key=api_key)
    wxai_client = APIClient(wx_credentials, project_id=project_id)
    return wxai_client
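
# Usage sketch: create the client once and reuse it across calls. The
# environment variable names below are illustrative assumptions, not
# something this app defines:
#
#   client = setup_watsonxai_client(
#       api_key=os.environ["WATSONX_APIKEY"],
#       project_id=os.environ["WATSONX_PROJECT_ID"],
#   )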


# NOTE: \p{Emoji} also matches ASCII digits, '#', and '*' (they carry the
# Unicode Emoji property), which would strip numbers from chat messages;
# \p{Extended_Pictographic} targets only pictographic characters.
emoji_pattern = regex.compile(r"\p{Extended_Pictographic}", flags=regex.UNICODE)


def remove_emojis(text):
    """Strip emoji so reportlab's built-in fonts don't render missing glyphs."""
    return emoji_pattern.sub("", text)


def create_pdf_from_chat(chat_history):
    """Render the chat history as a PDF and return it as an in-memory buffer."""
    buffer = BytesIO()
    doc = SimpleDocTemplate(buffer, pagesize=letter, topMargin=30, bottomMargin=30)
    styles = getSampleStyleSheet()
    flowables = []
    title_style = ParagraphStyle(
        "Title", parent=styles["Heading1"], fontSize=18, spaceAfter=20
    )
    flowables.append(
        Paragraph(
            f"Jimmy Chat History - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            title_style,
        )
    )
    # User messages are right-aligned on light blue; Jimmy's replies on lavender.
    user_style = ParagraphStyle(
        "UserStyle",
        parent=styles["Normal"],
        backColor=colors.lightblue,
        borderPadding=10,
        alignment=TA_RIGHT,
    )
    jimmy_style = ParagraphStyle(
        "JimmyStyle",
        parent=styles["Normal"],
        backColor=colors.lavender,
        borderPadding=10,
    )
    for message in chat_history:
        role = message["role"]
        content = remove_emojis(message["content"])
        style = user_style if role == "user" else jimmy_style
        flowables.append(Paragraph(f"<b>{role.capitalize()}:</b> {content}", style))
        flowables.append(Spacer(1, 12))
    doc.build(flowables)
    buffer.seek(0)
    return buffer
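
# Usage sketch: offering the PDF as a download in the Streamlit UI. The
# session_state key "messages" is an assumption about how the app stores
# its chat history:
#
#   pdf_buffer = create_pdf_from_chat(st.session_state["messages"])
#   st.download_button(
#       "Download chat as PDF",
#       data=pdf_buffer,
#       file_name="jimmy_chat.pdf",
#       mime="application/pdf",
#   )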


def watsonx_chat_prompt(
    messages,
    stream=False,
    client=None,
    wx_url=None,
    wx_apikey=None,
    wx_project_id=None,
    model_id=None,
    params=None,
):
    """
    Dynamic chat function for watsonx.ai.

    Args:
        messages (list): List of message objects following the watsonx chat schema.
                         Each message should have 'role' and 'content' keys.
                         Supports system, user, assistant, and tool messages.
        stream (bool): If True, return a streaming generator; if False, return the complete response.
        client (APIClient): Pre-configured watsonx.ai client (optional).
        wx_url (str): watsonx.ai URL (required if no client).
        wx_apikey (str): watsonx.ai API key (required if no client).
        wx_project_id (str): watsonx.ai project ID (required if no client).
        model_id (str): Model identifier.
        params (dict): Model parameters (optional).

    Returns:
        str or generator: Complete response text or a streaming generator, depending on `stream`.
    """
    if params is None:
        params = {
            "temperature": 0.7,
            "max_tokens": 4096,
            "top_p": 1.0,
            "stop": ["</s>", "<|end_of_text|>", "<|endoftext|>"],
            # "frequency_penalty": 0.5,
            # "presence_penalty": 0.3,
        }
    # Use the provided client or create a new one from the raw credentials.
    if client is None:
        wx_credentials = Credentials(url=wx_url, api_key=wx_apikey)
        client = APIClient(credentials=wx_credentials, project_id=wx_project_id)
    chat_model = ModelInference(api_client=client, model_id=model_id, params=params)
    if stream:
        return chat_model.chat_stream(messages=messages)
    return chat_model.chat(messages=messages)
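
# Usage sketch: the system prompt and model_id below are illustrative
# assumptions; any chat-capable model id available in your watsonx.ai
# instance works:
#
#   response = watsonx_chat_prompt(
#       messages=[
#           {"role": "system", "content": "You are a helpful assistant."},
#           {"role": "user", "content": "Hello!"},
#       ],
#       client=client,
#       model_id="ibm/granite-3-8b-instruct",
#       stream=True,
#   )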


def generate_response(response, stream):
    """Adapt a watsonx.ai chat response for display: yield text deltas when
    streaming, or return the complete message text otherwise."""
    if stream:
        # Wrap the loop in an inner generator so the non-streaming branch can
        # return a plain string: a `return value` inside a function that also
        # contains `yield` is silently discarded by callers.
        def token_stream():
            for chunk in response:
                if chunk["choices"]:
                    yield chunk["choices"][0]["delta"].get("content", "")

        return token_stream()
    return response["choices"][0]["message"]["content"]
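
# Usage sketch: wiring the helpers into a Streamlit chat loop. st.chat_input,
# st.chat_message, and st.write_stream are standard Streamlit APIs; the
# session_state layout and the `client` / `model_id` variables are assumptions
# about the rest of the app:
#
#   if prompt := st.chat_input("Ask Jimmy..."):
#       st.session_state["messages"].append({"role": "user", "content": prompt})
#       raw = watsonx_chat_prompt(
#           messages=st.session_state["messages"],
#           client=client,
#           model_id=model_id,
#           stream=True,
#       )
#       with st.chat_message("assistant"):
#           reply = st.write_stream(generate_response(raw, stream=True))
#       st.session_state["messages"].append(
#           {"role": "assistant", "content": reply}
#       )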