import gradio as gr
from openai import OpenAI  # only used by the commented-out Grok path below
from dotenv import load_dotenv
import logging
import os
import requests

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load environment variables (e.g., GEMINI_API_KEY) from a local .env file.
load_dotenv()
| GEMINI_API_KEY="AIzaSyCSmhKbTF_sGbk8tD0dptlqbP0A1R3tQRk" | |
| GEMINI_URL="https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent" | |
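
# A minimal .env file read by load_dotenv() would contain a single line
# (illustrative value; the key name must match the os.getenv lookup above):
#
#   GEMINI_API_KEY=your-api-key-here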

# Previous xAI/Grok path, kept commented out for reference
# (it requires XAI_API_KEY, XAI_BASE, and XAI_MODEL to be defined):
# client = OpenAI(api_key=XAI_API_KEY, base_url=XAI_BASE)

def call_gemini(user_msg: str, history: list[tuple[str, str]]):
    """Send the conversation to Gemini's generateContent endpoint and return the reply text."""
    # if not XAI_API_KEY:
    #     raise gr.Error("Please set the XAI_API_KEY environment variable.")
    # messages = [{"role": "system", "content": "Tu es un assistant intelligent"}]
    # for user, assistant in (history or []):
    #     if user:
    #         messages.append({"role": "user", "content": user})
    #     if assistant:
    #         messages.append({"role": "assistant", "content": assistant})
    # # The new turn is a *user* message, not an assistant one:
    # messages.append({"role": "user", "content": user_msg})
    # try:
    #     response = client.chat.completions.create(
    #         model=XAI_MODEL,
    #         messages=messages,
    #         temperature=0,
    #         max_tokens=1024,
    #         top_p=1,
    #         frequency_penalty=0,
    #         presence_penalty=0,
    #         stop=None,
    #     )
    #     return response.choices[0].message.content.strip()
    # except Exception as e:
    #     logger.error(f"Error calling Grok: {e}")
    if not GEMINI_API_KEY:
        raise gr.Error("Please set the GEMINI_API_KEY environment variable.")

    SYSTEM_PROMPT = "Tu es un assistant utile, concis et amical. Réponds en français."

    # Rebuild the conversation in Gemini's format: prior user turns use the
    # "user" role and prior assistant turns use the "model" role.
    contents = []
    for u, a in (history or []):
        if u:
            contents.append({"role": "user", "parts": [{"text": u}]})
        if a:
            contents.append({"role": "model", "parts": [{"text": a}]})
    contents.append({"role": "user", "parts": [{"text": user_msg}]})
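
    # For example, after one completed exchange plus the new message,
    # `contents` looks like this (illustrative values):
    #
    #   [{"role": "user",  "parts": [{"text": "Bonjour"}]},
    #    {"role": "model", "parts": [{"text": "Bonjour ! Comment puis-je aider ?"}]},
    #    {"role": "user",  "parts": [{"text": "Quelle heure est-il ?"}]}]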
| url =f"{GEMINI_URL}?key={GEMINI_API_KEY}" | |
| headers = {"Content-Type": "application/json"} | |
| payload = { | |
| # 👉 Le SEUL et unique message system est passé ici : | |
| "systemInstruction": { | |
| "role": "system", | |
| "parts": [{"text": SYSTEM_PROMPT}] # une seule part ! | |
| }, | |
| "contents": contents | |
| } | |
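
    # Illustrative wire format: `payload` is serialised as the JSON POST body,
    # and a successful reply (simplified; real responses carry extra metadata
    # such as finish reasons and usage counts) has the shape parsed below:
    #
    #   {"candidates": [{"content": {"role": "model",
    #                                "parts": [{"text": "..."}]}}]}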
    try:
        resp = requests.post(url, json=payload, headers=headers, timeout=30)
    except requests.RequestException as e:
        logger.error(f"Error calling Gemini: {e}")
        raise gr.Error(f"Network error while calling Gemini: {e}")

    if resp.status_code == 200:
        response = resp.json()
        logger.info(f"Response from Gemini: {response}")
        return response["candidates"][0]["content"]["parts"][0]["text"].strip()
    else:
        logger.error(f"Error calling Gemini: {resp.status_code} {resp.text}")
        raise gr.Error(f"Gemini API error {resp.status_code}: {resp.text}")

def chat_fn(message, history):
    return call_gemini(message, history or [])
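
# A minimal smoke test, bypassing the UI (assumes GEMINI_API_KEY is set):
#
#   print(call_gemini("Bonjour !", []))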

with gr.Blocks() as demo:
    gr.ChatInterface(chat_fn, title="MasterClass LLM + Gradio")

if __name__ == "__main__":
    demo.launch()