Teotonix commited on
Commit
2694a30
·
verified ·
1 Parent(s): b249a4e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -20
app.py CHANGED
@@ -1,29 +1,22 @@
1
  import gradio as gr
2
- import requests
3
  import os
 
4
 
5
  HF_TOKEN = os.getenv("HF_TOKEN")
6
- API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
7
- HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
8
 
9
def query_mistral(prompt):
    """Query the Mistral-7B-Instruct model via the HF Inference API.

    Returns the generated text, or a human-readable (Turkish) error
    message when the API reports an error (e.g. the model is still
    loading) or returns an unexpected payload.
    """
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 300}
    }

    # timeout= prevents the Gradio worker from hanging forever on a
    # stalled connection (requests has no default timeout).
    r = requests.post(API_URL, headers=HEADERS, json=payload, timeout=60)
    data = r.json()

    # If the model is still loading, the API returns a dict with "error"
    if isinstance(data, dict) and "error" in data:
        return f"Model yükleniyor veya hata oluştu:\n{data['error']}"

    # Normal response: a list of generation dicts
    if isinstance(data, list):
        return data[0].get("generated_text", "Cevap alınamadı.")

    return "Beklenmeyen API cevabı."
 
 
 
27
 
28
  def chat_fn(message, history):
29
  if history is None:
 
1
  import gradio as gr
 
2
  import os
3
+ from huggingface_hub import InferenceClient
4
 
5
  HF_TOKEN = os.getenv("HF_TOKEN")
 
 
6
 
7
# Module-level Inference API client; HF_TOKEN is read from the
# environment above and may be None if unset — TODO confirm the Space
# has the HF_TOKEN secret configured.
client = InferenceClient(
    provider="hf-inference",
    api_key=HF_TOKEN,
)
 
 
 
 
 
 
 
 
11
 
12
def query_mistral(prompt):
    """Generate a completion for *prompt* with Mistral-7B-Instruct.

    Uses the module-level InferenceClient. API/network failures are
    returned as a readable message instead of raising, so the Gradio
    chat callback never crashes (matching the previous requests-based
    implementation's behavior).
    """
    try:
        response = client.text_generation(
            model="mistralai/Mistral-7B-Instruct-v0.2",
            prompt=prompt,
            max_new_tokens=300,
            temperature=0.7,
        )
        return response
    except Exception as e:  # UI boundary: surface the error in the chat
        return f"Model yükleniyor veya hata oluştu:\n{e}"
20
 
21
  def chat_fn(message, history):
22
  if history is None: