asrarbw committed
Commit 1b3bcac · verified · Parent(s): baaeac3

Update app.py

Files changed (1): app.py (+6 −9)
app.py CHANGED
@@ -9,11 +9,8 @@ from huggingface_hub import InferenceClient
 # ===============================
 HF_TOKEN = os.getenv("HF")
 
-client = InferenceClient(
-    model="Qwen/Qwen2.5-7B-Instruct",
-    token=HF_TOKEN,
-    provider="hf-inference"
-)
+client = InferenceClient(model="Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN )
+
 
 # ===============================
 # SAFETY
@@ -52,7 +49,7 @@ User question:
 {user_question}
 """
 response = ""
-for chunk in client.chat.completions.create(
+for chunk in client.chat_completion(
     messages=[{"role": "user", "content": prompt}],
     max_tokens=150,
     temperature=0,
@@ -93,7 +90,7 @@ User question:
     planner_prompt += f"\nPrevious error:\n{last_error}\nFix the code."
 
 code = ""
-for chunk in client.chat.completions.create(
+for chunk in client.chat_completion(
     messages=[{"role": "user", "content": planner_prompt}],
     max_tokens=400,
     temperature=0.2,
@@ -175,7 +172,7 @@ Focus on meaning and implications.
 if show_code:
     response += f"🧾 Generated Python code:\n\n```python\n{code}\n```\n\n"
 
-for chunk in client.chat.completions.create(
+for chunk in client.chat_completion(
     messages=[{"role": "user", "content": insight_prompt}],
     max_tokens=350,
     temperature=0.4,
@@ -209,7 +206,7 @@ Do not generate code.
 """
 
 response = ""
-for chunk in client.chat.completions.create(
+for chunk in client.chat_completion(
     messages=[{"role": "user", "content": insight_prompt}],
     max_tokens=400,
     temperature=0.4,
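
In short, the commit drops the explicit `provider="hf-inference"` argument when constructing `InferenceClient` and replaces the OpenAI-style `client.chat.completions.create(...)` calls with huggingface_hub's native `InferenceClient.chat_completion(...)` method. Below is a minimal, self-contained sketch of the call pattern the new code relies on; `stream=True` and the chunk handling are assumptions, since every hunk cuts off before the call's closing parenthesis, but iterating with `for chunk in ...` only works on the streaming variant of `chat_completion`:

```python
import os
from huggingface_hub import InferenceClient

# Mirrors the client construction after this commit.
client = InferenceClient(model="Qwen/Qwen2.5-7B-Instruct", token=os.getenv("HF"))

response = ""
# stream=True is assumed: it makes chat_completion return an iterator
# of incremental deltas rather than a single completed response.
for chunk in client.chat_completion(
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=150,
    temperature=0,
    stream=True,
):
    # Each streamed chunk carries an incremental piece of the reply;
    # the final chunk's delta content may be None, hence the guard.
    delta = chunk.choices[0].delta.content
    if delta:
        response += delta

print(response)
```

Recent huggingface_hub versions expose `client.chat.completions.create` as an OpenAI-compatible alias, so the switch to `chat_completion` is mostly a move to the client's native method, paired here with letting the library pick the default provider instead of pinning `hf-inference`.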