jostlebot committed on
Commit
b2a55d9
·
1 Parent(s): 32d8253

Fix API client to use Anthropic SDK directly

Browse files
Files changed (2) hide show
  1. app.py +62 -49
  2. requirements.txt +1 -0
app.py CHANGED
@@ -8,7 +8,19 @@ Author: Jocelyn Skillman, LMHC
8
 
9
  import gradio as gr
10
  import os
11
- from openai import OpenAI
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  # ARI Engine System Prompt - The meta-prompt that powers tool generation
14
  ARI_ENGINE_SYSTEM_PROMPT = """You are the ARI Engine — an Assistive Relational Intelligence tool builder designed to help mental health clinicians create AI-powered tools for their practice. You are not a therapist. You are a clinical design collaborator that translates clinician intent into ethically-grounded, trauma-informed AI tool architectures.
@@ -203,33 +215,58 @@ MODALITIES = [
203
  ]
204
 
205
  def get_client():
206
- """Initialize OpenAI-compatible client for various providers."""
207
- # Try Anthropic first (via OpenAI-compatible endpoint)
208
- if os.environ.get("ANTHROPIC_API_KEY"):
209
- return OpenAI(
210
- api_key=os.environ["ANTHROPIC_API_KEY"],
211
- base_url="https://api.anthropic.com/v1/"
212
- ), "claude-3-5-sonnet-20241022"
213
 
214
  # Try OpenAI
215
- if os.environ.get("OPENAI_API_KEY"):
216
- return OpenAI(
217
- api_key=os.environ["OPENAI_API_KEY"]
218
- ), "gpt-4o"
219
 
220
  # Try HuggingFace Inference
221
- if os.environ.get("HF_TOKEN"):
222
  return OpenAI(
223
  api_key=os.environ["HF_TOKEN"],
224
  base_url="https://api-inference.huggingface.co/v1/"
225
- ), "meta-llama/Llama-3.1-70B-Instruct"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
- return None, None
 
 
 
 
 
 
 
 
228
 
229
  def generate_tool(description, category, population, modality, risk_level, additional_context, history):
230
  """Generate an ARI tool based on clinician input."""
231
 
232
- client, model = get_client()
233
 
234
  if not client:
235
  return """## API Key Required
@@ -241,7 +278,7 @@ To generate tools, please set one of these environment variables:
241
 
242
  For local testing, you can set these in a `.env` file or export them in your terminal.
243
 
244
- For HuggingFace Spaces deployment, add these as secrets in your Space settings.""", "", "", history
245
 
246
  # Build the user prompt
247
  user_prompt = f"""A clinician is requesting help building an ARI tool. Please guide them through the process.
@@ -272,7 +309,7 @@ Format your output clearly with markdown headers for each section.
272
  Remember: Every tool must pass the ARI Litmus Test - building capacity for human relationship, having clear exits toward humans, being safe for crisis, preventing dependency, and honoring clinical expertise."""
273
 
274
  # Build conversation history for context
275
- messages = [{"role": "system", "content": ARI_ENGINE_SYSTEM_PROMPT}]
276
 
277
  for h in history:
278
  messages.append({"role": "user", "content": h[0]})
@@ -282,14 +319,7 @@ Remember: Every tool must pass the ARI Litmus Test - building capacity for human
282
  messages.append({"role": "user", "content": user_prompt})
283
 
284
  try:
285
- response = client.chat.completions.create(
286
- model=model,
287
- messages=messages,
288
- max_tokens=4000,
289
- temperature=0.7
290
- )
291
-
292
- assistant_response = response.choices[0].message.content
293
 
294
  # Update history
295
  new_history = history + [[user_prompt, assistant_response]]
@@ -319,12 +349,12 @@ Remember: Every tool must pass the ARI Litmus Test - building capacity for human
319
  def continue_conversation(user_message, history):
320
  """Continue the conversation with the ARI Engine."""
321
 
322
- client, model = get_client()
323
 
324
  if not client:
325
  return "Please set an API key to continue.", history
326
 
327
- messages = [{"role": "system", "content": ARI_ENGINE_SYSTEM_PROMPT}]
328
 
329
  for h in history:
330
  messages.append({"role": "user", "content": h[0]})
@@ -334,14 +364,7 @@ def continue_conversation(user_message, history):
334
  messages.append({"role": "user", "content": user_message})
335
 
336
  try:
337
- response = client.chat.completions.create(
338
- model=model,
339
- messages=messages,
340
- max_tokens=4000,
341
- temperature=0.7
342
- )
343
-
344
- assistant_response = response.choices[0].message.content
345
  new_history = history + [[user_message, assistant_response]]
346
 
347
  return assistant_response, new_history
@@ -355,25 +378,15 @@ def preview_tool(system_prompt, test_input):
355
  if not system_prompt.strip():
356
  return "Please generate a tool first, then copy the System Prompt here to preview it."
357
 
358
- client, model = get_client()
359
 
360
  if not client:
361
  return "Please set an API key to preview tools."
362
 
363
- messages = [
364
- {"role": "system", "content": system_prompt},
365
- {"role": "user", "content": test_input}
366
- ]
367
 
368
  try:
369
- response = client.chat.completions.create(
370
- model=model,
371
- messages=messages,
372
- max_tokens=1000,
373
- temperature=0.7
374
- )
375
-
376
- return response.choices[0].message.content
377
 
378
  except Exception as e:
379
  return f"Error in preview: {str(e)}"
 
8
 
9
  import gradio as gr
10
  import os
11
+
12
+ # Try to import API clients
13
+ try:
14
+ import anthropic
15
+ ANTHROPIC_AVAILABLE = True
16
+ except ImportError:
17
+ ANTHROPIC_AVAILABLE = False
18
+
19
+ try:
20
+ from openai import OpenAI
21
+ OPENAI_AVAILABLE = True
22
+ except ImportError:
23
+ OPENAI_AVAILABLE = False
24
 
25
  # ARI Engine System Prompt - The meta-prompt that powers tool generation
26
  ARI_ENGINE_SYSTEM_PROMPT = """You are the ARI Engine — an Assistive Relational Intelligence tool builder designed to help mental health clinicians create AI-powered tools for their practice. You are not a therapist. You are a clinical design collaborator that translates clinician intent into ethically-grounded, trauma-informed AI tool architectures.
 
215
  ]
216
 
217
def get_client():
    """Initialize API client for various providers."""
    # Try Anthropic first
    if os.environ.get("ANTHROPIC_API_KEY") and ANTHROPIC_AVAILABLE:
        return anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"]), "claude-sonnet-4-20250514", "anthropic"
235
+
236
+
237
+ def call_llm(client, model, provider, system_prompt, messages):
238
+ """Call LLM with provider-specific formatting."""
239
+ if provider == "anthropic":
240
+ # Convert messages to Anthropic format
241
+ anthropic_messages = []
242
+ for msg in messages:
243
+ anthropic_messages.append({
244
+ "role": msg["role"],
245
+ "content": msg["content"]
246
+ })
247
+
248
+ response = client.messages.create(
249
+ model=model,
250
+ max_tokens=4000,
251
+ system=system_prompt,
252
+ messages=anthropic_messages
253
+ )
254
+ return response.content[0].text
255
 
256
+ else: # OpenAI-compatible
257
+ all_messages = [{"role": "system", "content": system_prompt}] + messages
258
+ response = client.chat.completions.create(
259
+ model=model,
260
+ messages=all_messages,
261
+ max_tokens=4000,
262
+ temperature=0.7
263
+ )
264
+ return response.choices[0].message.content
265
 
266
  def generate_tool(description, category, population, modality, risk_level, additional_context, history):
267
  """Generate an ARI tool based on clinician input."""
268
 
269
+ client, model, provider = get_client()
270
 
271
  if not client:
272
  return """## API Key Required
 
278
 
279
  For local testing, you can set these in a `.env` file or export them in your terminal.
280
 
281
+ For HuggingFace Spaces deployment, add these as secrets in your Space settings.""", "", "", "", history
282
 
283
  # Build the user prompt
284
  user_prompt = f"""A clinician is requesting help building an ARI tool. Please guide them through the process.
 
309
  Remember: Every tool must pass the ARI Litmus Test - building capacity for human relationship, having clear exits toward humans, being safe for crisis, preventing dependency, and honoring clinical expertise."""
310
 
311
  # Build conversation history for context
312
+ messages = []
313
 
314
  for h in history:
315
  messages.append({"role": "user", "content": h[0]})
 
319
  messages.append({"role": "user", "content": user_prompt})
320
 
321
  try:
322
+ assistant_response = call_llm(client, model, provider, ARI_ENGINE_SYSTEM_PROMPT, messages)
 
 
 
 
 
 
 
323
 
324
  # Update history
325
  new_history = history + [[user_prompt, assistant_response]]
 
349
  def continue_conversation(user_message, history):
350
  """Continue the conversation with the ARI Engine."""
351
 
352
+ client, model, provider = get_client()
353
 
354
  if not client:
355
  return "Please set an API key to continue.", history
356
 
357
+ messages = []
358
 
359
  for h in history:
360
  messages.append({"role": "user", "content": h[0]})
 
364
  messages.append({"role": "user", "content": user_message})
365
 
366
  try:
367
+ assistant_response = call_llm(client, model, provider, ARI_ENGINE_SYSTEM_PROMPT, messages)
 
 
 
 
 
 
 
368
  new_history = history + [[user_message, assistant_response]]
369
 
370
  return assistant_response, new_history
 
378
  if not system_prompt.strip():
379
  return "Please generate a tool first, then copy the System Prompt here to preview it."
380
 
381
+ client, model, provider = get_client()
382
 
383
  if not client:
384
  return "Please set an API key to preview tools."
385
 
386
+ messages = [{"role": "user", "content": test_input}]
 
 
 
387
 
388
  try:
389
+ return call_llm(client, model, provider, system_prompt, messages)
 
 
 
 
 
 
 
390
 
391
  except Exception as e:
392
  return f"Error in preview: {str(e)}"
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  gradio>=4.44.0,<5.0.0
 
2
  openai>=1.0.0
3
  huggingface_hub>=0.24.0,<1.0.0
 
1
  gradio>=4.44.0,<5.0.0
2
+ anthropic>=0.39.0
3
  openai>=1.0.0
4
  huggingface_hub>=0.24.0,<1.0.0