infinitymatter committed on
Commit
b6b67d4
·
verified ·
1 Parent(s): 48c9787

Update src/models.py

Browse files
Files changed (1) hide show
  1. src/models.py +14 -0
src/models.py CHANGED
@@ -23,6 +23,10 @@ CLAUDE_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
23
 
24
  # Define a helper function to perform the direct API call
25
  def hf_inference_request(token, model, prompt, max_new_tokens=200):
 
 
 
 
26
  url = f"https://api-inference.huggingface.co/models/{model}"
27
  headers = {"Authorization": f"Bearer {token}"}
28
  payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_new_tokens}}
@@ -34,6 +38,11 @@ def hf_inference_request(token, model, prompt, max_new_tokens=200):
34
 
35
  def get_gpt_completion(prompt, system_message):
36
  try:
 
 
 
 
 
37
  full_prompt = f"{system_message}\n{prompt}"
38
  response = hf_inference_request(openai_api_key, OPENAI_MODEL, full_prompt, max_new_tokens=200)
39
  return response
@@ -43,6 +52,11 @@ def get_gpt_completion(prompt, system_message):
43
 
44
  def get_claude_completion(prompt, system_message):
45
  try:
 
 
 
 
 
46
  full_prompt = f"{system_message}\n{prompt}"
47
  response = hf_inference_request(anthropic_api_key, CLAUDE_MODEL, full_prompt, max_new_tokens=200)
48
  return response
 
23
 
24
  # Define a helper function to perform the direct API call
25
  def hf_inference_request(token, model, prompt, max_new_tokens=200):
26
+ # Ensure the prompt is a string
27
+ if not isinstance(prompt, str):
28
+ prompt = str(prompt)
29
+
30
  url = f"https://api-inference.huggingface.co/models/{model}"
31
  headers = {"Authorization": f"Bearer {token}"}
32
  payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_new_tokens}}
 
38
 
39
  def get_gpt_completion(prompt, system_message):
40
  try:
41
+ # Ensure both system_message and prompt are strings
42
+ if not isinstance(system_message, str):
43
+ system_message = str(system_message)
44
+ if not isinstance(prompt, str):
45
+ prompt = str(prompt)
46
  full_prompt = f"{system_message}\n{prompt}"
47
  response = hf_inference_request(openai_api_key, OPENAI_MODEL, full_prompt, max_new_tokens=200)
48
  return response
 
52
 
53
  def get_claude_completion(prompt, system_message):
54
  try:
55
+ # Ensure both system_message and prompt are strings
56
+ if not isinstance(system_message, str):
57
+ system_message = str(system_message)
58
+ if not isinstance(prompt, str):
59
+ prompt = str(prompt)
60
  full_prompt = f"{system_message}\n{prompt}"
61
  response = hf_inference_request(anthropic_api_key, CLAUDE_MODEL, full_prompt, max_new_tokens=200)
62
  return response