AashitaK commited on
Commit
e75369e
·
verified ·
1 Parent(s): 2a9cf0b

Create mistral_api.py

Browse files
Files changed (1) hide show
  1. mistral_api.py +50 -0
mistral_api.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
from openai import OpenAI
from mistralai import Mistral
from file_utils import load_file

# Embedding API using OpenAI.
# Ensure you set your API key in your environment variables or pass it directly.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # Make sure it's set in your environment

if OPENAI_API_KEY is None:
    # Fail fast at import time so a missing key is caught before any request is made.
    raise ValueError("OpenAI API key is missing. Set it as an environment variable: OPENAI_API_KEY")

# Initialize the OpenAI client used for embeddings.
embedder = OpenAI(api_key=OPENAI_API_KEY)

EMBEDDING_MODEL = "text-embedding-3-large"  # alternative: "text-embedding-3-small"
17
+
def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
    """Get a text embedding from the OpenAI embeddings API.

    Args:
        text: The input text to embed.
        model: OpenAI embedding model name (defaults to ``EMBEDDING_MODEL``).

    Returns:
        The embedding vector for the single input text.
    """
    result = embedder.embeddings.create(
        model=model,
        input=text,
    )
    # A single string input yields exactly one embedding in result.data.
    return result.data[0].embedding
26
+
# Completions API using the Mistral SDK.
# Ensure you set your API key in your environment variables or pass it directly.
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")  # Make sure it's set in your environment

if MISTRAL_API_KEY is None:
    # Fail fast at import time so a missing key is caught before any request is made.
    raise ValueError("Mistral API key is missing. Set it as an environment variable: MISTRAL_API_KEY")

# Initialize the Mistral client used for chat completions.
client = Mistral(api_key=MISTRAL_API_KEY)

COMPLETIONS_MODEL = "mistral-large-latest"
38
+
def get_response(messages: list[dict], model: str = COMPLETIONS_MODEL,
                 temperature=0, max_completion_tokens=800) -> str:
    """Chat completion using Mistral models.

    https://docs.mistral.ai/capabilities/completion/

    Args:
        messages: Chat history as a list of {"role": ..., "content": ...} dicts.
        model: Mistral model name (defaults to ``COMPLETIONS_MODEL``).
        temperature: Sampling temperature; 0 for deterministic output.
        max_completion_tokens: Cap on generated tokens.

    Returns:
        The assistant message content of the first completion choice.
    """
    response = client.chat.complete(
        model=model,
        messages=messages,
        # The Mistral SDK parameter is `max_tokens` (not OpenAI's
        # `max_completion_tokens`); keep the outer name for callers.
        max_tokens=max_completion_tokens,
        temperature=temperature,
        # stream=True
    )
    return response.choices[0].message.content