# NOTE: "Spaces: Build error" lines here were hosting-platform build-log residue,
# not part of the module; converted to a comment so the file parses.
# --- Legacy implementations, kept commented out for reference ---
# (superseded by get_answer_from_context below)
# import requests
# import json
# import os
# from dotenv import load_dotenv
# load_dotenv()
# def get_answer(model_name, context, question):
#     llm_key = os.getenv("llm_key")
#     url = os.getenv("main_url")
#     # Construct the prompt for the model
#     prompt = f"You are a Question Answering Model. Can you help me answer the question: {question} from the context: {context}? Just return the answer only. The document may contain some Arabic text; please translate that to English if needed."
#     # Prepare payload for API request
#     payload = {
#         "model": model_name,
#         "messages": [
#             {
#                 "role": "user",
#                 "content": prompt
#             }
#         ],
#         "max_tokens": 300,
#         "temperature": 0.2
#     }
#     headers = {
#         'Authorization': f'Bearer {llm_key}',
#         'Content-Type': 'application/json'
#     }
#     # Convert payload to JSON string
#     json_payload = json.dumps(payload)
#     try:
#         # Send POST request to the API
#         response = requests.post(url, headers=headers, data=json_payload)
#         # Check if request was successful
#         if response.status_code == 200:
#             response_data = response.json()  # Parse response JSON
#             answer = response_data['choices'][0]['message']['content']  # Extract model's answer from response
#             return answer
#         else:
#             print(f"Request failed with status code: {response.status_code}")
#             return None
#     except requests.exceptions.RequestException as e:
#         print(f"Error occurred: {e}")
#         return None
# from huggingface_hub import InferenceClient
# def get_hugging_face_answer(model_name, context, question):
#     client = InferenceClient(model_name, token=os.getenv("HF_TOKEN"))
#     prompt = f"You are a Question Answering Model. Can you help me answer the question: {question} from the context: {context}? Just return the answer only. The document may contain some Arabic text; please translate that to English if needed."
#     output = client.text_generation(prompt , max_new_tokens = 200, stream=True, temperature=0.1)
#     return output
import os
from groq import Groq
from dotenv import load_dotenv

# Load environment variables from a local .env file (if present) into os.environ.
load_dotenv()

# API key for the Groq chat-completions service, read from the GROQ_API env var.
# NOTE(review): this will be None if the variable is unset — the Groq client
# will then fail at request time, not here; confirm the deployment sets GROQ_API.
GROQ_API_KEY = os.getenv('GROQ_API')
def get_answer_from_context(model_name, context, question, *, temperature=0.2, max_tokens=200):
    """Answer *question* grounded in *context* via a Groq chat model.

    The system prompt instructs the model to answer only from the supplied
    context and to translate any Arabic passages to English.

    Parameters
    ----------
    model_name : str
        Groq model identifier to use for the completion.
    context : str
        Document text the answer must be drawn from.
    question : str
        The user's query.
    temperature : float, keyword-only
        Sampling temperature (default 0.2, unchanged from before).
    max_tokens : int, keyword-only
        Maximum completion length (default 200, unchanged from before).

    Returns
    -------
    str or None
        The model's answer text, or None if the API request fails
        (matching the error-handling convention of the earlier
        implementations in this file).
    """
    client = Groq(api_key=GROQ_API_KEY)
    try:
        chat_completion = client.chat.completions.create(
            model=model_name,
            messages=[
                {
                    "role": "system",
                    # Runtime prompt text preserved verbatim.
                    "content": f"You are a Question Answering LLM that uses context provided: {context} to answer user's query. Just return the answer only. The document may contain some Arabic text; please translate that to English if needed."
                },
                {
                    "role": "user",
                    "content": question,
                },
            ],
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=1,
            stop=None,
            stream=False,
            # response_format={"type": "json_object"}
        )
    except Exception as e:
        # Network/auth/API failures: report and degrade gracefully rather than
        # crashing the caller — consistent with the legacy versions above.
        print(f"Error occurred: {e}")
        return None
    return chat_completion.choices[0].message.content