Spaces:
Sleeping
Sleeping
| # Start with imports - ask ChatGPT to explain any package that you don't know | |
| import datetime | |
| import os | |
| import json | |
| from dotenv import load_dotenv | |
| from openai import OpenAI | |
| from anthropic import Anthropic | |
| from IPython.display import Markdown, display | |
| # Always remember to do this! | |
| load_dotenv(override=True) | |
| # Print the key prefixes to help with any debugging | |
| openai_api_key = os.getenv('OPENAI_API_KEY') | |
| anthropic_api_key = os.getenv('ANTHROPIC_API_KEY') | |
| google_api_key = os.getenv('GOOGLE_API_KEY') | |
| deepseek_api_key = os.getenv('DEEPSEEK_API_KEY') | |
| groq_api_key = os.getenv('GROQ_API_KEY') | |
| if openai_api_key: | |
| print(f"OpenAI API Key exists and begins {openai_api_key[:8]}") | |
| else: | |
| print("OpenAI API Key not set") | |
| if anthropic_api_key: | |
| print(f"Anthropic API Key exists and begins {anthropic_api_key[:7]}") | |
| else: | |
| print("Anthropic API Key not set (and this is optional)") | |
| if google_api_key: | |
| print(f"Google API Key exists and begins {google_api_key[:2]}") | |
| else: | |
| print("Google API Key not set (and this is optional)") | |
| if deepseek_api_key: | |
| print(f"DeepSeek API Key exists and begins {deepseek_api_key[:3]}") | |
| else: | |
| print("DeepSeek API Key not set (and this is optional)") | |
| if groq_api_key: | |
| print(f"Groq API Key exists and begins {groq_api_key[:4]}") | |
| else: | |
| print("Groq API Key not set (and this is optional)") | |
# Build the prompt path portably: the original hard-coded a Windows-only
# backslash separator ("1_foundations\\prompt1.txt"), which fails on POSIX.
file_name = os.path.join("1_foundations", "prompt1.txt")

# Read the shared question that every competitor model will be asked
with open(file_name, mode="r", encoding="utf-8") as file:
    request = file.read()

print(request)

# One user turn, reused verbatim for every provider below
messages = [{"role": "user", "content": request}]
# The API we know well: OpenAI's gpt-4o-mini goes first.
openai = OpenAI()

# These two lists accumulate the model names and their answers, in lockstep,
# across every provider queried below.
competitors = []
answers = []

model_name = "gpt-4o-mini"
completion = openai.chat.completions.create(model=model_name, messages=messages)
answer = completion.choices[0].message.content

display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Anthropic's API differs slightly: max_tokens is a required argument.
claude = Anthropic()
model_name = "claude-3-7-sonnet-latest"

claude_reply = claude.messages.create(
    model=model_name,
    messages=messages,
    max_tokens=1000,
)
answer = claude_reply.content[0].text

display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Google's Gemini, reached through its OpenAI-compatible endpoint.
model_name = "gemini-2.0-flash"
gemini = OpenAI(
    api_key=google_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
gemini_reply = gemini.chat.completions.create(model=model_name, messages=messages)
answer = gemini_reply.choices[0].message.content

display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Groq also exposes an OpenAI-compatible endpoint.
model_name = "llama-3.3-70b-versatile"
groq = OpenAI(api_key=groq_api_key, base_url="https://api.groq.com/openai/v1")
groq_reply = groq.chat.completions.create(model=model_name, messages=messages)
answer = groq_reply.choices[0].message.content

display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# A model running locally under Ollama (OpenAI-compatible server;
# the api_key value is a required dummy, not a real credential).
model_name = "llama3.2"
ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
local_reply = ollama.chat.completions.create(model=model_name, messages=messages)
answer = local_reply.choices[0].message.content

display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Quick sanity check of everything collected so far
print(competitors)
print(answers)

# It's nice to know how to use "zip"
for competitor, answer in zip(competitors, answers):
    print(f"Competitor: {competitor}\n\n{answer}")

# Concatenate every answer into one numbered document for the judge.
# str.join replaces the original quadratic `together +=` loop; the
# resulting string is byte-identical.
together = "".join(
    f"# Response from competitor {index}\n\n{answer}\n\n"
    for index, answer in enumerate(answers, start=1)
)
# Judge prompt (kept in Italian — it is a runtime string sent to the model):
# rank all answers and reply with strict JSON only.
# Fix: the original contained the typo "miugliore" for "migliore".
judge = f"""Stai giudicando una competizione tra {len(competitors)} concorrenti.
A ogni modello è stata data questa domanda:
{request}
Il tuo compito è quello di valutare ogni risposta in base alla chiarezza e alla forza delle argomentazioni e di classificarle in ordine dalla migliore alla peggiore.
Rispondi con JSON, e solo JSON, con il seguente formato:
{{"results": ["numero miglior concorrente", "numero secondo miglior concorrente", "numero terzo miglior concorrente", ...], "docx_link": "link al documento con la risposta migliore"}}
Qui ci sono le risposte di ogni concorrente:
{together}
Ora rispondi con il JSON con l'ordine di classifica dei concorrenti, ed il link al documento dove hai inserito la risposta migliore; nient'altro. Non includere la formattazione markdown o i blocchi di codice."""

judge_messages = [{"role": "user", "content": judge}]
# Judgement time! A local Ollama model acts as the judge.
ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
model_name = "llama3.2"
response = ollama.chat.completions.create(model=model_name, messages=judge_messages)
results = response.choices[0].message.content
print(results)

# Robustness: small local models often ignore the "JSON only" instruction and
# wrap the payload in markdown fences or extra prose. Extract the outermost
# {...} span before parsing so json.loads does not choke on the decoration.
_start = results.find("{")
_end = results.rfind("}")
_payload = results[_start:_end + 1] if _start != -1 and _end > _start else results
results_dict = json.loads(_payload)

# "results" holds 1-based competitor numbers, best first
ranks = results_dict["results"]
for index, result in enumerate(ranks):
    competitor = competitors[int(result) - 1]
    print(f"Rank {index + 1}: {competitor}")
# Save the best answer to a timestamped file
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
filename = f"answer_{now}.txt"

# ranks[0] is the 1-based number of the winning competitor
best_index = int(ranks[0]) - 1
best_answer = answers[best_index]
print(f"La risposta migliore è: {best_answer}")

with open(filename, "w", encoding="utf-8") as f:
    # The best answer is the one ranked first
    f.write(best_answer)

# Bug fix: the original printed the literal placeholder "(unknown)"
# instead of interpolating the actual output path.
print(f"Risposta migliore salvata in {filename}")