# Start with imports - ask ChatGPT to explain any package that you don't know
import datetime
import os
import json
from dotenv import load_dotenv
from openai import OpenAI
from anthropic import Anthropic
from IPython.display import Markdown, display
# Always remember to do this!
load_dotenv(override=True)

# Read each provider key from the environment (None when unset).
openai_api_key = os.getenv('OPENAI_API_KEY')
anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')
google_api_key = os.getenv('GOOGLE_API_KEY')
deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
groq_api_key = os.getenv('GROQ_API_KEY')

# Print the key prefixes to help with any debugging.
# Each entry: (label, key value, prefix length to reveal, "not set" suffix).
# The prefix lengths are deliberately short so no secret is leaked to stdout.
_key_checks = [
    ("OpenAI", openai_api_key, 8, ""),
    ("Anthropic", anthropic_api_key, 7, " (and this is optional)"),
    ("Google", google_api_key, 2, " (and this is optional)"),
    ("DeepSeek", deepseek_api_key, 3, " (and this is optional)"),
    ("Groq", groq_api_key, 4, " (and this is optional)"),
]
for _label, _key, _prefix_len, _suffix in _key_checks:
    if _key:
        print(f"{_label} API Key exists and begins {_key[:_prefix_len]}")
    else:
        print(f"{_label} API Key not set{_suffix}")
# Build the prompt path portably. The original hard-coded a Windows
# backslash path ("1_foundations\prompt1.txt"), which on macOS/Linux is a
# single file name containing a literal backslash; os.path.join uses the
# correct separator for the current platform.
file_name = os.path.join("1_foundations", "prompt1.txt")
with open(file_name, mode="r", encoding="utf-8") as file:
    request = file.read()
print(request)

# Single-turn chat: the file contents become the one user message.
messages = [{"role": "user", "content": request}]
# The API we know well: the standard OpenAI client reads OPENAI_API_KEY.
openai = OpenAI()

# Track every model entered into the competition alongside its answer;
# the two lists stay index-aligned throughout the script.
competitors = []
answers = []

model_name = "gpt-4o-mini"
# One chained call: create the completion and pull out the message text.
answer = openai.chat.completions.create(
    model=model_name, messages=messages
).choices[0].message.content
display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Anthropic's API differs slightly: max_tokens is required, and the text
# lives under response.content[0].text rather than choices[...].
model_name = "claude-3-7-sonnet-latest"
claude = Anthropic()
reply = claude.messages.create(model=model_name, messages=messages, max_tokens=1000)
answer = reply.content[0].text
display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Gemini exposes an OpenAI-compatible endpoint, so the same client class
# works — only the base_url and api_key change.
gemini = OpenAI(
    api_key=google_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
model_name = "gemini-2.0-flash"
completion = gemini.chat.completions.create(model=model_name, messages=messages)
answer = completion.choices[0].message.content
display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Groq also speaks the OpenAI wire protocol — point the client at its URL.
groq = OpenAI(api_key=groq_api_key, base_url="https://api.groq.com/openai/v1")
model_name = "llama-3.3-70b-versatile"
# Chain the call and the field access in one expression.
answer = groq.chat.completions.create(
    model=model_name, messages=messages
).choices[0].message.content
display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
# Local model via Ollama's OpenAI-compatible server; the api_key value is
# ignored by Ollama but the client requires something non-empty.
ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
model_name = "llama3.2"
completion = ollama.chat.completions.create(model=model_name, messages=messages)
answer = completion.choices[0].message.content
display(Markdown(answer))
competitors.append(model_name)
answers.append(answer)
print(competitors)
print(answers)

# It's nice to know how to use "zip"
for competitor, answer in zip(competitors, answers):
    print(f"Competitor: {competitor}\n\n{answer}")

# Collate every answer into one numbered markdown document for the judge.
# enumerate(..., start=1) gives human-friendly 1-based competitor numbers.
together = "".join(
    f"# Response from competitor {number}\n\n{answer}\n\n"
    for number, answer in enumerate(answers, start=1)
)
# Judge prompt (deliberately in Italian): rank all answers by clarity and
# strength of argument, and reply with strict JSON only.
# Bug fix: the original prompt contained the typo "miugliore" ("migliore").
judge = f"""Stai giudicando una competizione tra {len(competitors)} concorrenti.
A ogni modello è stata data questa domanda:
{request}
Il tuo compito è quello di valutare ogni risposta in base alla chiarezza e alla forza delle argomentazioni e di classificarle in ordine dalla migliore alla peggiore.
Rispondi con JSON, e solo JSON, con il seguente formato:
{{"results": ["numero miglior concorrente", "numero secondo miglior concorrente", "numero terzo miglior concorrente", ...], "docx_link": "link al documento con la risposta migliore"}}
Qui ci sono le risposte di ogni concorrente:
{together}
Ora rispondi con il JSON con l'ordine di classifica dei concorrenti, ed il link al documento dove hai inserito la risposta migliore; nient'altro. Non includere la formattazione markdown o i blocchi di codice."""
judge_messages = [{"role": "user", "content": judge}]
# Judgement time! A local Ollama model acts as the judge.
ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
model_name = "llama3.2"
response = ollama.chat.completions.create(model=model_name, messages=judge_messages)
results = response.choices[0].message.content
print(results)

# Robustness: small local models often wrap their JSON in markdown code
# fences despite the prompt's instructions; strip an opening ```/```json
# line and a closing ``` line so json.loads doesn't fail on valid output.
cleaned = results.strip()
if cleaned.startswith("```"):
    fence_lines = cleaned.splitlines()
    fence_lines = fence_lines[1:]  # drop the opening fence (e.g. "```json")
    if fence_lines and fence_lines[-1].strip() == "```":
        fence_lines = fence_lines[:-1]  # drop the closing fence
    cleaned = "\n".join(fence_lines)
results_dict = json.loads(cleaned)

# "results" is a list of 1-based competitor numbers, best first.
ranks = results_dict["results"]
for index, result in enumerate(ranks):
    competitor = competitors[int(result)-1]
    print(f"Rank {index+1}: {competitor}")
# Save the best answer to a timestamped file.
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
filename = f"answer_{now}.txt"
best_index = int(ranks[0]) - 1  # ranks hold 1-based competitor numbers
best_answer = answers[best_index]
print(f"La risposta migliore è: {best_answer}")
with open(filename, "w", encoding="utf-8") as f:
    # The best answer is the one the judge ranked first
    f.write(best_answer)
# Bug fix: the original printed the literal text "(unknown)" instead of
# interpolating the output file name into the f-string.
print(f"Risposta migliore salvata in {filename}")