|
|
import os

import gradio as gr
import requests
|
|
|
|
|
# Hugging Face Inference API endpoint for the target model.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3.1-405B"

# SECURITY: a credential was previously hard-coded here. Read it from the
# environment instead so the token never lives in source control; the empty
# fallback keeps the module importable when the variable is unset.
API_TOKEN = os.environ.get("HF_API_TOKEN", "")

# Standard bearer-token header sent with every inference request.
headers = {"Authorization": f"Bearer {API_TOKEN}"}
|
|
|
|
|
def query(payload):
    """Send *payload* to the Hugging Face Inference API and return generated text.

    Parameters
    ----------
    payload : dict
        JSON body for the inference endpoint, e.g. ``{"inputs": "..."}``.

    Returns
    -------
    str
        The generated text on success, or a human-readable error message
        (in Portuguese, matching the UI language) on failure.
    """
    try:
        # timeout= prevents the Gradio handler from hanging forever on a
        # stalled connection; 60s accommodates slow model cold starts.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        data = response.json()
        # Success responses are a list of {"generated_text": ...}; error
        # responses are a dict such as {"error": "..."} — guard both shapes
        # instead of letting KeyError/IndexError crash the handler.
        return data[0]["generated_text"]
    except requests.exceptions.RequestException as e:
        return f"Erro na requisição: {e}"
    except (KeyError, IndexError, TypeError, ValueError) as e:
        return f"Resposta inesperada da API: {e}"
|
|
|
|
|
def generate_text(prompt):
    """Generate text for *prompt* by forwarding it to the inference endpoint."""
    payload = {"inputs": prompt}
    return query(payload)
|
|
|
|
|
# Gradio UI: a single prompt textbox in, generated text out.
# Title/description now match the model actually configured in API_URL
# (the originals said "70B" and "GPT-2", contradicting the 405B endpoint).
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Digite seu prompt aqui..."),
    outputs="text",
    title="Gerador de Texto com Llama 3.1 405B",
    description="Este aplicativo usa o modelo Meta-Llama-3.1-405B para gerar texto com base no prompt fornecido.",
)
|
|
|
|
|
if __name__ == "__main__":
    # Launch the Gradio server only when run as a script, not on import.
    iface.launch()