from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
model_id = "eduard76/Llama3-8b-good-new"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
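# device_map="auto" places/shards the weights across available devices
# (requires the `accelerate` package); float16 halves memory vs. float32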
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True
)
model.eval()
# List of covered topics
covered_topics = {
    "ospf", "bgp", "eigrp", "vxlan", "evpn", "network design", "acl", "routing",
    "spine", "leaf", "underlay", "overlay", "mpls", "qos", "firewall",
    "vpn", "vlan", "subnet", "cidr"
}
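# Note: covered_topics is not referenced anywhere below; a minimal
# (hypothetical) keyword gate could use it to filter off-topic questions
# before calling the model:
def is_covered(text):
    """Return True if the question mentions at least one covered topic."""
    return any(topic in text.lower() for topic in covered_topics)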
# Main chat function
def chat(user_input):
    prompt = f"### Human: {user_input}\n### Assistant:"
    # The tokenizer returns input_ids and attention_mask; move both to the model device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            repetition_penalty=1.2,
            no_repeat_ngram_size=5,
            top_k=50,
            top_p=0.9
        )
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    # Strip the original prompt from the response
    if "### Assistant:" in response:
        response = response.split("### Assistant:")[-1].strip()
    return response
# Gradio interface
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="Eduard's Virtual Architect – LLaMA3 Fine-Tuned")
if __name__ == "__main__":
    iface.launch()
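# Local usage (assumed environment): pip install torch transformers gradio accelerate,
# then run `python app.py` and open the local URL Gradio prints.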