Tiitta committed on
Commit 26ee438 · verified · 1 Parent(s): 368e317

Upload version with dataset usage.

Files changed (1):
Main&Gradio-huggingface.py +72 -0
Main&Gradio-huggingface.py ADDED
@@ -0,0 +1,72 @@
+ import transformers
+ import torch
+ import gradio as gr
+ from datasets import load_dataset
+
+ # Load the model once when the script starts
+ model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"Using device: {device}")
+
+ # Load the model into memory (on GPU if available)
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model_id,
+     model_kwargs={"torch_dtype": torch.bfloat16},
+     device_map="auto",  # Auto-detect GPU
+ )
+
+ # Load the dataset from Hugging Face
+ dataset = load_dataset("quantumminds/cisco_cli_commands")
+
+ # Function to search the dataset for a matching command
+ def search_dataset(user_input):
+     # Check if any command in the dataset matches the user input
+     for entry in dataset["train"]:  # assuming the dataset is in the 'train' split
+         if entry["command"].lower() in user_input.lower():  # Match the command with user input (case-insensitive)
+             return f"**Command:** {entry['command']}\n\n**Description:** {entry['description']}\n\n**Example:** {entry['examples'][0]['example_command'] if 'examples' in entry else 'No example available'}"
+     return None  # If no match found
+
+ # Function to generate a response using the dataset, or fall back to the pipeline
+ def generate_response(user_input, chat_history):
+     # First, try to find a match in the dataset
+     dataset_response = search_dataset(user_input)
+
+     if dataset_response:
+         # Add the user message and the dataset answer to the chat history
+         chat_history.append({"role": "user", "content": user_input})
+         chat_history.append({"role": "assistant", "content": dataset_response})
+         return chat_history
+
+     # No dataset match: generate the response from the LLM
+     outputs = pipeline(user_input, max_new_tokens=512)
+
+     # Extract the assistant's response (generated_text also echoes the prompt)
+     assistant_response = outputs[0]["generated_text"]
+
+     # Add user and assistant responses to the chat history
+     chat_history.append({"role": "user", "content": user_input})
+     chat_history.append({"role": "assistant", "content": assistant_response})
+
+     return chat_history
+
+ # Create Gradio interface with chatbot and textbox
+ with gr.Blocks(theme=gr.themes.Ocean()) as iface:
+     gr.Markdown("<h1 style='text-align: center;'>Cisco Configuration Assistant</h1>")
+     chatbot = gr.Chatbot(label="Cisco Configuration Chatbot", type="messages", height=500)
+     user_input = gr.Textbox(placeholder="Enter your Cisco switch/router question here...", label="Your Input")
+     with gr.Row():
+         submit_btn = gr.Button("Submit")
+         clear_btn = gr.Button("Clear Feed")
+
+     def user(query, history):
+         # Generate a response and update the history
+         history = generate_response(query, history)
+         return history, ""  # Return updated history and clear the input box
+     # Submit user input and update the chat history
+     user_input.submit(user, [user_input, chatbot], [chatbot, user_input])
+     submit_btn.click(user, [user_input, chatbot], [chatbot, user_input])
+     clear_btn.click(lambda: [], None, chatbot, queue=False)
+
+ # Launch the Gradio app
+ iface.launch()
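
Note: search_dataset assumes each record in quantumminds/cisco_cli_commands carries `command`, `description`, and optionally `examples` (with an `example_command` key). That schema is inferred from the field names used in the script, not confirmed against the dataset card. A minimal sketch for checking the assumption and exercising the lookup outside Gradio:

```python
from datasets import load_dataset

# Assumed record layout, inferred from the fields referenced in search_dataset():
#   {"command": "...", "description": "...", "examples": [{"example_command": "..."}]}
dataset = load_dataset("quantumminds/cisco_cli_commands")

# Inspect the first record to confirm the expected keys exist
print(dataset["train"][0].keys())

# Quick manual check of the same matching logic the chatbot uses
query = "How do I run show ip interface brief on a switch?"
for entry in dataset["train"]:
    if entry["command"].lower() in query.lower():
        print("Matched:", entry["command"])
        break
else:
    print("No matching command found; the app would fall back to the LLM.")
```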