sikeaditya committed on
Commit
834496f
·
verified ·
1 Parent(s): 540b4e0

Upload 2 files

Browse files
Files changed (2) hide show
  1. .gitignore +8 -0
  2. app.py +41 -0
.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Ignore virtual environment files
2
+ venv/
3
+ env/
4
+ *.env
5
+ # Ignore IDE files
6
+ .vscode/
7
+ .idea/
8
+ .pycharm/
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import HfFolder
from transformers import AutoModelForCausalLM, AutoTokenizer

# Pull HF_API_KEY (and any other settings) from a local .env file into
# the process environment so the app works outside a configured Space.
load_dotenv()

# Hugging Face access token; None when not configured.
api_key = os.getenv('HF_API_KEY')

model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
# BUGFIX: the original passed `use_auth_token=True` and never used the
# `api_key` it read above — downloads of this gated model would only
# succeed if CLI-cached credentials happened to exist. `use_auth_token`
# is also deprecated in recent transformers releases in favour of
# `token=`, so pass the configured token explicitly.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=api_key)
model = AutoModelForCausalLM.from_pretrained(model_name, token=api_key)
15
+
16
# Define chatbot function
def chatbot(prompt):
    """Answer a coding question with the Mixtral instruct model.

    Args:
        prompt: The user's question as plain text.

    Returns:
        A Markdown-formatted string containing only the newly generated
        answer (the input prompt is stripped from the model output).
    """
    system_prompt = (
        "You are a helpful coding assistant. Answer questions with clear and concise explanations. "
        "Provide examples using proper Markdown formatting for code blocks.\n\n"
        "Question: {user_prompt}\n\nAnswer:"
    )
    final_prompt = system_prompt.format(user_prompt=prompt)
    inputs = tokenizer(final_prompt, return_tensors="pt").to(model.device)
    # BUGFIX: do_sample=True is required for temperature/top_p to take
    # effect — without it generate() uses greedy decoding and silently
    # ignores both sampling parameters.
    outputs = model.generate(
        **inputs, max_new_tokens=512, do_sample=True, temperature=0.7, top_p=0.9
    )
    # BUGFIX: outputs[0] contains the input prompt followed by the
    # completion; the original decoded the whole sequence and echoed the
    # system prompt back to the user. Decode only the new tokens.
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    return f"**Here is the response:**\n\n{response.strip()}"
28
+
29
+
# Assemble the UI configuration first, then construct the Gradio
# interface from it: one free-text input, one text output, wired to the
# chatbot() function above.
_ui_config = dict(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Coding Chatbot",
    description="Ask coding questions and get AI-generated code!",
)
interface = gr.Interface(**_ui_config)

# Start the web server only when this file is run as a script (not when
# it is imported as a module).
if __name__ == "__main__":
    interface.launch()