import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# Load the fine-tuned MFTCoder model
@st.cache_resource  # Cache the pipeline so the model is loaded once, not on every Streamlit rerun
def load_model():
    MODEL_NAME = "path-to-your-finetuned-model"  # Replace with your MFTCoder fine-tuned model path
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16,  # Use float16 for performance optimization
        device_map="auto"           # Automatically allocate layers to CPU/GPU
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Initialize pipeline
code_generator = load_model()
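
# Example of invoking the pipeline directly (a sketch; the actual output varies by model):
#   result = code_generator("def fibonacci(n):", max_length=64, do_sample=True)
#   print(result[0]["generated_text"])  # prompt plus the generated continuation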

# Streamlit UI
st.title("MFTCoder-powered Code Bot 🚀")
st.subheader("Generate high-quality code snippets with fine-tuned CodeLlama!")

# User input
prompt = st.text_area("Enter a code prompt to generate code:")

# Generate code
if st.button("Generate Code"):
    if prompt.strip():
        st.info("Generating code... Please wait ⏳")
        try:
            # Generate code using the fine-tuned MFTCoder model
            response = code_generator(
                prompt,
                max_length=256,    # Adjust as needed
                temperature=0.3,   # Lower temperature for more focused, deterministic outputs
                num_return_sequences=1,
                do_sample=True
            )
            # The pipeline returns the prompt plus the generated continuation
            generated_code = response[0]["generated_text"]
            # Display the code output
            st.code(generated_code, language="python")  # Default to Python for generated output
        except Exception as e:
            st.error(f"Error: {str(e)}")
    else:
        st.warning("Please enter a prompt.")
| st.caption("Created by Shamil") | |