import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
from accelerate import init_empty_weights, load_checkpoint_and_dispatch

# Load the model once per server process (memory-efficient CPU loading)
@st.cache_resource()
def load_model():
    """Load CodeGen-2B and its tokenizer; return a text-generation pipeline.

    Cached with ``st.cache_resource`` so the large model is downloaded and
    instantiated only once per Streamlit server process.

    Returns:
        transformers.Pipeline: a "text-generation" pipeline backed by
        Salesforce/codegen-2B-multi running on the CPU.
    """
    MODEL_NAME = "Salesforce/codegen-2B-multi"
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

    # BUG FIX: the previous code built the model under init_empty_weights()
    # (meta tensors containing no data) and then called
    # load_checkpoint_and_dispatch() with the Hub model id as its checkpoint
    # argument — that function expects a *local* checkpoint path, so the app
    # crashed at startup. from_pretrained() with low_cpu_mem_usage=True
    # already streams weights in via accelerate without a second in-memory
    # copy, and device_map="cpu" places the loaded weights on the CPU.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float32,  # float32 is the safe dtype on CPU
        low_cpu_mem_usage=True,     # accelerate-backed low-memory loading
        device_map="cpu",           # keep all weights on the CPU
    )

    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Build (or fetch from cache) the generation pipeline at startup.
code_generator = load_model()

# --- Page chrome ---
st.title("CodeGen-2B Code Bot 🚀")
st.subheader("Generate code snippets using Hugging Face CodeGen-2B")

# --- Prompt input ---
prompt = st.text_area("Enter a coding prompt (e.g., 'Write a Python function to sort a list'): ")

# --- Generation on button press ---
if st.button("Generate Code"):
    if not prompt.strip():
        # Guard clause: nothing but whitespace was entered.
        st.warning("Please enter a prompt.")
    else:
        st.info("Generating code... Please wait ⏳")
        try:
            # Run the model; sampling with a low temperature keeps the
            # output mostly deterministic while avoiding greedy loops.
            outputs = code_generator(
                prompt,
                max_length=512,       # upper bound on generated length
                temperature=0.2,      # low temperature → focused output
                do_sample=True,       # sampling enabled
                num_return_sequences=1,
            )
            # Show the single returned completion as Python source.
            st.code(outputs[0]['generated_text'], language="python")
        except Exception as err:
            st.error(f"Error: {str(err)}")

st.caption("Created by Shamil")