SHAMIL SHAHBAZ AWAN committed on
Commit
e365949
·
verified ·
1 Parent(s): e224900

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -6,7 +6,7 @@ from accelerate import init_empty_weights, load_checkpoint_and_dispatch
6
  # Load the model using Accelerate for memory optimization
7
  @st.cache_resource()
8
  def load_model():
9
- MODEL_NAME = "codellama/CodeLlama-7b-hf" # Model name
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
11
 
12
  # Load model with accelerate to optimize for memory usage
@@ -30,8 +30,8 @@ def load_model():
30
  code_generator = load_model()
31
 
32
  # Streamlit UI
33
- st.title("CodeLlama-7B Code Bot 🚀")
34
- st.subheader("Generate code snippets using CodeLlama-7b-hf optimized for CPU")
35
 
36
  # User input
37
  prompt = st.text_area("Enter a coding prompt (e.g., 'Write a Python function to sort a list'): ")
@@ -41,7 +41,7 @@ if st.button("Generate Code"):
41
  if prompt.strip():
42
  st.info("Generating code... Please wait ⏳")
43
  try:
44
- # Generate code using the CodeLlama model
45
  response = code_generator(
46
  prompt,
47
  max_length=512, # Increase for longer code generation
@@ -57,4 +57,4 @@ if st.button("Generate Code"):
57
  else:
58
  st.warning("Please enter a prompt.")
59
 
60
- st.caption("Powered by CodeLlama-7B | Streamlit UI | CPU Optimized")
 
6
  # Load the model using Accelerate for memory optimization
7
  @st.cache_resource()
8
  def load_model():
9
+ MODEL_NAME = "huggingface/CodeGen-2B" # Updated model name
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
11
 
12
  # Load model with accelerate to optimize for memory usage
 
30
  code_generator = load_model()
31
 
32
  # Streamlit UI
33
+ st.title("CodeGen-2B Code Bot 🚀")
34
+ st.subheader("Generate code snippets using Hugging Face CodeGen-2B")
35
 
36
  # User input
37
  prompt = st.text_area("Enter a coding prompt (e.g., 'Write a Python function to sort a list'): ")
 
41
  if prompt.strip():
42
  st.info("Generating code... Please wait ⏳")
43
  try:
44
+ # Generate code using the CodeGen-2B model
45
  response = code_generator(
46
  prompt,
47
  max_length=512, # Increase for longer code generation
 
57
  else:
58
  st.warning("Please enter a prompt.")
59
 
60
+ st.caption("Created by Shamil")