nihardon committed on
Commit
1df3112
·
verified ·
1 Parent(s): 43200d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -2,13 +2,12 @@ import gradio as gr
2
  from huggingface_hub import hf_hub_download
3
  from llama_cpp import Llama
4
 
5
- # 1. Download your specific GGUF file
6
  model_path = hf_hub_download(
7
  repo_id="nihardon/fine-tuned-unit-test-generator",
8
- filename="llama-3-8b.Q4_K_M.gguf", # <--- DOUBLE CHECK THIS NAME!
9
  )
10
 
11
- # 2. Load the Model (CPU Optimized)
12
  llm = Llama(
13
  model_path=model_path,
14
  n_ctx=2048,
@@ -29,7 +28,7 @@ You are an expert Python QA engineer. Write a pytest unit test for the following
29
  output = llm(prompt, max_tokens=256, stop=["### Instruction:"], echo=False)
30
  return output['choices'][0]['text'].strip()
31
 
32
- # 3. The UI
33
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
34
  gr.Markdown("# 🧪 AI Unit Test Generator")
35
  gr.Markdown("**Model:** Custom Fine-Tuned Llama-3 (GGUF) | **Status:** Running Locally")
 
2
  from huggingface_hub import hf_hub_download
3
  from llama_cpp import Llama
4
 
 
5
  model_path = hf_hub_download(
6
  repo_id="nihardon/fine-tuned-unit-test-generator",
7
+ filename="llama-3-8b.Q4_K_M.gguf",
8
  )
9
 
10
+ # Load the model (CPU optimized)
11
  llm = Llama(
12
  model_path=model_path,
13
  n_ctx=2048,
 
28
  output = llm(prompt, max_tokens=256, stop=["### Instruction:"], echo=False)
29
  return output['choices'][0]['text'].strip()
30
 
31
+ # UI
32
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
33
  gr.Markdown("# 🧪 AI Unit Test Generator")
34
  gr.Markdown("**Model:** Custom Fine-Tuned Llama-3 (GGUF) | **Status:** Running Locally")