Kishoreuses5 committed on
Commit
a7561c8
·
verified ·
1 Parent(s): 8cadd28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -24
app.py CHANGED
@@ -2,49 +2,43 @@ import gradio as gr
2
  from huggingface_hub import hf_hub_download
3
  from ctransformers import AutoModelForCausalLM
4
 
5
- # Download GGUF model (public, no token required)
6
  model_path = hf_hub_download(
7
- repo_id="lmstudio-community/DeepSeek-Coder-V2-Lite-Base-GGUF",
8
- filename="DeepSeek-Coder-V2-Lite-Base-Q2_K.gguf"
9
  )
10
 
11
- # Load model
12
  llm = AutoModelForCausalLM.from_pretrained(
13
  model_path,
14
- model_type="deepseek",
15
- context_length=4096
16
  )
17
 
18
  def evaluate(code):
19
- prompt = f"""
20
- You are an AI programming evaluator.
21
 
22
- Analyze the following Python code and provide a detailed report:
23
-
24
- 1. What the program does
25
- 2. Logic correctness
26
  3. Time complexity
27
  4. Space complexity
28
- 5. Edge cases that may fail
29
- 6. Performance bottlenecks
30
- 7. Code quality / readability issues
31
- 8. Plagiarism risk (low / medium / high)
32
- 9. Suggestions for improvement
33
- 10. Improved version if useful
34
 
35
  Code:
36
  {code}
37
  """
38
  return llm(
39
  prompt,
40
- max_new_tokens=1200,
41
- temperature=0.2,
42
- top_p=0.9
43
  )
44
 
45
  gr.Interface(
46
  fn=evaluate,
47
- inputs=gr.Textbox(lines=12, label="Student Python Code"),
48
- outputs=gr.Textbox(lines=22, label="AI Evaluation Report"),
49
- title="DeepSeek-Coder V2 Lite 1.3B Q2 Code Evaluator"
50
  ).launch()
 
2
  from huggingface_hub import hf_hub_download
3
  from ctransformers import AutoModelForCausalLM
4
 
 
5
  model_path = hf_hub_download(
6
+ repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
7
+ filename="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
8
  )
9
 
 
10
  llm = AutoModelForCausalLM.from_pretrained(
11
  model_path,
12
+ model_type="llama",
13
+ context_length=4096 # <-- correct way for ctransformers
14
  )
15
 
16
  def evaluate(code):
17
+ prompt = f"""You are a code evaluator teacher.
 
18
 
19
+ Evaluate this Python code and give:
20
+ 1. What it does
21
+ 2. Correctness of logic
 
22
  3. Time complexity
23
  4. Space complexity
24
+ 5. Performance issues
25
+ 6. Code quality problems
26
+ 7. Plagiarism risk level
27
+ 8. Suggestions to improve
28
+ 9. Improved version (if needed)
 
29
 
30
  Code:
31
  {code}
32
  """
33
  return llm(
34
  prompt,
35
+ max_new_tokens=4096, # tokens to generate
36
+ temperature=0.3
 
37
  )
38
 
39
  gr.Interface(
40
  fn=evaluate,
41
+ inputs="text",
42
+ outputs="text",
43
+ title="Code Evaluation (TinyLlama 1.1B4096 tokens)"
44
  ).launch()