Upload folder using huggingface_hub

Files changed:
- app.py  +7 -7
- inference.py  +10 -4
- requirements.txt  +2 -1
app.py
CHANGED

@@ -1,15 +1,17 @@
 """
-Gradio Space: Python docstring generation.
+Gradio Space: Python docstring generation.
+HF runs the app; do not call demo.launch() here.
 """
 import gradio as gr
-from inference import generate_docstring
-
 
 def summarize_code(code: str) -> str:
     if not code or not code.strip():
         return "Paste a Python code snippet above."
-    doc = generate_docstring(code, model_name="t5-small", max_length=128, num_beams=4)
-    return doc
+    try:
+        from inference import generate_docstring
+        return generate_docstring(code, model_name="t5-small", max_length=128, num_beams=4)
+    except Exception as e:
+        return f"Error: {str(e)}. (Model may be loading; try again.)"
 
 demo = gr.Interface(
     fn=summarize_code,
@@ -22,5 +24,3 @@ demo = gr.Interface(
     title="Python Docstring Generator",
     description="Paste a Python function or snippet to get a short docstring summary.",
 )
-
-demo.launch()
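Note on the launch change: the new module docstring says the Space runtime serves the `demo` object itself, which is why the unconditional `demo.launch()` at the bottom is dropped. If you still want `python app.py` to work for local testing, a common pattern (a sketch, not part of this commit) is to guard the call:

# Local development only: on Spaces, app.py is imported and `demo` is served
# by the runtime, so this launch call runs only when the file is executed directly.
if __name__ == "__main__":
    demo.launch()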
inference.py
CHANGED

@@ -1,9 +1,9 @@
 """
-Inference for docstring generation. Uses T5.
+Inference for docstring generation. Uses T5 (cached after first load).
 """
-from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 import torch
 
+_cache = {}
 
 def generate_docstring(
     code: str,
@@ -14,8 +14,14 @@ def generate_docstring(
 ) -> str:
     if device is None:
         device = "cuda" if torch.cuda.is_available() else "cpu"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
+    if model_name not in _cache:
+        from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+        _cache[model_name] = {
+            "tokenizer": AutoTokenizer.from_pretrained(model_name),
+            "model": AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device),
+        }
+    tokenizer = _cache[model_name]["tokenizer"]
+    model = _cache[model_name]["model"]
     input_text = "summarize: " + code
     inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=512).to(device)
     with torch.no_grad():
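The module-level `_cache` memoizes the tokenizer/model pair per `model_name`, so only the first request pays the `from_pretrained` load; moving the transformers import inside the cache-miss branch also keeps app startup fast. A minimal usage sketch (illustrative only, not part of the commit):

from inference import generate_docstring

snippet = "def add(a, b):\n    return a + b"
# First call downloads/loads t5-small and stores it in _cache.
print(generate_docstring(snippet, model_name="t5-small", max_length=128, num_beams=4))
# Second call reuses the cached tokenizer and model; only generation runs.
print(generate_docstring(snippet, model_name="t5-small", max_length=128, num_beams=4))

One caveat of this design: the cache key is `model_name` alone, so whatever device the first call resolves is the device the cached model stays on for later calls.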
requirements.txt
CHANGED

@@ -1,3 +1,4 @@
 torch>=2.0.0
 transformers>=4.30.0
-gradio>=4.0.0
+gradio>=4.0.0,<5.0.0
+accelerate>=0.20.0