File size: 988 Bytes
bb6e1c1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
from typing import Optional
from huggingface_hub import InferenceClient
import os


# Model repo id used for generation; overridable via the HF_CODE_MODEL env var.
DEFAULT_MODEL = os.environ.get("HF_CODE_MODEL", "HuggingFaceH4/zephyr-7b-beta")


def generate_code(
    prompt: str,
    language: str = "python",
    max_new_tokens: int = 512,
    model: Optional[str] = None,
) -> str:
    """Generate code or technical text via the Hugging Face Inference API.

    Falls back to a simple template string if anything in the API path fails
    (client construction, network call, or response handling).

    Args:
        prompt: Task description to send to the model.
        language: Target programming language, injected into the prompt.
        max_new_tokens: Upper bound on generated tokens.
        model: Optional model repo id; defaults to ``DEFAULT_MODEL``
            (the ``HF_CODE_MODEL`` env var or its built-in default).

    Returns:
        The generated text, stripped of surrounding whitespace, or a
        fallback comment-plus-template string describing the error.
    """
    system_hint = f"You are an expert {language} code assistant. Output only code unless explanation is requested."
    full_prompt = f"{system_hint}\nTask: {prompt}\nLanguage: {language}"
    try:
        # Construct the client inside the try block so that a constructor
        # failure (e.g. malformed token) also triggers the documented
        # fallback instead of propagating to the caller.
        client = InferenceClient(token=os.environ.get("HF_TOKEN"))
        text = client.text_generation(
            model=model or DEFAULT_MODEL,
            prompt=full_prompt,
            max_new_tokens=max_new_tokens,
            temperature=0.2,
        )
        return text.strip()
    except Exception as e:
        # Deliberate best-effort: any failure degrades to a template rather
        # than raising, as promised by the docstring.
        return f"# Fallback template due to API error: {e}\nprint('Hello from {language} generator')\n"