|
|
import os |
|
|
import requests |
|
|
import json |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
# Load environment variables from a local .env file (silently no-op if absent).
load_dotenv()


# Hugging Face API token read from the environment; None when HF_TOKEN is unset
# (requests would then be sent with "Bearer None" and rejected by the API).
HF_TOKEN = os.getenv("HF_TOKEN")
|
|
|
|
|
|
|
|
def apply_tone(prompt, tone):
    """Prepend a tone instruction to *prompt*.

    Args:
        prompt: The user prompt to decorate.
        tone: One of the known tone labels ("Formal", "Casual", "Humorous",
            "Professional", "Sarcastic"). Any other value leaves the prompt
            unchanged.

    Returns:
        The instruction followed by a blank line and the original prompt,
        or the prompt untouched when the tone is unknown.
    """
    tone_instructions = {
        "Formal": "Jawab dengan gaya bahasa formal dan sopan.",
        "Casual": "Jawab dengan gaya santai dan mudah dimengerti.",
        "Humorous": "Jawab dengan gaya lucu dan menghibur.",
        "Professional": "Jawab dengan gaya profesional dan ringkas.",
        "Sarcastic": "Jawab dengan gaya sarkastik tapi tetap informatif."
    }
    instruction = tone_instructions.get(tone, "")
    # Bug fix: previously an unknown tone still produced "\n\n" + prompt,
    # leaving dangling blank lines. Mirror apply_format and return the
    # prompt as-is when there is no instruction.
    return f"{instruction}\n\n{prompt}" if instruction else prompt
|
|
|
|
|
|
|
|
def apply_language(prompt, language):
    """Wrap *prompt* with an answer-in-this-language instruction.

    Supported values for *language* are "Indonesia", "English" and
    "Chinese"; anything else returns the prompt unchanged.
    """
    headers = {
        "Indonesia": "Jawab pertanyaan berikut dalam bahasa Indonesia:",
        "English": "Answer the following question in English:",
        "Chinese": "请用中文回答以下问题:",
    }
    header = headers.get(language)
    if header is None:
        return prompt
    return f"{header}\n\n{prompt}"
|
|
|
|
|
|
|
|
def apply_format(prompt, response_format):
    """Prefix *prompt* with a response-format instruction.

    "Bullet Points" and "Code" add a directive line; "Text" and any
    unrecognized value leave the prompt unchanged.
    """
    if response_format == "Bullet Points":
        prefix = "Jawab dalam bentuk poin-poin yang jelas dan terstruktur."
    elif response_format == "Code":
        prefix = "Jawab dalam format kode atau blok teknis jika memungkinkan."
    else:
        # "Text" is explicitly a no-op; unknown formats fall through too.
        prefix = ""
    if not prefix:
        return prompt
    return f"{prefix}\n\n{prompt}"
|
|
|
|
|
|
|
|
def query_huggingface(prompt, model="mistralai/Mistral-7B-Instruct-v0.2", temperature=0.7, max_tokens=500):
    """Send *prompt* to the Hugging Face Inference API and return the text.

    Args:
        prompt: Fully composed prompt string to send as "inputs".
        model: Model repository id used to build the inference URL.
        temperature: Sampling temperature forwarded to the model.
        max_tokens: Forwarded as "max_new_tokens".

    Returns:
        The generated text when the API responds with the usual
        list/dict shapes, the raw JSON dumped to a string for any other
        shape, or a "⚠️ Error ..." string on any failure (this function
        never raises — callers rely on the string-error contract).
    """
    try:
        headers = {
            "Authorization": f"Bearer {HF_TOKEN}",
            "Content-Type": "application/json"
        }
        payload = {
            "inputs": prompt,
            "parameters": {
                "temperature": temperature,
                "max_new_tokens": max_tokens
            }
        }
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{model}",
            headers=headers,
            json=payload,
            # Bug fix: without a timeout a stalled connection hangs forever.
            # 60s allows for cold-start model loading on the inference API.
            timeout=60
        )
        response.raise_for_status()
        result = response.json()
        # The inference API usually returns [{"generated_text": ...}];
        # some deployments return a bare dict instead.
        if isinstance(result, list):
            return result[0].get("generated_text", "")
        elif isinstance(result, dict) and "generated_text" in result:
            return result["generated_text"]
        else:
            # Unknown shape: surface the raw payload for debugging.
            return json.dumps(result)
    except Exception as e:
        # Broad by design: network errors, HTTP errors, bad JSON and
        # shape errors all collapse into a user-visible error string.
        return f"⚠️ Error from Hugging Face: {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_response(prompt, backend="HuggingFace", model="mistralai/Mistral-7B-Instruct-v0.2", temperature=0.7, max_tokens=500):
    """Route *prompt* to the requested backend and return its reply.

    Only the "HuggingFace" backend is implemented; any other value
    yields an error string instead of raising.
    """
    # Guard clause: bail out early on unsupported backends.
    if backend != "HuggingFace":
        return "❌ Backend tidak dikenali."
    return query_huggingface(prompt, model, temperature, max_tokens)