|
|
import requests
|
|
|
|
|
|
|
|
|
# Base URL of the local Ollama HTTP server (11434 is Ollama's default port).
OLLAMA_BASE_URL = "http://localhost:11434"

# Ollama model name sent in each /api/generate request; used as the
# default for ask_ollama()'s `model` parameter.
MODEL_NAME = "mistral"
|
|
|
|
|
|
def ask_ollama(prompt, model=MODEL_NAME, timeout=60):
    """Send *prompt* to the local Ollama server and return the generated text.

    Args:
        prompt: The user prompt to send to the model.
        model: Ollama model name to query (defaults to ``MODEL_NAME``).
        timeout: Seconds to wait for the HTTP response before giving up.
            Added so a hung or unreachable server cannot block forever.

    Returns:
        The model's response text, a fallback message if the JSON payload
        has no ``response`` key, or a human-readable ``"Error: ..."`` string
        on any network/HTTP/decoding failure.
    """
    url = f"{OLLAMA_BASE_URL}/api/generate"
    headers = {"Content-Type": "application/json"}
    payload = {
        "model": model,
        "prompt": prompt,
        # Ask for the full completion in one response, not a token stream.
        "stream": False,
    }
    try:
        # Original call had no timeout, which can hang indefinitely if the
        # server is up but unresponsive.
        response = requests.post(url, json=payload, headers=headers, timeout=timeout)
        response.raise_for_status()
        data = response.json()
        return data.get("response", "No response from model.")
    except (requests.RequestException, ValueError) as e:
        # RequestException covers connection/timeout/HTTP errors;
        # ValueError covers a malformed JSON body from .json().
        # Narrowed from bare `except Exception`, which also hid real bugs.
        return f"Error: {e}"
|
|
|
|
|
|
|
|
|
def chat():
    """Run an interactive read-eval-print chat loop against the Ollama model.

    Type 'exit' or 'quit' (case-insensitive) to leave. Ctrl-D (EOF) and
    Ctrl-C also end the session cleanly instead of raising a traceback.
    """
    # The banner previously hard-coded "TinyLLaMA" while the configured
    # model is MODEL_NAME ("mistral"); derive it so they can't disagree.
    print(f"🤖 {MODEL_NAME} Chatbot (type 'exit' to quit)")
    while True:
        try:
            user_input = input("You: ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C: exit gracefully rather than crashing.
            print("\nGoodbye!")
            break
        if user_input.lower() in {"exit", "quit"}:
            print("Goodbye!")
            break
        response = ask_ollama(user_input)
        print(f"Bot: {response}")
|
|
|
|
|
|
if __name__ == "__main__":
    # Entry point: launch the interactive chat loop only when executed
    # directly, not when this module is imported.
    chat()
|
|
|
|