import os

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"
# Hugging Face access token, defined as a Space Secret named "hf_token".
HF_TOKEN = os.environ.get("hf_token")

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, token=HF_TOKEN, device_map="auto"
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def bust_myth(statement: str) -> str:
    """Fact-check a cultural statement and return the model's explanation.

    Args:
        statement: A claim about a country, culture, or people
            (e.g. "All Canadians live in igloos.").

    Returns:
        The generated verdict and explanation text. Unlike the pipeline
        default, the prompt itself is NOT echoed back in the result.
    """
    prompt = f"""
You are a cultural myth-busting assistant. Your task is to:
1. Determine if the following statement is true, false, or partially true.
2. If it's a misconception, explain why it's incorrect.
3. Provide the correct information with a short cultural context.

Statement: "{statement}"
"""
    # do_sample=True is required for `temperature` to have any effect
    # (greedy decoding ignores it); return_full_text=False strips the
    # prompt from the output so the user only sees the model's answer.
    outputs = pipe(
        prompt,
        max_new_tokens=300,
        temperature=0.7,
        do_sample=True,
        return_full_text=False,
    )
    return outputs[0]["generated_text"]


demo = gr.Interface(
    fn=bust_myth,
    inputs=gr.Textbox(
        label="Enter a cultural or world belief",
        lines=3,
        placeholder="E.g. All Canadians live in igloos.",
    ),
    outputs="text",
    title="🌍 World Fact & Misconception Buster",
    description="Enter a statement about a country, culture, or people. The AI will fact-check and explain it.",
)

if __name__ == "__main__":
    demo.launch()