# pkil / app.py
import gradio as gr
import torch
import os
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# GET TOKEN FROM SPACE SECRET (THIS IS THE ONLY WAY THAT WORKS)
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN secret not found! Add it in Space Settings → Secrets")
# YOUR PRIVATE FULL MERGED MODEL
MODEL_REPO = "Chvigo/pkil-1.5B"
print("Loading tokenizer from private repo...")
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_REPO,
    token=HF_TOKEN,  # ← CRITICAL
    trust_remote_code=True,
    padding_side="left"
)
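# Many chat-tuned checkpoints ship without a dedicated pad token, so fall back to EOS;
# left padding keeps batched generation aligned for decoder-only models.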
tokenizer.pad_token = tokenizer.eos_token
print("Loading full merged model (1.5B pure weights)...")
model = AutoModelForCausalLM.from_pretrained(
    MODEL_REPO,
    token=HF_TOKEN,  # ← CRITICAL
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
    low_cpu_mem_usage=True
)
model.eval()
print("PKIL GOD MODE LOADED β€” FULL OWNERSHIP ACTIVE")
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=350,
    temperature=0.75,
    top_p=0.90,
    repetition_penalty=1.28,
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id
)
def generate(prompt):
    messages = [
        {"role": "system", "content": "Write in short powerful lines. 0–3 hashtags max. End with question. Be emotional. No spam."},
        {"role": "user", "content": prompt}
    ]
    # Passing chat-format messages lets the pipeline apply the tokenizer's chat template.
    out = generator(messages, return_full_text=False)
    return out[0]["generated_text"]
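# Example call (hypothetical prompt, for illustration only):
#   generate("Just closed my first deal after months of rejections")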
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# PKIL LinkedIn Ghostwriter\nFull 1.5B • No LoRA • Private • God Mode")
    txt = gr.Textbox(label="Prompt", lines=4, placeholder="Just got promoted after 3 years of silence...")
    btn = gr.Button("Generate →", variant="primary")
    out = gr.Textbox(label="Your Post", lines=16)
    btn.click(generate, txt, out)
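# launch() starts the Gradio server; on Spaces the default host/port settings are fine.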
demo.launch()