import gradio as gr
import torch
import os
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Read the Hugging Face access token from the Space secret.
# It must be configured in the Space settings because MODEL_REPO below
# is a private repository and cannot be downloaded anonymously.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    # Fail fast at startup with an actionable message rather than a
    # confusing 401 later during model download.
    raise ValueError("HF_TOKEN secret not found! Add it in Space Settings -> Secrets")

# Private, fully-merged 1.5B model repository (no LoRA adapters to attach).
MODEL_REPO = "Chvigo/pkil-1.5B"
print("Loading tokenizer from private repo...")
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_REPO,
    token=HF_TOKEN,  # required: the repo is private
    trust_remote_code=True,
    padding_side="left",  # left-pad so decoder-only generation starts at the prompt edge
)
# The checkpoint defines no dedicated pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

print("Loading full merged model (1.5B pure weights)...")
model = AutoModelForCausalLM.from_pretrained(
    MODEL_REPO,
    token=HF_TOKEN,  # required: the repo is private
    torch_dtype=torch.bfloat16,
    device_map="auto",  # let accelerate place layers on available devices
    trust_remote_code=True,
    low_cpu_mem_usage=True,  # stream weights instead of a full CPU copy
)
model.eval()  # inference only: disable dropout and training-mode layers
print("PKIL GOD MODE LOADED - FULL OWNERSHIP ACTIVE")
# Decoding configuration shared by every request. Kept in one named dict so
# the sampling knobs are easy to scan and tweak in one place.
generation_settings = {
    "max_new_tokens": 350,
    "temperature": 0.75,
    "top_p": 0.90,
    "repetition_penalty": 1.28,
    "do_sample": True,
    "pad_token_id": tokenizer.eos_token_id,  # tokenizer has no real pad token
}

# Reusable text-generation pipeline built on the already-loaded model.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    **generation_settings,
)
def generate(prompt):
    """Generate a LinkedIn-style post from the user's prompt.

    Args:
        prompt: Free-form topic or situation text from the UI textbox.

    Returns:
        str: The model's continuation only (``return_full_text=False``
        strips the echoed prompt).
    """
    messages = [
        # System prompt pins the house style. The hashtag range was mojibake
        # in the original source ("0β3"); restored to a plain ASCII "0-3".
        {
            "role": "system",
            "content": "Write in short powerful lines. 0-3 hashtags max. End with question. Be emotional. No spam.",
        },
        {"role": "user", "content": prompt},
    ]
    out = generator(messages, return_full_text=False)
    return out[0]["generated_text"]
# --- Gradio UI --------------------------------------------------------------
# Mojibake in the user-facing strings ("β’", trailing "β") is replaced with
# plain ASCII separators so the UI renders cleanly.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# PKIL LinkedIn Ghostwriter\nFull 1.5B - No LoRA - Private - God Mode")
    txt = gr.Textbox(label="Prompt", lines=4, placeholder="Just got promoted after 3 years of silence...")
    btn = gr.Button("Generate", variant="primary")
    out = gr.Textbox(label="Your Post", lines=16)
    # Wire the button: prompt textbox in, generated post textbox out.
    btn.click(generate, txt, out)

demo.launch()