Chvigo committed on
Commit
286ec40
·
verified ·
1 Parent(s): 51818ba
Files changed (1) hide show
  1. app.py +23 -29
app.py CHANGED
@@ -3,67 +3,61 @@ import torch
3
  import os
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
5
 
6
- # ←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←
7
- # CRITICAL: Use secret token (THIS IS THE ONLY WAY THAT WORKS IN SPACES)
8
- # ←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←
9
- from huggingface_hub import login
10
- login(token=os.getenv("HF_TOKEN")) # ← Go to Space Settings β†’ Secrets β†’ HF_TOKEN
11
 
12
- # Your full merged model (private is fine now)
13
- MODEL_REPO = "Chvigo/pkil-1.5B-merged-v1"
14
 
15
- print("Loading tokenizer and model from private repo...")
16
  tokenizer = AutoTokenizer.from_pretrained(
17
  MODEL_REPO,
 
18
  trust_remote_code=True,
19
- token=os.getenv("HF_TOKEN") # ← Force token here too
20
  )
 
21
 
 
22
  model = AutoModelForCausalLM.from_pretrained(
23
  MODEL_REPO,
 
24
  torch_dtype=torch.bfloat16,
25
  device_map="auto",
26
  trust_remote_code=True,
27
- token=os.getenv("HF_TOKEN"), # ← Critical line
28
  low_cpu_mem_usage=True
29
  )
30
 
31
  model.eval()
32
- print("Model loaded successfully β€” running full merged weights!")
33
 
34
  generator = pipeline(
35
  "text-generation",
36
  model=model,
37
  tokenizer=tokenizer,
38
- max_new_tokens=320,
39
  temperature=0.75,
40
  top_p=0.90,
41
- repetition_penalty=1.25,
42
  do_sample=True,
43
  pad_token_id=tokenizer.eos_token_id
44
  )
45
 
46
- def generate_linkedin_post(prompt):
47
  messages = [
48
- {"role": "system", "content": "You are the best LinkedIn copywriter alive. Write in short, powerful lines like Steve Jobs. Use 0–3 hashtags max. End with a real question 80% of the time. Be emotional, authentic, confident."},
49
  {"role": "user", "content": prompt}
50
  ]
51
- output = generator(messages, return_full_text=False)
52
- return output[0]["generated_text"].strip()
53
 
54
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
55
- gr.Markdown("# PKIL LinkedIn Ghostwriter\nFull 1.5B weights β€’ No LoRA β€’ Private repo")
56
-
57
- prompt = gr.Textbox(label="Prompt", placeholder="Just got promoted after 3 years...", lines=4)
58
  btn = gr.Button("Generate β†’", variant="primary")
59
- output = gr.Textbox(label="Your Post", lines=15)
60
-
61
- btn.click(generate_linkedin_post, prompt, output)
62
-
63
- gr.Examples([
64
- ["Just got promoted after being rejected twice"],
65
- ["Announce hitting $100k MRR bootstrapped"],
66
- ["Finally said no to a toxic client"],
67
- ], prompt)
68
 
69
  demo.launch()
 
3
  import os
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
5
 
6
+ # GET TOKEN FROM SPACE SECRET (THIS IS THE ONLY WAY THAT WORKS)
7
+ HF_TOKEN = os.getenv("HF_TOKEN")
8
+ if not HF_TOKEN:
9
+ raise ValueError("HF_TOKEN secret not found! Add it in Space Settings β†’ Secrets")
 
10
 
11
+ # YOUR PRIVATE FULL MERGED MODEL
12
+ MODEL_REPO = "Chvigo/pkil-1.5B"
13
 
14
+ print("Loading tokenizer from private repo...")
15
  tokenizer = AutoTokenizer.from_pretrained(
16
  MODEL_REPO,
17
+ token=HF_TOKEN, # ← CRITICAL
18
  trust_remote_code=True,
19
+ padding_side="left"
20
  )
21
+ tokenizer.pad_token = tokenizer.eos_token
22
 
23
+ print("Loading full merged model (1.5B pure weights)...")
24
  model = AutoModelForCausalLM.from_pretrained(
25
  MODEL_REPO,
26
+ token=HF_TOKEN, # ← CRITICAL
27
  torch_dtype=torch.bfloat16,
28
  device_map="auto",
29
  trust_remote_code=True,
 
30
  low_cpu_mem_usage=True
31
  )
32
 
33
  model.eval()
34
+ print("PKIL GOD MODE LOADED β€” FULL OWNERSHIP ACTIVE")
35
 
36
  generator = pipeline(
37
  "text-generation",
38
  model=model,
39
  tokenizer=tokenizer,
40
+ max_new_tokens=350,
41
  temperature=0.75,
42
  top_p=0.90,
43
+ repetition_penalty=1.28,
44
  do_sample=True,
45
  pad_token_id=tokenizer.eos_token_id
46
  )
47
 
48
+ def generate(prompt):
49
  messages = [
50
+ {"role": "system", "content": "Write in short powerful lines. 0–3 hashtags max. End with question. Be emotional. No spam."},
51
  {"role": "user", "content": prompt}
52
  ]
53
+ out = generator(messages, return_full_text=False)
54
+ return out[0]["generated_text"]
55
 
56
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
57
+ gr.Markdown("# PKIL LinkedIn Ghostwriter\nFull 1.5B β€’ No LoRA β€’ Private β€’ God Mode")
58
+ txt = gr.Textbox(label="Prompt", lines=4, placeholder="Just got promoted after 3 years of silence...")
 
59
  btn = gr.Button("Generate β†’", variant="primary")
60
+ out = gr.Textbox(label="Your Post", lines=16)
61
+ btn.click(generate, txt, out)
 
 
 
 
 
 
 
62
 
63
  demo.launch()