Alpha108 committed on
Commit
33a8f0a
·
verified ·
1 Parent(s): 7b65583

Create llm_groq.py

Browse files
Files changed (1) hide show
  1. llm_groq.py +69 -0
llm_groq.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from typing import List, Dict, Optional

# The 'groq' SDK is optional at import time: fall back to None so this
# module can still be imported (with a clear error raised later in
# get_client) when the package is missing from the environment.
try:
    from groq import Groq
except ImportError:
    Groq = None

# Default Groq-hosted model used when callers do not choose one explicitly.
DEFAULT_MODEL = "llama-3.3-70b-versatile"
10
+
11
def get_client() -> "Groq":
    """Build an authenticated Groq client from the environment.

    Returns:
        A ``Groq`` client configured with the ``GROQ_API_KEY`` env var.

    Raises:
        RuntimeError: If the ``groq`` package is not installed, or the
            ``GROQ_API_KEY`` environment variable is not set.
    """
    # Check for the missing package first: without the SDK installed,
    # fixing the API key alone cannot help, so surface that error before
    # the key error rather than after it.
    if Groq is None:
        raise RuntimeError("Package 'groq' not installed. Add 'groq' to requirements.txt.")
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        raise RuntimeError("Missing GROQ_API_KEY (set in Space → Settings → Variables & Secrets).")
    return Groq(api_key=api_key)
18
+
19
def chat_once(messages: List[Dict[str, str]],
              model: str = DEFAULT_MODEL,
              temperature: float = 0.6,
              top_p: float = 0.9,
              max_tokens: int = 600) -> str:
    """Run a single non-streaming chat completion and return its text.

    Args:
        messages: Chat history as ``{"role": ..., "content": ...}`` dicts.
        model: Groq model identifier (defaults to ``DEFAULT_MODEL``).
        temperature: Sampling temperature passed through to the API.
        top_p: Nucleus-sampling parameter passed through to the API.
        max_tokens: Upper bound on generated tokens.

    Returns:
        The stripped text of the first completion choice ("" if the API
        returned no content).

    Raises:
        RuntimeError: Propagated from ``get_client`` when the SDK or the
            API key is missing.
    """
    client = get_client()
    resp = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_tokens,
    )
    # OpenAI-compatible APIs may return None for message.content; guard
    # before .strip() so we don't raise AttributeError on an empty reply.
    content = resp.choices[0].message.content
    return (content or "").strip()
33
+
34
def generate_post(prompt: str,
                  model: str,
                  temperature: float,
                  top_p: float,
                  max_tokens: int) -> str:
    """Generate a LinkedIn post from a free-form user prompt.

    Wraps the prompt with a fixed system instruction and delegates to
    ``chat_once``; returns the model's plain-text post.
    """
    system_msg = {"role": "system", "content": "You craft concise, original, high-signal LinkedIn posts. Respond with plain text only."}
    user_msg = {"role": "user", "content": prompt}
    return chat_once([system_msg, user_msg], model, temperature, top_p, max_tokens)
44
+
45
def transform_post(instruction: str,
                   post_text: str,
                   model: str,
                   temperature: float,
                   top_p: float,
                   max_tokens: int) -> str:
    """Apply an editing instruction to an existing post and return the result.

    Builds a two-message conversation (fixed editor system prompt plus the
    instruction/post pair) and delegates to ``chat_once``.
    """
    payload = f"Instruction:\n{instruction}\n\nPost:\n{post_text}"
    conversation = [
        {"role": "system", "content": "You are a precise LinkedIn editor. Respond with plain text only."},
        {"role": "user", "content": payload},
    ]
    return chat_once(conversation, model, temperature, top_p, max_tokens)
56
+
57
def generate_hooks(topic: str,
                   audience: str,
                   tone: str,
                   count: int,
                   model: str,
                   temperature: float,
                   top_p: float,
                   max_tokens: int) -> str:
    """Generate `count` candidate opening lines ("hooks") for a post.

    Composes a request describing topic/audience/tone and delegates to
    ``chat_once``; the model returns one hook per line as plain text.
    """
    request = (
        f"Create {count} distinct, curiosity-driving first lines for a post."
        f"\nTopic: {topic}\nAudience: {audience}\nTone: {tone}"
        "\nRules: 1 line each, no labels, no emojis."
    )
    conversation = [
        {"role": "system", "content": "You generate punchy first lines for viral LinkedIn posts."},
        {"role": "user", "content": request},
    ]
    return chat_once(conversation, model, temperature, top_p, max_tokens)