eddddyy committed on
Commit 5a05bc5 · verified · 1 Parent(s): d3f017f

Update app.py

Files changed (1)
  1. app.py +12 -27
app.py CHANGED
@@ -5,15 +5,12 @@ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 # -------------------------
 # CONFIGURATION
 # -------------------------
-# Switched to Qwen open-access model
-MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"
+MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"  # ✅ Text-only version
 
 # -------------------------
 # TOKEN AUTHENTICATION
 # -------------------------
-# This model still requires your Hugging Face token (must be added as secret named "HF_TOKEN")
 HF_TOKEN = os.getenv("HF_TOKEN")
-
 if not HF_TOKEN:
     raise ValueError("🚫 Hugging Face token not found. Please add 'HF_TOKEN' in your Space secrets.")
 
@@ -26,11 +23,11 @@ try:
         trust_remote_code=True,
         token=HF_TOKEN
     )
-
+
     model = AutoModelForCausalLM.from_pretrained(
         MODEL_ID,
         trust_remote_code=True,
-        device_map="auto",  # Automatically chooses GPU if available
+        device_map="auto",
         token=HF_TOKEN
     )
 except Exception as e:
@@ -39,35 +36,23 @@ except Exception as e:
 # -------------------------
 # CREATE PIPELINE
 # -------------------------
-try:
-    pipe = pipeline(
-        "text-generation",
-        model=model,
-        tokenizer=tokenizer,
-        max_new_tokens=300,
-        do_sample=True,
-        temperature=0.7,
-    )
-except Exception as e:
-    raise RuntimeError(f"🚨 Failed to initialize pipeline: {e}")
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    max_new_tokens=300,
+    do_sample=True,
+    temperature=0.7,
+)
 
 # -------------------------
 # MAIN ASSISTANT FUNCTION
 # -------------------------
 def ai_assistant(command: str) -> str:
-    """
-    Interprets a user's natural language command and returns a response.
-    Uses instruction-style prompting.
-    """
     prompt = f"User: {command}\nAssistant:"
-
     try:
         output = pipe(prompt)[0]["generated_text"]
-        # Remove the prompt portion to isolate the assistant's answer
-        if "Assistant:" in output:
-            response = output.split("Assistant:")[-1].strip()
-        else:
-            response = output.strip()
+        response = output.split("Assistant:")[-1].strip()
         return response
     except Exception as e:
         return f"⚠️ Error: {e}"
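
For a quick sanity check outside the Space, the snippet below is one way the updated module could be exercised; it is a sketch, not part of this commit. It assumes app.py is importable from the working directory and that HF_TOKEN is already exported in the environment (the same secret the Space reads). Note that importing the module immediately downloads and loads the 7B model, which needs on the order of 15 GB of GPU memory in half precision.

import os
import sys

if not os.getenv("HF_TOKEN"):
    sys.exit("Set HF_TOKEN first; app.py raises ValueError without it.")

# Importing app.py runs its module-level code: tokenizer and model download,
# then the text-generation pipeline is built.
from app import ai_assistant

# ai_assistant() wraps the command in the plain "User: ... / Assistant:" prompt
# and returns everything after the final "Assistant:" marker.
print(ai_assistant("Give me three tips for writing good commit messages."))

Dropping the old `if "Assistant:" in output` guard is safe here: `str.split` returns the whole string when the separator is absent, so `output.split("Assistant:")[-1]` still yields the full generation in that case.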