eddddyy committed on
Commit
d3f017f
·
verified ·
1 Parent(s): fffaa57

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -25
app.py CHANGED
@@ -5,14 +5,13 @@ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
5
  # -------------------------
6
  # CONFIGURATION
7
  # -------------------------
8
- # The model you want to use (must have access from Hugging Face)
9
- MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"
10
 
11
  # -------------------------
12
  # TOKEN AUTHENTICATION
13
  # -------------------------
14
- # Your Hugging Face Access Token must be set in the HF Space as a Secret named "HF_TOKEN"
15
- # To do this, go to your Hugging Face Space > Settings > Secrets > Add "HF_TOKEN"
16
  HF_TOKEN = os.getenv("HF_TOKEN")
17
 
18
  if not HF_TOKEN:
@@ -22,40 +21,53 @@ if not HF_TOKEN:
22
  # LOAD TOKENIZER & MODEL
23
  # -------------------------
24
  try:
25
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
26
- model = AutoModelForCausalLM.from_pretrained(MODEL_ID, token=HF_TOKEN)
 
 
 
 
 
 
 
 
 
 
27
  except Exception as e:
28
  raise RuntimeError(f"🚨 Failed to load model: {e}")
29
 
30
  # -------------------------
31
  # CREATE PIPELINE
32
  # -------------------------
33
- pipe = pipeline(
34
- "text-generation",
35
- model=model,
36
- tokenizer=tokenizer,
37
- max_new_tokens=100,
38
- do_sample=True,
39
- temperature=0.7,
40
- )
 
 
 
41
 
42
  # -------------------------
43
  # MAIN ASSISTANT FUNCTION
44
  # -------------------------
45
  def ai_assistant(command: str) -> str:
46
  """
47
- Takes a natural language command and returns the assistant's response.
 
48
  """
49
- prompt = (
50
- "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n"
51
- f"{command}"
52
- "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"
53
- )
54
-
55
  try:
56
  output = pipe(prompt)[0]["generated_text"]
57
- # Parse only the assistant response
58
- response = output.split("<|eot_id|>")[0].split("<|end_header_id|>\n")[-1].strip()
 
 
 
59
  return response
60
  except Exception as e:
61
  return f"⚠️ Error: {e}"
@@ -67,8 +79,8 @@ demo = gr.Interface(
67
  fn=ai_assistant,
68
  inputs=gr.Textbox(lines=2, placeholder="e.g. Open Chrome or Take a screenshot"),
69
  outputs="text",
70
- title="🧠 LLaMA 3.1 AI Assistant",
71
- description="Enter a command. The AI assistant will interpret and respond like a smart OS assistant.",
72
  allow_flagging="never"
73
  )
74
 
 
5
# -------------------------
# CONFIGURATION
# -------------------------
# Text-only open-access instruct model.
# NOTE: the previously configured "Qwen/Qwen2.5-VL-7B-Instruct" is a
# vision-language model; it cannot be loaded through AutoModelForCausalLM
# and is not compatible with the "text-generation" pipeline used below.
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
10
 
11
  # -------------------------
12
  # TOKEN AUTHENTICATION
13
  # -------------------------
14
+ # This model still requires your Hugging Face token (must be added as secret named "HF_TOKEN")
 
15
  HF_TOKEN = os.getenv("HF_TOKEN")
16
 
17
  if not HF_TOKEN:
 
21
# -------------------------
# LOAD TOKENIZER & MODEL
# -------------------------
# SECURITY NOTE: trust_remote_code=True executes Python shipped with the
# model repo — only keep it for repos you trust.
try:
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_ID,
        trust_remote_code=True,
        token=HF_TOKEN,
    )

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        trust_remote_code=True,
        device_map="auto",  # automatically places weights on GPU when available (needs `accelerate`)
        token=HF_TOKEN,
    )
except Exception as e:
    # Chain the original exception so its traceback survives (PEP 3134)
    # instead of being flattened into the message string only.
    raise RuntimeError(f"🚨 Failed to load model: {e}") from e
38
 
39
# -------------------------
# CREATE PIPELINE
# -------------------------
try:
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=300,
        do_sample=True,
        temperature=0.7,
    )
except Exception as e:
    # Chain the original exception so its traceback survives (PEP 3134).
    raise RuntimeError(f"🚨 Failed to initialize pipeline: {e}") from e
53
 
54
# -------------------------
# MAIN ASSISTANT FUNCTION
# -------------------------
def ai_assistant(command: str) -> str:
    """
    Interpret a natural-language command and return the model's reply.

    Builds a simple "User: ... / Assistant:" instruction prompt, runs the
    module-level text-generation pipeline, and strips the echoed prompt so
    only the assistant's answer is returned. Any failure is reported back
    as a user-visible error string (Gradio-friendly, never raises).
    """
    prompt = f"User: {command}\nAssistant:"

    try:
        generated = pipe(prompt)[0]["generated_text"]
        # Keep only the text after the final "Assistant:" marker; if the
        # marker is absent, fall back to the whole generation.
        _, marker, tail = generated.rpartition("Assistant:")
        return (tail if marker else generated).strip()
    except Exception as e:
        return f"⚠️ Error: {e}"
 
79
  fn=ai_assistant,
80
  inputs=gr.Textbox(lines=2, placeholder="e.g. Open Chrome or Take a screenshot"),
81
  outputs="text",
82
+ title="🧠 Qwen 2.5 AI Assistant",
83
+ description="Enter a command. The AI assistant will respond like a smart OS assistant.",
84
  allow_flagging="never"
85
  )
86