NithinAI12 committed on
Commit
d6a18fd
·
verified ·
1 Parent(s): 6e01896

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -19
app.py CHANGED
@@ -3,21 +3,20 @@ from huggingface_hub import InferenceClient
3
  import os
4
 
5
  # ==============================================================================
6
- # 🚀 NITHIN-1: SERVERLESS AGI (Runs on Cloud GPUs via API)
7
  # ==============================================================================
8
 
9
  # 1. SETUP CLIENTS
10
- # Manam Token ni 'Secrets' nundi automatic ga teesukuntam
11
- # Make sure you added HF_TOKEN in Settings!
12
  token = os.getenv("HF_TOKEN")
13
 
14
- # TEXT BRAIN: Using 'Zephyr-7b' (It's like Mistral but faster for chat)
 
15
  text_client = InferenceClient(
16
- "HuggingFaceH4/zephyr-7b-beta",
17
  token=token
18
  )
19
 
20
- # IMAGE EYES: Using 'Stable Diffusion XL' (Best Free Image Model)
21
  image_client = InferenceClient(
22
  "stabilityai/stable-diffusion-xl-base-1.0",
23
  token=token
@@ -25,33 +24,40 @@ image_client = InferenceClient(
25
 
26
  # 2. THE BRAIN LOGIC
27
  def nithin_agi(message, history):
28
- # A. IMAGE DETECTION (Simple Logic)
29
  triggers = ["draw", "generate", "image", "photo", "paint", "picture"]
30
  if any(word in message.lower() for word in triggers):
31
- return "🎨 I am generating your image... Check the 'Image Generator' tab!"
32
 
33
- # B. TEXT GENERATION (Streaming)
34
- # Idhi Google GPUs meedha run avuthundi, so CPU slow unna parvaledu.
35
  system_prompt = "You are Nithin-1, an advanced AI created by Nithin. Be helpful, smart, and concise."
36
 
37
- # Formatting prompt for Zephyr model
38
- prompt = f"<|system|>\n{system_prompt}</s>\n<|user|>\n{message}</s>\n<|assistant|>"
 
 
 
39
 
40
  partial_message = ""
41
  try:
42
- # Stream response (Letter by letter style)
43
- for token in text_client.text_generation(prompt, max_new_tokens=512, stream=True):
44
- partial_message += token
45
- yield partial_message
 
 
 
 
 
 
46
  except Exception as e:
47
- yield f"⚠️ Error: Please check your HF_TOKEN in settings. Details: {e}"
48
 
49
  # 3. THE IMAGE LOGIC
50
  def generate_image(prompt):
51
  if not token:
52
  return None
53
  try:
54
- # Calling API for Image
55
  image = image_client.text_to_image(prompt + ", high quality, 8k, masterpiece")
56
  return image
57
  except Exception as e:
@@ -59,7 +65,7 @@ def generate_image(prompt):
59
  return None
60
 
61
  # ==============================================================================
62
- # 🎨 THE UI (Professional Look)
63
  # ==============================================================================
64
 
65
  with gr.Blocks(theme="soft") as demo:
 
3
  import os
4
 
5
  # ==============================================================================
6
+ # 🚀 NITHIN-1: SERVERLESS AGI (Fixed & Updated)
7
  # ==============================================================================
8
 
9
  # 1. SETUP CLIENTS
 
 
10
  token = os.getenv("HF_TOKEN")
11
 
12
+ # TEXT BRAIN: Switched to Mistral-7B-Instruct-v0.3 (Latest & Best Free Model)
13
+ # This model supports the "Chat" format perfectly.
14
  text_client = InferenceClient(
15
+ "mistralai/Mistral-7B-Instruct-v0.3",
16
  token=token
17
  )
18
 
19
+ # IMAGE EYES: Stable Diffusion XL
20
  image_client = InferenceClient(
21
  "stabilityai/stable-diffusion-xl-base-1.0",
22
  token=token
 
24
 
25
  # 2. THE BRAIN LOGIC
26
  def nithin_agi(message, history):
27
+ # A. IMAGE DETECTION
28
  triggers = ["draw", "generate", "image", "photo", "paint", "picture"]
29
  if any(word in message.lower() for word in triggers):
30
+ return "🎨 I am generating your image... Please check the 'Image Engine' tab!"
31
 
32
+ # B. TEXT GENERATION (Conversational Mode)
 
33
  system_prompt = "You are Nithin-1, an advanced AI created by Nithin. Be helpful, smart, and concise."
34
 
35
+ # New Logic: Using 'chat_completion' instead of 'text_generation'
36
+ messages = [
37
+ {"role": "system", "content": system_prompt},
38
+ {"role": "user", "content": message}
39
+ ]
40
 
41
  partial_message = ""
42
  try:
43
+ # Streaming the response correctly
44
+ stream = text_client.chat_completion(messages, max_tokens=512, stream=True)
45
+
46
+ for chunk in stream:
47
+ # Extracting content from the new format
48
+ if chunk.choices and chunk.choices[0].delta.content:
49
+ content = chunk.choices[0].delta.content
50
+ partial_message += content
51
+ yield partial_message
52
+
53
  except Exception as e:
54
+ yield f"⚠️ Brain Error: {str(e)}\n\n(Tip: Check if your HF_TOKEN is valid in Settings)"
55
 
56
  # 3. THE IMAGE LOGIC
57
  def generate_image(prompt):
58
  if not token:
59
  return None
60
  try:
 
61
  image = image_client.text_to_image(prompt + ", high quality, 8k, masterpiece")
62
  return image
63
  except Exception as e:
 
65
  return None
66
 
67
  # ==============================================================================
68
+ # 🎨 THE UI
69
  # ==============================================================================
70
 
71
  with gr.Blocks(theme="soft") as demo: