AumCoreAI committed on
Commit
28bfeaa
·
verified ·
1 Parent(s): 93226a5

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +40 -34
main.py CHANGED
@@ -1,42 +1,48 @@
1
  import os
2
- import json
 
3
  from groq import Groq
4
 
5
class AICore:
    """Groq-backed chat assistant with a small JSON-file conversation memory."""

    def __init__(self, api_key):
        """Create the Groq client and load any previously saved conversation memory.

        Args:
            api_key: Groq API key used to authenticate chat requests.
        """
        self.client = Groq(api_key=api_key)
        # Vision-capable model (original note: best choice for vision use).
        self.model = "qwen-2-vl-7b-instruct"
        self.memory_file = "memory.json"
        self._load_memory()

    def _load_memory(self):
        """Load chat history from disk; start fresh when absent, corrupt, or unreadable."""
        self.memory = []
        if os.path.exists(self.memory_file):
            try:
                with open(self.memory_file, 'r') as f:
                    self.memory = json.load(f)
            except (json.JSONDecodeError, OSError):
                # A corrupt or unreadable memory file must not crash startup.
                self.memory = []

    def save_to_memory(self, user_input, ai_response):
        """Append one exchange and persist the most recent 10 exchanges.

        The in-memory list is trimmed to the same 10-entry window that is
        written to disk, so RAM state and the file never drift apart.
        """
        self.memory.append({"user": user_input, "bot": ai_response})
        self.memory = self.memory[-10:]  # keep only the last 10 chats
        with open(self.memory_file, 'w') as f:
            json.dump(self.memory, f)

    def get_response(self, text, image_path=None):
        """Send *text* to the model and return its reply.

        Args:
            text: User prompt.
            image_path: Reserved for future image support; currently unused.

        Returns:
            The model's reply string, or "Error: ..." when the API call fails.
        """
        try:
            # Simple text-only prompt; image handling can be expanded here later.
            messages = [{"role": "user", "content": text}]

            completion = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=0.7
            )
            response = completion.choices[0].message.content
            self.save_to_memory(text, response)
            return response
        except Exception as e:
            # Best-effort API: surface failures as a string, never raise.
            return f"Error: {str(e)}"
40
-
41
# Manual smoke test: running this module directly only confirms it loads.
if __name__ == "__main__":
    print("AICore is ready!")
 
 
 
 
 
 
 
 
1
  import os
2
+ import chromadb
3
+ from chromadb.utils import embedding_functions
4
  from groq import Groq
5
 
6
class AICore:
    """Groq chat core with persistent semantic memory stored in ChromaDB."""

    def __init__(self, api_key):
        """Create the Groq client and open (or create) the on-disk memory collection.

        Args:
            api_key: Groq API key used to authenticate chat requests.
        """
        self.client = Groq(api_key=api_key)
        # Persistent local vector store so memory survives process restarts.
        self.chroma_client = chromadb.PersistentClient(path="./chroma_db")
        self.embed_fn = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")
        self.collection = self.chroma_client.get_or_create_collection(name="aumcore_memory", embedding_function=self.embed_fn)

    def _build_messages(self, user_input, user_name, memory_context):
        """Assemble the chat payload: persona system prompt, one few-shot example, user turn."""
        # Strict Persona Enforcement (No basic replies allowed)
        system_prompt = f"""
        Role: Senior Software Architect (AumCore AI).
        User: {user_name}.
        Language: Strictly 60% English & 40% Hindi.
        Rule: If asking for code, provide a high-level Python script with try-except blocks.
        Style: Do NOT be generic. Be professional and helpful.

        Information from Memory: {memory_context}
        """
        return [
            {"role": "system", "content": system_prompt},
            # Few-shot pair anchoring the expected tone and code style.
            {"role": "user", "content": "colab me gpu check karne ka code do"},
            {"role": "assistant", "content": f"{user_name} bhai, Colab mein GPU visibility confirm karne ke liye ye standard script use karein.\n\n```python\nimport torch\ntry:\n status = torch.cuda.is_available()\n print(f'GPU Available: {{status}}')\n if status: print(f'Device: {{torch.cuda.get_device_name(0)}}')\nexcept Exception as e:\n print(f'Error identifying GPU: {{e}}')\n```"},
            {"role": "user", "content": user_input}
        ]

    def get_response(self, user_input, user_name="Sanjay"):
        """Answer *user_input* using retrieved memory, then store the new exchange.

        Args:
            user_input: The user's message.
            user_name: Name injected into the persona prompt (default "Sanjay").

        Returns:
            The model's reply string, or "Error: ..." when the API call fails.
        """
        # Purani yaadein fetch karna (Fact-finding only)
        results = self.collection.query(query_texts=[user_input], n_results=2)
        # BUG FIX: Chroma returns {'documents': [[]]} for an empty collection,
        # which is truthy — inspect the INNER list so a fresh DB really reads
        # "No past history." instead of an empty context string.
        docs = (results.get('documents') or [[]])[0]
        memory_context = "\n".join(docs) if docs else "No past history."

        messages = self._build_messages(user_input, user_name, memory_context)

        try:
            completion = self.client.chat.completions.create(
                model="llama-3.3-70b-versatile",
                messages=messages,
                temperature=0.1  # Low temperature for consistency
            )
            response = completion.choices[0].message.content
        except Exception as e:
            # Surface API failures as a string instead of crashing the caller
            # (matches the error-handling style of the previous revision).
            return f"Error: {str(e)}"

        # Saving new memory. 16 random bytes give a collision-safe id —
        # the previous 4 bytes (2^32 space) risked birthday collisions.
        self.collection.add(
            documents=[f"User: {user_input} | AI: {response[:100]}"],
            ids=[str(os.urandom(16).hex())]
        )
        return response