AumCoreAI committed on
Commit
485d3f9
·
verified ·
1 Parent(s): 43e96dd

Delete main.py

Browse files
Files changed (1) hide show
  1. main.py +0 -48
main.py DELETED
@@ -1,48 +0,0 @@
1
- import os
2
- import chromadb
3
- from chromadb.utils import embedding_functions
4
- from groq import Groq
5
-
6
class AICore:
    """Groq-backed conversational assistant with persistent vector memory.

    Each exchange is embedded (all-MiniLM-L6-v2) and stored in a local
    ChromaDB collection so that later queries can retrieve related past
    exchanges as extra context for the LLM.
    """

    def __init__(self, api_key, db_path="./chroma_db"):
        """Initialise the Groq client and the persistent memory store.

        Args:
            api_key: Groq API key.
            db_path: Directory for the ChromaDB persistent store
                (defaults to the original hard-coded "./chroma_db").
        """
        self.client = Groq(api_key=api_key)
        self.chroma_client = chromadb.PersistentClient(path=db_path)
        self.embed_fn = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")
        self.collection = self.chroma_client.get_or_create_collection(name="aumcore_memory", embedding_function=self.embed_fn)

    def get_response(self, user_input, user_name="Sanjay"):
        """Answer *user_input* using the persona prompt plus retrieved memory.

        Args:
            user_input: The user's message.
            user_name: Name injected into the system prompt.

        Returns:
            The assistant's reply text. The exchange (truncated reply)
            is also appended to the memory collection as a side effect.
        """
        # Retrieve up to two related past exchanges (fact-finding only).
        results = self.collection.query(query_texts=[user_input], n_results=2)
        # BUGFIX: an empty hit comes back as {'documents': [[]]}, which is
        # truthy — the inner list must be checked too, otherwise the
        # fallback text was unreachable and the context became "".
        docs = results.get('documents') or []
        if docs and docs[0]:
            memory_context = "\n".join(docs[0])
        else:
            memory_context = "No past history."

        # Strict persona enforcement (no generic replies allowed).
        system_prompt = f"""
        Role: Senior Software Architect (AumCore AI).
        User: {user_name}.
        Language: Strictly 60% English & 40% Hindi.
        Rule: If asking for code, provide a high-level Python script with try-except blocks.
        Style: Do NOT be generic. Be professional and helpful.

        Information from Memory: {memory_context}
        """

        # One-shot example pins the expected answer style (code + try/except).
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": "colab me gpu check karne ka code do"},
            {"role": "assistant", "content": f"{user_name} bhai, Colab mein GPU visibility confirm karne ke liye ye standard script use karein.\n\n```python\nimport torch\ntry:\n    status = torch.cuda.is_available()\n    print(f'GPU Available: {{status}}')\n    if status: print(f'Device: {{torch.cuda.get_device_name(0)}}')\nexcept Exception as e:\n    print(f'Error identifying GPU: {{e}}')\n```"},
            {"role": "user", "content": user_input}
        ]

        completion = self.client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=messages,
            temperature=0.1  # Low temperature for consistency
        )
        response = completion.choices[0].message.content

        # Persist this exchange so future queries can recall it.
        # Reply is truncated to 100 chars to keep memory entries small.
        self.collection.add(
            documents=[f"User: {user_input} | AI: {response[:100]}"],
            # 16 random bytes (was 4): 2^32 ids collide quickly; .hex() is
            # already a str, so the redundant str() wrapper was dropped.
            ids=[os.urandom(16).hex()]
        )
        return response