V3G4v2 committed on
Commit
c5c8831
·
verified ·
1 Parent(s): efe4cd4

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +17 -0
  2. ai_core.py +60 -0
  3. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10-slim
2
+
3
+ WORKDIR /app
4
+ COPY . .
5
+
6
+ RUN apt-get update && apt-get install -y \
7
+ git \
8
+ gcc \
9
+ g++ \
10
+ && rm -rf /var/lib/apt/lists/*
11
+
12
+ RUN pip install --no-cache-dir -r requirements.txt
13
+
14
+ ENV PYTHONUNBUFFERED=1
15
+ ENV V3G4_MODE=production
16
+
17
+ CMD ["python3", "-m", "core.ai_core"]
ai_core.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import sys
3
+ import torch
4
+ from pathlib import Path
5
+ from dotenv import load_dotenv
6
+ from termcolor import colored
7
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
8
+
9
class V3G4Core:
    """Core runtime for the V3G4 assistant.

    Loads the identity configuration from JSON, validates it, builds the
    system prompt, initializes the language model, and attaches plugins.
    Fatal configuration problems are routed through :meth:`critical_error`,
    which terminates the process.
    """

    def __init__(self, config_path: str = "config/V3G4.json"):
        """Initialize the core.

        Args:
            config_path: Path to the JSON file whose top-level ``identity``
                section describes the assistant.
        """
        load_dotenv()  # pull secrets/settings from a local .env file
        self.config_path = Path(config_path)
        self.identity = self.load_identity()
        self.validate_identity()
        self.system_prompt = self.create_system_prompt()
        self.init_model()
        self.plugins = []  # placeholder; replaced by load_plugins()
        self.load_plugins()
        self.context_memory = []  # rolling conversation history

    def load_identity(self) -> dict:
        """Read and return the ``identity`` section of the config file.

        Any failure (missing file, bad JSON, missing section) is fatal.
        """
        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            if 'identity' not in data:
                # Fixed mis-encoded characters in the original message
                # ("inv�lido"/"secci�n" -> "inválido"/"sección").
                raise ValueError("Formato inválido: falta sección 'identity'")
            return data['identity']
        except Exception as e:
            self.critical_error(f"Error cargando identidad: {str(e)}")

    def validate_identity(self) -> None:
        """Check that every required identity field exists with the right type.

        The original upload built ``required_fields`` and left the check as a
        placeholder comment; this implements it. Failures are fatal.
        """
        required_fields = {
            'name': str,
            'creator': str,
            'laws': list,
            'core_principles': list,
            'symbolism': dict,
        }
        for field, expected_type in required_fields.items():
            if field not in self.identity:
                self.critical_error(f"Identidad incompleta: falta el campo '{field}'")
            elif not isinstance(self.identity[field], expected_type):
                self.critical_error(
                    f"Campo '{field}' debe ser de tipo {expected_type.__name__}"
                )

    def create_system_prompt(self) -> str:
        """Build the system prompt string from the loaded identity.

        This method was missing from the original upload even though
        ``__init__`` called it (guaranteed AttributeError); this minimal
        version serializes the identity section as readable JSON.
        """
        return json.dumps({'identity': self.identity}, ensure_ascii=False, indent=2)

    def init_model(self):
        """Select the compute device and load the tokenizer.

        NOTE(review): the original upload truncated this method after the
        tokenizer ("configuración de modelo como antes"); loading the model
        itself (AutoModelForCausalLM + BitsAndBytesConfig) still has to be
        added -- self.model is never assigned here.
        """
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.tokenizer = AutoTokenizer.from_pretrained(
            "core/models/DeepSeek-R1-14B",
            use_fast=True,
        )

    def generate_response(self, prompt):
        """Generate a model response for *prompt*.

        The original upload contained only a placeholder comment here, which
        is a SyntaxError in Python; fail loudly until the generation logic
        (with its safety checks) is implemented.
        """
        raise NotImplementedError("generate_response: lógica de generación pendiente")

    def load_plugins(self):
        """Instantiate and register the core plugins, keyed by role.

        NOTE(review): SecurityMonitor and MemoryManager are not defined or
        imported anywhere in this file -- confirm they come from another
        module before running.
        """
        plugins = {
            'security': SecurityMonitor(self),
            'memory': MemoryManager(self),
        }
        self.plugins = plugins

    def critical_error(self, message):
        """Print a fatal error in red to stderr and terminate the process.

        The original upload left only a placeholder comment here (a
        SyntaxError); this implements the obvious fatal-exit contract implied
        by the callers.
        """
        print(colored(f"[V3G4 ERROR] {message}", 'red'), file=sys.stderr)
        sys.exit(1)
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# Runtime dependencies for the V3G4 core (installed by the Dockerfile).
transformers>=4.35.0
torch>=2.1.0
accelerate>=0.24.0
bitsandbytes>=0.41.0
python-dotenv>=1.0.0
termcolor>=2.3.0
prompt-toolkit>=3.0.40
python-dateutil>=2.8.2
tqdm>=4.66.0