eshwar06 committed
Commit 084c28d · verified · 1 Parent(s): 6f18a74

Update LLM/__init__.py

Files changed (1):
  1. LLM/__init__.py +81 -105
LLM/__init__.py CHANGED
@@ -1,105 +1,81 @@
- from .Linly import Linly
- from .Qwen import Qwen
- from .Qwen2 import Qwen2
- try:
-     from .Gemini import Gemini
- except Exception as e:
-     print("Failed to load the Gemini model, probably because the google-generativeai package is missing; the Gemini model is not required, so this can be ignored")
- from .ChatGPT import ChatGPT
- from .ChatGLM import ChatGLM
- from .Llama2Chinese import Llama2Chinese
- from .GPT4Free import GPT4FREE
- from .QAnything import QAnything
-
- def test_Linly(question="How should I cope with stress?", mode='offline', model_path="Linly-AI/Chinese-LLaMA-2-7B-hf"):
-     llm = Linly(mode, model_path)
-     answer = llm.generate(question)
-     print(answer)
-
- def test_Qwen(question="How should I cope with stress?", mode='offline', model_path="Qwen/Qwen-1_8B-Chat"):
-     llm = Qwen(mode, model_path)
-     answer = llm.generate(question)
-     print(answer)
-
- def test_Gemini(question="How should I cope with stress?", model_path='gemini-pro', api_key=None, proxy_url=None):
-     llm = Gemini(model_path, api_key, proxy_url)
-     answer = llm.generate(question)
-     print(answer)
-
- def test_ChatGPT(question="How should I cope with stress?", model_path='gpt-3.5-turbo', api_key=None, proxy_url=None):
-     llm = ChatGPT(model_path, api_key, proxy_url)
-     answer = llm.generate(question)
-     print(answer)
-
- class LLM:
-     def __init__(self, mode='offline'):
-         self.mode = mode
-
-     def init_model(self, model_name, model_path='', api_key=None, proxy_url=None, prefix_prompt='''Please answer the following question in fewer than 25 words\n\n'''):
-         if model_name not in ['Linly', 'Qwen', 'Qwen2', 'Gemini', 'ChatGLM', 'ChatGPT', 'Llama2Chinese', 'GPT4Free', 'QAnything', '直接回复 Direct Reply']:
-             raise ValueError("model_name must be one of ['Linly', 'Qwen', 'Qwen2', 'Gemini', 'ChatGLM', 'ChatGPT', 'Llama2Chinese', 'GPT4Free', 'QAnything', '直接回复 Direct Reply']")
-         if model_name == 'Linly':
-             llm = Linly(self.mode, model_path)
-         elif model_name == 'Qwen':
-             llm = Qwen(self.mode, model_path)
-         elif model_name == 'Qwen2':
-             llm = Qwen2(self.mode, model_path)
-         elif model_name == 'Gemini':
-             llm = Gemini(model_path, api_key, proxy_url)
-         elif model_name == 'ChatGLM':
-             llm = ChatGLM(self.mode, model_path)
-         elif model_name == 'ChatGPT':
-             llm = ChatGPT(model_path, api_key, proxy_url)
-         elif model_name == 'Llama2Chinese':
-             llm = Llama2Chinese(model_path, self.mode)
-         elif model_name == 'GPT4Free':
-             llm = GPT4FREE()
-         elif model_name == 'QAnything':
-             llm = QAnything()
-         elif model_name == '直接回复 Direct Reply':
-             llm = self
-         llm.prefix_prompt = prefix_prompt
-         return llm
-
-     def chat(self, system_prompt, message, history):
-         response = self.generate(message, system_prompt)
-         history.append((message, response))
-         return response, history
-
-     def generate(self, question, system_prompt='the system prompt has no effect'):
-         return question
-
-     def test_Linly(self, question="How should I cope with stress?", model_path="Linly-AI/Chinese-LLaMA-2-7B-hf"):
-         llm = Linly(self.mode, model_path)
-         answer = llm.generate(question)
-         print(answer)
-
-     def test_Qwen(self, question="How should I cope with stress?", model_path="Qwen/Qwen-1_8B-Chat"):
-         llm = Qwen(self.mode, model_path)
-         answer = llm.generate(question)
-         print(answer)
-
-     def test_Gemini(self, question="How should I cope with stress?", model_path='gemini-pro', api_key=None, proxy_url=None):
-         llm = Gemini(model_path, api_key, proxy_url)
-         answer = llm.generate(question)
-         print(answer)
-
-     def test_ChatGPT(self, question="How should I cope with stress?", model_path='gpt-3.5-turbo', api_key=None, proxy_url=None):
-         llm = ChatGPT(model_path, api_key, proxy_url)
-         answer = llm.generate(question)
-         print(answer)
-
-     def test_ChatGLM(self, question="How should I cope with stress?", model_path="THUDM/chatglm-6b"):
-         llm = ChatGLM(mode=self.mode, model_name_or_path=model_path)
-         answer = llm.generate(question)
-         print(answer)
-
- if __name__ == '__main__':
-     llm_class = LLM(mode='offline')
-     llm_class.init_model('直接回复 Direct Reply')
-     question = 'How should I cope with stress?'
-     answer = llm_class.generate(question)
-     # llm.test_Qwen()
-     # llm.test_Linly()
-     # llm.test_Gemini()
-     # llm.test_ChatGLM()
 
+ import os
+
+ # --- OPTIONAL IMPORTS (Handle Missing Modules Gracefully) ---
+ try:
+     from .Linly import Linly
+     linly_available = True
+ except ImportError:
+     Linly = None
+     linly_available = False
+     print("⚠️ Linly module not available")
+
+ try:
+     from .Gemini import Gemini
+     gemini_available = True
+ except ImportError:
+     Gemini = None
+     gemini_available = False
+     print("⚠️ Gemini (Standard) module not available")
+
+ # --- GEMINI LIVE EXPORT ---
+ # We export this so it can be imported via 'from LLM import GeminiLiveClient'
+ try:
+     from .GeminiLive import GeminiLiveClient
+ except ImportError:
+     print("⚠️ GeminiLive module not found in LLM package")
+
+ # --- MINIMAL LLM FACTORY CLASS ---
+ class LLM:
+     def __init__(self, mode='offline'):
+         self.mode = mode
+         self.model = None
+
+     def init_model(self, model_name, model_path='', api_key=None, proxy_url=None, prefix_prompt='Please answer in less than 25 words.\n\n'):
+         """
+         Initialize the selected LLM.
+         Supports: Linly, Gemini (Standard), and Direct Reply.
+         """
+         if model_name == 'Linly' and linly_available:
+             self.model = Linly(self.mode, model_path)
+
+         elif model_name == 'Gemini' and gemini_available:
+             self.model = Gemini(model_path, api_key, proxy_url)
+
+         elif model_name == 'Direct Reply' or model_name == '直接回复 Direct Reply':
+             # Bypass model, just echo/pass-through
+             self.model = self
+
+         else:
+             print(f"⚠️ Model '{model_name}' not found or dependencies missing. Defaulting to Direct Reply.")
+             self.model = self
+
+         # Set prompt prefix if the underlying model supports it
+         if hasattr(self.model, 'prefix_prompt'):
+             self.model.prefix_prompt = prefix_prompt
+
+         return self.model
+
+     def chat(self, system_prompt, message, history):
+         """
+         Standard Chat Interface
+         """
+         if self.model and self.model != self:
+             # Delegate to loaded model (Linly/Gemini)
+             return self.model.chat(system_prompt, message, history)
+         else:
+             # Direct Reply Fallback
+             response = self.generate(message, system_prompt)
+             history.append((message, response))
+             return response, history
+
+     def generate(self, question, system_prompt=''):
+         """
+         Direct generation (Non-Chat)
+         """
+         # If we are in "Direct Reply" mode (self.model == self), just return the question/echo
+         return question
+
+     def clear_history(self):
+         if self.model and self.model != self:
+             if hasattr(self.model, 'clear_history'):
+                 self.model.clear_history()
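A minimal usage sketch of the refactored factory (an illustration, not part of the commit; it assumes the package above is importable as LLM and exercises only the dependency-free Direct Reply path):

# Hypothetical smoke test for the new LLM/__init__.py shown above.
from LLM import LLM

llm = LLM(mode='offline')
llm.init_model('Direct Reply')  # no optional deps needed; falls through to echo mode
response, history = llm.chat(system_prompt='', message='How should I cope with stress?', history=[])
print(response)      # Direct Reply simply echoes the message back
llm.clear_history()  # no-op in Direct Reply mode

# The commit also re-exports the live client; guard the import the same way the package does:
try:
    from LLM import GeminiLiveClient  # only defined if LLM/GeminiLive.py imported cleanly
except ImportError:
    GeminiLiveClient = None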