Rulga committed on
Commit
af6d411
·
1 Parent(s): 8a2ab5c

Remove XGLM 7.5B model details from app.py and settings.py for cleanup

Browse files
Files changed (2) hide show
  1. app.py +0 -22
  2. config/settings.py +3 -25
app.py CHANGED
@@ -102,29 +102,7 @@ MODEL_DETAILS = {
102
  ],
103
  "documentation": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2"
104
  },
105
- "xglm-7.5b": {
106
- "full_name": "Meta XGLM 7.5B",
107
- "capabilities": [
108
- "Specialized for multilingual generation",
109
- "Support for 30+ languages",
110
- "Strong cross-lingual transfer abilities",
111
- "Consistent performance across diverse languages"
112
- ],
113
- "limitations": [
114
- "Less instruction-tuned than dedicated chat models",
115
- "May require more specific prompting",
116
- "Not specifically optimized for legal domain",
117
- "Slightly larger model requiring more GPU memory"
118
- ],
119
- "use_cases": [
120
- "International legal assistance in native languages",
121
- "Complex multilingual documentation",
122
- "Serving clients from diverse linguistic backgrounds",
123
- "Translation and summarization of legal concepts across languages"
124
- ],
125
- "documentation": "https://huggingface.co/facebook/xglm-7.5B"
126
  }
127
- }
128
  # MODEL_DETAILS = {
129
  # "llama-7b": {
130
  # "full_name": "Meta Llama 2 7B Chat",
 
102
  ],
103
  "documentation": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2"
104
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  }
 
106
  # MODEL_DETAILS = {
107
  # "llama-7b": {
108
  # "full_name": "Meta Llama 2 7B Chat",
config/settings.py CHANGED
@@ -25,7 +25,7 @@ MODELS = {
25
  "id": "HuggingFaceH4/zephyr-7b-beta",
26
  "name": "Zephyr 7B",
27
  "description": "A state-of-the-art 7B parameter language model",
28
- "type": "base", # base/fine-tuned
29
  "parameters": {
30
  "max_length": 2048,
31
  "temperature": 0.7,
@@ -33,7 +33,7 @@ MODELS = {
33
  "repetition_penalty": 1.1,
34
  },
35
  "training": {
36
- "base_model_path": "HuggingFaceH4/zephyr-7b-beta", # Используем прямой путь к модели
37
  "fine_tuned_path": os.path.join(TRAINING_OUTPUT_DIR, "zephyr-7b-beta-tuned"),
38
  "lora_config": {
39
  "r": 16,
@@ -87,28 +87,6 @@ MODELS = {
87
  }
88
  }
89
  },
90
- "xglm-7.5b": {
91
- "id": "facebook/xglm-7.5B",
92
- "name": "XGLM 7.5B",
93
- "description": "Meta's multilingual model designed for cross-lingual generation",
94
- "type": "base",
95
- "parameters": {
96
- "max_length": 2048,
97
- "temperature": 0.7,
98
- "top_p": 0.9,
99
- "repetition_penalty": 1.1,
100
- },
101
- "training": {
102
- "base_model_path": "facebook/xglm-7.5B",
103
- "fine_tuned_path": os.path.join(TRAINING_OUTPUT_DIR, "xglm-7.5b-tuned"),
104
- "lora_config": {
105
- "r": 16,
106
- "lora_alpha": 32,
107
- "lora_dropout": 0.05,
108
- "target_modules": ["q_proj", "v_proj", "k_proj", "o_proj"]
109
- }
110
- }
111
- }
112
  }
113
 
114
  # Default model
@@ -119,4 +97,4 @@ ACTIVE_MODEL = MODELS[DEFAULT_MODEL]
119
  EMBEDDING_MODEL = "intfloat/multilingual-e5-large"
120
 
121
  # Request settings
122
- USER_AGENT = "Status-Law-Assistant/1.0"
 
25
  "id": "HuggingFaceH4/zephyr-7b-beta",
26
  "name": "Zephyr 7B",
27
  "description": "A state-of-the-art 7B parameter language model",
28
+ "type": "base",
29
  "parameters": {
30
  "max_length": 2048,
31
  "temperature": 0.7,
 
33
  "repetition_penalty": 1.1,
34
  },
35
  "training": {
36
+ "base_model_path": "HuggingFaceH4/zephyr-7b-beta",
37
  "fine_tuned_path": os.path.join(TRAINING_OUTPUT_DIR, "zephyr-7b-beta-tuned"),
38
  "lora_config": {
39
  "r": 16,
 
87
  }
88
  }
89
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  }
91
 
92
  # Default model
 
97
  EMBEDDING_MODEL = "intfloat/multilingual-e5-large"
98
 
99
  # Request settings
100
+ USER_AGENT = "Status-Law-Assistant/1.0"