Pengfa Li committed on
Commit
de2fe68
·
verified ·
1 Parent(s): 86d8c26

Upload folder using huggingface_hub

Browse files
Files changed (6) hide show
  1. Gradio_en.py +613 -0
  2. LLM.py +100 -0
  3. LLM_APIs.txt +9 -0
  4. README.md +3 -9
  5. data/text_retrieval_results.json +0 -0
  6. prompt_generate.py +100 -0
Gradio_en.py ADDED
@@ -0,0 +1,613 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ import gradio as gr
3
+ import pandas as pd
4
+ import json
5
+ import random
6
+ from LLM import zero_shot
7
+ from prompt_generate import generate_prompt_with_examples as generate_prompt
8
+ from prompt_generate import generate_prompt_with_best_matches as generate_prompt_b
9
+
10
def get_model_options():
    """Return the list of supported model-series identifiers.

    These keys select both the UI dropdown entries and the API route
    used by ``zero_shot`` in LLM.py.
    """
    series = ('gpt', 'llama', 'qwen', 'deepSeek', 'gemini', 'claude')
    return list(series)
13
+
14
def get_common_model_names(model_series):
    """Return the common model-name options for *model_series*.

    An unknown series yields an empty list so the dropdown simply
    shows no preset choices.
    """
    catalog = {
        'gpt': ['gpt-3.5-turbo', 'gpt-4o'],
        'llama': ['meta-llama/Meta-Llama-3.1-405B-Instruct'],
        'qwen': ['Qwen/Qwen2.5-72B-Instruct'],
        'deepSeek': ['deepseek-ai/DeepSeek-V3', 'deepseek-ai/DeepSeek-R1'],
        'gemini': ['gemini-1.5-pro-002'],
        'claude': ['claude-3-5-haiku-20241022'],
    }
    return catalog.get(model_series, [])
25
+
26
def get_prompt_templates():
    """Return the predefined system-prompt templates for triple extraction.

    Keys are the names shown in the template dropdown; "Custom" maps to an
    empty string so the user can type their own prompt.
    """
    zero_shot_template = """You are a professional and experienced expert in engineering geology. Your task is to extract "entity-relation-entity" triples from the given input text. There are 24 types of relations: "outcrop at", "located in", "conformable contact", "unconformable contact", "paraconformable contact", "fault contact", "distribution pattern", "tectonic position", "stratigraphic division", "exposed strata", "lithology", "thickness", "area", "coordinates", "length", "contains", "age", "administrative division", "development", "paleontology", "elevation", "belongs to", "engulf", "intrude". Please follow these specifications for extraction:
1. Output format:
Strictly follow JSON array format, no additional text, each element contains:
[
{
"entity1": "Entity1",
"relation": "Relation",
"entity2": "Entity2"
}
]
2. Complex relationship handling:
- If the same entity participates in multiple relationships, list different triples separately"""

    knowledge_injected_template = """You are a professional and experienced expert in engineering geology. Your task is to extract "entity-relation-entity" triples from the given input text. There are 24 types of relations: "outcrop at", "located in", "conformable contact", "unconformable contact", "paraconformable contact", "fault contact", "distribution pattern", "tectonic position", "stratigraphic division", "exposed strata", "lithology", "thickness", "area", "coordinates", "length", "contains", "age", "administrative division", "development", "paleontology", "elevation", "belongs to", "engulf", "intrude". Please follow these specifications for extraction:
1. Output format:
Strictly follow JSON array format, no additional text, each element contains:
[
{
"entity1": "Entity1",
"relation": "Relation",
"entity2": "Entity2"
}
]
2. Complex relationship handling:
- If the same entity participates in multiple relationships, list different triples separately
3. Relationship explanations:
Outcrop at: Refers to rocks or strata exposed at the surface or near-surface, not covered or buried. Example: (Late Ordovician-Silurian intrusive rocks, outcrop at, southern investigation area).

Located in: Establishes the subordinate relationship of geological units within a larger spatial framework (administrative region/tectonic unit). Example: (Kumuqi Silurian basaltic basic rocks, located in, central-western investigation area)

Conformable contact: Indicates contact relationships formed by continuous deposition of upper and lower strata, reflecting gradational lithological characteristics without significant depositional hiatus. Example: (Solake Formation, conformable contact, Middle Ordovician Lin Formation).

Unconformable contact: Describes stratigraphic contact interfaces with depositional gaps, including contact features with angular differences or lithological abrupt changes. Example: (Tongziyan Formation, unconformable contact, Maokou Formation).

Paraconformable contact: Specifically refers to parallel unconformity types with consistent attitudes, emphasizing depositional sequence interruption but without structural deformation. Example: (Solake Formation, paraconformable contact, Middle Ordovician Lin Formation).

Fault contact: Two strata are separated by fault zones or fault planes, often accompanied by dynamic crushing and other structural phenomena. Example: (Solake Formation, fault contact, Upper Ordovician Lapai Spring Formation).

Distribution pattern: Depicts spatial distribution characteristics of geological units, including geometric morphology and extension direction combinations. Example: (Carboniferous, distribution pattern, banded).

Tectonic position: Locates geological units' attribution in plate tectonic framework, associated with orogenic belts or tectonic unit divisions. Example: (Carboniferous, tectonic position, northern margin of Gondwana tectonic belt).

Stratigraphic division: Characterizes hierarchical attribution and zoning attributes of stratigraphic units in regional stratigraphic division systems. Example: (Carboniferous, stratigraphic division, Gondwana).

Exposed strata: Specifically refers to actually exposed stratigraphic entities in a region, emphasizing observable surface geological units. Example: (Hongliugou gold-copper mining area, exposed strata, Nanhua-Lower Ordovician Hongliugou Group).

Lithology: Defines material composition and structural characteristics of rocks, including hierarchical descriptive elements of composite lithology. Example: (Late Ordovician-Silurian syenite, lithology, altered syenite).

Thickness: Quantifies vertical dimensions of strata/rock bodies, including dimensional expressions with absolute values and relative descriptions. Example: (syenite, thickness, 35.60 m).

Area: Characterizes horizontal distribution range of geological units, presented in standardized form combining numerical values and units. Example: (intrusive rocks, outcrop area, 54 m2)

Coordinates: Specifically refers to geographical spatial positioning data recording geological feature points. Example: (Solake copper-gold mine site, coordinates, 90°11′47″E).

Length: Describes spatial extension dimensions of linear geological bodies. Example: Triple (Shibien fault zone, length, 20m) can be extracted.

Contains: Indicates compositional inclusion relationships of main materials, specifically referring to mineral composition or fossil occurrence states, different from everyday meaning. Example: (medium gray-black massive chert, contains, chert bands).

Age: Establishes correspondence between geological units and standard geological chronological systems. Example: (Hongliugou gold-copper mining area, age, Early-Middle Permian).

Administrative division: Defines subordinate hierarchy and territorial attribution of geological entities in administrative management systems. Example: (investigation area, administrative division, Chayang County).

Development: Describes manifestation degree and formation state intensity of geological structures or depositional features. Example: (Lanhuaweng Formation, development, horizontal bedding).

Paleontology: Records fossil biological information occurring in strata, requiring complete Latin scientific names and classification features. Example: (strata, paleontology, Lumu et al).

Elevation: Quantifies elevation data of geological feature points relative to sea level, retaining measurement reference identification. Example: (Solake copper-gold mine site, elevation, 2800m).

Belongs to: Establishes type attribution of geological units in classification systems. Example: (mining area, belongs to, polymetallic mineralization subarea).

Engulf: Characterizes spatial replacement processes of intrusive bodies on country rocks, reflecting transformation effects of magmatic activities. Example: (Nintendo Rock Formation, engulf, Jurassic granite).

Intrude: Describes geological processes of magmatic rock bodies penetrating country rocks, including accompanying phenomena such as contact metamorphism. Example: (Gaozhou Shell Stone Formation, intrude, gneissic granite).

4. Other key points:
All triple relationships must be one of the above 24 types
Relationship entities cannot be verbs, prepositions, or other meaningless words. Descriptions of rocks, strata, and other entities should be as complete as possible according to the original text"""

    return {
        "Custom": "",
        "Zero-shot Basic Extraction": zero_shot_template,
        "knowledge-injected Enhanced Extraction": knowledge_injected_template,
    }
109
+
110
def get_qa_prompt_templates():
    """Return the predefined system-prompt templates for the Q&A module.

    "Custom" maps to an empty string; the COT variants additionally ask
    the model for its reasoning.
    """
    return {
        "Custom": "",
        "Zero-shot True/False": "Please judge true or false based on the given text.",
        "Zero-shot Q&A": "Please answer the question based on the given text.",
        "COT True/False": "Please first judge true or false, and provide your reasoning basis.",
        "COT Q&A": "Please first answer the question, and provide your reasoning basis.",
    }
120
+
121
# Global variables to store training data.
# Lazily populated caches: load_train_data() fills all three on first
# successful use; None means "not loaded yet (or load failed)".
_train_data = None    # DataFrame read from ./data/train_triples.json
_text_series = None   # the DataFrame's 'text' column
_label_series = None  # the DataFrame's 'triple_list' column
125
+
126
def load_train_data():
    """Lazily load the triple-extraction training data.

    On the first successful call this populates the module-level caches
    ``_train_data``, ``_text_series`` and ``_label_series``; subsequent
    calls return immediately.

    Returns:
        bool: True when the training data is available, False when
        loading failed (the error is printed, not raised).
    """
    global _train_data, _text_series, _label_series
    if _train_data is not None:
        return True
    try:
        data = pd.read_json('./data/train_triples.json')
        texts = data['text']
        labels = data['triple_list']
    except Exception as e:  # broad by design: any load failure is best-effort
        print(f"Failed to load training data: {e}")
        return False
    # Commit the globals only after every read succeeded.  The original
    # code assigned _train_data before the column lookups, so a missing
    # column left the cache half-built and every later call returned
    # True with _text_series/_label_series still None.
    _train_data, _text_series, _label_series = data, texts, labels
    return True
138
+
139
def generate_random_context_prompt(user_text, num_examples):
    """Build a few-shot prompt from randomly sampled training examples.

    Returns a plain error message string when the training data cannot
    be loaded or example generation fails.
    """
    if not load_train_data():
        return "Unable to load training data"
    try:
        examples = generate_prompt(_text_series, _label_series, num_examples)
    except Exception as e:
        return f"Failed to generate random context prompt: {e}"
    return (
        "Here are geological description text and triple extraction examples:\n\n"
        f"{examples}\nPlease extract triples based on the examples:\n{user_text}"
    )
149
+
150
def generate_best_match_context_prompt(user_text, num_examples):
    """Build a few-shot prompt from the training examples most similar to *user_text*.

    Falls back to a zero-shot framing when no matching examples are
    found, and returns a plain error message string on failure.
    """
    if not load_train_data():
        return "Unable to load training data"
    try:
        examples = generate_prompt_b(_text_series, _label_series, user_text, num_examples)
        if not examples.strip():
            return f"No matching examples found, performing zero-shot extraction:\n{user_text}"
        return (
            "Here are geological description text and triple extraction examples:\n\n"
            f"{examples}\n\nPlease extract triples based on the examples:\n{user_text}"
        )
    except Exception as e:
        return f"Failed to generate best match context prompt: {e}"
163
+
164
def update_model_names(model_series):
    """Refresh the model-name dropdown when the model series changes."""
    choices = get_common_model_names(model_series)
    default = choices[0] if choices else ""
    return gr.Dropdown(choices=choices, value=default, label="Model Name", allow_custom_value=True)
168
+
169
def update_prompt_content(template_name):
    """Swap in the selected extraction prompt template's text."""
    text = get_prompt_templates().get(template_name, "")
    return gr.Textbox(value=text, label="Prompt Content", lines=15, max_lines=25)
174
+
175
def update_qa_prompt_content(template_name):
    """Swap in the selected Q&A prompt template's text."""
    text = get_qa_prompt_templates().get(template_name, "")
    return gr.Textbox(value=text, label="QA Prompt Content", lines=3, max_lines=10)
180
+
181
def call_llm_model(model_series, model_name, prompt_content, user_content, context_type, num_examples):
    """LLM wrapper for the triple-extraction tab.

    Validates the inputs, assembles the final prompt according to the
    chosen context strategy, invokes zero_shot(), and normalizes the
    response to a plain string.  Any exception is returned as an error
    message rather than raised, so the UI never crashes.
    """
    try:
        if not model_series or not model_name:
            return "Please select model series and model name"
        if not user_content:
            return "Please input text content to process"

        # Choose the message body for the selected context strategy;
        # "No Context" and any unrecognized type use the raw user text.
        if context_type == "Random Context":
            body = generate_random_context_prompt(user_content, num_examples)
        elif context_type == "Best Match Context":
            body = generate_best_match_context_prompt(user_content, num_examples)
        else:
            body = user_content

        # Prepend the (optional) system prompt.
        system_part = prompt_content.strip()
        full_content = f"{system_part}\n\n{body}" if system_part else body

        response = zero_shot(model_series, model_name, full_content)

        # zero_shot may return a message object, a dict, or a string
        # depending on the backend — normalize all of them.
        if hasattr(response, 'content'):
            return response.content
        if isinstance(response, dict) and 'content' in response:
            return response['content']
        if isinstance(response, str):
            return response
        return str(response)
    except Exception as e:
        return f"Error calling model: {str(e)}"
228
+
229
def call_qa_model(model_series, model_name, qa_prompt_content, geological_text, question_or_statement, qa_type):
    """LLM wrapper for the Q&A tab.

    Validates the inputs, assembles the prompt for either the
    "True/False" or "Q&A" task, invokes zero_shot(), and normalizes the
    response to a plain string.  Any exception is returned as an error
    message rather than raised.
    """
    try:
        if not model_series or not model_name:
            return "Please select model series and model name"
        if not geological_text:
            return "Please input geological text"

        judging = qa_type == "True/False"
        if not question_or_statement:
            return ("Please input factual statement to judge" if judging
                    else "Please input question to answer")

        # Both task types share the same layout; only the second
        # section's heading differs.
        section = "Statement to judge" if judging else "Question"
        full_content = f"Geological text:\n{geological_text}\n\n{section}:\n{question_or_statement}"
        prefix = qa_prompt_content.strip()
        if prefix:
            full_content = f"{prefix}\n\n{full_content}"

        response = zero_shot(model_series, model_name, full_content)

        # Normalize message-object / dict / string return shapes.
        if hasattr(response, 'content'):
            return response.content
        if isinstance(response, dict) and 'content' in response:
            return response['content']
        if isinstance(response, str):
            return response
        return str(response)
    except Exception as e:
        return f"Error calling model: {str(e)}"
270
+
271
def create_interface():
    """Create the Gradio interface.

    Builds a two-tab Blocks app — triple extraction and intelligent Q&A —
    wires all dropdown/submit/clear events, and returns the ``demo``
    object for ``launch()``.  This function only constructs UI; all model
    calls go through call_llm_model()/call_qa_model().
    """

    with gr.Blocks(title="GeoLLM Model Interface", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🚀 GeoLLM Geological Intelligence Platform")
        gr.Markdown("Professional geological text analysis tool integrating triple extraction and intelligent Q&A functions")

        # Add tabs
        with gr.Tabs():
            # Triple extraction module
            with gr.TabItem("🔗 Triple Extraction", elem_id="triple_extraction"):
                with gr.Row():
                    with gr.Column(scale=1):
                        # Model selection area
                        gr.Markdown("## 📋 Model Configuration")
                        model_series = gr.Dropdown(
                            choices=get_model_options(),
                            value="gpt",
                            label="Model Series",
                            info="Select the model series to use"
                        )

                        model_name = gr.Dropdown(
                            choices=get_common_model_names("gpt"),
                            value="gpt-3.5-turbo",
                            label="Model Name",
                            info="Select specific model name, or input manually",
                            allow_custom_value=True
                        )

                        # Custom model name input box
                        custom_model_name = gr.Textbox(
                            label="Custom Model Name (Optional)",
                            placeholder="If your desired model is not in the options above, please input here",
                            info="Input here will override the selection above"
                        )

                        # Prompt template selection
                        gr.Markdown("## 📝 Prompt Template")
                        prompt_template = gr.Dropdown(
                            choices=list(get_prompt_templates().keys()),
                            value="Custom",
                            label="Select Prompt Template",
                            info="Select predefined prompt template or customize"
                        )

                        # Context type selection
                        gr.Markdown("## 🎯 Context Configuration")
                        context_type = gr.Dropdown(
                            choices=["No Context", "Random Context", "Best Match Context"],
                            value="No Context",
                            label="Context Type",
                            info="Choose whether to use context examples"
                        )

                        num_examples = gr.Slider(
                            minimum=1,
                            maximum=3,
                            value=2,
                            step=1,
                            label="Number of Examples",
                            info="Select the number of context examples (1-3)"
                        )

                    with gr.Column(scale=2):
                        # Prompt content area
                        gr.Markdown("## 🎯 Prompt Content")
                        prompt_content = gr.Textbox(
                            label="Prompt Content",
                            placeholder="Select template or customize your prompt...",
                            lines=15,
                            max_lines=25,
                            info="Will be sent to the model as system prompt"
                        )

                        # User input area
                        gr.Markdown("## 💬 Geological Text Input")
                        user_content = gr.Textbox(
                            label="Geological Text to Process",
                            placeholder="Please input geological description text for triple extraction...",
                            lines=6,
                            max_lines=10
                        )

                        # Button and output area
                        with gr.Row():
                            clear_btn = gr.Button("🗑️ Clear", variant="secondary")
                            submit_btn = gr.Button("🚀 Extract Triples", variant="primary")

                        # Output area
                        gr.Markdown("## 📤 Extraction Results")
                        output = gr.Textbox(
                            label="Triple Extraction Results",
                            lines=12,
                            max_lines=20,
                            interactive=False
                        )

                # Example area
                gr.Markdown("## 💡 Usage Examples")
                gr.Examples(
                    examples=[
                        ["gpt", "gpt-3.5-turbo", "No Context", 2, "The Noriba Gari Bao Formation originally refers to gray-green thick-bedded medium- to fine-grained lithic feldspar sandstone, feldspar quartz sandstone, feldspar sandstone occasionally interbedded with siltstone, clay rock and micritic limestone, only bivalve fossils are seen, and continuous deposition with the overlying Ninety Road Class Formation."],
                        ["gemini", "gemini-1.5-pro-002", "Random Context", 3, "The Quemo Cuo Formation has only a small outcrop in the Sewang Yongqu area in the southwest corner of the map sheet within the survey area, with an area of less than 10m2 and a thickness greater than 29.25m."],
                        ["claude", "claude-3-5-haiku-20241022", "Best Match Context", 2, "Hecosmilia sp. scabbard coral was collected from limestone; Complexastraea sp. and Radulopccten sp. scraping sea fan; Oscillopha sp., dated to the Middle Jurassic."],
                        ["deepSeek", "deepseek-ai/DeepSeek-V3", "Best Match Context", 3, "Late Triassic granite is mainly distributed in the Ladi Gongma Mianche Ri Ahri Qu area of the survey area. Regionally controlled by NW-SE trending regional faults within the structural melange zone, it is distributed in long strips. The intrusive bodies have good gregariousness and excellent zonal extensibility, with 8 exposed intrusive bodies covering an area of about 227m2."],
                    ],
                    inputs=[model_series, model_name, context_type, num_examples, user_content]
                )

                # Event handling
                def submit_request(series, name, custom_name, template, prompt, content, ctx_type, num_ex):
                    # Use custom model name if provided; it overrides the dropdown.
                    final_model_name = custom_name.strip() if custom_name.strip() else name
                    return call_llm_model(series, final_model_name, prompt, content, ctx_type, num_ex)

                # Update model name options when the series changes
                model_series.change(
                    fn=update_model_names,
                    inputs=[model_series],
                    outputs=[model_name]
                )

                # Update prompt content when the template changes
                prompt_template.change(
                    fn=update_prompt_content,
                    inputs=[prompt_template],
                    outputs=[prompt_content]
                )

                # Submit button event
                submit_btn.click(
                    fn=submit_request,
                    inputs=[model_series, model_name, custom_model_name, prompt_template, prompt_content, user_content, context_type, num_examples],
                    outputs=[output]
                )

                # Clear button event (clears only input text and output)
                clear_btn.click(
                    fn=lambda: ("", ""),
                    outputs=[user_content, output]
                )

                # Enter key submission (same handler as the submit button)
                user_content.submit(
                    fn=submit_request,
                    inputs=[model_series, model_name, custom_model_name, prompt_template, prompt_content, user_content, context_type, num_examples],
                    outputs=[output]
                )

            # QA module
            with gr.TabItem("❓ Intelligent Q&A", elem_id="qa_module"):
                with gr.Row():
                    with gr.Column(scale=1):
                        # Model selection area
                        gr.Markdown("## 📋 Model Configuration")
                        qa_model_series = gr.Dropdown(
                            choices=get_model_options(),
                            value="gpt",
                            label="Model Series",
                            info="Select the model series to use"
                        )

                        qa_model_name = gr.Dropdown(
                            choices=get_common_model_names("gpt"),
                            value="gpt-3.5-turbo",
                            label="Model Name",
                            info="Select specific model name, or input manually",
                            allow_custom_value=True
                        )

                        # Custom model name input box
                        qa_custom_model_name = gr.Textbox(
                            label="Custom Model Name (Optional)",
                            placeholder="If your desired model is not in the options above, please input here",
                            info="Input here will override the selection above"
                        )

                        # QA type selection
                        gr.Markdown("## 🎯 Q&A Type")
                        qa_type = gr.Dropdown(
                            choices=["True/False", "Q&A"],
                            value="True/False",
                            label="Task Type",
                            info="Choose between judging true/false or answering questions"
                        )

                        # QA Prompt template selection
                        gr.Markdown("## 📝 Prompt Template")
                        qa_prompt_template = gr.Dropdown(
                            choices=list(get_qa_prompt_templates().keys()),
                            value="Zero-shot True/False",
                            label="Select QA Prompt Template",
                            info="Select predefined prompt template or customize"
                        )

                    with gr.Column(scale=2):
                        # QA Prompt content area
                        gr.Markdown("## 🎯 Prompt Content")
                        qa_prompt_content = gr.Textbox(
                            label="QA Prompt Content",
                            value="Please judge true or false based on the given text.",
                            placeholder="Select template or customize your prompt...",
                            lines=3,
                            max_lines=10,
                            info="Will be sent to the model as system prompt"
                        )

                        # Geological text input area
                        gr.Markdown("## 📄 Geological Text")
                        geological_text = gr.Textbox(
                            label="Geological Background Text",
                            placeholder="Please input geological description text as background...",
                            lines=8,
                            max_lines=15,
                            info="Provides contextual information for answering questions or judging facts"
                        )

                        # Question or statement input area
                        gr.Markdown("## ❓ Question/Statement")
                        question_or_statement = gr.Textbox(
                            label="Question or Statement",
                            placeholder="Please input question to answer or statement to judge...",
                            lines=3,
                            max_lines=8,
                            info="Input corresponding content based on task type"
                        )

                        # Button and output area
                        with gr.Row():
                            qa_clear_btn = gr.Button("🗑️ Clear", variant="secondary")
                            qa_submit_btn = gr.Button("🤖 Start Q&A", variant="primary")

                        # Output area
                        gr.Markdown("## 📤 Q&A Results")
                        qa_output = gr.Textbox(
                            label="Model Response",
                            lines=10,
                            max_lines=20,
                            interactive=False
                        )

                # Example area
                gr.Markdown("## 💡 Usage Examples")

                # True/False examples
                with gr.Accordion("True/False Examples", open=False):
                    gr.Examples(
                        examples=[
                            ["gpt", "gpt-3.5-turbo", "True/False", "Sudden geological disasters in Huoshan County are mainly collapses, landslides, and debris flows. A total of 190 sudden geological disaster points (including hidden danger points) have been identified, including 74 collapses, 96 landslides, 14 debris flows, and 6 unstable slopes. There are 58 newly discovered geological disaster points, accounting for 30.5% of the total. Among the 190 collapses, landslides, debris flows and other sudden geological disasters in Huoshan County, most are caused by human factors. There are 163 geological disasters caused by human factors, accounting for 85.8%; there are 27 disasters formed by natural factors, accounting for 14.2%.", "In the sudden geological disasters in Huoshan County, the number of landslides exceeds the number of collapses."],
                            ["deepSeek", "deepseek-ai/DeepSeek-V3", "True/False", "Sudden geological disasters in Huoshan County are mainly collapses, landslides, and debris flows. A total of 190 sudden geological disaster points (including hidden danger points) have been identified, including 74 collapses, 96 landslides, 14 debris flows, and 6 unstable slopes.", "The total number of geological disaster points in Huoshan County exceeds 200."],
                        ],
                        inputs=[qa_model_series, qa_model_name, qa_type, geological_text, question_or_statement]
                    )

                # Q&A examples
                with gr.Accordion("Q&A Examples", open=False):
                    gr.Examples(
                        examples=[
                            ["gpt", "gpt-3.5-turbo", "Q&A", "Sudden geological disasters in Huoshan County are mainly collapses, landslides, and debris flows. A total of 190 sudden geological disaster points (including hidden danger points) have been identified, including 74 collapses, 96 landslides, 14 debris flows, and 6 unstable slopes. There are 58 newly discovered geological disaster points, accounting for 30.5% of the total.", "How many sudden geological disaster points are there in Huoshan County in total?"],
                            ["claude", "claude-3-5-haiku-20241022", "Q&A", "Sudden geological disasters in Huoshan County are mainly collapses, landslides, and debris flows. A total of 190 sudden geological disaster points (including hidden danger points) have been identified, including 74 collapses, 96 landslides, 14 debris flows, and 6 unstable slopes.", "Among the geological disasters in Huoshan County, which type of disaster has the largest number?"],
                        ],
                        inputs=[qa_model_series, qa_model_name, qa_type, geological_text, question_or_statement]
                    )

                # QA event handling
                def submit_qa_request(series, name, custom_name, q_type, template, prompt, geo_text, question):
                    # Use custom model name if provided; it overrides the dropdown.
                    final_model_name = custom_name.strip() if custom_name.strip() else name
                    return call_qa_model(series, final_model_name, prompt, geo_text, question, q_type)

                def update_qa_prompt_on_type_change(qa_type_value):
                    """Update prompt template options and content when QA type changes."""
                    if qa_type_value == "True/False":
                        new_choices = ["Custom", "Zero-shot True/False", "COT True/False"]
                        new_value = "Zero-shot True/False"
                        new_prompt = "Please judge true or false based on the given text."
                        new_placeholder = "Please input statement to judge..."
                        new_label = "Statement"
                    else:  # Q&A
                        new_choices = ["Custom", "Zero-shot Q&A", "COT Q&A"]
                        new_value = "Zero-shot Q&A"
                        new_prompt = "Please answer the question based on the given text."
                        new_placeholder = "Please input question to answer..."
                        new_label = "Question"

                    return (
                        gr.Dropdown(choices=new_choices, value=new_value, label="Select QA Prompt Template"),
                        gr.Textbox(value=new_prompt, label="QA Prompt Content", lines=3, max_lines=10),
                        gr.Textbox(label=new_label, placeholder=new_placeholder, lines=3, max_lines=8)
                    )

                # Update QA model name options
                qa_model_series.change(
                    fn=update_model_names,
                    inputs=[qa_model_series],
                    outputs=[qa_model_name]
                )

                # Update QA prompt content
                qa_prompt_template.change(
                    fn=update_qa_prompt_content,
                    inputs=[qa_prompt_template],
                    outputs=[qa_prompt_content]
                )

                # Update related components when QA type changes
                qa_type.change(
                    fn=update_qa_prompt_on_type_change,
                    inputs=[qa_type],
                    outputs=[qa_prompt_template, qa_prompt_content, question_or_statement]
                )

                # QA submit button event
                qa_submit_btn.click(
                    fn=submit_qa_request,
                    inputs=[qa_model_series, qa_model_name, qa_custom_model_name, qa_type, qa_prompt_template, qa_prompt_content, geological_text, question_or_statement],
                    outputs=[qa_output]
                )

                # QA clear button event
                qa_clear_btn.click(
                    fn=lambda: ("", "", ""),
                    outputs=[geological_text, question_or_statement, qa_output]
                )

                # QA enter key submission
                question_or_statement.submit(
                    fn=submit_qa_request,
                    inputs=[qa_model_series, qa_model_name, qa_custom_model_name, qa_type, qa_prompt_template, qa_prompt_content, geological_text, question_or_statement],
                    outputs=[qa_output]
                )

    return demo
605
+
606
if __name__ == "__main__":
    # Launch interface (blocks until the server is stopped)
    demo = create_interface()
    demo.launch(
        server_port=7860,  # Port number
        share=True,        # Whether to create a public share link
        debug=True         # Debug mode
    )
LLM.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from openai import OpenAI
2
+ import os
3
+ import pandas as pd
4
+ import google.generativeai as genai
5
+ import requests
6
+
7
# Dispatch a single zero-shot prompt to the selected LLM provider.
def zero_shot(model_series, model_name, content):
    """Send ``content`` as one user message to the chosen model and return its reply.

    Args:
        model_series: Provider family key ('gpt', 'llama', 'qwen', 'deepSeek',
            'gemini', 'claude'); selects the endpoint and the API key row
            in ./LLM_APIs.txt.
        model_name: Concrete model identifier forwarded to the provider.
        content: The user prompt text.

    Returns:
        The assistant reply text as a plain string, or an error string for
        an unsupported ``model_series``.
    """
    # LLM_APIs.txt is tab-separated; each whole line is read as a single
    # field and split manually. Assumes keys contain no commas/tabs —
    # TODO(review): confirm the file really has three tab-separated columns.
    df = pd.read_csv('./LLM_APIs.txt')
    df = pd.DataFrame([x.split('\t') for x in df.values.flatten()],
                      columns=['name', 'series', 'API'])
    # First matching row wins; raises IndexError if the series is missing.
    api_key = df[df['name'] == model_series]['API'].values[0]

    if model_series == 'gpt':
        client = OpenAI(
            api_key=api_key,
            base_url="https://api.bianxie.ai/v1",
        )
        completion = client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": content}],
        )
        # Return the reply text itself (not the message object) so every
        # branch yields the same type to the caller.
        return completion.choices[0].message.content

    elif model_series in ('deepSeek', 'qwen', 'llama'):
        # These families are served via the Nebius OpenAI-compatible API.
        client = OpenAI(api_key=api_key,
                        base_url="https://api.studio.nebius.ai/v1")
        response = client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": content}],
            stream=False,
        )
        return response.choices[0].message.content

    elif model_series in ('gemini', 'claude'):
        # Both are proxied through the same OpenAI-compatible REST endpoint,
        # so they share one request path (previously two duplicated branches).
        url = 'https://api.bianxie.ai/v1/chat/completions'
        headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {api_key}',
        }
        data = {
            'model': model_name,
            'messages': [{'role': 'user', 'content': content}],
        }
        response = requests.post(url, headers=headers, json=data)
        # Extract the reply text (was returning the whole message dict).
        return response.json()['choices'][0]['message']['content']

    else:
        return f"Unsupported model series: {model_series}"
LLM_APIs.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ name	系列	API
2
+ gpt	<REDACTED — live API key was committed publicly; rotate it and load from an environment variable or secret store>
3
+ llama	<REDACTED — live JWT bearer token was committed publicly; rotate it and load from an environment variable or secret store>
4
+ claude	<REDACTED — live API key was committed publicly; rotate it and load from an environment variable or secret store>
5
+ qwen	<REDACTED — live JWT bearer token was committed publicly; rotate it and load from an environment variable or secret store>
6
+ claude	<REDACTED — live API key was committed publicly; rotate it and load from an environment variable or secret store>
7
+ gemini	<REDACTED — live API key was committed publicly; rotate it and load from an environment variable or secret store>
8
+ deepSeek	<REDACTED — live JWT bearer token was committed publicly; rotate it and load from an environment variable or secret store>
9
+ claude	<REDACTED — live API key was committed publicly; rotate it and load from an environment variable or secret store>
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: Up
3
- emoji: 🌍
4
- colorFrom: blue
5
- colorTo: gray
6
  sdk: gradio
7
- sdk_version: 5.38.2
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: up
3
+ app_file: Gradio_en.py
 
 
4
  sdk: gradio
5
+ sdk_version: 4.44.1
 
 
6
  ---
 
 
data/text_retrieval_results.json ADDED
The diff for this file is too large to render. See raw diff
 
prompt_generate.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import json
3
+ from difflib import SequenceMatcher
4
+ import pandas as pd
5
+
6
def generate_prompt_with_examples(text, label, n, start_index=500, end_index=1000):
    """Build a few-shot prompt from n randomly sampled (text, triples) pairs.

    Args:
        text: Series of text descriptions.
        label: Series of triple lists aligned with ``text``.
        n: Number of examples to include (clamped to the available window).
        start_index: First candidate index (inclusive).
        end_index: Last candidate index (exclusive).

    Returns:
        A string of n examples, each formatted as ``text + '\\n' + triples + '\\n'``,
        or a (Chinese) error message when the index window is empty.
    """
    # Clamp the window so we never index past the shorter of the two Series.
    end_index = min(end_index, len(text), len(label))
    if start_index >= end_index:
        # Preserved verbatim: callers receive this message as the prompt.
        return "起始索引大于或等于结束索引,无法生成样例。"

    sample_size = min(n, end_index - start_index)
    chosen = random.sample(range(start_index, end_index), sample_size)

    pieces = [f"{text.iloc[idx]}\n{label.iloc[idx]}\n" for idx in chosen]
    return "".join(pieces)
def generate_prompt_with_best_matches(text_series, label_series, query_text, n=3, start_index=500, end_index=1000):
    """Build a few-shot prompt from the top-n retrieval matches for query_text.

    Looks up ``query_text`` in ./data/text_retrieval_results.json, takes the
    first ``n`` matched contexts, and for each context appends the first
    training text containing it (plus its triple label) to the prompt.

    Args:
        text_series: Series of training text descriptions.
        label_series: Series of triple labels aligned with ``text_series``.
        query_text: Query to look up in the retrieval-results file.
        n: Maximum number of matched contexts to use.
        start_index: Unused; kept for backward compatibility with callers.
        end_index: Unused; kept for backward compatibility with callers.

    Returns:
        Prompt string of ``text\\nlabel`` example pairs separated by blank
        lines (empty string if no context matches any training text).

    Raises:
        ValueError: If ``query_text`` is not present in the retrieval file.
    """
    query_text_path = './data/text_retrieval_results.json'
    with open(query_text_path, 'r', encoding='utf-8') as f:
        query_text_data = json.load(f)

    # Locate the retrieval record for this query. The original code left
    # `matched_texts` unbound on a miss, crashing later with NameError.
    matched_texts = None
    for item in query_text_data:
        if item['query_text'] == query_text:
            matched_texts = item['matched_texts']
            break
    if matched_texts is None:
        raise ValueError(f"query_text not found in {query_text_path}: {query_text!r}")

    # Keep only the contexts of the top-n matches.
    context_list = [match['context'] for match in matched_texts[:n]]

    prompt = ""
    for context in context_list:
        # Linear scan: first training text containing this context wins.
        for idx, text_item in text_series.items():
            if context in text_item:
                position = text_series.index.get_loc(idx)
                label = label_series.iloc[position]
                prompt += f"{text_item}\n{label}\n\n"
                break

    return prompt.strip()
if __name__ == "__main__":
    # Smoke-test the retrieval-based prompt builder on one stored query.
    train_data = pd.read_json('./data/train_triples.json')
    texts = train_data['text']
    triples = train_data['triple_list']

    retrieval_path = './data/text_retrieval_results.json'
    with open(retrieval_path, 'r', encoding='utf-8') as f:
        retrieval_records = json.load(f)

    sample_query = retrieval_records[3]['query_text']
    print(sample_query)
    print("--------------------------------")
    print(generate_prompt_with_best_matches(
        texts, triples, sample_query, n=3, start_index=500, end_index=1000))