Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +6 -0
- LLM/__init__.py +0 -0
- LLM/__pycache__/__init__.cpython-310.pyc +0 -0
- LLM/__pycache__/__init__.cpython-38.pyc +0 -0
- LLM/__pycache__/__init__.cpython-39.pyc +0 -0
- LLM/__pycache__/apillm.cpython-310.pyc +0 -0
- LLM/__pycache__/apillm.cpython-38.pyc +0 -0
- LLM/__pycache__/apillm.cpython-39.pyc +0 -0
- LLM/__pycache__/base_client.cpython-310.pyc +0 -0
- LLM/__pycache__/base_client.cpython-38.pyc +0 -0
- LLM/__pycache__/base_client.cpython-39.pyc +0 -0
- LLM/__pycache__/deli_client.cpython-310.pyc +0 -0
- LLM/__pycache__/deli_client.cpython-38.pyc +0 -0
- LLM/__pycache__/deli_client.cpython-39.pyc +0 -0
- LLM/__pycache__/llm.cpython-310.pyc +0 -0
- LLM/__pycache__/llm.cpython-38.pyc +0 -0
- LLM/__pycache__/llm.cpython-39.pyc +0 -0
- LLM/__pycache__/offlinellm.cpython-310.pyc +0 -0
- LLM/__pycache__/offlinellm.cpython-38.pyc +0 -0
- LLM/__pycache__/offlinellm.cpython-39.pyc +0 -0
- LLM/__pycache__/openai_client.cpython-310.pyc +0 -0
- LLM/__pycache__/openai_client.cpython-38.pyc +0 -0
- LLM/__pycache__/openai_client.cpython-39.pyc +0 -0
- LLM/__pycache__/proxy_client.cpython-310.pyc +0 -0
- LLM/__pycache__/proxy_client.cpython-38.pyc +0 -0
- LLM/__pycache__/proxy_client.cpython-39.pyc +0 -0
- LLM/__pycache__/wenxin_client.cpython-310.pyc +0 -0
- LLM/__pycache__/wenxin_client.cpython-38.pyc +0 -0
- LLM/__pycache__/wenxin_client.cpython-39.pyc +0 -0
- LLM/__pycache__/zhipuai_client.cpython-310.pyc +0 -0
- LLM/__pycache__/zhipuai_client.cpython-38.pyc +0 -0
- LLM/__pycache__/zhipuai_client.cpython-39.pyc +0 -0
- LLM/apillm.py +36 -0
- LLM/base_client.py +9 -0
- LLM/deli_client.py +17 -0
- LLM/llm.py +10 -0
- LLM/offlinellm.py +26 -0
- LLM/openai_client.py +24 -0
- LLM/proxy_client.py +35 -0
- LLM/wenxin_client.py +136 -0
- LLM/zhipuai_client.py +82 -0
- README.md +3 -9
- __pycache__/agent.cpython-310.pyc +0 -0
- __pycache__/frontEnd.cpython-310.pyc +0 -0
- __pycache__/main.cpython-310.pyc +0 -0
- __pycache__/prompts.cpython-310.pyc +0 -0
- agent.py +788 -0
- api_pool/.env +2 -0
- api_pool/__pycache__/api_pool.cpython-310.pyc +0 -0
- api_pool/__pycache__/api_pool.cpython-38.pyc +0 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
readme.pdf filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
resource/corpus[[:space:]]copy.txt filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
resource/corpus.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
resource/corpus.txt filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
resource/corpus1.txt filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
resource/law.json filter=lfs diff=lfs merge=lfs -text
|
LLM/__init__.py
ADDED
|
File without changes
|
LLM/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (145 Bytes). View file
|
|
|
LLM/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (140 Bytes). View file
|
|
|
LLM/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (146 Bytes). View file
|
|
|
LLM/__pycache__/apillm.cpython-310.pyc
ADDED
|
Binary file (1.45 kB). View file
|
|
|
LLM/__pycache__/apillm.cpython-38.pyc
ADDED
|
Binary file (1.44 kB). View file
|
|
|
LLM/__pycache__/apillm.cpython-39.pyc
ADDED
|
Binary file (1.46 kB). View file
|
|
|
LLM/__pycache__/base_client.cpython-310.pyc
ADDED
|
Binary file (616 Bytes). View file
|
|
|
LLM/__pycache__/base_client.cpython-38.pyc
ADDED
|
Binary file (611 Bytes). View file
|
|
|
LLM/__pycache__/base_client.cpython-39.pyc
ADDED
|
Binary file (617 Bytes). View file
|
|
|
LLM/__pycache__/deli_client.cpython-310.pyc
ADDED
|
Binary file (560 Bytes). View file
|
|
|
LLM/__pycache__/deli_client.cpython-38.pyc
ADDED
|
Binary file (549 Bytes). View file
|
|
|
LLM/__pycache__/deli_client.cpython-39.pyc
ADDED
|
Binary file (555 Bytes). View file
|
|
|
LLM/__pycache__/llm.cpython-310.pyc
ADDED
|
Binary file (597 Bytes). View file
|
|
|
LLM/__pycache__/llm.cpython-38.pyc
ADDED
|
Binary file (590 Bytes). View file
|
|
|
LLM/__pycache__/llm.cpython-39.pyc
ADDED
|
Binary file (596 Bytes). View file
|
|
|
LLM/__pycache__/offlinellm.cpython-310.pyc
ADDED
|
Binary file (1.09 kB). View file
|
|
|
LLM/__pycache__/offlinellm.cpython-38.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
LLM/__pycache__/offlinellm.cpython-39.pyc
ADDED
|
Binary file (1.09 kB). View file
|
|
|
LLM/__pycache__/openai_client.cpython-310.pyc
ADDED
|
Binary file (1.05 kB). View file
|
|
|
LLM/__pycache__/openai_client.cpython-38.pyc
ADDED
|
Binary file (1.05 kB). View file
|
|
|
LLM/__pycache__/openai_client.cpython-39.pyc
ADDED
|
Binary file (1.05 kB). View file
|
|
|
LLM/__pycache__/proxy_client.cpython-310.pyc
ADDED
|
Binary file (1.12 kB). View file
|
|
|
LLM/__pycache__/proxy_client.cpython-38.pyc
ADDED
|
Binary file (1.1 kB). View file
|
|
|
LLM/__pycache__/proxy_client.cpython-39.pyc
ADDED
|
Binary file (1.11 kB). View file
|
|
|
LLM/__pycache__/wenxin_client.cpython-310.pyc
ADDED
|
Binary file (3.26 kB). View file
|
|
|
LLM/__pycache__/wenxin_client.cpython-38.pyc
ADDED
|
Binary file (3.25 kB). View file
|
|
|
LLM/__pycache__/wenxin_client.cpython-39.pyc
ADDED
|
Binary file (3.25 kB). View file
|
|
|
LLM/__pycache__/zhipuai_client.cpython-310.pyc
ADDED
|
Binary file (3.29 kB). View file
|
|
|
LLM/__pycache__/zhipuai_client.cpython-38.pyc
ADDED
|
Binary file (3.25 kB). View file
|
|
|
LLM/__pycache__/zhipuai_client.cpython-39.pyc
ADDED
|
Binary file (3.22 kB). View file
|
|
|
LLM/apillm.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .llm import LLM
|
| 2 |
+
from .openai_client import OpenAIClient
|
| 3 |
+
from .wenxin_client import WenxinClient
|
| 4 |
+
from .zhipuai_client import ZhipuAIClient
|
| 5 |
+
from .proxy_client import ProxyClient
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class APILLM(LLM):
|
| 9 |
+
def __init__(self, api_key, api_secret=None, platform="wenxin", model="gpt-4"):
|
| 10 |
+
self.api_key = api_key
|
| 11 |
+
self.api_secret = api_secret
|
| 12 |
+
self.platform = platform
|
| 13 |
+
self.model = model
|
| 14 |
+
self.client = self._initialize_client()
|
| 15 |
+
|
| 16 |
+
def _initialize_client(self):
|
| 17 |
+
if self.platform == "openai":
|
| 18 |
+
return OpenAIClient(self.api_key, self.model)
|
| 19 |
+
elif self.platform == "wenxin":
|
| 20 |
+
return WenxinClient(self.api_key, self.api_secret, self.model)
|
| 21 |
+
elif self.platform == "zhipuai":
|
| 22 |
+
return ZhipuAIClient(self.api_key, self.model)
|
| 23 |
+
elif self.platform == "proxy":
|
| 24 |
+
return ProxyClient(self.model)
|
| 25 |
+
else:
|
| 26 |
+
raise ValueError(f"Unsupported platform: {self.platform}")
|
| 27 |
+
|
| 28 |
+
def generate(self, instruction, prompt, *args, **kwargs):
|
| 29 |
+
if instruction is None:
|
| 30 |
+
instruction = "You are a helpful assistant."
|
| 31 |
+
|
| 32 |
+
messages = [
|
| 33 |
+
{"role": "system", "content": instruction},
|
| 34 |
+
{"role": "user", "content": prompt},
|
| 35 |
+
]
|
| 36 |
+
return self.client.send_request(messages, *args, **kwargs)
|
LLM/base_client.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LLM/base_client.py
|
| 2 |
+
from abc import ABC, abstractmethod
|
| 3 |
+
from typing import List, Dict
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class BaseClient(ABC):
|
| 7 |
+
@abstractmethod
|
| 8 |
+
def send_request(self, messages: List[Dict[str, str]], *args, **kwargs):
|
| 9 |
+
pass
|
LLM/deli_client.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def search_law(query):
|
| 6 |
+
LAW_VECTOR_API_URL = "" # Place your API URL here
|
| 7 |
+
URL = "" # Place your API URL here
|
| 8 |
+
params = {"question": query}
|
| 9 |
+
res = requests.get(URL, params=params)
|
| 10 |
+
res = json.loads(res.text)
|
| 11 |
+
|
| 12 |
+
return res
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
if __name__ == "__main__":
|
| 16 |
+
query = "《中华人民共和国劳动法》第四十三条规定?"
|
| 17 |
+
print(search_law(query))
|
LLM/llm.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LLM/llm.py:
|
| 2 |
+
from abc import ABC, abstractmethod
|
| 3 |
+
import requests
|
| 4 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class LLM(ABC):
|
| 8 |
+
@abstractmethod
|
| 9 |
+
def generate(self, prompt,*args, **kwargs):
|
| 10 |
+
pass
|
LLM/offlinellm.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 2 |
+
from .llm import LLM
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class OfflineLLM(LLM):
|
| 7 |
+
def __init__(self, model_path, device="cuda"):
|
| 8 |
+
self.pipe = pipeline(
|
| 9 |
+
"text-generation",
|
| 10 |
+
model=model_path,
|
| 11 |
+
torch_dtype=torch.float16,
|
| 12 |
+
device_map=device,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
def generate(self, instruction, prompt, max_new_tokens=500):
|
| 16 |
+
|
| 17 |
+
if instruction is None:
|
| 18 |
+
instruction = "You are a helpful assistant."
|
| 19 |
+
|
| 20 |
+
messages = [
|
| 21 |
+
{"role": "system", "content": instruction},
|
| 22 |
+
{"role": "user", "content": prompt},
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
response = self.pipe(messages, max_new_tokens=max_new_tokens)
|
| 26 |
+
return response[0]["generated_text"][-1]["content"]
|
LLM/openai_client.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# api_client/openai_client.py
|
| 2 |
+
import requests
|
| 3 |
+
import json
|
| 4 |
+
from .base_client import BaseClient
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class OpenAIClient(BaseClient):
|
| 8 |
+
def __init__(self, api_key, model):
|
| 9 |
+
self.api_key = api_key
|
| 10 |
+
self.model = model
|
| 11 |
+
|
| 12 |
+
def send_request(self, messages):
|
| 13 |
+
url = "https://api.openai.com/v1/chat/completions"
|
| 14 |
+
headers = {
|
| 15 |
+
"Content-Type": "application/json",
|
| 16 |
+
"Authorization": f"Bearer {self.api_key}",
|
| 17 |
+
}
|
| 18 |
+
payload = {
|
| 19 |
+
"model": self.model,
|
| 20 |
+
"messages": messages,
|
| 21 |
+
}
|
| 22 |
+
response = requests.post(url, headers=headers, data=json.dumps(payload))
|
| 23 |
+
text = json.loads(response.text)
|
| 24 |
+
return text.get("choices")[0].get("message").get("content")
|
LLM/proxy_client.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from .base_client import BaseClient
|
| 3 |
+
from api_pool.api_pool import api_pool
|
| 4 |
+
|
| 5 |
+
class ProxyClient(BaseClient):
|
| 6 |
+
def __init__(self, model_name):
|
| 7 |
+
if model_name not in api_pool:
|
| 8 |
+
raise ValueError("Invalid model name")
|
| 9 |
+
self.query_function = api_pool[model_name]
|
| 10 |
+
|
| 11 |
+
def send_request(
|
| 12 |
+
self,
|
| 13 |
+
messages,
|
| 14 |
+
temperature=0,
|
| 15 |
+
max_tokens=4096,
|
| 16 |
+
*args,
|
| 17 |
+
**kwargs,
|
| 18 |
+
): #temperature is set to zero
|
| 19 |
+
# Prepare the payload
|
| 20 |
+
payload = {
|
| 21 |
+
"messages": messages,
|
| 22 |
+
"temperature": temperature,
|
| 23 |
+
"max_tokens": max_tokens,
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
# Call the appropriate function from api_pool
|
| 27 |
+
try:
|
| 28 |
+
result, usage = self.query_function(**payload)
|
| 29 |
+
print(f"Result: {result}")
|
| 30 |
+
print(f"Usage: {usage}")
|
| 31 |
+
return result
|
| 32 |
+
except Exception as e:
|
| 33 |
+
print(f"Error during API call: {str(e)}")
|
| 34 |
+
return ""
|
| 35 |
+
|
LLM/wenxin_client.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# api_client/wenxin_client.py
|
| 2 |
+
import requests
|
| 3 |
+
import json
|
| 4 |
+
from .base_client import BaseClient
|
| 5 |
+
import time
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class WenxinClient(BaseClient):
|
| 9 |
+
def __init__(self, api_key, api_secret, model):
|
| 10 |
+
self.api_key = api_key
|
| 11 |
+
self.api_secret = api_secret
|
| 12 |
+
self.model = model
|
| 13 |
+
|
| 14 |
+
def get_access_token(self):
|
| 15 |
+
url = f"https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={self.api_key}&client_secret={self.api_secret}"
|
| 16 |
+
headers = {"Content-Type": "application/json", "Accept": "application/json"}
|
| 17 |
+
response = requests.post(url, headers=headers)
|
| 18 |
+
return response.json().get("access_token")
|
| 19 |
+
|
| 20 |
+
def send_request(
|
| 21 |
+
self,
|
| 22 |
+
messages,
|
| 23 |
+
temperature=0.8,
|
| 24 |
+
top_p=0.8,
|
| 25 |
+
penalty_score=1.0,
|
| 26 |
+
stream=False,
|
| 27 |
+
enable_system_memory=False,
|
| 28 |
+
system_memory_id=None,
|
| 29 |
+
stop=None,
|
| 30 |
+
disable_search=False,
|
| 31 |
+
enable_citation=False,
|
| 32 |
+
enable_trace=False,
|
| 33 |
+
max_output_tokens: int = None,
|
| 34 |
+
response_format=None,
|
| 35 |
+
user_id=None,
|
| 36 |
+
tool_choice=None,
|
| 37 |
+
*args,
|
| 38 |
+
**kwargs,
|
| 39 |
+
):
|
| 40 |
+
|
| 41 |
+
access_token = self.get_access_token()
|
| 42 |
+
if self.model == "ERNIE-4.0-8K":
|
| 43 |
+
endpoint = "completions_pro"
|
| 44 |
+
elif self.model == "ERNIE-Speed-128K":
|
| 45 |
+
endpoint = "ernie-speed-128k"
|
| 46 |
+
elif self.model == "ERNIE-3.5-8K":
|
| 47 |
+
endpoint = "completions"
|
| 48 |
+
else:
|
| 49 |
+
raise ValueError("Invalid model name")
|
| 50 |
+
|
| 51 |
+
base_url = f"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/{endpoint}?access_token={access_token}"
|
| 52 |
+
headers = {"Content-Type": "application/json"}
|
| 53 |
+
|
| 54 |
+
system_messages = [msg for msg in messages if msg["role"] == "system"]
|
| 55 |
+
if system_messages:
|
| 56 |
+
system = system_messages[0]["content"]
|
| 57 |
+
messages = [msg for msg in messages if msg["role"] != "system"]
|
| 58 |
+
else:
|
| 59 |
+
system = None
|
| 60 |
+
|
| 61 |
+
payload = {
|
| 62 |
+
"messages": messages,
|
| 63 |
+
"temperature": temperature,
|
| 64 |
+
"top_p": top_p,
|
| 65 |
+
"penalty_score": penalty_score,
|
| 66 |
+
"stream": stream,
|
| 67 |
+
"enable_system_memory": enable_system_memory,
|
| 68 |
+
"disable_search": disable_search,
|
| 69 |
+
"enable_citation": enable_citation,
|
| 70 |
+
"enable_trace": enable_trace,
|
| 71 |
+
"response_format": response_format,
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
if system:
|
| 75 |
+
payload["system"] = system
|
| 76 |
+
if system_memory_id:
|
| 77 |
+
payload["system_memory_id"] = system_memory_id
|
| 78 |
+
if stop:
|
| 79 |
+
payload["stop"] = stop
|
| 80 |
+
if max_output_tokens:
|
| 81 |
+
payload["max_output_tokens"] = max_output_tokens
|
| 82 |
+
if user_id:
|
| 83 |
+
payload["user_id"] = user_id
|
| 84 |
+
if tool_choice:
|
| 85 |
+
payload["tool_choice"] = tool_choice
|
| 86 |
+
|
| 87 |
+
response = requests.post(base_url, headers=headers, data=json.dumps(payload))
|
| 88 |
+
|
| 89 |
+
# 处理速率限制
|
| 90 |
+
if response.status_code == 429:
|
| 91 |
+
print("警告:请求速率超过限制!")
|
| 92 |
+
remaining_requests = int(
|
| 93 |
+
response.headers.get("X-Ratelimit-Remaining-Requests", 0)
|
| 94 |
+
)
|
| 95 |
+
remaining_tokens = int(
|
| 96 |
+
response.headers.get("X-Ratelimit-Remaining-Tokens", 0)
|
| 97 |
+
)
|
| 98 |
+
if remaining_requests == 0 or remaining_tokens == 0:
|
| 99 |
+
sleep_time = 60 # 休眠60秒再重试
|
| 100 |
+
print(f"配额已用尽,{sleep_time}秒后重试...")
|
| 101 |
+
time.sleep(sleep_time)
|
| 102 |
+
return self.send_request(
|
| 103 |
+
messages,
|
| 104 |
+
temperature,
|
| 105 |
+
top_p,
|
| 106 |
+
penalty_score,
|
| 107 |
+
stream,
|
| 108 |
+
enable_system_memory,
|
| 109 |
+
system_memory_id,
|
| 110 |
+
stop,
|
| 111 |
+
disable_search,
|
| 112 |
+
enable_citation,
|
| 113 |
+
enable_trace,
|
| 114 |
+
max_output_tokens,
|
| 115 |
+
response_format,
|
| 116 |
+
user_id,
|
| 117 |
+
tool_choice,
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
text = json.loads(response.text)
|
| 121 |
+
print(text)
|
| 122 |
+
|
| 123 |
+
if "result" not in text:
|
| 124 |
+
print("警告:响应中未找到result字段!")
|
| 125 |
+
return ""
|
| 126 |
+
|
| 127 |
+
result = text["result"]
|
| 128 |
+
|
| 129 |
+
if text.get("is_truncated"):
|
| 130 |
+
print("注意:输出结果被截断!")
|
| 131 |
+
|
| 132 |
+
if text.get("function_call"):
|
| 133 |
+
print("模型生成了函数调用:")
|
| 134 |
+
print(text["function_call"])
|
| 135 |
+
|
| 136 |
+
return result
|
LLM/zhipuai_client.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# api_client/zhipuai_client.py
|
| 2 |
+
import requests
|
| 3 |
+
import json
|
| 4 |
+
from .base_client import BaseClient
|
| 5 |
+
from typing import List, Dict, Optional, Union
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ZhipuAIClient(BaseClient):
|
| 9 |
+
def __init__(self, api_key: str, model: str):
|
| 10 |
+
self.api_key = api_key
|
| 11 |
+
self.model = model
|
| 12 |
+
|
| 13 |
+
def send_request(
|
| 14 |
+
self,
|
| 15 |
+
messages: List[Dict[str, str]],
|
| 16 |
+
request_id: Optional[str] = None,
|
| 17 |
+
do_sample: bool = True,
|
| 18 |
+
stream: bool = False,
|
| 19 |
+
temperature: float = 0.95,
|
| 20 |
+
top_p: float = 0.7,
|
| 21 |
+
max_tokens: int = 1024,
|
| 22 |
+
stop: Optional[List[str]] = None,
|
| 23 |
+
tools: Optional[List[str]] = None,
|
| 24 |
+
tool_choice: Union[str, Dict] = "auto",
|
| 25 |
+
user_id: Optional[str] = None,
|
| 26 |
+
*args,
|
| 27 |
+
**kwargs,
|
| 28 |
+
) -> str:
|
| 29 |
+
url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
|
| 30 |
+
headers = {
|
| 31 |
+
"Authorization": f"Bearer {self.api_key}",
|
| 32 |
+
"Content-Type": "application/json",
|
| 33 |
+
}
|
| 34 |
+
payload = {
|
| 35 |
+
"model": self.model,
|
| 36 |
+
"messages": messages,
|
| 37 |
+
"request_id": request_id,
|
| 38 |
+
"do_sample": do_sample,
|
| 39 |
+
"stream": stream,
|
| 40 |
+
"temperature": temperature,
|
| 41 |
+
"top_p": top_p,
|
| 42 |
+
"max_tokens": max_tokens,
|
| 43 |
+
"stop": stop,
|
| 44 |
+
"tools": tools,
|
| 45 |
+
"tool_choice": tool_choice,
|
| 46 |
+
"user_id": user_id,
|
| 47 |
+
}
|
| 48 |
+
# 移除值为 None 的参数
|
| 49 |
+
payload = {k: v for k, v in payload.items() if v is not None}
|
| 50 |
+
|
| 51 |
+
# 参数类型检查
|
| 52 |
+
assert isinstance(self.model, str), "model must be a string"
|
| 53 |
+
assert isinstance(messages, list), "messages must be a list"
|
| 54 |
+
assert all(
|
| 55 |
+
isinstance(msg, dict) for msg in messages
|
| 56 |
+
), "each message must be a dictionary"
|
| 57 |
+
assert all(
|
| 58 |
+
"role" in msg and "content" in msg for msg in messages
|
| 59 |
+
), "each message must have 'role' and 'content' keys"
|
| 60 |
+
assert request_id is None or isinstance(
|
| 61 |
+
request_id, str
|
| 62 |
+
), "request_id must be a string or None"
|
| 63 |
+
assert isinstance(do_sample, bool), "do_sample must be a boolean"
|
| 64 |
+
assert isinstance(stream, bool), "stream must be a boolean"
|
| 65 |
+
assert isinstance(temperature, (int, float)), "temperature must be a number"
|
| 66 |
+
assert 0 < temperature < 1, "temperature must be between 0 and 1"
|
| 67 |
+
assert isinstance(top_p, (int, float)), "top_p must be a number"
|
| 68 |
+
assert 0 < top_p < 1, "top_p must be between 0 and 1"
|
| 69 |
+
assert isinstance(max_tokens, int), "max_tokens must be an integer"
|
| 70 |
+
assert max_tokens > 0, "max_tokens must be positive"
|
| 71 |
+
assert stop is None or isinstance(stop, list), "stop must be a list or None"
|
| 72 |
+
assert tools is None or isinstance(tools, list), "tools must be a list or None"
|
| 73 |
+
assert isinstance(
|
| 74 |
+
tool_choice, (str, dict)
|
| 75 |
+
), "tool_choice must be a string or dictionary"
|
| 76 |
+
assert user_id is None or isinstance(
|
| 77 |
+
user_id, str
|
| 78 |
+
), "user_id must be a string or None"
|
| 79 |
+
|
| 80 |
+
response = requests.post(url, headers=headers, data=json.dumps(payload))
|
| 81 |
+
text = json.loads(response.text)
|
| 82 |
+
return text.get("choices")[0].get("message").get("content")
|
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
|
| 4 |
-
colorFrom: indigo
|
| 5 |
-
colorTo: green
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: simcourt
|
| 3 |
+
app_file: main.py
|
|
|
|
|
|
|
| 4 |
sdk: gradio
|
| 5 |
+
sdk_version: 4.44.1
|
|
|
|
|
|
|
| 6 |
---
|
|
|
|
|
|
__pycache__/agent.cpython-310.pyc
ADDED
|
Binary file (40 kB). View file
|
|
|
__pycache__/frontEnd.cpython-310.pyc
ADDED
|
Binary file (11.5 kB). View file
|
|
|
__pycache__/main.cpython-310.pyc
ADDED
|
Binary file (54.6 kB). View file
|
|
|
__pycache__/prompts.cpython-310.pyc
ADDED
|
Binary file (2.18 kB). View file
|
|
|
agent.py
ADDED
|
@@ -0,0 +1,788 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Dict, Any, Tuple
|
| 2 |
+
import re
|
| 3 |
+
import json
|
| 4 |
+
from LLM.deli_client import search_law
|
| 5 |
+
import uuid
|
| 6 |
+
import logging
|
| 7 |
+
import requests
|
| 8 |
+
from langchain_community.vectorstores import Chroma
|
| 9 |
+
from langchain_huggingface import HuggingFaceEmbeddings
|
| 10 |
+
from frontEnd import simplify
|
| 11 |
+
import sys
|
| 12 |
+
import os
|
| 13 |
+
|
| 14 |
+
# sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
|
| 15 |
+
|
| 16 |
+
law_repository={}
|
| 17 |
+
with open(os.path.abspath(os.path.join(os.path.dirname(__file__),"resource/law.json")), 'r', encoding='utf-8') as f:
|
| 18 |
+
law_repository=json.load(f)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def extract_bracket_content(text):
|
| 22 |
+
# 正则表达式匹配【】及其内部内容
|
| 23 |
+
pattern = r'【(.*?)】'
|
| 24 |
+
# 使用findall方法找到所有匹配的内容
|
| 25 |
+
contents = re.findall(pattern, text)
|
| 26 |
+
return contents
|
| 27 |
+
|
| 28 |
+
class Context:
|
| 29 |
+
def __init__(self,content):
|
| 30 |
+
self.content=content
|
| 31 |
+
self.evidence={
|
| 32 |
+
"公诉方":"",
|
| 33 |
+
"辩护方":""
|
| 34 |
+
}
|
| 35 |
+
def add_prosecution_evidence(self,item:str):
|
| 36 |
+
self.evidence["公诉方"]+=item
|
| 37 |
+
def add_advocate_evidence(self,item:str):
|
| 38 |
+
self.evidence["辩护方"]+=item
|
| 39 |
+
|
| 40 |
+
class Agent:
|
| 41 |
+
def __init__(
|
| 42 |
+
self,
|
| 43 |
+
id: int,
|
| 44 |
+
name: str,
|
| 45 |
+
role: str,
|
| 46 |
+
description: str,
|
| 47 |
+
llm: Any,
|
| 48 |
+
# db: Any,
|
| 49 |
+
# log_think=False,
|
| 50 |
+
):
|
| 51 |
+
self.id = id
|
| 52 |
+
self.name = name
|
| 53 |
+
self.role = role
|
| 54 |
+
self.basic_description = description
|
| 55 |
+
self.instruction=description
|
| 56 |
+
self.llm = llm
|
| 57 |
+
self.goal=""
|
| 58 |
+
CHROMA_PATH = "./resource/chroma"
|
| 59 |
+
# embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
|
| 60 |
+
|
| 61 |
+
# self.db= Chroma(persist_directory=CHROMA_PATH, embedding_function=embeddings)
|
| 62 |
+
|
| 63 |
+
self.evidence_pool={
|
| 64 |
+
"公诉方出示的":"",
|
| 65 |
+
"辩护方出示的":""
|
| 66 |
+
}
|
| 67 |
+
self.memory=""
|
| 68 |
+
# {
|
| 69 |
+
# "审判长":"",
|
| 70 |
+
# "公诉人":"",
|
| 71 |
+
# "辩护人":"",
|
| 72 |
+
# "被告人":""
|
| 73 |
+
# }
|
| 74 |
+
self.search_pool=None
|
| 75 |
+
self.crime3aspect="""
|
| 76 |
+
判断被告人是否犯罪要考虑如下三个角度:
|
| 77 |
+
**只有当三个角度都成立,且具有直接因果关系时,罪名才真正成立**
|
| 78 |
+
|
| 79 |
+
一、构成要件符合性:
|
| 80 |
+
行为主体:1、三阶层理论不要求区分其年龄、责任能力;2、身份犯的概念。真正身份犯:身份是犯罪构成条件,例如贪污罪。不真正身份犯,身份是影响量刑的条件,例如非法拘禁罪。3、单位:主观:整体意志;客观:是为·整体谋利益,例如私分国有财产
|
| 81 |
+
行为:(1)作为(2)不作为:义务来源:1.对危险源有监管义务,如负责管理的危险物,监管义务的人的举动。自己的先行行为。2.对法益有保护义务的特定关系。法律规定的特定关系,合同关系,职务业务建立,自愿接受行为。(德国排除被害人自陷风险)3、特定领域,管理人和产生依赖关系。
|
| 82 |
+
结果:故意犯罪:产生危险是成立条件,产生结果是既遂条件。过失犯罪实害结果是犯罪成立条件
|
| 83 |
+
因果关系:1.制造了法律不允许的危险;2.刑法要防止发生的结果;3.危险的实现:如果存在介入因素,应该分析介入因素与先行行为关系,叠加关系还是独立关系?
|
| 84 |
+
|
| 85 |
+
二、违法性
|
| 86 |
+
正当防卫:(1)正当防卫的条件:1.起因条件:必须是不法的,”现实的“,紧迫的不法侵害。2、针对不法侵害者实施。认识到不法侵害正在发生,并有意识地对不发侵害进行反击。3不能明显超过必要限度。(2)特殊防卫:行为限制是正在进行的行凶、抢劫、强奸、绑架以及其他严重危机人身安全的暴力犯罪,造成不法侵害人伤亡的,不属于防卫过当。
|
| 87 |
+
紧急避险:(1)条件:1.起因条件:客观具体的法益危险,也可以是国家、公共利益;具有现在性:紧迫即刻发生的危险,以及在一定时间内,随时具有发生可能的持续性危险。行为条件:牺牲其他法益的行为是避免危险所需要以及补充性要件。避险意识:客观违法论:不考虑避险意识,偶然避险属于紧急避险。主观违法论:要求对危险及通过避险能够避免法益损失的事实存在认识。
|
| 88 |
+
其他:(1)自救行为(2)正当业务行为(3)义务冲突(4)被害人承诺
|
| 89 |
+
|
| 90 |
+
三、有责性
|
| 91 |
+
主观要件: (1) 犯罪故意:明知自己的行为会发生危害社会的结果,并且希望或者放任这种结果发生。 (2) 犯罪过失:应当预见自己的行为可能发生危害社会的结果,因而疏忽大意而没有预见,或者已经预见而轻言能够避免,以致发生这种结果。
|
| 92 |
+
责任阻却事由:(1)责任年龄:【年龄对责任能力的影响】已满十六周岁的人犯罪,应当负刑事责任。
|
| 93 |
+
已满十四周岁不满十六周岁的人,犯故意杀人、故意伤害致人重伤或者死亡、强奸、抢劫、贩卖毒品、放火、爆炸、投放危险物质罪的,应当负刑事责任。已满十四不满十八周岁的人犯罪,应当从轻或者减轻处罚。因不满十六周岁不予刑事处罚的,责令他的家长或者监护人加以管教:在必要的时候,也可以由政府收容教养。
|
| 94 |
+
>第十七条之一【年龄对老年人责任能力的影响】已满七十五周岁的人故意犯罪的,可以从轻或者减轻处罚:过失犯罪的,应当从轻或者减轻处罚。(2)责任能力:第十八条 【精神障碍对责任能力的影响】精神病人在不能辨认或者不能控制自己行为的时候造成危害结果,经法定程序鉴定确认的,不负刑事责任,但是应当责令他的家属或者监护人严加看管和医疗;在必要的时候,由政府强制医疗。间歇性的精神病人在精神正常的时候犯罪,应当负刑事责任。尚未完全丧失辨认或者控制自己行为能力的精神病人犯罪的,应当负刑事责任,但是可以从轻或者减轻处罚。醉酒的人犯罪,应当负刑事责任。第十九条 【听说、视觉机能对责任能力的影响】又聋又哑的人或者盲人犯罪,可以从轻、减轻或者免除处罚。(3)违法性认识错误:行为人具有认识违法的能力,行为人具有考察法律属性的机会;可以期待行为人利用其提供认识违法性的可能性。(4)期待可能性:根据社会通常人的情况,在当时的环境下,能否做出与行为人同样行为作为判断标准,是相对较为合理。
|
| 95 |
+
"""
|
| 96 |
+
self.current_plan=None
|
| 97 |
+
self.current_answer=None
|
| 98 |
+
# self.db = db
|
| 99 |
+
# self.log_think = log_think
|
| 100 |
+
# self.instruction=None
|
| 101 |
+
# self.logger = logging.getLogger(__name__)
|
| 102 |
+
|
| 103 |
+
def set_instruction(self,text):
|
| 104 |
+
self.instruction=self.basic_description+text
|
| 105 |
+
|
| 106 |
+
def __str__(self):
|
| 107 |
+
return f"{self.name} ({self.role})"
|
| 108 |
+
|
| 109 |
+
    def update_evidence(self, context: Context):
        """Merge newly presented evidence from *context* into this agent's pool.

        NOTE(review): the bare ``return`` below deliberately disables the merge —
        the two statements after it are unreachable dead code. Confirm whether
        this is a temporary toggle or should be deleted outright.
        """
        return
        # Unreachable while the early return above is in place:
        self.evidence_pool["公诉方出示的"]+=context.evidence["公诉方"]
        self.evidence_pool["辩护方出示的"]+=context.evidence["辩护方"]
| 113 |
+
|
| 114 |
+
    def check_hallucination(self, context: Context):
        """Post-hoc guard on the strategy: ask the LLM to delete fabricated items.

        Rewrites ``self.strategy`` in place, using the latest transcript, the
        memory summary and the evidence pool as the ground truth the strategy
        must not contradict. The prompt asks for the same two-section format
        (攻击策略 / 防御策略) so downstream consumers keep parsing it unchanged.
        """
        self.strategy=self.speak("最新的庭审记录:"+context.content+"法庭人员发言记录总结:"+str(self.memory)+"目前的证据:"+str(self.evidence_pool)+"目前的策略:"+str(self.strategy),
        """
法庭上要实事求是!不能无中生有!没有提及的内容不能作为证据,不能作为策略!
请根据最新庭审记录、发言记录总结、证据,检查【目前的策略】中,是否出现了无中生有、捏造事实的言论。
例如,如果没有信息表明被告人赔偿损失、征得谅解,则不应该有相应的策略!
如果有,把相关内容直接删掉。
如果某条策略的相关不实内容删掉后,该条目没有实质性内容,则把该条目一同删掉。
如果没有发现无中生有、捏造事实的言论,则无需修改。
不要保留修改痕迹!不用额外说明/注释!

请回复经过修改之后的策略。(依然分条目)
保持原始的格式不变,即:
攻击策略:1.xxx2.xxx3.xxx
防御策略:1.xxx2.xxx3.xxx
"""
        )
| 131 |
+
|
| 132 |
+
    def check_speak_hallucination(self, context, response):
        """Ask the LLM whether a drafted utterance *response* contains hallucinations.

        *context* is the latest transcript text (a plain string here, unlike
        :meth:`check_hallucination` which takes a Context object).
        Returns the raw model reply; the prompt requests a single character,
        '有' (hallucination found) or '无' (none) — callers such as ``speak``
        only inspect the first character of the reply.
        """
        res=self.speak("最新的庭审记录:"+context+"法庭人员发言记录总结:"+str(self.memory)+"目前的证据:"+str(self.evidence_pool)+"拟进行的发言内容:"+response,
        """
法庭上要实事求是!不能无中生有!没有论证的内容,不能当作发言内容!
请根据最新庭审记录、发言记录总结、证据,检查【拟进行的发言内容】中,是否出现了无中生有、捏造事实的言论(即幻觉)。
例如,如果没有信息表明被告人赔偿损失、征得谅解,但【拟进行的发言内容】中却出现了相应的文字,则表明出现了幻觉
或者数额和证据及前文讨论内容对应不上,则表明出现了幻觉
如果有幻觉,返回一个字‘有’,如果没有,返回一个字‘无’(不要带引号)

不要说多余的话!
""",check=False
        )
        return res
| 145 |
+
|
| 146 |
+
# --- Plan Phase --- #
|
| 147 |
+
def reflect_and_update(self,context:Context,summary=True):
|
| 148 |
+
if self.role=="审判长":
|
| 149 |
+
# res=self.speak("最新的庭审记录:"+context.content+"法庭人员发言记录总结:"+str(self.memory)+"目前的证据:"+str(self.evidence_pool)+"目前的辩论焦点:"+str(self.debate_focus),
|
| 150 |
+
# """
|
| 151 |
+
# 你要根据输入的信息,进一步调整/更新辩论焦点。
|
| 152 |
+
# """+self.strategy_prompt+
|
| 153 |
+
# """
|
| 154 |
+
# 辩论焦点从1开始编号
|
| 155 |
+
# 请严格按照以下格式回复:
|
| 156 |
+
# 1.法庭辩论焦点2.法庭辩论焦点3.法庭辩论焦点
|
| 157 |
+
# """
|
| 158 |
+
# )
|
| 159 |
+
info="起诉书:"+self.prosecution_statement+"被告人信息:"+self.defendant_information+"法庭人员发言记录总结:"+str(self.memory)+"最新的庭审记录:"+context.content+"目前的证据:"+str(self.evidence_pool)+"目前的辩论焦点与查明情况:"+str(self.debate_focus)
|
| 160 |
+
task="""
|
| 161 |
+
作为审判长,你要查明案件的事实。你的任务如下:
|
| 162 |
+
(1)你要根据输入的信息,以及当前的辩论焦点和查明情况,进一步调整/更新辩论焦点。
|
| 163 |
+
"""+self.strategy_prompt+"""
|
| 164 |
+
(2)除了调整/更新辩论焦点外,你还要根据输入的信息、更新后的辩论焦点、先前的查明情况,总结现在的每一个焦点的**查明情况。**
|
| 165 |
+
"""
|
| 166 |
+
QA_pair=self.plan(info+task)
|
| 167 |
+
self.init_QA_pair.update(QA_pair)
|
| 168 |
+
res=self.speak(info+"参考资料:"+str(self.init_QA_pair),
|
| 169 |
+
"""
|
| 170 |
+
作为审判长,你要查明案件的事实。你的任务如下:
|
| 171 |
+
(1)你要根据输入的信息,以及当前的辩论焦点和查明情况,进一步调整/更新辩论焦点。
|
| 172 |
+
"""+self.strategy_prompt+
|
| 173 |
+
"""
|
| 174 |
+
(2)除了调整/更新辩论焦点外,你还要根据输入的信息、更新后的辩论焦点、先前的查明情况,总结现在的每一个焦点的**查明情况。**
|
| 175 |
+
**查明情况**是控辩双方对这个焦点的讨论情况的概述、讨论得是否充分、以及作为审判长你的看法。
|
| 176 |
+
**简要说明即可。**
|
| 177 |
+
|
| 178 |
+
最终请严格按照如下格式进行回复:
|
| 179 |
+
辩论焦点从1开始编号,查明情况紧随其后。
|
| 180 |
+
请严格按照以下格式回复:
|
| 181 |
+
1.法庭辩论焦点。查明情况。
|
| 182 |
+
2.法庭辩论焦点。查明情况。
|
| 183 |
+
3.法庭辩论焦点。查明情况。
|
| 184 |
+
|
| 185 |
+
例如:
|
| 186 |
+
1.被告人自首是否成立...公诉人认为不构成自首,通过xxx进行证明;辩护人认为构成自首,表示xxx。审判长认为被告人的自首成立,因为xxx
|
| 187 |
+
2.关于证据xx里提到的xxx是否和本案有关联,控辩双方各自认为xxx,审判长认为还需要进一步辩论。
|
| 188 |
+
"""
|
| 189 |
+
)
|
| 190 |
+
self.debate_focus=res
|
| 191 |
+
else:
|
| 192 |
+
info="起诉书:"+self.prosecution_statement+"被告人信息:"+self.defendant_information+"之前法庭人员发言记录总结:"+str(self.memory)+"最新的庭审记录:"+context.content+"目前的证据:"+str(self.evidence_pool)+"拟定的目标:"+self.goal+"目前的策略:"+str(self.strategy)
|
| 193 |
+
task="""
|
| 194 |
+
你要根据输入的信息,进一步调整/更新策略。
|
| 195 |
+
"""+self.strategy_prompt
|
| 196 |
+
QA_pair=self.plan(info+task)
|
| 197 |
+
self.init_QA_pair.update(QA_pair)
|
| 198 |
+
res=self.speak(info+"参考资料:"+str(self.init_QA_pair),
|
| 199 |
+
"""
|
| 200 |
+
你要根据输入的信息,进一步调整/更新策略。
|
| 201 |
+
"""+self.strategy_prompt+
|
| 202 |
+
"""
|
| 203 |
+
你的回复应当严格按照如下格式(分条表述):
|
| 204 |
+
攻击策略:1.xxx2.xxx3.xxx
|
| 205 |
+
防御策略:1.xxx2.xxx3.xxx
|
| 206 |
+
|
| 207 |
+
其中每一条的格式为:
|
| 208 |
+
xxx(策略内容),参考法条:xxx(法条名称及内容概括),参考案例:xxx(案例编号及内容概括)
|
| 209 |
+
|
| 210 |
+
法条和案例如果没有,或对于约定俗成的常见策略,则不必添加这些参考。
|
| 211 |
+
|
| 212 |
+
【注意】
|
| 213 |
+
1.注意,策略要实事求是符合实际!不能无中生有!
|
| 214 |
+
2.直��按照格式返回,不要说多余的话
|
| 215 |
+
3.每一个策略下,**要附带能支撑该策略的参考法条以及类案信息**,(从给你的参考资料中取,如果没有则不用带)
|
| 216 |
+
4.你要进一步调整策略,要和之前的策略进行合理地修改,而非照搬或者全盘否定。
|
| 217 |
+
"""
|
| 218 |
+
)
|
| 219 |
+
self.strategy=res
|
| 220 |
+
self.check_hallucination(context)
|
| 221 |
+
|
| 222 |
+
if self.role=="审判长" or simplify==False:
|
| 223 |
+
# tmp=""
|
| 224 |
+
# if self.memory!="":
|
| 225 |
+
# tmp+="之前的庭审总结"+str(self.memory)
|
| 226 |
+
tmp="最新的庭审记录:"+context.content
|
| 227 |
+
|
| 228 |
+
res=self.speak(tmp,
|
| 229 |
+
"""
|
| 230 |
+
你要根据最新的庭审记录,写出这一部分的庭审总结。
|
| 231 |
+
(1)庭审总结应当包括:按先后顺序发生的、【且可能影响最终定罪量刑的事情】的概括与总结。
|
| 232 |
+
程序性、重复性、与案件无关、法官控场打断等【没有实质性内容的话语】应当省略!
|
| 233 |
+
|
| 234 |
+
例如:
|
| 235 |
+
庭审准备阶段,审判长询问被告人xxx,被告人表示xx
|
| 236 |
+
法庭调查阶段,公诉人依次询问xxx,被告人回复xxx,辩护人询问xxx,被告人回复xxx
|
| 237 |
+
|
| 238 |
+
**注意:**
|
| 239 |
+
1.你只要返回这一阶段的总结即可。
|
| 240 |
+
2.可能涉及最后罪情的重要的数字、重要信息应当保留!例如赔偿金额,轻伤/重伤判定等。
|
| 241 |
+
3.双方有争议的也要总结并保留,不能丢弃。
|
| 242 |
+
"""
|
| 243 |
+
)
|
| 244 |
+
self.memory+="\n\n"+res
|
| 245 |
+
|
| 246 |
+
def yilvkezhi_retriever(self,prompt):
|
| 247 |
+
url = "http://web.megatechai.com:33615/test_case_app/wenshu_search/search_and_answer"
|
| 248 |
+
payload = json.dumps({
|
| 249 |
+
"query": prompt,
|
| 250 |
+
"need_answer": 1
|
| 251 |
+
})
|
| 252 |
+
headers = {
|
| 253 |
+
'Content-Type': 'application/json'
|
| 254 |
+
}
|
| 255 |
+
response = requests.request("POST", url, headers=headers, data=payload)
|
| 256 |
+
|
| 257 |
+
try:
|
| 258 |
+
response=json.loads(response.text)
|
| 259 |
+
except:
|
| 260 |
+
print("Error!",response.text)
|
| 261 |
+
return None
|
| 262 |
+
# print(response)
|
| 263 |
+
answer=response["data"]["answer"]
|
| 264 |
+
sim_cases=extract_bracket_content(answer)
|
| 265 |
+
anhao={}
|
| 266 |
+
for js in response["data"]["wenshu_results"]:
|
| 267 |
+
anhao[js['anhao']]=js
|
| 268 |
+
sim_cases_text="类似案件为:"
|
| 269 |
+
fl=0
|
| 270 |
+
for case in sim_cases:
|
| 271 |
+
if case=="" or case is None:
|
| 272 |
+
continue
|
| 273 |
+
if case in anhao:
|
| 274 |
+
if anhao[case]['caipanliyou']!="" and anhao[case]['caipanjieguo']!="":
|
| 275 |
+
if anhao[case]['caipanliyou'] is not None and anhao[case]['caipanjieguo'] is not None:
|
| 276 |
+
fl+=1
|
| 277 |
+
sim_cases_text+="\n"+"【"+case+"】:"+anhao[case]['caipanliyou']+anhao[case]['caipanjieguo']
|
| 278 |
+
if fl==1: #only 1 cases
|
| 279 |
+
break
|
| 280 |
+
if fl!=0:
|
| 281 |
+
answer+="\n\n#####\n"+sim_cases_text
|
| 282 |
+
return answer
|
| 283 |
+
|
| 284 |
+
def law_retriever(self,query_text):
|
| 285 |
+
if query_text in law_repository:
|
| 286 |
+
return law_repository[query_text]
|
| 287 |
+
return None
|
| 288 |
+
|
| 289 |
+
def _get_plan(self,context,hint=""):
|
| 290 |
+
prompt = """
|
| 291 |
+
现在,为了更好地完成任务,有两个工具供你使用。
|
| 292 |
+
1.法条检索库。包含中华人民共和国全部法条,包含刑法、民法典、民事诉讼法等。根据查询内容,可以返回法条内容。同时也包含部分法律法规和司法解释。
|
| 293 |
+
2.‘一律可知’。可以通过输入法律问题,法律事实,争议焦点,案例信息来获取相关案例以及专业的分析答案。
|
| 294 |
+
请根据你的任务目标和当前信息,决定是否要使用这两个工具。以及要询问什么内容。
|
| 295 |
+
一次可以查询多个问题,多个问题放到list里面。
|
| 296 |
+
|
| 297 |
+
【注意!!】
|
| 298 |
+
1.法条检索库输入的法律名称必须是全称,不能缩写。比如必须写明为“中华人民共和国刑法第一条”,“中华人民共和国刑法第一百二十条之二”,“最高人民法院关于审理未成年人刑事案件具体应用法律若干问题的解释第十五条” 等
|
| 299 |
+
2.不要写款、项!仅写明法条的全名。例如,不要写“中华人民共和国刑法第一百三十四条第一款”,只写“中华人民共和国刑法第一百三十四条”
|
| 300 |
+
|
| 301 |
+
你的回复要严格按照如下格式(不要说任何多余的话!无需进行其他回复!!):
|
| 302 |
+
|
| 303 |
+
{
|
| 304 |
+
"法条":{
|
| 305 |
+
"使用":0代表不适用,1代表使用。
|
| 306 |
+
"询问":[你要询问的问题1,你要询问的问题2]
|
| 307 |
+
},
|
| 308 |
+
"一律���知":{
|
| 309 |
+
"使用":0代表不适用,1代表使用。
|
| 310 |
+
"询问":[你要询问的问题1,你要询问的问题2]
|
| 311 |
+
}
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
例如:
|
| 315 |
+
{
|
| 316 |
+
"法条":{
|
| 317 |
+
"使用":1,
|
| 318 |
+
"询问":["中华人民共和国民法典第四百六十九条","中华人民共和国刑法第五条","最高人民法院关于审理未成年人刑事案件具体应用法律若干问题的解释第十五条"]
|
| 319 |
+
},
|
| 320 |
+
"一律可知":{
|
| 321 |
+
"使用":1
|
| 322 |
+
"询问":["客户信息是否属于公司的商业秘密?","单位可以利用末位淘汰制度与员工解除劳动合同吗?"]
|
| 323 |
+
}
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
"""+hint
|
| 327 |
+
response = self.speak(
|
| 328 |
+
context,
|
| 329 |
+
prompt,
|
| 330 |
+
check=False
|
| 331 |
+
)
|
| 332 |
+
print("Queries",self.role,response)
|
| 333 |
+
return response
|
| 334 |
+
|
| 335 |
+
def plan(self, context,hint=""):
|
| 336 |
+
now_plan=self._get_plan(context,hint=hint)
|
| 337 |
+
now_plan=json.loads(now_plan)
|
| 338 |
+
|
| 339 |
+
laws=""
|
| 340 |
+
QA_pair={}
|
| 341 |
+
if now_plan["一律可知"]["使用"]==1:
|
| 342 |
+
for query in now_plan["一律可知"]["询问"]:
|
| 343 |
+
answer=self.yilvkezhi_retriever(query)
|
| 344 |
+
if answer is not None:
|
| 345 |
+
QA_pair.update({query:answer})
|
| 346 |
+
now_plan["一律可知"].update({query:answer})
|
| 347 |
+
|
| 348 |
+
pattern = r"### 核心法条\n(.*?)\n\n###"
|
| 349 |
+
core_statutes = re.search(pattern, answer, re.S)
|
| 350 |
+
|
| 351 |
+
core_statutes_content = core_statutes.group(1) if core_statutes else ""
|
| 352 |
+
laws+=core_statutes_content
|
| 353 |
+
|
| 354 |
+
if now_plan["法条"]["使用"]==1:
|
| 355 |
+
prompt = """
|
| 356 |
+
提取所给出的文本中的所有法条、法规、司法解释名称及具体条目。
|
| 357 |
+
法条包含刑法、民法典、民事诉讼法等。同时也包含部分法律法规和司法解释。
|
| 358 |
+
|
| 359 |
+
【注意!!】
|
| 360 |
+
1.每一项法律法规司法解释必须是全称,不能缩写。比如必须写明为“中华人民共和国刑法第一条”,“中华人民共和国刑法第一百二十条之二”,“最高人民法院关于审理未成年人刑事案件具体应用法律若干问题的解释第十五条” 等
|
| 361 |
+
2.不要写款、项!仅写明法条的全名(即精确到第xx条)。例如,不要写“中华人民共和国刑法第一百三十四条第一款”,只写“中华人民共和国刑法第一百三十四条”
|
| 362 |
+
3.直接返回提取的结果,相邻两个条目之间用|分隔。
|
| 363 |
+
4.法条法规不要带书名号!直接衔接第x条。
|
| 364 |
+
|
| 365 |
+
【返回格式】
|
| 366 |
+
法条法规1|法条法规2|法条法规3|法条法规4
|
| 367 |
+
|
| 368 |
+
例如:
|
| 369 |
+
中华人民共和国刑法第一条|中华人民共和国刑法第一百二十条之二|最高人民法院关于审理未成年人刑事案件具体应用法律若干问题的解释第十五条
|
| 370 |
+
|
| 371 |
+
"""
|
| 372 |
+
response = self.speak(
|
| 373 |
+
laws+str(now_plan["法条"]["询问"]),
|
| 374 |
+
prompt,
|
| 375 |
+
check=False
|
| 376 |
+
)
|
| 377 |
+
response=response.split("|")
|
| 378 |
+
for query in response:
|
| 379 |
+
answer=self.law_retriever(query)
|
| 380 |
+
if answer is not None:
|
| 381 |
+
QA_pair.update({query:answer})
|
| 382 |
+
now_plan["法条"].update({query:answer})
|
| 383 |
+
|
| 384 |
+
self.current_plan=now_plan
|
| 385 |
+
self.current_answer=QA_pair
|
| 386 |
+
print("Generate QA_pair",self.role,QA_pair)
|
| 387 |
+
return QA_pair
|
| 388 |
+
|
| 389 |
+
# --- Do Phase --- #
|
| 390 |
+
|
| 391 |
+
    def execute(
        self, plan: Dict[str, Any], history_list: List[Dict[str, str]], prompt: str, simple: int
    ) -> str:
        """Produce this agent's next utterance for the current stage.

        *simple* selects the stage-specific behaviour:
        1 judge reads debate focuses; 2 final judgment (with retrieval);
        6 written-judgment generation; 5 context-free reply; 3 investigation
        questions (公诉人/辩护人); 4 judge interrupt option; 7 judge checks for
        a plea agreement; any other value falls through to a role-specific
        context plus a hallucination-checked reply.

        Side effects: caches the formatted transcript in ``self.history_context``
        (used later by ``speak``'s hallucination check) and, for simple==2,
        merges new retrieval results into ``self.init_QA_pair``.
        NOTE(review): the *plan* parameter is unused here — presumably kept for
        interface compatibility; confirm with callers.
        """

        history_context = self.prepare_history_context(history_list)
        self.history_context=history_context
        if simple==1: #审判长宣读辩论焦点
            return self.speak("当前庭审记录:"+history_context, prompt,check=True)
        elif simple==2: # 最后判决
            context="目前的辩论焦点和查明情况:"+str(self.debate_focus)+"法庭发言记录总结:"+str(self.memory)
            # Only the part of the prompt before "#####" seeds the retrieval query.
            QA_pair=self.plan(context+prompt.split("#####")[0],hint="提示,可以搜索类似案件判决情况,关注刑期长短,关注社会评价与社会影响,关注是否适用缓刑,是否需要赔偿、处罚罚金,以及具体数值等。**建议至少查询类似案件、实刑刑期长度、缓刑适用、罚金数额!**")
            self.init_QA_pair.update(QA_pair)
            print("参考资料:"+str(self.init_QA_pair), context+prompt)
            return self.speak("参考资料:"+str(QA_pair), context+prompt,check=True)
        elif simple==6: # 判决书生成
            context="庭审记录总结:"+str(self.memory)+"目前的辩论焦点和查明情况:"+str(self.debate_focus)
            return self.speak(context, prompt,check=True)
        elif simple==5: # 被告人回答是否之前有过其他法律处分/法官宣读庭审启动
            return self.speak("", prompt)


        # Default path: build a role-specific context for the reply.
        if self.role=="审判长":
            context="之前法庭人员发言记录总结:"+str(self.memory)+"目前的证据:"+str(self.evidence_pool)+"目前的辩论焦点和查明情况:"+str(self.debate_focus)+"最新的庭审记录:"+history_context
        elif self.role=="被告人":
            context="事实经过:"+self.fact+"目前的策略:"+str(self.strategy)+"最新的庭审记录:"+history_context
            prompt+="被告人请按实际情况诚实回答,无中生有可能会加重判罚。例如,如果没有赔偿,或没有取得谅解,就要如实回答没有。"
        else:
            # context="之前法庭人员发言记录总结:"+str(self.memory)+"目前的证据:"+str(self.evidence_pool)+"目前的策略:"+str(self.strategy)+"最新的庭审记录:"+history_context
            context="之前法庭人员发言记录总结:"+str(self.memory)+"目前的证据:"+str(self.evidence_pool)
            context+="目前的策略:"+str(self.strategy)+"最新的庭审记录:"+history_context

        if simple==3: #公诉人/辩护人 法庭调查的提问
            context+="拟询问的问题(仅供参考),注意不要和之前问题重复:"+str(self.questions)

        if simple==4: #审判长打断选项
            return self.speak(context, prompt)

        if simple==7: #审判长判断有没有认罪认罚具结书
            return self.speak(context, prompt)

        # QA_pair=self.plan(context)
        # return self.speak(context+"参考资料:"+str(QA_pair), prompt)
        return self.speak(context, prompt,check=True)
| 434 |
+
|
| 435 |
+
def pure_final_judge(self,context,prompt):
|
| 436 |
+
QA_pair=self.plan(context+prompt,hint="提示,可以搜索类似案件判决情况,关注刑期长短,关注社会评价与社会影响,关注是否适用缓刑,是否需要赔偿、处罚罚金,以及具体数值等。**建议至少查询类似案件、实刑刑期长度、缓刑适用、罚金数额!**")
|
| 437 |
+
return self.speak("案件事实"+context+"参考资料:"+str(QA_pair), prompt)
|
| 438 |
+
# return self.speak(context, prompt)
|
| 439 |
+
|
| 440 |
+
    def speak(self, context: str, prompt: str, max_tokens: int = 4096, check: bool = False) -> str:
        """Generate one LLM reply for *context* + *prompt*.

        When ``check`` is False the reply is returned as-is. When True, the
        reply is screened with ``check_speak_hallucination`` (against
        ``self.history_context``, which ``execute`` must have set); up to two
        attempts are made, the retry carrying an extra warning hint. If both
        attempts are flagged, the last reply is returned prefixed with
        "【HALLUCINATION!】" so callers/logs can see it was not cleared.
        """
        instruction = f"{self.instruction}\n\n"
        full_prompt = f"{context}\n\n{prompt}"

        hint=""

        if check==False:
            return self.llm.generate(instruction=instruction, prompt=full_prompt+hint,max_tokens=max_tokens)

        for i in range(2):
            response=self.llm.generate(instruction=instruction, prompt=full_prompt+hint,max_tokens=max_tokens)
            check_res=self.check_speak_hallucination(self.history_context,response)
            # Only the first character is inspected ('无' = clean).
            # NOTE(review): an empty checker reply would raise IndexError here.
            if check_res[0]=='无':
                return response
            hint="请仔细查看证据信息,务必遵循证据事实,确保不要无中生有!你已经出现了一次幻觉!"
            print("HALLUCINATION!")
        return "【HALLUCINATION!】"+response
| 457 |
+
# --- Reflect Phase --- #
|
| 458 |
+
|
| 459 |
+
def prepare_history_context(self, history_list: List[Dict[str, str]]) -> str:
|
| 460 |
+
formatted_history = ["当前庭审记录:"]
|
| 461 |
+
for entry in history_list:
|
| 462 |
+
role = entry["role"]
|
| 463 |
+
content = entry["content"].replace("\n", "\n ")
|
| 464 |
+
formatted_entry = f"\\role{{{role}}}\n {content}"
|
| 465 |
+
formatted_history.append(formatted_entry)
|
| 466 |
+
return "\n\n".join(formatted_history)
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
class Agent_litigants(Agent):
    """Courtroom party agent: 公诉人 (prosecutor), 辩护人 (defender) or 被告人 (defendant).

    Extends Agent with an adversarial goal, attack/defence strategies and a
    drafted question list for the court-investigation phase.

    FIX: the defender's strategy prompt contained the mojibake "被��人"
    (restored to "被告人"); dead commented-out code removed.
    """

    def __init__(
        self,
        id: int,
        name: str,
        role: str,
        description: str,
        llm: Any,
        fact=None
    ):
        super().__init__(id, name, role, description, llm)
        # Free-text strategy; once prepared it holds "攻击策略:...\n防御策略:...".
        self.strategy = ""
        # Questions drafted for the investigation phase (公诉人/辩护人 only).
        self.questions = ""
        # Ground-truth fact narrative; supplied only for the 被告人 role.
        self.fact = fact

    def call_thought(self, save_only=False):
        """Export the agent's internal state (strategy, memory, plan, …) for logging."""
        res = {}
        res.update({
            "策略": self.strategy,
            "记忆": self.memory,
        })
        if self.role != "被告人":
            res.update({
                "规划": self.current_plan,
            })
        if save_only == True:
            res.update({
                "目标": self.goal
            })
            # NOTE(review): nesting reconstructed — the question list is only
            # exported on full saves; confirm against the original layout.
            if self.role != "被告人":
                res.update({
                    "询问": self.questions
                })
        return res

    def preparation(self, all_term, prosecution_statement, defendant_information, evidence):
        """Pre-trial preparation for 公诉人 / 辩护人.

        Builds the role-specific goal + attack/defence strategy via retrieval
        (``plan``) and the LLM, then drafts the investigation-phase questions.
        NOTE(review): only the 公诉人/辩护人 branches assign ``res``; calling
        this with any other role would fail at the split below — confirm
        callers never do that.
        """
        term_mention = f"""
被告人被指控的罪名有:{all_term}
你的策略应当针对该罪名的定罪、量刑进行制定。一定要有针对性。
"""

        self.defendant_information = defendant_information
        self.prosecution_statement = prosecution_statement
        self.evidence_pool = evidence

        all_input = term_mention + self.crime3aspect

        if self.role == "公诉人":
            self.strategy_prompt = """
【攻击策略】公诉人需要制定自己的攻击策略,包含被告人犯罪的证据链(通过对证据、法律条文的精准解读说明为什么被告人犯罪),质疑辩护人的证据的关联性与证明效力(例如辩护人出示自首证据,可以质疑是否真的自首),以及论述为什么足以判处起诉状中的刑期(比如社会危害度,可改造程度等)等。
【防御策略】公诉人需要制定自己的防御策略,包含如何维护自身证据的关联性和证明效力,以应对辩护人和被告人潜在的对证据的质疑。

你的策略应当参考被告人信息、被告人的诉求!
此外要注意,**公诉人的目的是协助法官查明真相,对被告人进行公正的判罚**,所以对于**你认可的被告人能够从轻处罚的情节**,也可以加入到策略之中,而非机械地反对被告人/辩护人的一切观点与诉求。

你的策略可以晓之以理动之以情,可以换位思考,站在被告人的角度,事实经过的角度去制定策略。
"""

            info = "起诉状:" + prosecution_statement + "被告人信息:" + defendant_information + "证据:" + evidence
            task = """
在庭审开始前,你要根据起诉状、被告人信息、证据条目,制定自己本次出庭的目标、攻击策略、防御策略。
""" + all_input + """
**再次注意,只有当三个角度都成立,且具有直接因果关系时,罪名才真正成立**
**所以你的策略应当围绕论证犯罪的定义成立,且因果成立的角度。**
越具体越好!
""" + """
【目标】公诉人的目标通常为保证准确、及时地查明犯罪事实,正确应用法律,惩罚犯罪分子,保障无罪的人不受刑事追究。具体地,你的目标还应当包含期望法庭给被告人处以的罪名、刑期和罚金。
""" + self.strategy_prompt
            QA_pair = self.plan(info + task)
            res = self.speak(info + "参考资料:" + str(QA_pair),
            task +
            """
你的回复应当严格按照如下格式:
[一句话总结目标]|[攻击策略,直接分条回复。1.xxx2.xxx3.xxx]|[防御策略,分条回复。1.xxx2.xxx3.xxx]
回复时不要包含[]
例如:
请求法院判处被告人xxx|1.证据xxx证明2.被告人的自首系xxx3.社会危害性xxx|1.证据xxx是由xxx,具有法律效力2.xxx


其中每一条的格式为:
xxx(策略内容),参考法条:xxx(法条名称及内容概括),参考案例:xxx(案例编号及内容概括)

法条和案例如果没有,或对于约定俗成的常见策略,则不必添加这些参考。

【注意】
1.注意,策略要实事求是符合实际!不能无中生有!
2.直接按照格式返回,不要说多余的话
3.每一个策略下,**要附带能支撑该策略的参考法条以及类案信息**,(从给你的参考资料中取,如果没有则不用带)
4.你要进一步调整策略,要和之前的策略进行合理地修改,而非照搬或者全盘否定。
"""
            )

        elif self.role == "辩护人":
            info = "起诉状:" + prosecution_statement + "被告人信息:" + defendant_information + "证据:" + evidence
            self.strategy_prompt = """
【攻击策略】辩护人需要制定自己的攻击策略,质疑公诉人的证据(例如说明公诉人的证据与本案件的不具备关联性,或证明效力不够充分),指出其证据不足或推理不合理。
【防御策略】辩护人需要制定自己的防御策略,以减轻被告人的罪责。包含寻找有利证据、构建合理故事、法律条文的精准解读、强调案件特殊情节、强调积极态度与悔悟等为被告人进行开脱,同时,也可以强调被告人的学历认知受限、家庭困难等客观因素,争取减轻判罚。此外,还要维护自身证据与案件事实的关联性,以应对公诉人潜在的对证据的质疑。

你的策略整体上要围绕定罪、量刑、缓刑、罚金展开,应当参考被告人信息、被告人的诉求!
例如被告人家庭经济条件不好,则应当尝试减少罚金,争取缓刑等;被告人提出希望缓刑,则应当争取缓刑。

你的策略可以晓之以理动之以情,可以换位思考,站在被告人的角度,事实经过的角度去制定策略。
"""
            task = """
在庭审开始前,你要根据起诉状、被告人信息、证据条目,制定自己本次出庭的目标、攻击策略、防御策略。
""" + all_input + """
**再次注意,只有当三个角度都成立,且具有直接因果关系时,罪名才真正成立**
**所以你的策略应当围绕论证犯罪的定义不成立,或者因果不完全成立的角度。**
越具体越好!
""" + """
【目标】辩护人的责任是根据事实和法律,提出犯罪嫌疑人、被告人无罪、罪轻或者减轻、免除其刑事责任的材料和意见,维护犯罪嫌疑人、被告人的诉讼权利和其他合法权益。具体地,你的目标应还当包含期望法庭给被告人处以的罪名、刑期和罚金。(要低于起诉状中的罪情)
""" + self.strategy_prompt
            QA_pair = self.plan(info + task)
            res = self.speak(info + "参考资料:" + str(QA_pair),
            task +
            """
你的回复应当严格按照如下格式:
[一句话总结目标]|[攻击策略,直接分条回复。1.xxx2.xxx3.xxx]|[防御策略,分条回复。1.xxx2.xxx3.xxx]
回复时不要包含[]

例如:
请求法院判处被告人xxx|1.证据xxx不足以xxx2.xxx处推理不合理|1.证据xxx是由xxx,具有法律效力2.被告人态度良好xxx


其中每一条的格式为:
xxx(策略内容),参考法条:xxx(法条名称及内容概括),参考案例:xxx(案例编号及内容概括)

法条和案例如果没有,或对于约定俗成的常见策略,则不必添加这些参考。

【注意】
1.注意,策略要实事求是符合实际!不能无中生有!
2.直接按照格式返回,不要说多余的话
3.每一个策略下,**要附带能支撑该策略的参考法条以及类案信息**,(从给你的参考资料中取,如果没有则不用带)
4.你要进一步调整策略,要和之前的策略进行合理地修改,而非照搬或者全盘否定。
"""
            )
        # Reply format: goal | attack strategy | defence strategy.
        res = res.split("|")
        self.goal = res[0]
        self.strategy = f"攻击策略:{res[1]}" + "\n" + f"防御策略:{res[2]}"
        self.init_QA_pair = QA_pair

        if self.role == "公诉人":
            self.questions = self.speak("起诉状:" + prosecution_statement + "被告人信息:" + defendant_information + "证据:" + evidence + "最终目标:" + self.goal + "策略: " + self.strategy + "参考资料:" + str(QA_pair),
            """
在法庭调查阶段,作为公诉人,你要围绕定罪量刑相关问题对被告人进行发问。
比如针对案件事实,犯罪动机,犯罪情节等。
现在请你根据起诉状、被告人信息、证据条目、本次出庭的目标、攻击策略、防御策略,制定法庭调查阶段拟询问被告人的问题。

问题应当涵盖多个角度,而总数不要太多!
""" +
            all_input +
            """
请分条回复。**按照重要性先后顺序!**
例如
1.xxxx
2.xxxx
3.xxxx
""")

        elif self.role == "辩护人":
            self.questions = self.speak("起诉状:" + prosecution_statement + "被告人信息:" + defendant_information + "证据:" + evidence + "最终目标:" + self.goal + "策略:" + self.strategy + "参考资料:" + str(QA_pair),
            """
在法庭调查阶段,作为辩护人,你要围绕定罪量刑相关问题对被告人进行发问。
比如针对案件事实,犯罪动机,犯罪情节等。
现在请你根据起诉状、被告人信息、证据条目、本次出庭的目标、攻击策略、防御策略,制定法庭调查阶段拟询问被告人的问题。
问题应当涵盖多个角度,而总数不要太多!
""" +
            all_input +
            """
请分条回复。**按照重要性先后顺序!**
例如
1.xxxx
2.xxxx
3.xxxx
""")

    def for_defendant(self, all_term, prosecution_statement, defendant_information, evidence):
        """As the 辩护人, draft the defendant's own goal and defence strategy.

        Returns a 3-tuple: (the defendant strategy prompt, the goal line,
        "防御策略:<items>") — consumed by ``set_strategy`` on the defendant agent.
        """
        term_mention = f"""
被告人被指控的罪名有:{all_term}
你的策略应当针对该罪名的定罪、量刑进行制定。一定要有针对性。
"""
        defendant_strategy_prompt = """
【防御策略】被告人需要制定自己的防御策略,以减轻自己的罪责。如果目标是带来减刑,则应当表示认罪认罚悔罪态度;若目标是否认指控自证无罪,则应当坚定地声称自身的清白。
"""
        res = self.speak("起诉状:" + prosecution_statement + "被告人信息:" + defendant_information + "证据:" + evidence + "参考资料:" + str(self.init_QA_pair),
        """
你是辩护人,你的当事人(被告人)被指控犯罪。
在庭审开始前,你要根据起诉状、被告人信息、证据条目,帮助被告人制定其在本次出庭的目标、防御策略。
如果被告人提出了诉求,那么诉求中的内容应当出现在被告人的目标与策略中。

"""
        + term_mention +
        """
【目标】被告人的目标是在已知信息的范围内,维护自己的利益。良好的认罪认罚态度有可能带来减刑,直接否认指控自证无罪可能会免除罪责,也有可能会罪加一等。
"""
        + defendant_strategy_prompt +
        """
此前,作为辩护人,你自己制定的目标和策略是:
""" +
        "辩护目标:" + self.goal + "辩护策略" + str(self.strategy) +
        """
**注意,你代表被告人的利益,所以给被告人制定的策略要和你自己制定的辩护策略相一致。**
**注意,策略必须要符合实际,如果证据中没有提及赔偿、谅解等情节,则不应该有这部分策略!**
**反之,如果被告人做出了赔偿、征得了谅解,则应当有这部分的策略**
**如果被告人提出了诉求,那么诉求中的内容应当出现在被告人的目标与策略中。**

你的回复应当严格按照如下格式:
[一句话总结目标]|[防御策略,分条回复。1.xxx2.xxx3.xxx]
回复时不要包含[]
例如:
希望以悔罪的态度争取xxxx|1.积极悔罪xxx2.说明自身自首xxx


"""
        )
        res = res.split("|")
        return defendant_strategy_prompt, res[0], f"防御策略:{res[1]}"

    def set_strategy(self, strategy_prompt, goal, strategy, all_term, prosecution_statement, defendant_information, evidence):
        """Install externally prepared goal/strategy and case materials (used for the 被告人)."""
        self.all_term = all_term
        self.prosecution_statement = prosecution_statement
        self.defendant_information = defendant_information
        self.evidence_pool = evidence
        self.strategy_prompt = strategy_prompt
        self.goal = goal
        self.strategy = strategy
| 714 |
+
|
| 715 |
+
class Agent_Judge(Agent):
    """Presiding-judge agent (审判长): tracks debate focuses instead of strategies.

    FIX: the strategy prompt contained the mojibake "从轻或从重处���的情节"
    (restored to "从轻或从重处罚的情节").
    """

    def __init__(
        self,
        id: int,
        name: str,
        role: str,
        description: str,
        llm: Any,
    ):
        super().__init__(id, name, role, description, llm)
        # Numbered list of debate focuses (and, later, findings per focus).
        self.debate_focus = ""

    def call_thought(self, save_only=False):
        """Export the judge's internal state for logging/persistence."""
        res = {}
        res.update({"辩论焦点与查明情况": self.debate_focus,
                    "记忆": self.memory,
                    "规划": self.current_plan})
        return res

    def preparation(self, all_term, prosecution_statement, defendant_information, evidence):
        """Pre-trial preparation: derive the initial debate focuses and the questions
        the judge intends to ask the defendant during the investigation phase."""
        self.evidence_pool = evidence
        self.prosecution_statement = prosecution_statement
        self.defendant_information = defendant_information

        term_mention = f"""
被告人被指控的罪名有:{all_term}
你所要查明的事实也应当针对该罪名的定罪、量刑进行制定。一定要有针对性。
"""
        self.strategy_prompt = """
作为审判长,你需要查明案件的事实,所以辩论焦点还要考虑涵盖以下几个角度的内容:
辩论焦点包括:
定罪与否、责任认定情况(比如年龄是不是符合,主责或次责等)、量刑情况(是否有一些从轻或从重处罚的情节,是否自首,是否取得被害人原谅)
还应当包括应当查明但双方还没有讨论清楚的内容,你认为对定罪量刑有价值的问题。
辩论焦点应当具体且灵活。
"""
        info = "起诉状:" + prosecution_statement + "被告人信息:" + defendant_information + "证据:" + evidence
        task = """
在庭审开始前,你要根据起诉状、被告人信息、证据条目,生成本案可能的辩论焦点。
""" + term_mention + self.crime3aspect + """
**只有当三个角度都成立,且具有直接因果关系时,罪名才真正成立**
所以你的辩论焦点也应该围绕这三个角度。越具体越好!
""" + self.strategy_prompt

        # Retrieve statutes/cases relevant to the prospective focuses.
        self.init_QA_pair = self.plan(info + task)

        res = self.speak(info + "参考资料:" + str(self.init_QA_pair),
        task +
        """
辩论焦点从1开始编号

例如:1.是否构成故意伤害罪。需要讨论直到被告表示认罪受罚,或证明被告罪行不满足故意伤害罪条件。若此前被告已经明确表示认罪,则无需进行辩论。2.是否构成自首。3.是否征得被害人谅解。

请严格按照以下格式回复:
1.法庭辩论焦点2.法庭辩论焦点3.法庭辩论焦点
"""
        )

        self.debate_focus = res

        self.questions = self.speak("起诉状:" + prosecution_statement + "被告人信息:" + defendant_information + "证据:" + evidence + "辩论焦点:" + self.debate_focus + "参考资料:" + str(self.init_QA_pair),
        """
在法庭调查阶段,作为审判长,你要查清案件事实,围绕定罪量刑相关问题对被告人进行发问。
比如针对案件事实,犯罪动机,犯罪情节等。
现在请你根据起诉状、被告人信息、证据条目、辩论焦点制定法庭调查阶段拟询问被告人的问题。

问题应当涵盖多个角度,而总数不要太多!

请分条回复。**按照重要性先后顺序!**
例如
1.xxxx
2.xxxx
3.xxxx
""")
api_pool/.env
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
BASE_URL=https://svip.xty.app/v1
# SECURITY: a live-looking API key was committed to version control here.
# Rotate the key immediately and load it from an untracked secret store
# (environment variable / secrets manager); never commit credentials.
API_KEY=<redacted-rotate-and-load-from-environment>
api_pool/__pycache__/api_pool.cpython-310.pyc
ADDED
|
Binary file (6.89 kB). View file
|
|
|
api_pool/__pycache__/api_pool.cpython-38.pyc
ADDED
|
Binary file (6.91 kB). View file
|
|
|