Delete claude_converter.py
Browse files- claude_converter.py +0 -386
claude_converter.py
DELETED
|
@@ -1,386 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import uuid
|
| 3 |
-
from datetime import datetime
|
| 4 |
-
from typing import List, Dict, Any, Optional, Union
|
| 5 |
-
|
| 6 |
-
try:
|
| 7 |
-
from .claude_types import ClaudeRequest, ClaudeMessage, ClaudeTool
|
| 8 |
-
except ImportError:
|
| 9 |
-
# Fallback for dynamic loading where relative import might fail
|
| 10 |
-
# We assume claude_types is available in sys.modules or we can import it directly if in same dir
|
| 11 |
-
import sys
|
| 12 |
-
if "v2.claude_types" in sys.modules:
|
| 13 |
-
from v2.claude_types import ClaudeRequest, ClaudeMessage, ClaudeTool
|
| 14 |
-
else:
|
| 15 |
-
# Try absolute import assuming v2 is in path or current dir
|
| 16 |
-
try:
|
| 17 |
-
from claude_types import ClaudeRequest, ClaudeMessage, ClaudeTool
|
| 18 |
-
except ImportError:
|
| 19 |
-
# Last resort: if loaded via importlib in app.py, we might need to rely on app.py injecting it
|
| 20 |
-
# But app.py loads this module.
|
| 21 |
-
pass
|
| 22 |
-
|
| 23 |
-
def get_current_timestamp() -> str:
    """Return the current local time formatted for Amazon Q.

    Shape: "<Weekday>, <ISO-8601 local time with milliseconds>".
    """
    moment = datetime.now().astimezone()
    return "{}, {}".format(
        moment.strftime("%A"),
        moment.isoformat(timespec="milliseconds"),
    )
|
| 29 |
-
|
| 30 |
-
def map_model_name(claude_model: str) -> str:
    """Map a Claude model name to the Amazon Q model identifier.

    Either spelling of the 4.5 series ("4.5" or "4-5") maps to
    "claude-sonnet-4.5"; everything else falls back to "claude-sonnet-4".
    """
    normalized = claude_model.lower()
    # str.startswith accepts a tuple of prefixes — one call covers both spellings.
    if normalized.startswith(("claude-sonnet-4.5", "claude-sonnet-4-5")):
        return "claude-sonnet-4.5"
    return "claude-sonnet-4"
|
| 36 |
-
|
| 37 |
-
def extract_text_from_content(content: Union[str, List[Dict[str, Any]]]) -> str:
    """Collapse Claude message content into plain text.

    A plain string is returned as-is; a block list yields the "text" fields
    of its ``type == "text"`` blocks joined with newlines; anything else
    becomes the empty string.
    """
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        return "\n".join(
            block.get("text", "")
            for block in content
            if isinstance(block, dict) and block.get("type") == "text"
        )
    return ""
|
| 49 |
-
|
| 50 |
-
def process_tool_result_block(block: Dict[str, Any], tool_results: List[Dict[str, Any]]) -> None:
    """Normalize one Claude ``tool_result`` block and merge it into *tool_results*.

    Args:
        block: content block of type ``tool_result``.
        tool_results: accumulator list of Amazon Q tool results, mutated in place.
    """
    use_id = block.get("tool_use_id")
    raw = block.get("content", [])

    normalized: List[Dict[str, Any]] = []
    if isinstance(raw, str):
        normalized.append({"text": raw})
    elif isinstance(raw, list):
        for piece in raw:
            if isinstance(piece, str):
                normalized.append({"text": piece})
            elif isinstance(piece, dict):
                if piece.get("type") == "text":
                    normalized.append({"text": piece.get("text", "")})
                elif "text" in piece:
                    normalized.append({"text": piece["text"]})

    # An empty or whitespace-only result is reported as a user cancellation.
    if not any(entry.get("text", "").strip() for entry in normalized):
        normalized = [{"text": "Tool use was cancelled by the user"}]

    # If an earlier result exists for the same tool call, extend it instead of
    # appending a duplicate entry.
    for existing in tool_results:
        if existing["toolUseId"] == use_id:
            existing["content"].extend(normalized)
            return

    tool_results.append({
        "toolUseId": use_id,
        "content": normalized,
        "status": block.get("status", "success"),
    })
|
| 87 |
-
|
| 88 |
-
def extract_images_from_content(content: Union[str, List[Dict[str, Any]]]) -> Optional[List[Dict[str, Any]]]:
    """Pull base64 image blocks out of Claude content as Amazon Q image dicts.

    Returns None when *content* is not a block list or holds no base64 images.
    """
    if not isinstance(content, list):
        return None

    collected: List[Dict[str, Any]] = []
    for block in content:
        if not (isinstance(block, dict) and block.get("type") == "image"):
            continue
        src = block.get("source", {})
        if src.get("type") != "base64":
            continue
        media = src.get("media_type", "image/png")
        # "image/jpeg" -> "jpeg"; default to "png" when no subtype is present.
        fmt = media.split("/")[-1] if "/" in media else "png"
        collected.append({
            "format": fmt,
            "source": {"bytes": src.get("data", "")},
        })

    return collected or None
|
| 107 |
-
|
| 108 |
-
def convert_tool(tool: ClaudeTool) -> Dict[str, Any]:
    """Translate a Claude tool definition into the Amazon Q toolSpecification shape."""
    description = tool.description or ""
    # Amazon Q rejects descriptions over 10240 chars; truncate and point the
    # model at the TOOL DOCUMENTATION section that carries the full text.
    if len(description) > 10240:
        description = (
            description[:10100]
            + "\n\n...(Full description provided in TOOL DOCUMENTATION section)"
        )

    spec = {
        "name": tool.name,
        "description": description,
        "inputSchema": {"json": tool.input_schema},
    }
    return {"toolSpecification": spec}
|
| 121 |
-
|
| 122 |
-
def merge_user_messages(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Collapse a run of user messages into one Amazon Q user message.

    Text bodies are joined with blank lines; the context, origin, and modelId
    come from the first message that provides them; images are retained only
    from the last two image-carrying messages.
    """
    if not messages:
        return {}

    context = None
    origin = None
    model = None
    texts: List[str] = []
    image_groups: List[List[Dict[str, Any]]] = []

    for message in messages:
        # First-seen values win for the merged metadata.
        if context is None:
            context = message.get("userInputMessageContext", {})
        if origin is None:
            origin = message.get("origin", "CLI")
        if model is None:
            model = message.get("modelId")

        body = message.get("content", "")
        if body:
            texts.append(body)

        group = message.get("images")
        if group:
            image_groups.append(group)

    merged: Dict[str, Any] = {
        "content": "\n\n".join(texts),
        "userInputMessageContext": context or {},
        "origin": origin or "CLI",
        "modelId": model,
    }

    # Flatten images from at most the last two messages that carried any.
    retained = [img for group in image_groups[-2:] for img in group]
    if retained:
        merged["images"] = retained

    return merged
|
| 166 |
-
|
| 167 |
-
def process_history(messages: List[ClaudeMessage]) -> List[Dict[str, Any]]:
    """Convert prior Claude turns into Amazon Q history.

    Pass 1 translates each message individually (text, tool results, images,
    tool uses); pass 2 merges runs of consecutive user messages so the final
    history strictly alternates user/assistant entries.
    """
    converted: List[Dict[str, Any]] = []
    # Tool-use ids already emitted; duplicates across messages are dropped.
    used_tool_ids = set()

    # --- Pass 1: translate every message on its own ---
    for message in messages:
        if message.role == "user":
            body = message.content
            pictures = extract_images_from_content(body)
            results = None

            if isinstance(body, list):
                fragments = []
                for piece in body:
                    if not isinstance(piece, dict):
                        continue
                    kind = piece.get("type")
                    if kind == "text":
                        fragments.append(piece.get("text", ""))
                    elif kind == "tool_result":
                        if results is None:
                            results = []
                        process_tool_result_block(piece, results)
                text = "\n".join(fragments)
            else:
                text = extract_text_from_content(body)

            context: Dict[str, Any] = {
                "envState": {
                    "operatingSystem": "macos",
                    "currentWorkingDirectory": "/"
                }
            }
            if results:
                context["toolResults"] = results

            entry: Dict[str, Any] = {
                "content": text,
                "userInputMessageContext": context,
                "origin": "CLI"
            }
            if pictures:
                entry["images"] = pictures

            converted.append({"userInputMessage": entry})

        elif message.role == "assistant":
            body = message.content
            response: Dict[str, Any] = {
                "messageId": str(uuid.uuid4()),
                "content": extract_text_from_content(body),
            }

            if isinstance(body, list):
                calls = []
                for piece in body:
                    if isinstance(piece, dict) and piece.get("type") == "tool_use":
                        call_id = piece.get("id")
                        if call_id and call_id not in used_tool_ids:
                            used_tool_ids.add(call_id)
                            calls.append({
                                "toolUseId": call_id,
                                "name": piece.get("name"),
                                "input": piece.get("input", {}),
                            })
                if calls:
                    response["toolUses"] = calls

            converted.append({"assistantResponseMessage": response})

    # --- Pass 2: collapse consecutive user messages ---
    history: List[Dict[str, Any]] = []
    buffered: List[Dict[str, Any]] = []
    for item in converted:
        if "userInputMessage" in item:
            buffered.append(item["userInputMessage"])
            continue
        if "assistantResponseMessage" in item:
            if buffered:
                history.append({"userInputMessage": merge_user_messages(buffered)})
                buffered = []
            history.append(item)

    # Flush any trailing user run with no assistant reply after it.
    if buffered:
        history.append({"userInputMessage": merge_user_messages(buffered)})

    return history
|
| 261 |
-
|
| 262 |
-
def convert_claude_to_amazonq_request(req: ClaudeRequest, conversation_id: Optional[str] = None) -> Dict[str, Any]:
    """Convert a ClaudeRequest into an Amazon Q ``conversationState`` body.

    Args:
        req: incoming Claude Messages API request (model, messages, tools, system).
        conversation_id: existing conversation id; a fresh UUID4 when None.

    Returns:
        Dict with a single ``conversationState`` key holding the history, the
        current user message, and the chat trigger type.
    """
    if conversation_id is None:
        conversation_id = str(uuid.uuid4())

    # 1. Tools — convert each tool; remember tools whose description exceeds
    # the 10240-char limit so their full text can be inlined below.
    aq_tools = []
    long_desc_tools = []
    if req.tools:
        for t in req.tools:
            if t.description and len(t.description) > 10240:
                long_desc_tools.append({"name": t.name, "full_description": t.description})
            aq_tools.append(convert_tool(t))

    # 2. Current Message (last user message)
    last_msg = req.messages[-1] if req.messages else None
    prompt_content = ""
    tool_results = None
    has_tool_result = False
    images = None

    if last_msg and last_msg.role == "user":
        content = last_msg.content
        images = extract_images_from_content(content)

        if isinstance(content, list):
            text_parts = []
            for block in content:
                if isinstance(block, dict):
                    btype = block.get("type")
                    if btype == "text":
                        text_parts.append(block.get("text", ""))
                    elif btype == "tool_result":
                        has_tool_result = True
                        if tool_results is None:
                            tool_results = []
                        process_tool_result_block(block, tool_results)
            prompt_content = "\n".join(text_parts)
        else:
            prompt_content = extract_text_from_content(content)

    # 3. Context — fixed environment state plus any tools / tool results.
    # NOTE(review): OS and cwd are hard-coded here and in process_history —
    # presumably placeholders; confirm whether real values should be passed in.
    user_ctx = {
        "envState": {
            "operatingSystem": "macos",
            "currentWorkingDirectory": "/"
        }
    }
    if aq_tools:
        user_ctx["tools"] = aq_tools
    if tool_results:
        user_ctx["toolResults"] = tool_results

    # 4. Format Content — a pure tool-result turn (no user text) sends an
    # empty content string; otherwise wrap the prompt with context markers.
    formatted_content = ""
    if has_tool_result and not prompt_content:
        formatted_content = ""
    else:
        formatted_content = (
            "--- CONTEXT ENTRY BEGIN ---\n"
            f"Current time: {get_current_timestamp()}\n"
            "--- CONTEXT ENTRY END ---\n\n"
            "--- USER MESSAGE BEGIN ---\n"
            f"{prompt_content}\n"
            "--- USER MESSAGE END ---"
        )

    # Prepend full documentation for tools whose descriptions were truncated.
    if long_desc_tools:
        docs = []
        for info in long_desc_tools:
            docs.append(f"Tool: {info['name']}\nFull Description:\n{info['full_description']}\n")
        formatted_content = (
            "--- TOOL DOCUMENTATION BEGIN ---\n"
            f"{''.join(docs)}"
            "--- TOOL DOCUMENTATION END ---\n\n"
            f"{formatted_content}"
        )

    # Prepend the system prompt only when there is already content to send.
    # Note the ordering dependency: the tool-documentation wrap above can make
    # formatted_content non-empty, which then enables this branch.
    if req.system and formatted_content:
        sys_text = ""
        if isinstance(req.system, str):
            sys_text = req.system
        elif isinstance(req.system, list):
            parts = []
            for b in req.system:
                if isinstance(b, dict) and b.get("type") == "text":
                    parts.append(b.get("text", ""))
            sys_text = "\n".join(parts)

        if sys_text:
            formatted_content = (
                "--- SYSTEM PROMPT BEGIN ---\n"
                f"{sys_text}\n"
                "--- SYSTEM PROMPT END ---\n\n"
                f"{formatted_content}"
            )

    # 5. Model
    model_id = map_model_name(req.model)

    # 6. User Input Message
    user_input_msg = {
        "content": formatted_content,
        "userInputMessageContext": user_ctx,
        "origin": "CLI",
        "modelId": model_id
    }
    if images:
        user_input_msg["images"] = images

    # 7. History — everything before the last message, merged/alternated.
    history_msgs = req.messages[:-1] if len(req.messages) > 1 else []
    aq_history = process_history(history_msgs)

    # 8. Final Body
    return {
        "conversationState": {
            "conversationId": conversation_id,
            "history": aq_history,
            "currentMessage": {
                "userInputMessage": user_input_msg
            },
            "chatTriggerType": "MANUAL"
        }
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|