Update app.py
app.py
CHANGED
@@ -1,237 +1,179 @@
 import streamlit as st
-import
-import
-#
 st.markdown("""
-**
-🔗 [GitHub Profile](https://github.com/volkansah) |
-📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
-💬 [Soon](https://aicodecraft.io)
 """)

-# Session State Management
 if "messages" not in st.session_state:
     st.session_state.messages = []
-
-    if file_type in ["txt"] + [ext.strip('.') for ext in text_extensions if ext not in ('.csv', '.xlsx')]:
-        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors='ignore')}
-
-    if file_type in ["csv", "xlsx"]:
-        try:
-            # Try to read the file as CSV or Excel
-            if file_type == "csv":
-                df = pd.read_csv(uploaded_file)
-            else:  # xlsx
-                df = pd.read_excel(uploaded_file)
-            return {"type": "text", "content": df.to_string()}
-        except Exception as e:
-            return {"type": "error", "content": f"Failed to read tabular data: {e}"}
-
-    if file_type == "pdf":
-        try:
-            reader = PyPDF2.PdfReader(uploaded_file)
-            return {"type": "text", "content": "".join(page.extract_text() for page in reader.pages if page.extract_text())}
-        except Exception as e:
-            return {"type": "error", "content": f"Failed to read PDF: {e}"}
-
-    if file_type == "zip":
-        try:
-            with zipfile.ZipFile(uploaded_file) as z:
-                newline = "\n"
-                content = f"ZIP Contents (Processing text files only):{newline}"
-
-                for file_info in z.infolist():
-                    if not file_info.is_dir():
-                        try:
-                            # Check whether the file has a text extension
-                            if file_info.filename.lower().endswith(text_extensions):
-                                with z.open(file_info.filename) as file:
-                                    # Decode with errors='ignore' in case of problems
-                                    file_content = file.read().decode('utf-8', errors='ignore')
-                                    content += f"{newline}📄 {file_info.filename}:{newline}{file_content}{newline}"
-                            else:
-                                content += f"{newline}⚠️ Binary/unknown file ignored: {file_info.filename}{newline}"
-                        except Exception as e:
-                            content += f"{newline}❌ Error reading {file_info.filename}: {str(e)}{newline}"
-
-                return {"type": "text", "content": content}
-        except Exception as e:
-            return {"type": "error", "content": f"Failed to process ZIP: {e}"}
-
-    return {"type": "error", "content": "Unsupported file format"}

-# Sidebar
 with st.sidebar:
-        "
-        "
-        "
-        "
-        # --- Legacy models (text-only or older endpoints) ---
-        "gemini-2.0-flash",
-        "gemini-1.0-pro",  # Older stable endpoint
     ]
     temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
-    max_tokens = st.slider(
-
-    if uploaded_file and st.session_state.uploaded_content is None:
-        # Only process when a new file is uploaded and no content is in state
-        processed = process_file(uploaded_file)
-        st.session_state.uploaded_content = processed
-
-    # Show a preview if content is present
-    if st.session_state.uploaded_content:
-        processed = st.session_state.uploaded_content
-
-        st.subheader("Current File Attachment:")
-
-        if processed["type"] == "image":
-            st.image(processed["content"], caption="Attached Image", use_container_width=False, width=300)
-        elif processed["type"] == "text":
-            st.text_area("File Preview", processed["content"], height=150)
-        elif processed["type"] == "error":
-            st.error(f"Error processing file: {processed['content']}")
-
-        # NEW: clear button
-        if st.button("❌ Clear Uploaded File Attachment"):
-            st.session_state.uploaded_content = None
-            # Since st.file_uploader itself cannot simply be reset,
-            # inform the user that the state has been cleared.
-            st.info("Attachment cleared! Reload the page to reset the upload field completely.")

-# Show the chat history
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        st.markdown(message["content"])

-# Chat
     if not api_key:
-        st.warning("API Key
         st.stop()
-
-#
-        st.stop()
-
-        content.append({
-            "inline_data": {
-                "mime_type": "image/jpeg",
-                "data": encode_image(st.session_state.uploaded_content["content"])
-            }
-        })
-        elif st.session_state.uploaded_content["type"] == "text":
-            # Add the text content to the prompt
-            content[0]["text"] += f"\n\n[Attached File Content]\n{st.session_state.uploaded_content['content']}"
-
-        # Add the message to the history and display it
-        st.session_state.messages.append({"role": "user", "content": prompt})
-        with st.chat_message("user"):
-            st.markdown(prompt)
-
-        # Generate the response
-        response = model_instance.generate_content(
-            content,
-            generation_config=genai.types.GenerationConfig(
-                temperature=temperature,
-                max_output_tokens=max_tokens
-            )
-        )
-
-        # Check whether the response is valid
-        if not response.candidates:
-            st.error("API Error: no valid response received. Check the input or the model.")
-        else:
-            # Display the response and add it to the history
-            response_text = response.text
-            with st.chat_message("assistant"):
-                st.markdown(response_text)
-            st.session_state.messages.append({"role": "assistant", "content": response_text})
-
-    except Exception as e:
-        st.error(f"API Error: {str(e)}")
-        # Additional check for vision errors
-        if st.session_state.uploaded_content and st.session_state.uploaded_content["type"] == "image" and "vision" not in model.lower() and "pro" not in model.lower():
-            st.error("Detail: for images, a vision-capable model (e.g. 1.5 Pro) MUST be selected.")
-
-# Instructions in the sidebar
-with st.sidebar:
-    st.markdown("""
-    ---
-    ## 📝 Instructions:
-    1. Enter your Google AI API key
-    2. Select a model (use **Pro/Vision** models for image analysis)
-    3. Adjust parameters (Temperature/Tokens)
-    4. Upload a file (optional, supports **Image, Text, PDF, ZIP, CSV/XLSX**)
-    5. Type your message and press Enter
-
-    ### About
-    🔗 [GitHub Profile](https://github.com/volkansah) |
-    📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
-    💬 [Soon](https://aicodecraft.io)
-    """)
+import os
 import streamlit as st
+import tempfile
+import requests
+import json
+# Removed imports: PIL.Image, io, base64, pandas, zipfile, PyPDF2,
+# since the file manager is no longer needed.
+
+# ----------------------------------------------------
+# 🚨 CRITICAL FIXES FOR THE PERMISSION ERROR
+# Forces Streamlit to write its temporary/configuration
+# files into the writable /tmp area.
+# ----------------------------------------------------
+
+# 1. Create a temporary, writable path
+TEMP_STREAMLIT_HOME = os.path.join(tempfile.gettempdir(), "st_config_workaround")
+os.makedirs(TEMP_STREAMLIT_HOME, exist_ok=True)
+
+# 2. Set environment variables
+os.environ["STREAMLIT_HOME"] = TEMP_STREAMLIT_HOME
+os.environ["STREAMLIT_GATHER_USAGE_STATS"] = "false"
+
+# 3. Create a minimal config.toml to prevent further write attempts
+CONFIG_PATH = os.path.join(TEMP_STREAMLIT_HOME, "config.toml")
+CONFIG_CONTENT = """
+[browser]
+gatherUsageStats = false
+"""
+
+if not os.path.exists(CONFIG_PATH):
+    try:
+        with open(CONFIG_PATH, "w") as f:
+            f.write(CONFIG_CONTENT)
+    except Exception as e:
+        print(f"WARNING: could not write config.toml: {e}")
+
+# ----------------------------------------------------
+# End of the workarounds
+# ----------------------------------------------------
+
+# --- Configuration ---
+# Note: the 'wide' layout setting is kept
+st.set_page_config(page_title="OpenRouter Minimal Chat UI", layout="wide")
+OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
+
+# --- Page Title ---
+st.title("💸 OpenRouter Minimal Chat Interface")
 st.markdown("""
+**Welcome to the OpenRouter Minimal Chat Interface!**
+Chat with **free-tier** models via the OpenRouter API. Text chat only.
 """)

+# --- Session State Management ---
 if "messages" not in st.session_state:
     st.session_state.messages = []
+# st.session_state.uploaded_content has been removed.
+
+# --- Context-length fetch (kept, since the token slider depends on it) ---
+def fetch_model_contexts(api_key):
+    """Loads all models plus their context_length."""
+    if not api_key:
+        return {}
+    headers = {"Authorization": f"Bearer {api_key}"}
+    try:
+        res = requests.get(f"{OPENROUTER_API_BASE}/models", headers=headers, timeout=10)
+        contexts = {}
+        if res.status_code == 200:
+            for m in res.json().get("data", []):
+                mid = m.get("id")
+                ctx = m.get("context_length", 4096)
+                contexts[mid] = ctx
+        return contexts
+    except Exception as e:
+        return {}
+

+# --- Sidebar ---
 with st.sidebar:
+    st.header("⚙️ API Settings")
+    api_key = st.text_input("OpenRouter API Key", type="password")
+
+    # --- Manually curated models ---
+    FREE_MODEL_LIST = [
+        "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
+        "deepseek/deepseek-chat-v3",
+        "google/gemma-2-9b-it",
+        "mistralai/mistral-7b-instruct-v0.2",
+        "qwen/qwen2-72b-instruct",
+        "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
     ]
+    model = st.selectbox("Choose a model", FREE_MODEL_LIST, index=0)
+
+    # Fetch the context length (with a fallback)
+    model_contexts = fetch_model_contexts(api_key)
+    default_ctx = model_contexts.get(model, 4096)
+
     temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
+    max_tokens = st.slider(
+        f"Max Output Tokens (max {default_ctx})",
+        1,
+        min(default_ctx, 32000),
+        min(512, default_ctx)
+    )
+    st.caption(f"🔢 Model Context Length (Fallback 4096): {default_ctx}")

+    if st.button("🔄 Chat Reset"):
+        st.session_state.messages = []
+        st.success("Chat history cleared.")
+        st.experimental_rerun()
+
+    st.markdown("""
+    ---
+    **Minimal UI:** text chat only.
+    """)
+
+# --- File upload section removed entirely ---

+# --- Display the chat history ---
+for msg in st.session_state.messages:
+    with st.chat_message(msg["role"]):
+        st.markdown(msg["content"])
+
+
+# --- API request function (unchanged) ---
+def call_openrouter(model, messages, temp, max_tok, key):
+    headers = {
+        "Authorization": f"Bearer {key}",
+        "Content-Type": "application/json",
+        "Referer": "https://aicodecraft.io",
+        "X-Title": "OpenRouter-Minimal-Interface",
+    }
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temp,
+        "max_tokens": max_tok,
+    }
+
+    res = requests.post(f"{OPENROUTER_API_BASE}/chat/completions", headers=headers, data=json.dumps(payload))
+
+    if res.status_code == 200:
+        try:
+            return res.json()["choices"][0]["message"]["content"]
+        except (KeyError, IndexError):
+            raise Exception("Malformed API response: could not extract the reply text.")
+    else:
+        try:
+            err = res.json()
+            msg = err.get("error", {}).get("message", res.text)
+        except Exception:
+            msg = res.text
+        raise Exception(f"API Error {res.status_code}: {msg}")
+
+
+# --- Chat Input ---
+if prompt := st.chat_input("Your message..."):
     if not api_key:
+        st.warning("Please enter your OpenRouter API key in the sidebar.")
         st.stop()
+
+    # Add the message to the history and display it
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    # Prepare the API messages (full chat history)
+    messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
+
+    # Generate the response
+    with st.chat_message("assistant"):
+        with st.spinner(f"Querying {model}..."):
+            try:
+                reply = call_openrouter(model, messages, temperature, max_tokens, api_key)
+                st.markdown(reply)
+                st.session_state.messages.append({"role": "assistant", "content": reply})
+            except Exception as e:
+                st.error(str(e))
+                st.session_state.messages.append({"role": "assistant", "content": f"❌ {str(e)}"})
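For review, a minimal standalone sketch of the same chat-completions request that the new call_openrouter() builds, so the API call can be exercised outside Streamlit. The OPENROUTER_API_KEY environment variable name and the chosen model id are assumptions for illustration only; the endpoint, headers, and payload shape come straight from the diff above.

    import json
    import os

    import requests

    OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"

    # Assumption: the key is supplied via this environment variable.
    api_key = os.environ["OPENROUTER_API_KEY"]

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "mistralai/mistral-7b-instruct-v0.2",  # any entry from FREE_MODEL_LIST
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
        "temperature": 0.7,
        "max_tokens": 128,
    }

    # Same endpoint and payload shape as call_openrouter() in app.py.
    res = requests.post(f"{OPENROUTER_API_BASE}/chat/completions", headers=headers, data=json.dumps(payload))
    res.raise_for_status()
    print(res.json()["choices"][0]["message"]["content"])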