Update app.py
Browse files
app.py
CHANGED
|
@@ -29,43 +29,38 @@ def clean_json_string(json_str):
|
|
| 29 |
return cleaned_str # Return extracted part anyway
|
| 30 |
|
| 31 |
def generate(input_text):
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
|
|
|
| 55 |
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
)
|
| 62 |
-
if chunk.text:
|
| 63 |
-
full_response += chunk.text
|
| 64 |
-
# Wir geben zwei Werte zurück, da Gradio zwei Outputs erwartet
|
| 65 |
-
yield full_response, ""
|
| 66 |
|
| 67 |
-
except Exception as e:
|
| 68 |
-
yield f"Error: {e}", input_text
|
| 69 |
|
| 70 |
|
| 71 |
def generate1(input_text):
|
|
|
|
| 29 |
return cleaned_str # Return extracted part anyway
|
| 30 |
|
| 31 |
def generate(input_text):
    """Stream a Gemini answer for *input_text* as Gradio-friendly updates.

    This is a generator: it yields ``(accumulated_response, cleared_input)``
    tuples because the Gradio interface binds it to two output components
    (the response box and the input box).

    Args:
        input_text: The user's prompt (coerced to str before sending).

    Yields:
        tuple[str, str]: Growing response text plus an empty string to clear
        the input; on failure, an error message plus the original input so
        the user can retry without retyping.
    """
    client = genai.Client(
        api_key=os.environ.get("GEMINI_API_KEY"),
    )

    model = "gemini-3-flash-preview"
    contents = [
        types.Content(
            role="user",
            parts=[
                # f-string coerces non-string inputs defensively.
                types.Part.from_text(text=f"{input_text}"),
            ],
        ),
    ]
    # Enable Google Search grounding so answers can draw on fresh web results.
    tools = [
        types.Tool(googleSearch=types.GoogleSearch()),
    ]
    generate_content_config = types.GenerateContentConfig(
        temperature=0.35,
        thinking_config=types.ThinkingConfig(
            thinking_level="HIGH",
        ),
        tools=tools,
    )

    full_response = ""
    try:
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            # Some stream chunks (e.g. tool-use/metadata events) carry no text.
            if chunk.text:
                full_response += chunk.text
                # Yield two values because Gradio expects two outputs.
                yield full_response, ""
    except Exception as e:
        # Surface the failure in the response box and preserve the user's input.
        yield f"Error: {e}", input_text
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
|
|
|
|
|
|
|
| 64 |
|
| 65 |
|
| 66 |
def generate1(input_text):
|