| sample_id | text | metadata | category |
|---|---|---|---|
zai-org/Open-AutoGLM:phone_agent/xctest/input.py | """Input utilities for iOS device text input via WebDriverAgent."""
import time
def _get_wda_session_url(wda_url: str, session_id: str | None, endpoint: str) -> str:
"""
Get the correct WDA URL for a session endpoint.
Args:
wda_url: Base WDA URL.
session_id: Optional session ID.
endpoint: The endpoint path.
Returns:
Full URL for the endpoint.
"""
base = wda_url.rstrip("/")
if session_id:
return f"{base}/session/{session_id}/{endpoint}"
else:
# Try to use WDA endpoints without session when possible
return f"{base}/{endpoint}"
def type_text(
text: str,
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
frequency: int = 60,
) -> None:
"""
Type text into the currently focused input field.
Args:
text: The text to type.
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
frequency: Typing frequency (keys per minute). Default is 60.
Note:
The input field must be focused before calling this function.
Use tap() to focus on the input field first.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "wda/keys")
# Send text to WDA
response = requests.post(
url, json={"value": list(text), "frequency": frequency}, timeout=30, verify=False
)
if response.status_code not in (200, 201):
print(f"Warning: Text input may have failed. Status: {response.status_code}")
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error typing text: {e}")
def clear_text(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
) -> None:
"""
Clear text in the currently focused input field.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
Note:
This sends a clear command to the active element.
The input field must be focused before calling this function.
"""
try:
import requests
# First, try to get the active element
url = _get_wda_session_url(wda_url, session_id, "element/active")
response = requests.get(url, timeout=10, verify=False)
if response.status_code == 200:
data = response.json()
element_id = data.get("value", {}).get("ELEMENT") or data.get("value", {}).get("element-6066-11e4-a52e-4f735466cecf")
if element_id:
# Clear the element
clear_url = _get_wda_session_url(wda_url, session_id, f"element/{element_id}/clear")
requests.post(clear_url, timeout=10, verify=False)
return
# Fallback: send backspace commands
_clear_with_backspace(wda_url, session_id)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error clearing text: {e}")
def _clear_with_backspace(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
max_backspaces: int = 100,
) -> None:
"""
Clear text by sending backspace keys.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
max_backspaces: Maximum number of backspaces to send.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "wda/keys")
# Send backspace character multiple times
backspace_char = "\u0008" # Backspace Unicode character
requests.post(
url,
json={"value": [backspace_char] * max_backspaces},
timeout=10,
verify=False,
)
except Exception as e:
print(f"Error clearing with backspace: {e}")
def send_keys(
keys: list[str],
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
) -> None:
"""
Send a sequence of keys.
Args:
keys: List of keys to send.
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
Example:
>>> send_keys(["H", "e", "l", "l", "o"])
>>> send_keys(["\n"]) # Send enter key
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "wda/keys")
requests.post(url, json={"value": keys}, timeout=10, verify=False)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error sending keys: {e}")
def press_enter(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 0.5,
) -> None:
"""
Press the Enter/Return key.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after pressing enter.
"""
send_keys(["\n"], wda_url, session_id)
time.sleep(delay)
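def _demo_replace_text() -> None:
    """Usage sketch (illustrative, not part of the original module): replace
    the contents of an already-focused field. Assumes WDA is reachable at the
    default http://localhost:8100 and that tap() was used to focus the field.
    """
    clear_text()
    type_text("hello world")
    press_enter()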
def hide_keyboard(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
) -> None:
"""
Hide the on-screen keyboard.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
"""
try:
import requests
url = f"{wda_url.rstrip('/')}/wda/keyboard/dismiss"
requests.post(url, timeout=10, verify=False)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error hiding keyboard: {e}")
def is_keyboard_shown(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
) -> bool:
"""
Check if the on-screen keyboard is currently shown.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
Returns:
True if keyboard is shown, False otherwise.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "wda/keyboard/shown")
response = requests.get(url, timeout=5, verify=False)
if response.status_code == 200:
data = response.json()
return data.get("value", False)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception:
pass
return False
def set_pasteboard(
text: str,
wda_url: str = "http://localhost:8100",
) -> None:
"""
Set the device pasteboard (clipboard) content.
Args:
text: Text to set in pasteboard.
wda_url: WebDriverAgent URL.
Note:
This can be useful for inputting large amounts of text.
After setting pasteboard, you can simulate paste gesture.
"""
try:
import requests
url = f"{wda_url.rstrip('/')}/wda/setPasteboard"
requests.post(
url, json={"content": text, "contentType": "plaintext"}, timeout=10, verify=False
)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error setting pasteboard: {e}")
def get_pasteboard(
wda_url: str = "http://localhost:8100",
) -> str | None:
"""
Get the device pasteboard (clipboard) content.
Args:
wda_url: WebDriverAgent URL.
Returns:
Pasteboard content or None if failed.
"""
try:
import requests
url = f"{wda_url.rstrip('/')}/wda/getPasteboard"
response = requests.post(url, timeout=10, verify=False)
if response.status_code == 200:
data = response.json()
return data.get("value")
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error getting pasteboard: {e}")
return None
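def _demo_pasteboard_input() -> None:
    """Usage sketch (illustrative, not part of the original module): stage a
    large block of text on the pasteboard instead of typing it key by key.
    The paste gesture itself (e.g., long-press + Paste) must still be driven
    by the caller.
    """
    set_pasteboard("a very long block of text...")
    if get_pasteboard() is not None:
        print("Pasteboard staged; long-press the target field to paste.")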
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/xctest/input.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/xctest/screenshot.py | """Screenshot utilities for capturing iOS device screen."""
import base64
import os
import subprocess
import tempfile
import uuid
from dataclasses import dataclass
from io import BytesIO
from PIL import Image
@dataclass
class Screenshot:
"""Represents a captured screenshot."""
base64_data: str
width: int
height: int
is_sensitive: bool = False
def get_screenshot(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
device_id: str | None = None,
timeout: int = 10,
) -> Screenshot:
"""
Capture a screenshot from the connected iOS device.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
device_id: Optional device UDID (for idevicescreenshot fallback).
timeout: Timeout in seconds for screenshot operations.
Returns:
Screenshot object containing base64 data and dimensions.
Note:
Tries WebDriverAgent first, falls back to idevicescreenshot if available.
If both fail, returns a black fallback image.
"""
# Try WebDriverAgent first (preferred method)
screenshot = _get_screenshot_wda(wda_url, session_id, timeout)
if screenshot:
return screenshot
# Fallback to idevicescreenshot
screenshot = _get_screenshot_idevice(device_id, timeout)
if screenshot:
return screenshot
# Return fallback black image
return _create_fallback_screenshot(is_sensitive=False)
def _get_screenshot_wda(
wda_url: str, session_id: str | None, timeout: int
) -> Screenshot | None:
"""
Capture screenshot using WebDriverAgent.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
timeout: Timeout in seconds.
Returns:
Screenshot object or None if failed.
"""
try:
import requests
url = f"{wda_url.rstrip('/')}/screenshot"
response = requests.get(url, timeout=timeout, verify=False)
if response.status_code == 200:
data = response.json()
base64_data = data.get("value", "")
if base64_data:
# Decode to get dimensions
img_data = base64.b64decode(base64_data)
img = Image.open(BytesIO(img_data))
width, height = img.size
return Screenshot(
base64_data=base64_data,
width=width,
height=height,
is_sensitive=False,
)
except ImportError:
print("Note: requests library not installed. Install: pip install requests")
except Exception as e:
print(f"WDA screenshot failed: {e}")
return None
def _get_screenshot_idevice(
device_id: str | None, timeout: int
) -> Screenshot | None:
"""
Capture screenshot using idevicescreenshot (libimobiledevice).
Args:
device_id: Optional device UDID.
timeout: Timeout in seconds.
Returns:
Screenshot object or None if failed.
"""
try:
temp_path = os.path.join(
tempfile.gettempdir(), f"ios_screenshot_{uuid.uuid4()}.png"
)
cmd = ["idevicescreenshot"]
if device_id:
cmd.extend(["-u", device_id])
cmd.append(temp_path)
result = subprocess.run(
cmd, capture_output=True, text=True, timeout=timeout
)
if result.returncode == 0 and os.path.exists(temp_path):
# Read and encode image
img = Image.open(temp_path)
width, height = img.size
buffered = BytesIO()
img.save(buffered, format="PNG")
base64_data = base64.b64encode(buffered.getvalue()).decode("utf-8")
# Cleanup
os.remove(temp_path)
return Screenshot(
base64_data=base64_data, width=width, height=height, is_sensitive=False
)
except FileNotFoundError:
print(
"Note: idevicescreenshot not found. Install: brew install libimobiledevice"
)
except Exception as e:
print(f"idevicescreenshot failed: {e}")
return None
def _create_fallback_screenshot(is_sensitive: bool) -> Screenshot:
"""
Create a black fallback image when screenshot fails.
Args:
is_sensitive: Whether the failure was due to sensitive content.
Returns:
Screenshot object with black image.
"""
# Default iPhone screen size (iPhone 14 Pro)
default_width, default_height = 1179, 2556
black_img = Image.new("RGB", (default_width, default_height), color="black")
buffered = BytesIO()
black_img.save(buffered, format="PNG")
base64_data = base64.b64encode(buffered.getvalue()).decode("utf-8")
return Screenshot(
base64_data=base64_data,
width=default_width,
height=default_height,
is_sensitive=is_sensitive,
)
def save_screenshot(
screenshot: Screenshot,
file_path: str,
) -> bool:
"""
Save a screenshot to a file.
Args:
screenshot: Screenshot object.
file_path: Path to save the screenshot.
Returns:
True if successful, False otherwise.
"""
try:
img_data = base64.b64decode(screenshot.base64_data)
img = Image.open(BytesIO(img_data))
img.save(file_path)
return True
except Exception as e:
print(f"Error saving screenshot: {e}")
return False
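def _demo_capture_and_save() -> None:
    """Usage sketch (illustrative, not part of the original module): capture
    via WDA (falling back to idevicescreenshot) and persist to a hypothetical
    local path.
    """
    shot = get_screenshot(wda_url="http://localhost:8100")
    print(f"Captured {shot.width}x{shot.height} (sensitive={shot.is_sensitive})")
    save_screenshot(shot, "/tmp/ios_screen.png")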
def get_screenshot_png(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
device_id: str | None = None,
) -> bytes | None:
"""
Get screenshot as PNG bytes.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
device_id: Optional device UDID.
Returns:
PNG bytes or None if failed.
"""
screenshot = get_screenshot(wda_url, session_id, device_id)
try:
return base64.b64decode(screenshot.base64_data)
except Exception:
return None
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/xctest/screenshot.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/device_factory.py | """Device factory for selecting ADB or HDC based on device type."""
from enum import Enum
class DeviceType(Enum):
"""Type of device connection tool."""
ADB = "adb"
HDC = "hdc"
IOS = "ios"
class DeviceFactory:
"""
Factory class for getting device-specific implementations.
This allows the system to work with both Android (ADB) and HarmonyOS (HDC) devices.
"""
def __init__(self, device_type: DeviceType = DeviceType.ADB):
"""
Initialize the device factory.
Args:
device_type: The type of device to use (ADB or HDC).
"""
self.device_type = device_type
self._module = None
@property
def module(self):
"""Get the appropriate device module (adb or hdc)."""
if self._module is None:
if self.device_type == DeviceType.ADB:
from phone_agent import adb
self._module = adb
elif self.device_type == DeviceType.HDC:
from phone_agent import hdc
self._module = hdc
else:
raise ValueError(f"Unknown device type: {self.device_type}")
return self._module
def get_screenshot(self, device_id: str | None = None, timeout: int = 10):
"""Get screenshot from device."""
return self.module.get_screenshot(device_id, timeout)
def get_current_app(self, device_id: str | None = None) -> str:
"""Get current app name."""
return self.module.get_current_app(device_id)
def tap(
self, x: int, y: int, device_id: str | None = None, delay: float | None = None
):
"""Tap at coordinates."""
return self.module.tap(x, y, device_id, delay)
def double_tap(
self, x: int, y: int, device_id: str | None = None, delay: float | None = None
):
"""Double tap at coordinates."""
return self.module.double_tap(x, y, device_id, delay)
def long_press(
self,
x: int,
y: int,
duration_ms: int = 3000,
device_id: str | None = None,
delay: float | None = None,
):
"""Long press at coordinates."""
return self.module.long_press(x, y, duration_ms, device_id, delay)
def swipe(
self,
start_x: int,
start_y: int,
end_x: int,
end_y: int,
duration_ms: int | None = None,
device_id: str | None = None,
delay: float | None = None,
):
"""Swipe from start to end."""
return self.module.swipe(
start_x, start_y, end_x, end_y, duration_ms, device_id, delay
)
def back(self, device_id: str | None = None, delay: float | None = None):
"""Press back button."""
return self.module.back(device_id, delay)
def home(self, device_id: str | None = None, delay: float | None = None):
"""Press home button."""
return self.module.home(device_id, delay)
def launch_app(
self, app_name: str, device_id: str | None = None, delay: float | None = None
) -> bool:
"""Launch an app."""
return self.module.launch_app(app_name, device_id, delay)
def type_text(self, text: str, device_id: str | None = None):
"""Type text."""
return self.module.type_text(text, device_id)
def clear_text(self, device_id: str | None = None):
"""Clear text."""
return self.module.clear_text(device_id)
def detect_and_set_adb_keyboard(self, device_id: str | None = None) -> str:
"""Detect and set keyboard."""
return self.module.detect_and_set_adb_keyboard(device_id)
def restore_keyboard(self, ime: str, device_id: str | None = None):
"""Restore keyboard."""
return self.module.restore_keyboard(ime, device_id)
def list_devices(self):
"""List connected devices."""
return self.module.list_devices()
def get_connection_class(self):
"""Get the connection class (ADBConnection or HDCConnection)."""
if self.device_type == DeviceType.ADB:
from phone_agent.adb import ADBConnection
return ADBConnection
elif self.device_type == DeviceType.HDC:
from phone_agent.hdc import HDCConnection
return HDCConnection
else:
raise ValueError(f"Unknown device type: {self.device_type}")
# Global device factory instance
_device_factory: DeviceFactory | None = None
def set_device_type(device_type: DeviceType):
"""
Set the global device type.
Args:
device_type: The device type to use (ADB or HDC).
"""
global _device_factory
_device_factory = DeviceFactory(device_type)
def get_device_factory() -> DeviceFactory:
"""
Get the global device factory instance.
Returns:
The device factory instance.
"""
global _device_factory
if _device_factory is None:
_device_factory = DeviceFactory(DeviceType.ADB) # Default to ADB
return _device_factory
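def _demo_factory_usage() -> None:
    """Usage sketch (illustrative, not part of the original module): select
    the backend once at startup, then drive the device through the shared
    factory. The app name and coordinates are hypothetical.
    """
    set_device_type(DeviceType.HDC)
    factory = get_device_factory()
    factory.launch_app("Settings")
    factory.tap(540, 1200)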
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/device_factory.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zai-org/Open-AutoGLM:phone_agent/hdc/connection.py | """HDC connection management for HarmonyOS devices."""
import os
import subprocess
import time
from dataclasses import dataclass
from enum import Enum
from phone_agent.config.timing import TIMING_CONFIG
# Global flag to control HDC command output
_HDC_VERBOSE = os.getenv("HDC_VERBOSE", "false").lower() in ("true", "1", "yes")
def _run_hdc_command(cmd: list, **kwargs) -> subprocess.CompletedProcess:
"""
Run HDC command with optional verbose output.
Args:
cmd: Command list to execute.
**kwargs: Additional arguments for subprocess.run.
Returns:
CompletedProcess result.
"""
if _HDC_VERBOSE:
print(f"[HDC] Running command: {' '.join(cmd)}")
result = subprocess.run(cmd, **kwargs)
if _HDC_VERBOSE and result.returncode != 0:
print(f"[HDC] Command failed with return code {result.returncode}")
if hasattr(result, 'stderr') and result.stderr:
print(f"[HDC] Error: {result.stderr}")
return result
def set_hdc_verbose(verbose: bool):
"""Set HDC verbose mode globally."""
global _HDC_VERBOSE
_HDC_VERBOSE = verbose
class ConnectionType(Enum):
"""Type of HDC connection."""
USB = "usb"
WIFI = "wifi"
REMOTE = "remote"
@dataclass
class DeviceInfo:
"""Information about a connected device."""
device_id: str
status: str
connection_type: ConnectionType
model: str | None = None
harmony_version: str | None = None
class HDCConnection:
"""
Manages HDC connections to HarmonyOS devices.
Supports USB, WiFi, and remote TCP/IP connections.
Example:
>>> conn = HDCConnection()
>>> # Connect to remote device
>>> conn.connect("192.168.1.100:5555")
>>> # List devices
>>> devices = conn.list_devices()
>>> # Disconnect
>>> conn.disconnect("192.168.1.100:5555")
"""
def __init__(self, hdc_path: str = "hdc"):
"""
Initialize HDC connection manager.
Args:
hdc_path: Path to HDC executable.
"""
self.hdc_path = hdc_path
def connect(self, address: str, timeout: int = 10) -> tuple[bool, str]:
"""
Connect to a remote device via TCP/IP.
Args:
address: Device address in format "host:port" (e.g., "192.168.1.100:5555").
timeout: Connection timeout in seconds.
Returns:
Tuple of (success, message).
Note:
The remote device must have TCP/IP debugging enabled.
"""
# Validate address format
if ":" not in address:
address = f"{address}:5555" # Default HDC port
try:
result = _run_hdc_command(
[self.hdc_path, "tconn", address],
capture_output=True,
text=True,
timeout=timeout,
)
output = result.stdout + result.stderr
if "Connect OK" in output or "connected" in output.lower():
return True, f"Connected to {address}"
elif "already connected" in output.lower():
return True, f"Already connected to {address}"
else:
return False, output.strip()
except subprocess.TimeoutExpired:
return False, f"Connection timeout after {timeout}s"
except Exception as e:
return False, f"Connection error: {e}"
def disconnect(self, address: str | None = None) -> tuple[bool, str]:
"""
Disconnect from a remote device.
Args:
address: Device address to disconnect. If None, disconnects all.
Returns:
Tuple of (success, message).
"""
try:
if address:
cmd = [self.hdc_path, "tdisconn", address]
else:
# HDC doesn't have a "disconnect all" command, so we need to list and disconnect each
devices = self.list_devices()
for device in devices:
if ":" in device.device_id: # Remote device
_run_hdc_command(
[self.hdc_path, "tdisconn", device.device_id],
capture_output=True,
text=True,
timeout=5
)
return True, "Disconnected all remote devices"
result = _run_hdc_command(cmd, capture_output=True, text=True, encoding="utf-8", timeout=5)
output = result.stdout + result.stderr
return True, output.strip() or "Disconnected"
except Exception as e:
return False, f"Disconnect error: {e}"
def list_devices(self) -> list[DeviceInfo]:
"""
List all connected devices.
Returns:
List of DeviceInfo objects.
"""
try:
result = _run_hdc_command(
[self.hdc_path, "list", "targets"],
capture_output=True,
text=True,
timeout=5,
)
devices = []
for line in result.stdout.strip().split("\n"):
if not line.strip():
continue
# HDC output format: device_id (status)
# Example: "192.168.1.100:5555" or "FMR0223C13000649"
device_id = line.strip()
# Determine connection type
if ":" in device_id:
conn_type = ConnectionType.REMOTE
else:
conn_type = ConnectionType.USB
# HDC doesn't provide detailed status in list command
# We assume "Connected" status for devices that appear
devices.append(
DeviceInfo(
device_id=device_id,
status="device",
connection_type=conn_type,
model=None,
)
)
return devices
except Exception as e:
print(f"Error listing devices: {e}")
return []
def get_device_info(self, device_id: str | None = None) -> DeviceInfo | None:
"""
Get detailed information about a device.
Args:
device_id: Device ID. If None, uses first available device.
Returns:
DeviceInfo or None if not found.
"""
devices = self.list_devices()
if not devices:
return None
if device_id is None:
return devices[0]
for device in devices:
if device.device_id == device_id:
return device
return None
def is_connected(self, device_id: str | None = None) -> bool:
"""
Check if a device is connected.
Args:
device_id: Device ID to check. If None, checks if any device is connected.
Returns:
True if connected, False otherwise.
"""
devices = self.list_devices()
if not devices:
return False
if device_id is None:
return len(devices) > 0
return any(d.device_id == device_id for d in devices)
def enable_tcpip(
self, port: int = 5555, device_id: str | None = None
) -> tuple[bool, str]:
"""
Enable TCP/IP debugging on a USB-connected device.
This allows subsequent wireless connections to the device.
Args:
port: TCP port for HDC (default: 5555).
device_id: Device ID. If None, uses first available device.
Returns:
Tuple of (success, message).
Note:
The device must be connected via USB first.
After this, you can disconnect USB and connect via WiFi.
"""
try:
cmd = [self.hdc_path]
if device_id:
cmd.extend(["-t", device_id])
cmd.extend(["tmode", "port", str(port)])
result = _run_hdc_command(cmd, capture_output=True, text=True, encoding="utf-8", timeout=10)
output = result.stdout + result.stderr
if result.returncode == 0 or "success" in output.lower():
time.sleep(TIMING_CONFIG.connection.adb_restart_delay)
return True, f"TCP/IP mode enabled on port {port}"
else:
return False, output.strip()
except Exception as e:
return False, f"Error enabling TCP/IP: {e}"
def get_device_ip(self, device_id: str | None = None) -> str | None:
"""
Get the IP address of a connected device.
Args:
device_id: Device ID. If None, uses first available device.
Returns:
IP address string or None if not found.
"""
try:
cmd = [self.hdc_path]
if device_id:
cmd.extend(["-t", device_id])
cmd.extend(["shell", "ifconfig"])
result = _run_hdc_command(cmd, capture_output=True, text=True, encoding="utf-8", timeout=5)
# Parse IP from ifconfig output
for line in result.stdout.split("\n"):
if "inet addr:" in line or "inet " in line:
parts = line.strip().split()
for i, part in enumerate(parts):
if "addr:" in part:
ip = part.split(":")[1]
# Filter out localhost
if not ip.startswith("127."):
return ip
elif part == "inet" and i + 1 < len(parts):
ip = parts[i + 1].split("/")[0]
if not ip.startswith("127."):
return ip
return None
except Exception as e:
print(f"Error getting device IP: {e}")
return None
def restart_server(self) -> tuple[bool, str]:
"""
Restart the HDC server.
Returns:
Tuple of (success, message).
"""
try:
# Kill server
_run_hdc_command(
[self.hdc_path, "kill"], capture_output=True, timeout=5
)
time.sleep(TIMING_CONFIG.connection.server_restart_delay)
# Start server (HDC auto-starts when running commands)
_run_hdc_command(
[self.hdc_path, "start", "-r"], capture_output=True, timeout=5
)
return True, "HDC server restarted"
except Exception as e:
return False, f"Error restarting server: {e}"
def quick_connect(address: str) -> tuple[bool, str]:
"""
Quick helper to connect to a remote device.
Args:
address: Device address (e.g., "192.168.1.100" or "192.168.1.100:5555").
Returns:
Tuple of (success, message).
"""
conn = HDCConnection()
return conn.connect(address)
def list_devices() -> list[DeviceInfo]:
"""
Quick helper to list connected devices.
Returns:
List of DeviceInfo objects.
"""
conn = HDCConnection()
return conn.list_devices()
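def _demo_wireless_setup() -> None:
    """Usage sketch (illustrative, not part of the original module): move a
    USB-connected device over to WiFi debugging. Uses the default HDC port;
    the IP is read from the device itself.
    """
    conn = HDCConnection()
    ok, msg = conn.enable_tcpip(port=5555)
    print(msg)
    ip = conn.get_device_ip()
    if ok and ip:
        print(conn.connect(f"{ip}:5555"))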
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/hdc/connection.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/hdc/device.py | """Device control utilities for HarmonyOS automation."""
import re
import time
from phone_agent.config.apps_harmonyos import APP_ABILITIES, APP_PACKAGES
from phone_agent.config.timing import TIMING_CONFIG
from phone_agent.hdc.connection import _run_hdc_command
def get_current_app(device_id: str | None = None) -> str:
"""
Get the currently focused app name.
Args:
device_id: Optional HDC device ID for multi-device setups.
Returns:
The app name if recognized, otherwise "System Home".
"""
hdc_prefix = _get_hdc_prefix(device_id)
# Use 'aa dump -l' to list running abilities
result = _run_hdc_command(
hdc_prefix + ["shell", "aa", "dump", "-l"],
capture_output=True,
text=True,
encoding="utf-8"
)
output = result.stdout
if not output:
raise ValueError("No output from aa dump")
# Parse missions and find the one with FOREGROUND state
# Output format:
# Mission ID #139
# mission name #[#com.kuaishou.hmapp:kwai:EntryAbility]
# app name [com.kuaishou.hmapp]
# bundle name [com.kuaishou.hmapp]
# ability type [PAGE]
# state #FOREGROUND
# app state #FOREGROUND
lines = output.split("\n")
foreground_bundle = None
current_bundle = None
for line in lines:
# Track the current mission's bundle name
if "app name [" in line:
match = re.search(r'\[([^\]]+)\]', line)
if match:
current_bundle = match.group(1)
# Check if this mission is in FOREGROUND state
if "state #FOREGROUND" in line or "state #foreground" in line.lower():
if current_bundle:
foreground_bundle = current_bundle
break # Found the foreground app, no need to continue
# Reset current_bundle when starting a new mission
if "Mission ID" in line:
current_bundle = None
# Match against known apps
if foreground_bundle:
for app_name, package in APP_PACKAGES.items():
if package == foreground_bundle:
return app_name
        # Bundle found but not in the known app list; fall back to the raw bundle name
        print(f"Foreground bundle not in the known app list: {foreground_bundle}")
        return foreground_bundle
    print("No foreground bundle found")
return "System Home"
def tap(
x: int, y: int, device_id: str | None = None, delay: float | None = None
) -> None:
"""
Tap at the specified coordinates.
Args:
x: X coordinate.
y: Y coordinate.
device_id: Optional HDC device ID.
delay: Delay in seconds after tap. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_tap_delay
hdc_prefix = _get_hdc_prefix(device_id)
# HarmonyOS uses uitest uiInput click
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "click", str(x), str(y)],
capture_output=True
)
time.sleep(delay)
def double_tap(
x: int, y: int, device_id: str | None = None, delay: float | None = None
) -> None:
"""
Double tap at the specified coordinates.
Args:
x: X coordinate.
y: Y coordinate.
device_id: Optional HDC device ID.
delay: Delay in seconds after double tap. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_double_tap_delay
hdc_prefix = _get_hdc_prefix(device_id)
# HarmonyOS uses uitest uiInput doubleClick
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "doubleClick", str(x), str(y)],
capture_output=True
)
time.sleep(delay)
def long_press(
x: int,
y: int,
duration_ms: int = 3000,
device_id: str | None = None,
delay: float | None = None,
) -> None:
"""
Long press at the specified coordinates.
Args:
x: X coordinate.
y: Y coordinate.
duration_ms: Duration of press in milliseconds (note: HarmonyOS longClick may not support duration).
device_id: Optional HDC device ID.
delay: Delay in seconds after long press. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_long_press_delay
hdc_prefix = _get_hdc_prefix(device_id)
# HarmonyOS uses uitest uiInput longClick
# Note: longClick may have a fixed duration, duration_ms parameter might not be supported
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "longClick", str(x), str(y)],
capture_output=True,
)
time.sleep(delay)
def swipe(
start_x: int,
start_y: int,
end_x: int,
end_y: int,
duration_ms: int | None = None,
device_id: str | None = None,
delay: float | None = None,
) -> None:
"""
Swipe from start to end coordinates.
Args:
start_x: Starting X coordinate.
start_y: Starting Y coordinate.
end_x: Ending X coordinate.
end_y: Ending Y coordinate.
duration_ms: Duration of swipe in milliseconds (auto-calculated if None).
device_id: Optional HDC device ID.
delay: Delay in seconds after swipe. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_swipe_delay
hdc_prefix = _get_hdc_prefix(device_id)
if duration_ms is None:
        # Heuristic: derive duration from the squared distance, then clamp below
dist_sq = (start_x - end_x) ** 2 + (start_y - end_y) ** 2
duration_ms = int(dist_sq / 1000)
duration_ms = max(500, min(duration_ms, 1000)) # Clamp between 500-1000ms
# HarmonyOS uses uitest uiInput swipe
# Format: swipe startX startY endX endY duration
_run_hdc_command(
hdc_prefix
+ [
"shell",
"uitest",
"uiInput",
"swipe",
str(start_x),
str(start_y),
str(end_x),
str(end_y),
str(duration_ms),
],
capture_output=True,
)
time.sleep(delay)
def back(device_id: str | None = None, delay: float | None = None) -> None:
"""
Press the back button.
Args:
device_id: Optional HDC device ID.
delay: Delay in seconds after pressing back. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_back_delay
hdc_prefix = _get_hdc_prefix(device_id)
# HarmonyOS uses uitest uiInput keyEvent Back
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", "Back"],
capture_output=True
)
time.sleep(delay)
def home(device_id: str | None = None, delay: float | None = None) -> None:
"""
Press the home button.
Args:
device_id: Optional HDC device ID.
delay: Delay in seconds after pressing home. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_home_delay
hdc_prefix = _get_hdc_prefix(device_id)
# HarmonyOS uses uitest uiInput keyEvent Home
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", "Home"],
capture_output=True
)
time.sleep(delay)
def launch_app(
app_name: str, device_id: str | None = None, delay: float | None = None
) -> bool:
"""
Launch an app by name.
Args:
app_name: The app name (must be in APP_PACKAGES).
device_id: Optional HDC device ID.
delay: Delay in seconds after launching. If None, uses configured default.
Returns:
True if app was launched, False if app not found.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_launch_delay
if app_name not in APP_PACKAGES:
print(f"[HDC] App '{app_name}' not found in HarmonyOS app list")
print(f"[HDC] Available apps: {', '.join(sorted(APP_PACKAGES.keys())[:10])}...")
return False
hdc_prefix = _get_hdc_prefix(device_id)
bundle = APP_PACKAGES[app_name]
# Get the ability name for this bundle
# Default to "EntryAbility" if not specified in APP_ABILITIES
ability = APP_ABILITIES.get(bundle, "EntryAbility")
# HarmonyOS uses 'aa start' command to launch apps
# Format: aa start -b {bundle} -a {ability}
_run_hdc_command(
hdc_prefix
+ [
"shell",
"aa",
"start",
"-b",
bundle,
"-a",
ability,
],
capture_output=True,
)
time.sleep(delay)
return True
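def _demo_scroll_feed() -> None:
    """Usage sketch (illustrative, not part of the original module): launch an
    app from APP_PACKAGES, scroll once, then return home. The app name and
    coordinates are hypothetical.
    """
    if launch_app("Settings"):
        swipe(540, 1600, 540, 600)
        home()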
def _get_hdc_prefix(device_id: str | None) -> list:
"""Get HDC command prefix with optional device specifier."""
if device_id:
return ["hdc", "-t", device_id]
return ["hdc"]
if __name__ == "__main__":
print(get_current_app())
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/hdc/device.py",
"license": "Apache License 2.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/hdc/input.py | """Input utilities for HarmonyOS device text input."""
from phone_agent.hdc.connection import _run_hdc_command
def type_text(text: str, device_id: str | None = None) -> None:
"""
Type text into the currently focused input field.
Args:
text: The text to type. Supports multi-line text with newline characters.
device_id: Optional HDC device ID for multi-device setups.
Note:
        HarmonyOS uses: hdc shell uitest uiInput text "<text content>"
This command works without coordinates when input field is focused.
For multi-line text, the function splits by newlines and sends ENTER keyEvents.
ENTER key code in HarmonyOS: 2054
Recommendation: Click on the input field first to focus it, then use this function.
"""
hdc_prefix = _get_hdc_prefix(device_id)
# Handle multi-line text by splitting on newlines
if '\n' in text:
lines = text.split('\n')
for i, line in enumerate(lines):
if line: # Only process non-empty lines
# Escape special characters for shell
escaped_line = line.replace('"', '\\"').replace("$", "\\$")
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "text", escaped_line],
capture_output=True,
text=True,
)
# Send ENTER key event after each line except the last one
if i < len(lines) - 1:
try:
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", "2054"],
capture_output=True,
text=True,
)
except Exception as e:
print(f"[HDC] ENTER keyEvent failed: {e}")
else:
# Single line text - original logic
# Escape special characters for shell (keep quotes for proper text handling)
# The text will be wrapped in quotes in the command
escaped_text = text.replace('"', '\\"').replace("$", "\\$")
# HarmonyOS uitest uiInput text command
        # Format: hdc shell uitest uiInput text "<text content>"
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "text", escaped_text],
capture_output=True,
text=True,
)
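def _demo_multiline_input() -> None:
    """Usage sketch (illustrative, not part of the original module): the
    newline below is translated into one ENTER keyEvent (code 2054) between
    two `uitest uiInput text` invocations.
    """
    type_text("first line\nsecond line")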
def clear_text(device_id: str | None = None) -> None:
"""
Clear text in the currently focused input field.
Args:
device_id: Optional HDC device ID for multi-device setups.
Note:
This method uses repeated delete key events to clear text.
For HarmonyOS, you might also use select all + delete for better efficiency.
"""
hdc_prefix = _get_hdc_prefix(device_id)
# Ctrl+A to select all (key code 2072 for Ctrl, 2017 for A)
# Then delete
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", "2072", "2017"],
capture_output=True,
text=True,
)
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", "2055"], # Delete key
capture_output=True,
text=True,
)
def detect_and_set_adb_keyboard(device_id: str | None = None) -> str:
"""
Detect current keyboard and switch to ADB Keyboard if available.
Args:
device_id: Optional HDC device ID for multi-device setups.
Returns:
The original keyboard IME identifier for later restoration.
Note:
This is a placeholder. HarmonyOS may not support ADB Keyboard.
If there's a similar tool for HarmonyOS, integrate it here.
"""
hdc_prefix = _get_hdc_prefix(device_id)
# Get current IME (if HarmonyOS supports this)
try:
result = _run_hdc_command(
hdc_prefix + ["shell", "settings", "get", "secure", "default_input_method"],
capture_output=True,
text=True,
)
current_ime = (result.stdout + result.stderr).strip()
# If ADB Keyboard equivalent exists for HarmonyOS, switch to it
# For now, we'll just return the current IME
return current_ime
except Exception:
return ""
def restore_keyboard(ime: str, device_id: str | None = None) -> None:
"""
Restore the original keyboard IME.
Args:
ime: The IME identifier to restore.
device_id: Optional HDC device ID for multi-device setups.
"""
if not ime:
return
hdc_prefix = _get_hdc_prefix(device_id)
try:
_run_hdc_command(
hdc_prefix + ["shell", "ime", "set", ime], capture_output=True, text=True
)
except Exception:
pass
def _get_hdc_prefix(device_id: str | None) -> list:
"""Get HDC command prefix with optional device specifier."""
if device_id:
return ["hdc", "-t", device_id]
return ["hdc"]
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/hdc/input.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/hdc/screenshot.py | """Screenshot utilities for capturing HarmonyOS device screen."""
import base64
import os
import tempfile
import uuid
from dataclasses import dataclass
from io import BytesIO
from PIL import Image
from phone_agent.hdc.connection import _run_hdc_command
@dataclass
class Screenshot:
"""Represents a captured screenshot."""
base64_data: str
width: int
height: int
is_sensitive: bool = False
def get_screenshot(device_id: str | None = None, timeout: int = 10) -> Screenshot:
"""
Capture a screenshot from the connected HarmonyOS device.
Args:
device_id: Optional HDC device ID for multi-device setups.
timeout: Timeout in seconds for screenshot operations.
Returns:
Screenshot object containing base64 data and dimensions.
Note:
If the screenshot fails (e.g., on sensitive screens like payment pages),
a black fallback image is returned with is_sensitive=True.
"""
temp_path = os.path.join(tempfile.gettempdir(), f"screenshot_{uuid.uuid4()}.png")
hdc_prefix = _get_hdc_prefix(device_id)
try:
# Execute screenshot command
# HarmonyOS HDC only supports JPEG format
remote_path = "/data/local/tmp/tmp_screenshot.jpeg"
# Try method 1: hdc shell screenshot (newer HarmonyOS versions)
result = _run_hdc_command(
hdc_prefix + ["shell", "screenshot", remote_path],
capture_output=True,
text=True,
timeout=timeout,
)
# Check for screenshot failure (sensitive screen)
output = result.stdout + result.stderr
if "fail" in output.lower() or "error" in output.lower() or "not found" in output.lower():
# Try method 2: snapshot_display (older versions or different devices)
result = _run_hdc_command(
hdc_prefix + ["shell", "snapshot_display", "-f", remote_path],
capture_output=True,
text=True,
timeout=timeout,
)
output = result.stdout + result.stderr
if "fail" in output.lower() or "error" in output.lower():
return _create_fallback_screenshot(is_sensitive=True)
# Pull screenshot to local temp path
# Note: remote file is JPEG, but PIL can open it regardless of local extension
_run_hdc_command(
hdc_prefix + ["file", "recv", remote_path, temp_path],
capture_output=True,
text=True,
timeout=5,
)
if not os.path.exists(temp_path):
return _create_fallback_screenshot(is_sensitive=False)
# Read JPEG image and convert to PNG for model inference
# PIL automatically detects the image format from file content
img = Image.open(temp_path)
width, height = img.size
buffered = BytesIO()
img.save(buffered, format="PNG")
base64_data = base64.b64encode(buffered.getvalue()).decode("utf-8")
# Cleanup
os.remove(temp_path)
return Screenshot(
base64_data=base64_data, width=width, height=height, is_sensitive=False
)
except Exception as e:
print(f"Screenshot error: {e}")
return _create_fallback_screenshot(is_sensitive=False)
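def _demo_screenshot_to_file() -> None:
    """Usage sketch (illustrative, not part of the original module): decode
    the base64 payload back to PNG bytes and write it to a hypothetical path.
    """
    shot = get_screenshot()
    if not shot.is_sensitive:
        with open("/tmp/harmony_screen.png", "wb") as f:
            f.write(base64.b64decode(shot.base64_data))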
def _get_hdc_prefix(device_id: str | None) -> list:
"""Get HDC command prefix with optional device specifier."""
if device_id:
return ["hdc", "-t", device_id]
return ["hdc"]
def _create_fallback_screenshot(is_sensitive: bool) -> Screenshot:
"""Create a black fallback image when screenshot fails."""
default_width, default_height = 1080, 2400
black_img = Image.new("RGB", (default_width, default_height), color="black")
buffered = BytesIO()
black_img.save(buffered, format="PNG")
base64_data = base64.b64encode(buffered.getvalue()).decode("utf-8")
return Screenshot(
base64_data=base64_data,
width=default_width,
height=default_height,
is_sensitive=is_sensitive,
)
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/hdc/screenshot.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/config/timing.py | """Timing configuration for Phone Agent.
This module defines all configurable waiting times used throughout the application.
Users can customize these values by modifying this file or by setting environment variables.
"""
import os
from dataclasses import dataclass
@dataclass
class ActionTimingConfig:
"""Configuration for action handler timing delays."""
# Text input related delays (in seconds)
keyboard_switch_delay: float = 1.0 # Delay after switching to ADB keyboard
text_clear_delay: float = 1.0 # Delay after clearing text
text_input_delay: float = 1.0 # Delay after typing text
keyboard_restore_delay: float = 1.0 # Delay after restoring original keyboard
def __post_init__(self):
"""Load values from environment variables if present."""
self.keyboard_switch_delay = float(
os.getenv("PHONE_AGENT_KEYBOARD_SWITCH_DELAY", self.keyboard_switch_delay)
)
self.text_clear_delay = float(
os.getenv("PHONE_AGENT_TEXT_CLEAR_DELAY", self.text_clear_delay)
)
self.text_input_delay = float(
os.getenv("PHONE_AGENT_TEXT_INPUT_DELAY", self.text_input_delay)
)
self.keyboard_restore_delay = float(
os.getenv("PHONE_AGENT_KEYBOARD_RESTORE_DELAY", self.keyboard_restore_delay)
)
@dataclass
class DeviceTimingConfig:
"""Configuration for device operation timing delays."""
# Default delays for various device operations (in seconds)
default_tap_delay: float = 1.0 # Default delay after tap
default_double_tap_delay: float = 1.0 # Default delay after double tap
double_tap_interval: float = 0.1 # Interval between two taps in double tap
default_long_press_delay: float = 1.0 # Default delay after long press
default_swipe_delay: float = 1.0 # Default delay after swipe
default_back_delay: float = 1.0 # Default delay after back button
default_home_delay: float = 1.0 # Default delay after home button
default_launch_delay: float = 1.0 # Default delay after launching app
def __post_init__(self):
"""Load values from environment variables if present."""
self.default_tap_delay = float(
os.getenv("PHONE_AGENT_TAP_DELAY", self.default_tap_delay)
)
self.default_double_tap_delay = float(
os.getenv("PHONE_AGENT_DOUBLE_TAP_DELAY", self.default_double_tap_delay)
)
self.double_tap_interval = float(
os.getenv("PHONE_AGENT_DOUBLE_TAP_INTERVAL", self.double_tap_interval)
)
self.default_long_press_delay = float(
os.getenv("PHONE_AGENT_LONG_PRESS_DELAY", self.default_long_press_delay)
)
self.default_swipe_delay = float(
os.getenv("PHONE_AGENT_SWIPE_DELAY", self.default_swipe_delay)
)
self.default_back_delay = float(
os.getenv("PHONE_AGENT_BACK_DELAY", self.default_back_delay)
)
self.default_home_delay = float(
os.getenv("PHONE_AGENT_HOME_DELAY", self.default_home_delay)
)
self.default_launch_delay = float(
os.getenv("PHONE_AGENT_LAUNCH_DELAY", self.default_launch_delay)
)
@dataclass
class ConnectionTimingConfig:
"""Configuration for ADB connection timing delays."""
# ADB server and connection delays (in seconds)
adb_restart_delay: float = 2.0 # Wait time after enabling TCP/IP mode
    server_restart_delay: float = 1.0  # Wait time between killing and starting ADB server
def __post_init__(self):
"""Load values from environment variables if present."""
self.adb_restart_delay = float(
os.getenv("PHONE_AGENT_ADB_RESTART_DELAY", self.adb_restart_delay)
)
self.server_restart_delay = float(
os.getenv("PHONE_AGENT_SERVER_RESTART_DELAY", self.server_restart_delay)
)
@dataclass
class TimingConfig:
"""Master timing configuration combining all timing settings."""
action: ActionTimingConfig
device: DeviceTimingConfig
connection: ConnectionTimingConfig
def __init__(self):
"""Initialize all timing configurations."""
self.action = ActionTimingConfig()
self.device = DeviceTimingConfig()
self.connection = ConnectionTimingConfig()
# Global timing configuration instance
# Users can modify these values at runtime or through environment variables
TIMING_CONFIG = TimingConfig()
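# Usage sketch (illustrative): environment overrides are read once, inside
# each dataclass's __post_init__, so they must be set before the config
# object is constructed. The delay value below is hypothetical.
#
#     import os
#     os.environ["PHONE_AGENT_TAP_DELAY"] = "0.3"
#     update_timing_config(device=DeviceTimingConfig())  # picks up 0.3s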
def get_timing_config() -> TimingConfig:
"""
Get the global timing configuration.
Returns:
The global TimingConfig instance.
"""
return TIMING_CONFIG
def update_timing_config(
action: ActionTimingConfig | None = None,
device: DeviceTimingConfig | None = None,
connection: ConnectionTimingConfig | None = None,
) -> None:
"""
Update the global timing configuration.
Args:
action: New action timing configuration.
device: New device timing configuration.
connection: New connection timing configuration.
Example:
>>> from phone_agent.config.timing import update_timing_config, ActionTimingConfig
>>> custom_action = ActionTimingConfig(
... keyboard_switch_delay=0.5,
... text_input_delay=0.5
... )
>>> update_timing_config(action=custom_action)
"""
global TIMING_CONFIG
if action is not None:
TIMING_CONFIG.action = action
if device is not None:
TIMING_CONFIG.device = device
if connection is not None:
TIMING_CONFIG.connection = connection
__all__ = [
"ActionTimingConfig",
"DeviceTimingConfig",
"ConnectionTimingConfig",
"TimingConfig",
"TIMING_CONFIG",
"get_timing_config",
"update_timing_config",
]
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/config/timing.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zai-org/Open-AutoGLM:scripts/check_deployment_en.py | import argparse
import json
import os
from openai import OpenAI
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Tool for checking if model deployment is successful",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Usage examples:
python scripts/check_deployment_en.py --base-url http://localhost:8000/v1 --apikey your-key --model autoglm-phone-9b
python scripts/check_deployment_en.py --base-url http://localhost:8000/v1 --apikey your-key --model autoglm-phone-9b --messages-file custom.json
""",
)
parser.add_argument(
"--base-url",
type=str,
required=True,
help="Base URL of the API service, e.g.: http://localhost:8000/v1",
)
parser.add_argument(
"--apikey", type=str, default="EMPTY", help="API key (default: EMPTY)"
)
parser.add_argument(
"--model",
type=str,
required=True,
help="Name of the model to test, e.g.: autoglm-phone-9b",
)
parser.add_argument(
"--messages-file",
type=str,
default="scripts/sample_messages_en.json",
help="Path to JSON file containing test messages (default: scripts/sample_messages_en.json)",
)
parser.add_argument(
"--max-tokens",
type=int,
default=3000,
help="Maximum generation tokens (default: 3000)",
)
parser.add_argument(
"--temperature",
type=float,
default=0.0,
help="Sampling temperature (default: 0.0)",
)
parser.add_argument(
"--top_p",
type=float,
default=0.85,
help="Nucleus sampling parameter (default: 0.85)",
)
parser.add_argument(
"--frequency_penalty",
type=float,
default=0.2,
help="Frequency penalty parameter (default: 0.2)",
)
args = parser.parse_args()
# Read test messages
if not os.path.exists(args.messages_file):
print(f"Error: Message file {args.messages_file} does not exist")
exit(1)
with open(args.messages_file) as f:
messages = json.load(f)
base_url = args.base_url
api_key = args.apikey
model = args.model
print(f"Starting model inference test...")
print(f"Base URL: {base_url}")
print(f"Model: {model}")
print(f"Messages file: {args.messages_file}")
print("=" * 80)
try:
client = OpenAI(
base_url=base_url,
api_key=api_key,
)
response = client.chat.completions.create(
messages=messages,
model=model,
max_tokens=args.max_tokens,
temperature=args.temperature,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
stream=False,
)
print("\nModel inference result:")
print("=" * 80)
print(response.choices[0].message.content)
print("=" * 80)
if response.usage:
print(f"\nStatistics:")
print(f" - Prompt tokens: {response.usage.prompt_tokens}")
print(f" - Completion tokens: {response.usage.completion_tokens}")
print(f" - Total tokens: {response.usage.total_tokens}")
print(
f"\nPlease evaluate the above inference result to determine if the model deployment meets expectations."
)
except Exception as e:
print(f"\nError occurred while calling API:")
print(f"Error type: {type(e).__name__}")
print(f"Error message: {str(e)}")
print(
"\nTip: Please check if base_url, api_key and model parameters are correct, and if the service is running."
)
exit(1)
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "scripts/check_deployment_en.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zai-org/Open-AutoGLM:phone_agent/config/prompts_en.py | """System prompts for the AI agent."""
from datetime import datetime
today = datetime.today()
formatted_date = today.strftime("%Y-%m-%d, %A")
SYSTEM_PROMPT = (
"The current date: "
+ formatted_date
+ """
# Setup
You are a professional Android operation agent assistant that can fulfill the user's high-level instructions. Given a screenshot of the Android interface at each step, you first analyze the situation, then plan the best course of action using Python-style pseudo-code.
# More details about the code
Your response format must be structured as follows:
Think first: Use <think>...</think> to analyze the current screen, identify key elements, and determine the most efficient action.
Provide the action: Use <answer>...</answer> to return a single line of pseudo-code representing the operation.
Your output should STRICTLY follow the format:
<think>
[Your thought]
</think>
<answer>
[Your operation code]
</answer>
- **Tap**
Perform a tap action on a specified screen area. The element is a list of 2 integers, representing the coordinates of the tap point.
**Example**:
<answer>
do(action="Tap", element=[x,y])
</answer>
- **Type**
Enter text into the currently focused input field.
**Example**:
<answer>
do(action="Type", text="Hello World")
</answer>
- **Swipe**
Perform a swipe action with start point and end point.
**Example**:
<answer>
do(action="Swipe", start=[x1,y1], end=[x2,y2])
</answer>
- **Long Press**
Perform a long press action on a specified screen area.
You can add the element to the action to specify the long press area. The element is a list of 2 integers, representing the coordinates of the long press point.
**Example**:
<answer>
do(action="Long Press", element=[x,y])
</answer>
- **Launch**
Launch an app. Try to use launch action when you need to launch an app. Check the instruction to choose the right app before you use this action.
**Example**:
<answer>
do(action="Launch", app="Settings")
</answer>
- **Back**
Press the Back button to navigate to the previous screen.
**Example**:
<answer>
do(action="Back")
</answer>
- **Finish**
Terminate the program and optionally print a message.
**Example**:
<answer>
finish(message="Task completed.")
</answer>
REMEMBER:
- Think before you act: Always analyze the current UI and the best course of action before executing any step, and output in <think> part.
- Only ONE LINE of action in <answer> part per response: Each step must contain exactly one line of executable code.
- Generate execution code strictly according to format requirements.
"""
)
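# Illustrative sketch (not part of this module): extracting the single action
# line that the prompt above requires the model to emit. The sample response
# is hypothetical.
#
#     import re
#     response = '<think>...</think>\n<answer>\ndo(action="Back")\n</answer>'
#     match = re.search(r"<answer>\s*(.*?)\s*</answer>", response, re.DOTALL)
#     action_line = match.group(1) if match else None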
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/config/prompts_en.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zai-org/Open-AutoGLM:main.py | #!/usr/bin/env python3
"""
Phone Agent CLI - AI-powered phone automation.
Usage:
python main.py [OPTIONS]
Environment Variables:
PHONE_AGENT_BASE_URL: Model API base URL (default: http://localhost:8000/v1)
PHONE_AGENT_MODEL: Model name (default: autoglm-phone-9b)
PHONE_AGENT_API_KEY: API key for model authentication (default: EMPTY)
PHONE_AGENT_MAX_STEPS: Maximum steps per task (default: 100)
PHONE_AGENT_DEVICE_ID: ADB device ID for multi-device setups
"""
import argparse
import os
import shutil
import subprocess
import sys
from urllib.parse import urlparse
from openai import OpenAI
from phone_agent import PhoneAgent
from phone_agent.agent import AgentConfig
from phone_agent.agent_ios import IOSAgentConfig, IOSPhoneAgent
from phone_agent.config.apps import list_supported_apps
from phone_agent.config.apps_harmonyos import list_supported_apps as list_harmonyos_apps
from phone_agent.config.apps_ios import list_supported_apps as list_ios_apps
from phone_agent.device_factory import DeviceType, get_device_factory, set_device_type
from phone_agent.model import ModelConfig
from phone_agent.xctest import XCTestConnection
from phone_agent.xctest import list_devices as list_ios_devices
def check_system_requirements(
device_type: DeviceType = DeviceType.ADB, wda_url: str = "http://localhost:8100"
) -> bool:
"""
Check system requirements before running the agent.
Checks:
1. ADB/HDC/iOS tools installed
2. At least one device connected
3. ADB Keyboard installed on the device (for ADB only)
4. WebDriverAgent running (for iOS only)
Args:
device_type: Type of device tool (ADB, HDC, or IOS).
wda_url: WebDriverAgent URL (for iOS only).
Returns:
True if all checks pass, False otherwise.
"""
print("🔍 Checking system requirements...")
print("-" * 50)
all_passed = True
# Determine tool name and command
if device_type == DeviceType.IOS:
tool_name = "libimobiledevice"
tool_cmd = "idevice_id"
else:
tool_name = "ADB" if device_type == DeviceType.ADB else "HDC"
tool_cmd = "adb" if device_type == DeviceType.ADB else "hdc"
# Check 1: Tool installed
print(f"1. Checking {tool_name} installation...", end=" ")
if shutil.which(tool_cmd) is None:
print("❌ FAILED")
print(f" Error: {tool_name} is not installed or not in PATH.")
print(f" Solution: Install {tool_name}:")
if device_type == DeviceType.ADB:
print(" - macOS: brew install android-platform-tools")
print(" - Linux: sudo apt install android-tools-adb")
print(
" - Windows: Download from https://developer.android.com/studio/releases/platform-tools"
)
elif device_type == DeviceType.HDC:
print(
" - Download from HarmonyOS SDK or https://gitee.com/openharmony/docs"
)
print(" - Add to PATH environment variable")
else: # IOS
print(" - macOS: brew install libimobiledevice")
print(" - Linux: sudo apt-get install libimobiledevice-utils")
all_passed = False
else:
# Double check by running version command
try:
if device_type == DeviceType.ADB:
version_cmd = [tool_cmd, "version"]
elif device_type == DeviceType.HDC:
version_cmd = [tool_cmd, "-v"]
else: # IOS
version_cmd = [tool_cmd, "-ln"]
result = subprocess.run(
version_cmd, capture_output=True, text=True, timeout=10
)
if result.returncode == 0:
version_line = result.stdout.strip().split("\n")[0]
print(f"✅ OK ({version_line if version_line else 'installed'})")
else:
print("❌ FAILED")
print(f" Error: {tool_name} command failed to run.")
all_passed = False
except FileNotFoundError:
print("❌ FAILED")
print(f" Error: {tool_name} command not found.")
all_passed = False
except subprocess.TimeoutExpired:
print("❌ FAILED")
print(f" Error: {tool_name} command timed out.")
all_passed = False
# If ADB is not installed, skip remaining checks
if not all_passed:
print("-" * 50)
print("❌ System check failed. Please fix the issues above.")
return False
# Check 2: Device connected
print("2. Checking connected devices...", end=" ")
try:
if device_type == DeviceType.ADB:
result = subprocess.run(
["adb", "devices"], capture_output=True, text=True, timeout=10
)
lines = result.stdout.strip().split("\n")
# Filter out header and empty lines, look for 'device' status
devices = [
line for line in lines[1:] if line.strip() and "\tdevice" in line
]
elif device_type == DeviceType.HDC:
result = subprocess.run(
["hdc", "list", "targets"], capture_output=True, text=True, timeout=10
)
lines = result.stdout.strip().split("\n")
devices = [line for line in lines if line.strip()]
else: # IOS
ios_devices = list_ios_devices()
devices = [d.device_id for d in ios_devices]
if not devices:
print("❌ FAILED")
print(" Error: No devices connected.")
print(" Solution:")
if device_type == DeviceType.ADB:
print(" 1. Enable USB debugging on your Android device")
print(" 2. Connect via USB and authorize the connection")
print(
" 3. Or connect remotely: python main.py --connect <ip>:<port>"
)
elif device_type == DeviceType.HDC:
print(" 1. Enable USB debugging on your HarmonyOS device")
print(" 2. Connect via USB and authorize the connection")
print(
" 3. Or connect remotely: python main.py --device-type hdc --connect <ip>:<port>"
)
else: # IOS
print(" 1. Connect your iOS device via USB")
print(" 2. Unlock device and tap 'Trust This Computer'")
print(" 3. Verify: idevice_id -l")
print(" 4. Or connect via WiFi using device IP")
all_passed = False
else:
if device_type == DeviceType.ADB:
device_ids = [d.split("\t")[0] for d in devices]
elif device_type == DeviceType.HDC:
device_ids = [d.strip() for d in devices]
else: # IOS
device_ids = devices
print(
f"✅ OK ({len(devices)} device(s): {', '.join(device_ids[:2])}{'...' if len(device_ids) > 2 else ''})"
)
except subprocess.TimeoutExpired:
print("❌ FAILED")
print(f" Error: {tool_name} command timed out.")
all_passed = False
except Exception as e:
print("❌ FAILED")
print(f" Error: {e}")
all_passed = False
# If no device connected, skip ADB Keyboard check
if not all_passed:
print("-" * 50)
print("❌ System check failed. Please fix the issues above.")
return False
# Check 3: ADB Keyboard installed (only for ADB) or WebDriverAgent (for iOS)
if device_type == DeviceType.ADB:
print("3. Checking ADB Keyboard...", end=" ")
try:
result = subprocess.run(
["adb", "shell", "ime", "list", "-s"],
capture_output=True,
text=True,
timeout=10,
)
ime_list = result.stdout.strip()
if "com.android.adbkeyboard/.AdbIME" in ime_list:
print("✅ OK")
else:
print("❌ FAILED")
print(" Error: ADB Keyboard is not installed on the device.")
print(" Solution:")
print(" 1. Download ADB Keyboard APK from:")
print(
" https://github.com/senzhk/ADBKeyBoard/blob/master/ADBKeyboard.apk"
)
print(" 2. Install it on your device: adb install ADBKeyboard.apk")
print(
" 3. Enable it in Settings > System > Languages & Input > Virtual Keyboard"
)
all_passed = False
except subprocess.TimeoutExpired:
print("❌ FAILED")
print(" Error: ADB command timed out.")
all_passed = False
except Exception as e:
print("❌ FAILED")
print(f" Error: {e}")
all_passed = False
elif device_type == DeviceType.HDC:
        # For HDC, skip the keyboard check since HarmonyOS uses a different input method
print("3. Skipping keyboard check for HarmonyOS...", end=" ")
print("✅ OK (using native input)")
else: # IOS
# Check WebDriverAgent
print(f"3. Checking WebDriverAgent ({wda_url})...", end=" ")
try:
conn = XCTestConnection(wda_url=wda_url)
if conn.is_wda_ready():
print("✅ OK")
# Get WDA status for additional info
status = conn.get_wda_status()
if status:
session_id = status.get("sessionId", "N/A")
print(f" Session ID: {session_id}")
else:
print("❌ FAILED")
print(" Error: WebDriverAgent is not running or not accessible.")
print(" Solution:")
print(" 1. Run WebDriverAgent on your iOS device via Xcode")
print(" 2. For USB: Set up port forwarding: iproxy 8100 8100")
print(
" 3. For WiFi: Use device IP, e.g., --wda-url http://192.168.1.100:8100"
)
print(" 4. Verify in browser: open http://localhost:8100/status")
all_passed = False
except Exception as e:
print("❌ FAILED")
print(f" Error: {e}")
all_passed = False
print("-" * 50)
if all_passed:
print("✅ All system checks passed!\n")
else:
print("❌ System check failed. Please fix the issues above.")
return all_passed
def check_model_api(base_url: str, model_name: str, api_key: str = "EMPTY") -> bool:
"""
Check if the model API is accessible and the specified model exists.
Checks:
1. Network connectivity to the API endpoint
    2. The specified model responds to a minimal chat completion request
Args:
base_url: The API base URL
model_name: The model name to check
api_key: The API key for authentication
Returns:
True if all checks pass, False otherwise.
"""
print("🔍 Checking model API...")
print("-" * 50)
all_passed = True
# Check 1: Network connectivity using chat API
print(f"1. Checking API connectivity ({base_url})...", end=" ")
try:
# Create OpenAI client
client = OpenAI(base_url=base_url, api_key=api_key, timeout=30.0)
# Use chat completion to test connectivity (more universally supported than /models)
response = client.chat.completions.create(
model=model_name,
messages=[{"role": "user", "content": "Hi"}],
max_tokens=5,
temperature=0.0,
stream=False,
)
# Check if we got a valid response
if response.choices and len(response.choices) > 0:
print("✅ OK")
else:
print("❌ FAILED")
print(" Error: Received empty response from API")
all_passed = False
except Exception as e:
print("❌ FAILED")
error_msg = str(e)
# Provide more specific error messages
if "Connection refused" in error_msg or "Connection error" in error_msg:
print(f" Error: Cannot connect to {base_url}")
print(" Solution:")
print(" 1. Check if the model server is running")
print(" 2. Verify the base URL is correct")
print(f" 3. Try: curl {base_url}/chat/completions")
elif "timed out" in error_msg.lower() or "timeout" in error_msg.lower():
print(f" Error: Connection to {base_url} timed out")
print(" Solution:")
print(" 1. Check your network connection")
print(" 2. Verify the server is responding")
elif (
"Name or service not known" in error_msg
or "nodename nor servname" in error_msg
):
print(f" Error: Cannot resolve hostname")
print(" Solution:")
print(" 1. Check the URL is correct")
print(" 2. Verify DNS settings")
else:
print(f" Error: {error_msg}")
all_passed = False
print("-" * 50)
if all_passed:
print("✅ Model API checks passed!\n")
else:
print("❌ Model API check failed. Please fix the issues above.")
return all_passed
def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description="Phone Agent - AI-powered phone automation",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Run with default settings (Android)
python main.py
# Specify model endpoint
python main.py --base-url http://localhost:8000/v1
# Use API key for authentication
python main.py --apikey sk-xxxxx
# Run with specific device
python main.py --device-id emulator-5554
# Connect to remote device
python main.py --connect 192.168.1.100:5555
# List connected devices
python main.py --list-devices
# Enable TCP/IP on USB device and get connection info
python main.py --enable-tcpip
# List supported apps
python main.py --list-apps
# iOS specific examples
# Run with iOS device
python main.py --device-type ios "Open Safari and search for iPhone tips"
# Use WiFi connection for iOS
python main.py --device-type ios --wda-url http://192.168.1.100:8100
# List connected iOS devices
python main.py --device-type ios --list-devices
# Check WebDriverAgent status
python main.py --device-type ios --wda-status
# Pair with iOS device
python main.py --device-type ios --pair
""",
)
# Model options
parser.add_argument(
"--base-url",
type=str,
default=os.getenv("PHONE_AGENT_BASE_URL", "http://localhost:8000/v1"),
help="Model API base URL",
)
parser.add_argument(
"--model",
type=str,
default=os.getenv("PHONE_AGENT_MODEL", "autoglm-phone-9b"),
help="Model name",
)
parser.add_argument(
"--apikey",
type=str,
default=os.getenv("PHONE_AGENT_API_KEY", "EMPTY"),
help="API key for model authentication",
)
parser.add_argument(
"--max-steps",
type=int,
default=int(os.getenv("PHONE_AGENT_MAX_STEPS", "100")),
help="Maximum steps per task",
)
# Device options
parser.add_argument(
"--device-id",
"-d",
type=str,
default=os.getenv("PHONE_AGENT_DEVICE_ID"),
help="ADB device ID",
)
parser.add_argument(
"--connect",
"-c",
type=str,
metavar="ADDRESS",
help="Connect to remote device (e.g., 192.168.1.100:5555)",
)
parser.add_argument(
"--disconnect",
type=str,
nargs="?",
const="all",
metavar="ADDRESS",
help="Disconnect from remote device (or 'all' to disconnect all)",
)
parser.add_argument(
"--list-devices", action="store_true", help="List connected devices and exit"
)
parser.add_argument(
"--enable-tcpip",
type=int,
nargs="?",
const=5555,
metavar="PORT",
help="Enable TCP/IP debugging on USB device (default port: 5555)",
)
# iOS specific options
parser.add_argument(
"--wda-url",
type=str,
default=os.getenv("PHONE_AGENT_WDA_URL", "http://localhost:8100"),
help="WebDriverAgent URL for iOS (default: http://localhost:8100)",
)
parser.add_argument(
"--pair",
action="store_true",
help="Pair with iOS device (required for some operations)",
)
parser.add_argument(
"--wda-status",
action="store_true",
help="Show WebDriverAgent status and exit (iOS only)",
)
# Other options
parser.add_argument(
"--quiet", "-q", action="store_true", help="Suppress verbose output"
)
parser.add_argument(
"--list-apps", action="store_true", help="List supported apps and exit"
)
parser.add_argument(
"--lang",
type=str,
choices=["cn", "en"],
default=os.getenv("PHONE_AGENT_LANG", "cn"),
help="Language for system prompt (cn or en, default: cn)",
)
parser.add_argument(
"--device-type",
type=str,
choices=["adb", "hdc", "ios"],
default=os.getenv("PHONE_AGENT_DEVICE_TYPE", "adb"),
help="Device type: adb for Android, hdc for HarmonyOS, ios for iPhone (default: adb)",
)
parser.add_argument(
"task",
nargs="?",
type=str,
help="Task to execute (interactive mode if not provided)",
)
return parser.parse_args()
def handle_ios_device_commands(args) -> bool:
"""
Handle iOS device-related commands.
Returns:
True if a device command was handled (should exit), False otherwise.
"""
conn = XCTestConnection(wda_url=args.wda_url)
# Handle --list-devices
if args.list_devices:
devices = list_ios_devices()
if not devices:
print("No iOS devices connected.")
print("\nTroubleshooting:")
print(" 1. Connect device via USB")
print(" 2. Unlock device and trust this computer")
print(" 3. Run: idevice_id -l")
else:
print("Connected iOS devices:")
print("-" * 70)
for device in devices:
conn_type = device.connection_type.value
model_info = f"{device.model}" if device.model else "Unknown"
ios_info = f"iOS {device.ios_version}" if device.ios_version else ""
name_info = device.device_name or "Unnamed"
print(f" ✓ {name_info}")
print(f" UUID: {device.device_id}")
print(f" Model: {model_info}")
print(f" OS: {ios_info}")
print(f" Connection: {conn_type}")
print("-" * 70)
return True
# Handle --pair
if args.pair:
print("Pairing with iOS device...")
success, message = conn.pair_device(args.device_id)
print(f"{'✓' if success else '✗'} {message}")
return True
# Handle --wda-status
if args.wda_status:
print(f"Checking WebDriverAgent status at {args.wda_url}...")
print("-" * 50)
if conn.is_wda_ready():
print("✓ WebDriverAgent is running")
status = conn.get_wda_status()
if status:
print(f"\nStatus details:")
value = status.get("value", {})
print(f" Session ID: {status.get('sessionId', 'N/A')}")
print(f" Build: {value.get('build', {}).get('time', 'N/A')}")
current_app = value.get("currentApp", {})
if current_app:
print(f"\nCurrent App:")
print(f" Bundle ID: {current_app.get('bundleId', 'N/A')}")
print(f" Process ID: {current_app.get('pid', 'N/A')}")
else:
print("✗ WebDriverAgent is not running")
print("\nPlease start WebDriverAgent on your iOS device:")
print(" 1. Open WebDriverAgent.xcodeproj in Xcode")
print(" 2. Select your device")
print(" 3. Run WebDriverAgentRunner (Product > Test or Cmd+U)")
print(f" 4. For USB: Run port forwarding: iproxy 8100 8100")
return True
return False
def handle_device_commands(args) -> bool:
"""
Handle device-related commands.
Returns:
True if a device command was handled (should exit), False otherwise.
"""
device_type = (
DeviceType.ADB
if args.device_type == "adb"
else (DeviceType.HDC if args.device_type == "hdc" else DeviceType.IOS)
)
# Handle iOS-specific commands
if device_type == DeviceType.IOS:
return handle_ios_device_commands(args)
device_factory = get_device_factory()
ConnectionClass = device_factory.get_connection_class()
conn = ConnectionClass()
# Handle --list-devices
if args.list_devices:
devices = device_factory.list_devices()
if not devices:
print("No devices connected.")
else:
print("Connected devices:")
print("-" * 60)
for device in devices:
status_icon = "✓" if device.status == "device" else "✗"
conn_type = device.connection_type.value
model_info = f" ({device.model})" if device.model else ""
print(
f" {status_icon} {device.device_id:<30} [{conn_type}]{model_info}"
)
return True
# Handle --connect
if args.connect:
print(f"Connecting to {args.connect}...")
success, message = conn.connect(args.connect)
print(f"{'✓' if success else '✗'} {message}")
if success:
# Set as default device
args.device_id = args.connect
        return not success  # False = keep running after a successful connect; True = exit on failure
# Handle --disconnect
if args.disconnect:
if args.disconnect == "all":
print("Disconnecting all remote devices...")
success, message = conn.disconnect()
else:
print(f"Disconnecting from {args.disconnect}...")
success, message = conn.disconnect(args.disconnect)
print(f"{'✓' if success else '✗'} {message}")
return True
# Handle --enable-tcpip
if args.enable_tcpip:
port = args.enable_tcpip
print(f"Enabling TCP/IP debugging on port {port}...")
success, message = conn.enable_tcpip(port, args.device_id)
print(f"{'✓' if success else '✗'} {message}")
if success:
# Try to get device IP
ip = conn.get_device_ip(args.device_id)
if ip:
print(f"\nYou can now connect remotely using:")
print(f" python main.py --connect {ip}:{port}")
print(f"\nOr via ADB directly:")
print(f" adb connect {ip}:{port}")
else:
print("\nCould not determine device IP. Check device WiFi settings.")
return True
return False
def main():
"""Main entry point."""
args = parse_args()
# Set device type globally based on args
if args.device_type == "adb":
device_type = DeviceType.ADB
elif args.device_type == "hdc":
device_type = DeviceType.HDC
else: # ios
device_type = DeviceType.IOS
# Set device type globally for non-iOS devices
if device_type != DeviceType.IOS:
set_device_type(device_type)
# Enable HDC verbose mode if using HDC
if device_type == DeviceType.HDC:
from phone_agent.hdc import set_hdc_verbose
set_hdc_verbose(True)
# Handle --list-apps (no system check needed)
if args.list_apps:
if device_type == DeviceType.HDC:
print("Supported HarmonyOS apps:")
apps = list_harmonyos_apps()
elif device_type == DeviceType.IOS:
print("Supported iOS apps:")
print("\nNote: For iOS apps, Bundle IDs are configured in:")
print(" phone_agent/config/apps_ios.py")
print("\nCurrently configured apps:")
apps = list_ios_apps()
else:
print("Supported Android apps:")
apps = list_supported_apps()
for app in sorted(apps):
print(f" - {app}")
if device_type == DeviceType.IOS:
print(
"\nTo add iOS apps, find the Bundle ID and add to APP_PACKAGES_IOS dictionary."
)
return
# Handle device commands (these may need partial system checks)
if handle_device_commands(args):
return
# Run system requirements check before proceeding
if not check_system_requirements(
device_type,
wda_url=args.wda_url
if device_type == DeviceType.IOS
else "http://localhost:8100",
):
sys.exit(1)
# Check model API connectivity and model availability
if not check_model_api(args.base_url, args.model, args.apikey):
sys.exit(1)
# Create configurations and agent based on device type
model_config = ModelConfig(
base_url=args.base_url,
model_name=args.model,
api_key=args.apikey,
lang=args.lang,
)
if device_type == DeviceType.IOS:
# Create iOS agent
agent_config = IOSAgentConfig(
max_steps=args.max_steps,
wda_url=args.wda_url,
device_id=args.device_id,
verbose=not args.quiet,
lang=args.lang,
)
agent = IOSPhoneAgent(
model_config=model_config,
agent_config=agent_config,
)
else:
# Create Android/HarmonyOS agent
agent_config = AgentConfig(
max_steps=args.max_steps,
device_id=args.device_id,
verbose=not args.quiet,
lang=args.lang,
)
agent = PhoneAgent(
model_config=model_config,
agent_config=agent_config,
)
# Print header
print("=" * 50)
if device_type == DeviceType.IOS:
print("Phone Agent iOS - AI-powered iOS automation")
else:
print("Phone Agent - AI-powered phone automation")
print("=" * 50)
print(f"Model: {model_config.model_name}")
print(f"Base URL: {model_config.base_url}")
print(f"Max Steps: {agent_config.max_steps}")
print(f"Language: {agent_config.lang}")
print(f"Device Type: {args.device_type.upper()}")
# Show iOS-specific config
if device_type == DeviceType.IOS:
print(f"WDA URL: {args.wda_url}")
# Show device info
if device_type == DeviceType.IOS:
devices = list_ios_devices()
if agent_config.device_id:
print(f"Device: {agent_config.device_id}")
elif devices:
device = devices[0]
print(f"Device: {device.device_name or device.device_id[:16]}")
if device.model and device.ios_version:
print(f" {device.model}, iOS {device.ios_version}")
else:
device_factory = get_device_factory()
devices = device_factory.list_devices()
if agent_config.device_id:
print(f"Device: {agent_config.device_id}")
elif devices:
print(f"Device: {devices[0].device_id} (auto-detected)")
print("=" * 50)
# Run with provided task or enter interactive mode
if args.task:
print(f"\nTask: {args.task}\n")
result = agent.run(args.task)
print(f"\nResult: {result}")
else:
# Interactive mode
print("\nEntering interactive mode. Type 'quit' to exit.\n")
while True:
try:
task = input("Enter your task: ").strip()
if task.lower() in ("quit", "exit", "q"):
print("Goodbye!")
break
if not task:
continue
print()
result = agent.run(task)
print(f"\nResult: {result}\n")
agent.reset()
except KeyboardInterrupt:
print("\n\nInterrupted. Goodbye!")
break
except Exception as e:
print(f"\nError: {e}\n")
if __name__ == "__main__":
main()
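# Illustrative note (not part of the original script): the check functions can
# also be reused programmatically as a preflight step, e.g. from CI. The
# endpoint and model name below are just this script's own defaults:
#
#     from main import check_model_api, check_system_requirements
#     from phone_agent.device_factory import DeviceType
#
#     ok = check_system_requirements(DeviceType.ADB)
#     ok = ok and check_model_api("http://localhost:8000/v1", "autoglm-phone-9b")
#     raise SystemExit(0 if ok else 1)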
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "main.py",
"license": "Apache License 2.0",
"lines": 730,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/actions/handler.py | """Action handler for processing AI model outputs."""
import ast
import re
import subprocess
import time
from dataclasses import dataclass
from typing import Any, Callable
from phone_agent.config.timing import TIMING_CONFIG
from phone_agent.device_factory import get_device_factory
@dataclass
class ActionResult:
"""Result of an action execution."""
success: bool
should_finish: bool
message: str | None = None
requires_confirmation: bool = False
class ActionHandler:
"""
Handles execution of actions from AI model output.
Args:
device_id: Optional ADB device ID for multi-device setups.
confirmation_callback: Optional callback for sensitive action confirmation.
Should return True to proceed, False to cancel.
takeover_callback: Optional callback for takeover requests (login, captcha).
"""
def __init__(
self,
device_id: str | None = None,
confirmation_callback: Callable[[str], bool] | None = None,
takeover_callback: Callable[[str], None] | None = None,
):
self.device_id = device_id
self.confirmation_callback = confirmation_callback or self._default_confirmation
self.takeover_callback = takeover_callback or self._default_takeover
def execute(
self, action: dict[str, Any], screen_width: int, screen_height: int
) -> ActionResult:
"""
Execute an action from the AI model.
Args:
action: The action dictionary from the model.
screen_width: Current screen width in pixels.
screen_height: Current screen height in pixels.
Returns:
ActionResult indicating success and whether to finish.
"""
action_type = action.get("_metadata")
if action_type == "finish":
return ActionResult(
success=True, should_finish=True, message=action.get("message")
)
if action_type != "do":
return ActionResult(
success=False,
should_finish=True,
message=f"Unknown action type: {action_type}",
)
action_name = action.get("action")
handler_method = self._get_handler(action_name)
if handler_method is None:
return ActionResult(
success=False,
should_finish=False,
message=f"Unknown action: {action_name}",
)
try:
return handler_method(action, screen_width, screen_height)
except Exception as e:
return ActionResult(
success=False, should_finish=False, message=f"Action failed: {e}"
)
def _get_handler(self, action_name: str) -> Callable | None:
"""Get the handler method for an action."""
handlers = {
"Launch": self._handle_launch,
"Tap": self._handle_tap,
"Type": self._handle_type,
"Type_Name": self._handle_type,
"Swipe": self._handle_swipe,
"Back": self._handle_back,
"Home": self._handle_home,
"Double Tap": self._handle_double_tap,
"Long Press": self._handle_long_press,
"Wait": self._handle_wait,
"Take_over": self._handle_takeover,
"Note": self._handle_note,
"Call_API": self._handle_call_api,
"Interact": self._handle_interact,
}
return handlers.get(action_name)
def _convert_relative_to_absolute(
self, element: list[int], screen_width: int, screen_height: int
) -> tuple[int, int]:
"""Convert relative coordinates (0-1000) to absolute pixels."""
x = int(element[0] / 1000 * screen_width)
y = int(element[1] / 1000 * screen_height)
return x, y
def _handle_launch(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle app launch action."""
app_name = action.get("app")
if not app_name:
return ActionResult(False, False, "No app name specified")
device_factory = get_device_factory()
success = device_factory.launch_app(app_name, self.device_id)
if success:
return ActionResult(True, False)
return ActionResult(False, False, f"App not found: {app_name}")
def _handle_tap(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle tap action."""
element = action.get("element")
if not element:
return ActionResult(False, False, "No element coordinates")
x, y = self._convert_relative_to_absolute(element, width, height)
# Check for sensitive operation
if "message" in action:
if not self.confirmation_callback(action["message"]):
return ActionResult(
success=False,
should_finish=True,
message="User cancelled sensitive operation",
)
device_factory = get_device_factory()
device_factory.tap(x, y, self.device_id)
return ActionResult(True, False)
def _handle_type(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle text input action."""
text = action.get("text", "")
device_factory = get_device_factory()
# Switch to ADB keyboard
original_ime = device_factory.detect_and_set_adb_keyboard(self.device_id)
time.sleep(TIMING_CONFIG.action.keyboard_switch_delay)
# Clear existing text and type new text
device_factory.clear_text(self.device_id)
time.sleep(TIMING_CONFIG.action.text_clear_delay)
        # Type the new text into the focused field
device_factory.type_text(text, self.device_id)
time.sleep(TIMING_CONFIG.action.text_input_delay)
# Restore original keyboard
device_factory.restore_keyboard(original_ime, self.device_id)
time.sleep(TIMING_CONFIG.action.keyboard_restore_delay)
return ActionResult(True, False)
def _handle_swipe(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle swipe action."""
start = action.get("start")
end = action.get("end")
if not start or not end:
return ActionResult(False, False, "Missing swipe coordinates")
start_x, start_y = self._convert_relative_to_absolute(start, width, height)
end_x, end_y = self._convert_relative_to_absolute(end, width, height)
device_factory = get_device_factory()
device_factory.swipe(start_x, start_y, end_x, end_y, device_id=self.device_id)
return ActionResult(True, False)
def _handle_back(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle back button action."""
device_factory = get_device_factory()
device_factory.back(self.device_id)
return ActionResult(True, False)
def _handle_home(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle home button action."""
device_factory = get_device_factory()
device_factory.home(self.device_id)
return ActionResult(True, False)
def _handle_double_tap(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle double tap action."""
element = action.get("element")
if not element:
return ActionResult(False, False, "No element coordinates")
x, y = self._convert_relative_to_absolute(element, width, height)
device_factory = get_device_factory()
device_factory.double_tap(x, y, self.device_id)
return ActionResult(True, False)
def _handle_long_press(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle long press action."""
element = action.get("element")
if not element:
return ActionResult(False, False, "No element coordinates")
x, y = self._convert_relative_to_absolute(element, width, height)
device_factory = get_device_factory()
device_factory.long_press(x, y, device_id=self.device_id)
return ActionResult(True, False)
def _handle_wait(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle wait action."""
duration_str = action.get("duration", "1 seconds")
try:
            duration = float(str(duration_str).replace("seconds", "").strip())
except ValueError:
duration = 1.0
time.sleep(duration)
return ActionResult(True, False)
def _handle_takeover(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle takeover request (login, captcha, etc.)."""
message = action.get("message", "User intervention required")
self.takeover_callback(message)
return ActionResult(True, False)
def _handle_note(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle note action (placeholder for content recording)."""
# This action is typically used for recording page content
# Implementation depends on specific requirements
return ActionResult(True, False)
def _handle_call_api(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle API call action (placeholder for summarization)."""
# This action is typically used for content summarization
# Implementation depends on specific requirements
return ActionResult(True, False)
def _handle_interact(self, action: dict, width: int, height: int) -> ActionResult:
"""Handle interaction request (user choice needed)."""
# This action signals that user input is needed
return ActionResult(True, False, message="User interaction required")
def _send_keyevent(self, keycode: str) -> None:
"""Send a keyevent to the device."""
from phone_agent.device_factory import DeviceType, get_device_factory
from phone_agent.hdc.connection import _run_hdc_command
device_factory = get_device_factory()
# Handle HDC devices with HarmonyOS-specific keyEvent command
if device_factory.device_type == DeviceType.HDC:
hdc_prefix = ["hdc", "-t", self.device_id] if self.device_id else ["hdc"]
# Map common keycodes to HarmonyOS keyEvent codes
# KEYCODE_ENTER (66) -> 2054 (HarmonyOS Enter key code)
if keycode == "KEYCODE_ENTER" or keycode == "66":
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", "2054"],
capture_output=True,
text=True,
)
else:
# For other keys, try to use the numeric code directly
# If keycode is a string like "KEYCODE_ENTER", convert it
try:
# Try to extract numeric code from string or use as-is
if keycode.startswith("KEYCODE_"):
# For now, only handle ENTER, other keys may need mapping
if "ENTER" in keycode:
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", "2054"],
capture_output=True,
text=True,
)
else:
# Fallback to ADB-style command for unsupported keys
subprocess.run(
hdc_prefix + ["shell", "input", "keyevent", keycode],
capture_output=True,
text=True,
)
else:
# Assume it's a numeric code
_run_hdc_command(
hdc_prefix + ["shell", "uitest", "uiInput", "keyEvent", str(keycode)],
capture_output=True,
text=True,
)
except Exception:
# Fallback to ADB-style command
subprocess.run(
hdc_prefix + ["shell", "input", "keyevent", keycode],
capture_output=True,
text=True,
)
else:
# ADB devices use standard input keyevent command
cmd_prefix = ["adb", "-s", self.device_id] if self.device_id else ["adb"]
subprocess.run(
cmd_prefix + ["shell", "input", "keyevent", keycode],
capture_output=True,
text=True,
)
@staticmethod
def _default_confirmation(message: str) -> bool:
"""Default confirmation callback using console input."""
response = input(f"Sensitive operation: {message}\nConfirm? (Y/N): ")
return response.upper() == "Y"
@staticmethod
def _default_takeover(message: str) -> None:
"""Default takeover callback using console input."""
input(f"{message}\nPress Enter after completing manual operation...")
def parse_action(response: str) -> dict[str, Any]:
"""
Parse action from model response.
Args:
response: Raw response string from the model.
Returns:
Parsed action dictionary.
Raises:
ValueError: If the response cannot be parsed.
"""
print(f"Parsing action: {response}")
try:
response = response.strip()
if response.startswith('do(action="Type"') or response.startswith(
'do(action="Type_Name"'
):
text = response.split("text=", 1)[1][1:-2]
action = {"_metadata": "do", "action": "Type", "text": text}
return action
elif response.startswith("do"):
# Use AST parsing instead of eval for safety
try:
# Escape special characters (newlines, tabs, etc.) for valid Python syntax
response = response.replace('\n', '\\n')
response = response.replace('\r', '\\r')
response = response.replace('\t', '\\t')
tree = ast.parse(response, mode="eval")
if not isinstance(tree.body, ast.Call):
raise ValueError("Expected a function call")
call = tree.body
# Extract keyword arguments safely
action = {"_metadata": "do"}
for keyword in call.keywords:
key = keyword.arg
value = ast.literal_eval(keyword.value)
action[key] = value
return action
except (SyntaxError, ValueError) as e:
raise ValueError(f"Failed to parse do() action: {e}")
elif response.startswith("finish"):
action = {
"_metadata": "finish",
"message": response.replace("finish(message=", "")[1:-2],
}
else:
raise ValueError(f"Failed to parse action: {response}")
return action
except Exception as e:
raise ValueError(f"Failed to parse action: {e}")
def do(**kwargs) -> dict[str, Any]:
"""Helper function for creating 'do' actions."""
kwargs["_metadata"] = "do"
return kwargs
def finish(**kwargs) -> dict[str, Any]:
"""Helper function for creating 'finish' actions."""
kwargs["_metadata"] = "finish"
return kwargs
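# Illustrative sketch (not part of the original module): round-tripping a model
# response through parse_action() and ActionHandler.execute(). The response
# string and the 1080x2400 screen size are made-up examples, and execute()
# needs a connected device to actually perform the tap.
if __name__ == "__main__":
    sample = 'do(action="Tap", element=[500, 500])'
    parsed = parse_action(sample)
    # Element coordinates are relative (0-1000); execute() scales them to
    # pixels, here (540, 1200) on a 1080x2400 screen.
    result = ActionHandler().execute(parsed, screen_width=1080, screen_height=2400)
    print(result)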
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/actions/handler.py",
"license": "Apache License 2.0",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/adb/connection.py | """ADB connection management for local and remote devices."""
import subprocess
import time
from dataclasses import dataclass
from enum import Enum
from typing import Optional
from phone_agent.config.timing import TIMING_CONFIG
class ConnectionType(Enum):
"""Type of ADB connection."""
USB = "usb"
WIFI = "wifi"
REMOTE = "remote"
@dataclass
class DeviceInfo:
"""Information about a connected device."""
device_id: str
status: str
connection_type: ConnectionType
model: str | None = None
android_version: str | None = None
class ADBConnection:
"""
Manages ADB connections to Android devices.
Supports USB, WiFi, and remote TCP/IP connections.
Example:
>>> conn = ADBConnection()
>>> # Connect to remote device
>>> conn.connect("192.168.1.100:5555")
>>> # List devices
>>> devices = conn.list_devices()
>>> # Disconnect
>>> conn.disconnect("192.168.1.100:5555")
"""
def __init__(self, adb_path: str = "adb"):
"""
Initialize ADB connection manager.
Args:
adb_path: Path to ADB executable.
"""
self.adb_path = adb_path
def connect(self, address: str, timeout: int = 10) -> tuple[bool, str]:
"""
Connect to a remote device via TCP/IP.
Args:
address: Device address in format "host:port" (e.g., "192.168.1.100:5555").
timeout: Connection timeout in seconds.
Returns:
Tuple of (success, message).
Note:
The remote device must have TCP/IP debugging enabled.
On the device, run: adb tcpip 5555
"""
# Validate address format
if ":" not in address:
address = f"{address}:5555" # Default ADB port
try:
result = subprocess.run(
[self.adb_path, "connect", address],
capture_output=True,
text=True,
timeout=timeout,
)
output = result.stdout + result.stderr
if "connected" in output.lower():
return True, f"Connected to {address}"
elif "already connected" in output.lower():
return True, f"Already connected to {address}"
else:
return False, output.strip()
except subprocess.TimeoutExpired:
return False, f"Connection timeout after {timeout}s"
except Exception as e:
return False, f"Connection error: {e}"
def disconnect(self, address: str | None = None) -> tuple[bool, str]:
"""
Disconnect from a remote device.
Args:
address: Device address to disconnect. If None, disconnects all.
Returns:
Tuple of (success, message).
"""
try:
cmd = [self.adb_path, "disconnect"]
if address:
cmd.append(address)
result = subprocess.run(cmd, capture_output=True, text=True, encoding="utf-8", timeout=5)
output = result.stdout + result.stderr
return True, output.strip() or "Disconnected"
except Exception as e:
return False, f"Disconnect error: {e}"
def list_devices(self) -> list[DeviceInfo]:
"""
List all connected devices.
Returns:
List of DeviceInfo objects.
"""
try:
result = subprocess.run(
[self.adb_path, "devices", "-l"],
capture_output=True,
text=True,
timeout=5,
)
devices = []
for line in result.stdout.strip().split("\n")[1:]: # Skip header
if not line.strip():
continue
parts = line.split()
if len(parts) >= 2:
device_id = parts[0]
status = parts[1]
# Determine connection type
if ":" in device_id:
conn_type = ConnectionType.REMOTE
elif "emulator" in device_id:
                        conn_type = ConnectionType.USB  # Local emulator, treated like USB
else:
conn_type = ConnectionType.USB
# Parse additional info
model = None
for part in parts[2:]:
if part.startswith("model:"):
model = part.split(":", 1)[1]
break
devices.append(
DeviceInfo(
device_id=device_id,
status=status,
connection_type=conn_type,
model=model,
)
)
return devices
except Exception as e:
print(f"Error listing devices: {e}")
return []
def get_device_info(self, device_id: str | None = None) -> DeviceInfo | None:
"""
Get detailed information about a device.
Args:
device_id: Device ID. If None, uses first available device.
Returns:
DeviceInfo or None if not found.
"""
devices = self.list_devices()
if not devices:
return None
if device_id is None:
return devices[0]
for device in devices:
if device.device_id == device_id:
return device
return None
def is_connected(self, device_id: str | None = None) -> bool:
"""
Check if a device is connected.
Args:
device_id: Device ID to check. If None, checks if any device is connected.
Returns:
True if connected, False otherwise.
"""
devices = self.list_devices()
if not devices:
return False
if device_id is None:
return any(d.status == "device" for d in devices)
return any(d.device_id == device_id and d.status == "device" for d in devices)
def enable_tcpip(
self, port: int = 5555, device_id: str | None = None
) -> tuple[bool, str]:
"""
Enable TCP/IP debugging on a USB-connected device.
This allows subsequent wireless connections to the device.
Args:
port: TCP port for ADB (default: 5555).
device_id: Device ID. If None, uses first available device.
Returns:
Tuple of (success, message).
Note:
The device must be connected via USB first.
After this, you can disconnect USB and connect via WiFi.
"""
try:
cmd = [self.adb_path]
if device_id:
cmd.extend(["-s", device_id])
cmd.extend(["tcpip", str(port)])
result = subprocess.run(cmd, capture_output=True, text=True, encoding="utf-8", timeout=10)
output = result.stdout + result.stderr
if "restarting" in output.lower() or result.returncode == 0:
time.sleep(TIMING_CONFIG.connection.adb_restart_delay)
return True, f"TCP/IP mode enabled on port {port}"
else:
return False, output.strip()
except Exception as e:
return False, f"Error enabling TCP/IP: {e}"
def get_device_ip(self, device_id: str | None = None) -> str | None:
"""
Get the IP address of a connected device.
Args:
device_id: Device ID. If None, uses first available device.
Returns:
IP address string or None if not found.
"""
try:
cmd = [self.adb_path]
if device_id:
cmd.extend(["-s", device_id])
cmd.extend(["shell", "ip", "route"])
result = subprocess.run(cmd, capture_output=True, text=True, encoding="utf-8", timeout=5)
# Parse IP from route output
for line in result.stdout.split("\n"):
if "src" in line:
parts = line.split()
for i, part in enumerate(parts):
if part == "src" and i + 1 < len(parts):
return parts[i + 1]
            # Alternative: query the wlan0 interface directly
            cmd = [self.adb_path]
            if device_id:
                cmd.extend(["-s", device_id])
            cmd.extend(["shell", "ip", "addr", "show", "wlan0"])
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                encoding="utf-8",
                timeout=5,
            )
for line in result.stdout.split("\n"):
if "inet " in line:
parts = line.strip().split()
if len(parts) >= 2:
return parts[1].split("/")[0]
return None
except Exception as e:
print(f"Error getting device IP: {e}")
return None
def restart_server(self) -> tuple[bool, str]:
"""
Restart the ADB server.
Returns:
Tuple of (success, message).
"""
try:
# Kill server
subprocess.run(
[self.adb_path, "kill-server"], capture_output=True, timeout=5
)
time.sleep(TIMING_CONFIG.connection.server_restart_delay)
# Start server
subprocess.run(
[self.adb_path, "start-server"], capture_output=True, timeout=5
)
return True, "ADB server restarted"
except Exception as e:
return False, f"Error restarting server: {e}"
def quick_connect(address: str) -> tuple[bool, str]:
"""
Quick helper to connect to a remote device.
Args:
address: Device address (e.g., "192.168.1.100" or "192.168.1.100:5555").
Returns:
Tuple of (success, message).
"""
conn = ADBConnection()
return conn.connect(address)
def list_devices() -> list[DeviceInfo]:
"""
Quick helper to list connected devices.
Returns:
List of DeviceInfo objects.
"""
conn = ADBConnection()
return conn.list_devices()
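# Illustrative sketch (not part of the original module): moving a USB-attached
# device over to wireless debugging. The port is the ADB default; the device
# must be plugged in over USB first.
if __name__ == "__main__":
    conn = ADBConnection()
    ok, msg = conn.enable_tcpip(5555)
    print(msg)
    if ok:
        ip = conn.get_device_ip()
        if ip:
            print(conn.connect(f"{ip}:5555"))  # device is now reachable over WiFi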
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/adb/connection.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/adb/device.py | """Device control utilities for Android automation."""
import os
import subprocess
import time
from typing import List, Optional, Tuple
from phone_agent.config.apps import APP_PACKAGES
from phone_agent.config.timing import TIMING_CONFIG
def get_current_app(device_id: str | None = None) -> str:
"""
Get the currently focused app name.
Args:
device_id: Optional ADB device ID for multi-device setups.
Returns:
The app name if recognized, otherwise "System Home".
"""
adb_prefix = _get_adb_prefix(device_id)
result = subprocess.run(
adb_prefix + ["shell", "dumpsys", "window"], capture_output=True, text=True, encoding="utf-8"
)
output = result.stdout
if not output:
raise ValueError("No output from dumpsys window")
# Parse window focus info
for line in output.split("\n"):
if "mCurrentFocus" in line or "mFocusedApp" in line:
for app_name, package in APP_PACKAGES.items():
if package in line:
return app_name
return "System Home"
def tap(
x: int, y: int, device_id: str | None = None, delay: float | None = None
) -> None:
"""
Tap at the specified coordinates.
Args:
x: X coordinate.
y: Y coordinate.
device_id: Optional ADB device ID.
delay: Delay in seconds after tap. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_tap_delay
adb_prefix = _get_adb_prefix(device_id)
subprocess.run(
adb_prefix + ["shell", "input", "tap", str(x), str(y)], capture_output=True
)
time.sleep(delay)
def double_tap(
x: int, y: int, device_id: str | None = None, delay: float | None = None
) -> None:
"""
Double tap at the specified coordinates.
Args:
x: X coordinate.
y: Y coordinate.
device_id: Optional ADB device ID.
delay: Delay in seconds after double tap. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_double_tap_delay
adb_prefix = _get_adb_prefix(device_id)
subprocess.run(
adb_prefix + ["shell", "input", "tap", str(x), str(y)], capture_output=True
)
time.sleep(TIMING_CONFIG.device.double_tap_interval)
subprocess.run(
adb_prefix + ["shell", "input", "tap", str(x), str(y)], capture_output=True
)
time.sleep(delay)
def long_press(
x: int,
y: int,
duration_ms: int = 3000,
device_id: str | None = None,
delay: float | None = None,
) -> None:
"""
Long press at the specified coordinates.
Args:
x: X coordinate.
y: Y coordinate.
duration_ms: Duration of press in milliseconds.
device_id: Optional ADB device ID.
delay: Delay in seconds after long press. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_long_press_delay
adb_prefix = _get_adb_prefix(device_id)
subprocess.run(
adb_prefix
+ ["shell", "input", "swipe", str(x), str(y), str(x), str(y), str(duration_ms)],
capture_output=True,
)
time.sleep(delay)
def swipe(
start_x: int,
start_y: int,
end_x: int,
end_y: int,
duration_ms: int | None = None,
device_id: str | None = None,
delay: float | None = None,
) -> None:
"""
Swipe from start to end coordinates.
Args:
start_x: Starting X coordinate.
start_y: Starting Y coordinate.
end_x: Ending X coordinate.
end_y: Ending Y coordinate.
duration_ms: Duration of swipe in milliseconds (auto-calculated if None).
device_id: Optional ADB device ID.
delay: Delay in seconds after swipe. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_swipe_delay
adb_prefix = _get_adb_prefix(device_id)
if duration_ms is None:
        # Heuristic: scale duration with the squared swipe distance
dist_sq = (start_x - end_x) ** 2 + (start_y - end_y) ** 2
duration_ms = int(dist_sq / 1000)
duration_ms = max(1000, min(duration_ms, 2000)) # Clamp between 1000-2000ms
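        # e.g. a 1000 px swipe gives 1000**2 / 1000 = 1000 ms, right at the lower clamp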
subprocess.run(
adb_prefix
+ [
"shell",
"input",
"swipe",
str(start_x),
str(start_y),
str(end_x),
str(end_y),
str(duration_ms),
],
capture_output=True,
)
time.sleep(delay)
def back(device_id: str | None = None, delay: float | None = None) -> None:
"""
Press the back button.
Args:
device_id: Optional ADB device ID.
delay: Delay in seconds after pressing back. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_back_delay
adb_prefix = _get_adb_prefix(device_id)
subprocess.run(
adb_prefix + ["shell", "input", "keyevent", "4"], capture_output=True
)
time.sleep(delay)
def home(device_id: str | None = None, delay: float | None = None) -> None:
"""
Press the home button.
Args:
device_id: Optional ADB device ID.
delay: Delay in seconds after pressing home. If None, uses configured default.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_home_delay
adb_prefix = _get_adb_prefix(device_id)
subprocess.run(
adb_prefix + ["shell", "input", "keyevent", "KEYCODE_HOME"], capture_output=True
)
time.sleep(delay)
def launch_app(
app_name: str, device_id: str | None = None, delay: float | None = None
) -> bool:
"""
Launch an app by name.
Args:
app_name: The app name (must be in APP_PACKAGES).
device_id: Optional ADB device ID.
delay: Delay in seconds after launching. If None, uses configured default.
Returns:
True if app was launched, False if app not found.
"""
if delay is None:
delay = TIMING_CONFIG.device.default_launch_delay
if app_name not in APP_PACKAGES:
return False
adb_prefix = _get_adb_prefix(device_id)
package = APP_PACKAGES[app_name]
subprocess.run(
adb_prefix
+ [
"shell",
"monkey",
"-p",
package,
"-c",
"android.intent.category.LAUNCHER",
"1",
],
capture_output=True,
)
time.sleep(delay)
return True
def _get_adb_prefix(device_id: str | None) -> list:
"""Get ADB command prefix with optional device specifier."""
if device_id:
return ["adb", "-s", device_id]
return ["adb"]
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/adb/device.py",
"license": "Apache License 2.0",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/adb/input.py | """Input utilities for Android device text input."""
import base64
import subprocess
from typing import Optional
def type_text(text: str, device_id: str | None = None) -> None:
"""
Type text into the currently focused input field using ADB Keyboard.
Args:
text: The text to type.
device_id: Optional ADB device ID for multi-device setups.
Note:
Requires ADB Keyboard to be installed on the device.
See: https://github.com/nicnocquee/AdbKeyboard
"""
adb_prefix = _get_adb_prefix(device_id)
encoded_text = base64.b64encode(text.encode("utf-8")).decode("utf-8")
subprocess.run(
adb_prefix
+ [
"shell",
"am",
"broadcast",
"-a",
"ADB_INPUT_B64",
"--es",
"msg",
encoded_text,
],
capture_output=True,
text=True,
)
def clear_text(device_id: str | None = None) -> None:
"""
Clear text in the currently focused input field.
Args:
device_id: Optional ADB device ID for multi-device setups.
"""
adb_prefix = _get_adb_prefix(device_id)
subprocess.run(
adb_prefix + ["shell", "am", "broadcast", "-a", "ADB_CLEAR_TEXT"],
capture_output=True,
text=True,
)
def detect_and_set_adb_keyboard(device_id: str | None = None) -> str:
"""
Detect current keyboard and switch to ADB Keyboard if needed.
Args:
device_id: Optional ADB device ID for multi-device setups.
Returns:
The original keyboard IME identifier for later restoration.
"""
adb_prefix = _get_adb_prefix(device_id)
# Get current IME
result = subprocess.run(
adb_prefix + ["shell", "settings", "get", "secure", "default_input_method"],
capture_output=True,
text=True,
)
current_ime = (result.stdout + result.stderr).strip()
# Switch to ADB Keyboard if not already set
if "com.android.adbkeyboard/.AdbIME" not in current_ime:
subprocess.run(
adb_prefix + ["shell", "ime", "set", "com.android.adbkeyboard/.AdbIME"],
capture_output=True,
text=True,
)
# Warm up the keyboard
type_text("", device_id)
return current_ime
def restore_keyboard(ime: str, device_id: str | None = None) -> None:
"""
Restore the original keyboard IME.
Args:
ime: The IME identifier to restore.
device_id: Optional ADB device ID for multi-device setups.
"""
adb_prefix = _get_adb_prefix(device_id)
subprocess.run(
adb_prefix + ["shell", "ime", "set", ime], capture_output=True, text=True
)
def _get_adb_prefix(device_id: str | None) -> list:
"""Get ADB command prefix with optional device specifier."""
if device_id:
return ["adb", "-s", device_id]
return ["adb"]
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/adb/input.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zai-org/Open-AutoGLM:phone_agent/adb/screenshot.py | """Screenshot utilities for capturing Android device screen."""
import base64
import os
import subprocess
import tempfile
import uuid
from dataclasses import dataclass
from io import BytesIO
from typing import Tuple
from PIL import Image
@dataclass
class Screenshot:
"""Represents a captured screenshot."""
base64_data: str
width: int
height: int
is_sensitive: bool = False
def get_screenshot(device_id: str | None = None, timeout: int = 10) -> Screenshot:
"""
Capture a screenshot from the connected Android device.
Args:
device_id: Optional ADB device ID for multi-device setups.
timeout: Timeout in seconds for screenshot operations.
Returns:
Screenshot object containing base64 data and dimensions.
Note:
If the screenshot fails (e.g., on sensitive screens like payment pages),
a black fallback image is returned with is_sensitive=True.
"""
temp_path = os.path.join(tempfile.gettempdir(), f"screenshot_{uuid.uuid4()}.png")
adb_prefix = _get_adb_prefix(device_id)
try:
# Execute screenshot command
result = subprocess.run(
adb_prefix + ["shell", "screencap", "-p", "/sdcard/tmp.png"],
capture_output=True,
text=True,
timeout=timeout,
)
# Check for screenshot failure (sensitive screen)
output = result.stdout + result.stderr
if "Status: -1" in output or "Failed" in output:
return _create_fallback_screenshot(is_sensitive=True)
# Pull screenshot to local temp path
subprocess.run(
adb_prefix + ["pull", "/sdcard/tmp.png", temp_path],
capture_output=True,
text=True,
timeout=5,
)
if not os.path.exists(temp_path):
return _create_fallback_screenshot(is_sensitive=False)
# Read and encode image
img = Image.open(temp_path)
width, height = img.size
buffered = BytesIO()
img.save(buffered, format="PNG")
base64_data = base64.b64encode(buffered.getvalue()).decode("utf-8")
# Cleanup
os.remove(temp_path)
return Screenshot(
base64_data=base64_data, width=width, height=height, is_sensitive=False
)
except Exception as e:
print(f"Screenshot error: {e}")
return _create_fallback_screenshot(is_sensitive=False)
def _get_adb_prefix(device_id: str | None) -> list:
"""Get ADB command prefix with optional device specifier."""
if device_id:
return ["adb", "-s", device_id]
return ["adb"]
def _create_fallback_screenshot(is_sensitive: bool) -> Screenshot:
"""Create a black fallback image when screenshot fails."""
default_width, default_height = 1080, 2400
black_img = Image.new("RGB", (default_width, default_height), color="black")
buffered = BytesIO()
black_img.save(buffered, format="PNG")
base64_data = base64.b64encode(buffered.getvalue()).decode("utf-8")
return Screenshot(
base64_data=base64_data,
width=default_width,
height=default_height,
is_sensitive=is_sensitive,
)
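# Illustrative sketch (not part of the original module): decoding a capture
# back to a PNG on disk. The output filename is arbitrary.
if __name__ == "__main__":
    shot = get_screenshot()
    print(f"{shot.width}x{shot.height}, sensitive={shot.is_sensitive}")
    with open("screen.png", "wb") as f:
        f.write(base64.b64decode(shot.base64_data))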
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/adb/screenshot.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zai-org/Open-AutoGLM:phone_agent/agent.py | """Main PhoneAgent class for orchestrating phone automation."""
import json
import traceback
from dataclasses import dataclass
from typing import Any, Callable
from phone_agent.actions import ActionHandler
from phone_agent.actions.handler import do, finish, parse_action
from phone_agent.config import get_messages, get_system_prompt
from phone_agent.device_factory import get_device_factory
from phone_agent.model import ModelClient, ModelConfig
from phone_agent.model.client import MessageBuilder
@dataclass
class AgentConfig:
"""Configuration for the PhoneAgent."""
max_steps: int = 100
device_id: str | None = None
lang: str = "cn"
system_prompt: str | None = None
verbose: bool = True
def __post_init__(self):
if self.system_prompt is None:
self.system_prompt = get_system_prompt(self.lang)
@dataclass
class StepResult:
"""Result of a single agent step."""
success: bool
finished: bool
action: dict[str, Any] | None
thinking: str
message: str | None = None
class PhoneAgent:
"""
AI-powered agent for automating Android phone interactions.
The agent uses a vision-language model to understand screen content
and decide on actions to complete user tasks.
Args:
model_config: Configuration for the AI model.
agent_config: Configuration for the agent behavior.
confirmation_callback: Optional callback for sensitive action confirmation.
takeover_callback: Optional callback for takeover requests.
Example:
>>> from phone_agent import PhoneAgent
>>> from phone_agent.model import ModelConfig
>>>
>>> model_config = ModelConfig(base_url="http://localhost:8000/v1")
>>> agent = PhoneAgent(model_config)
>>> agent.run("Open WeChat and send a message to John")
"""
def __init__(
self,
model_config: ModelConfig | None = None,
agent_config: AgentConfig | None = None,
confirmation_callback: Callable[[str], bool] | None = None,
takeover_callback: Callable[[str], None] | None = None,
):
self.model_config = model_config or ModelConfig()
self.agent_config = agent_config or AgentConfig()
self.model_client = ModelClient(self.model_config)
self.action_handler = ActionHandler(
device_id=self.agent_config.device_id,
confirmation_callback=confirmation_callback,
takeover_callback=takeover_callback,
)
self._context: list[dict[str, Any]] = []
self._step_count = 0
def run(self, task: str) -> str:
"""
Run the agent to complete a task.
Args:
task: Natural language description of the task.
Returns:
Final message from the agent.
"""
self._context = []
self._step_count = 0
# First step with user prompt
result = self._execute_step(task, is_first=True)
if result.finished:
return result.message or "Task completed"
# Continue until finished or max steps reached
while self._step_count < self.agent_config.max_steps:
result = self._execute_step(is_first=False)
if result.finished:
return result.message or "Task completed"
return "Max steps reached"
def step(self, task: str | None = None) -> StepResult:
"""
Execute a single step of the agent.
Useful for manual control or debugging.
Args:
task: Task description (only needed for first step).
Returns:
StepResult with step details.
"""
is_first = len(self._context) == 0
if is_first and not task:
raise ValueError("Task is required for the first step")
return self._execute_step(task, is_first)
def reset(self) -> None:
"""Reset the agent state for a new task."""
self._context = []
self._step_count = 0
def _execute_step(
self, user_prompt: str | None = None, is_first: bool = False
) -> StepResult:
"""Execute a single step of the agent loop."""
self._step_count += 1
# Capture current screen state
device_factory = get_device_factory()
screenshot = device_factory.get_screenshot(self.agent_config.device_id)
current_app = device_factory.get_current_app(self.agent_config.device_id)
# Build messages
if is_first:
self._context.append(
MessageBuilder.create_system_message(self.agent_config.system_prompt)
)
screen_info = MessageBuilder.build_screen_info(current_app)
text_content = f"{user_prompt}\n\n{screen_info}"
self._context.append(
MessageBuilder.create_user_message(
text=text_content, image_base64=screenshot.base64_data
)
)
else:
screen_info = MessageBuilder.build_screen_info(current_app)
text_content = f"** Screen Info **\n\n{screen_info}"
self._context.append(
MessageBuilder.create_user_message(
text=text_content, image_base64=screenshot.base64_data
)
)
# Get model response
try:
msgs = get_messages(self.agent_config.lang)
print("\n" + "=" * 50)
print(f"💭 {msgs['thinking']}:")
print("-" * 50)
response = self.model_client.request(self._context)
except Exception as e:
if self.agent_config.verbose:
traceback.print_exc()
return StepResult(
success=False,
finished=True,
action=None,
thinking="",
message=f"Model error: {e}",
)
# Parse action from response
try:
action = parse_action(response.action)
except ValueError:
if self.agent_config.verbose:
traceback.print_exc()
action = finish(message=response.action)
if self.agent_config.verbose:
# Print thinking process
print("-" * 50)
print(f"🎯 {msgs['action']}:")
print(json.dumps(action, ensure_ascii=False, indent=2))
print("=" * 50 + "\n")
# Remove image from context to save space
self._context[-1] = MessageBuilder.remove_images_from_message(self._context[-1])
# Execute action
try:
result = self.action_handler.execute(
action, screenshot.width, screenshot.height
)
except Exception as e:
if self.agent_config.verbose:
traceback.print_exc()
result = self.action_handler.execute(
finish(message=str(e)), screenshot.width, screenshot.height
)
# Add assistant response to context
self._context.append(
MessageBuilder.create_assistant_message(
f"<think>{response.thinking}</think><answer>{response.action}</answer>"
)
)
# Check if finished
finished = action.get("_metadata") == "finish" or result.should_finish
if finished and self.agent_config.verbose:
msgs = get_messages(self.agent_config.lang)
print("\n" + "🎉 " + "=" * 48)
print(
f"✅ {msgs['task_completed']}: {result.message or action.get('message', msgs['done'])}"
)
print("=" * 50 + "\n")
return StepResult(
success=result.success,
finished=finished,
action=action,
thinking=response.thinking,
message=result.message or action.get("message"),
)
@property
def context(self) -> list[dict[str, Any]]:
"""Get the current conversation context."""
return self._context.copy()
@property
def step_count(self) -> int:
"""Get the current step count."""
return self._step_count
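# Illustrative sketch (not part of the original module): driving the agent one
# step at a time instead of calling run(), e.g. for debugging. The endpoint and
# task are placeholders.
if __name__ == "__main__":
    agent = PhoneAgent(ModelConfig(base_url="http://localhost:8000/v1"))
    result = agent.step("Open Settings")  # the first step must carry the task
    while not result.finished and agent.step_count < agent.agent_config.max_steps:
        result = agent.step()
    agent.reset()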
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/agent.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/model/client.py | """Model client for AI inference using OpenAI-compatible API."""
import json
import time
from dataclasses import dataclass, field
from typing import Any
from openai import OpenAI
from phone_agent.config.i18n import get_message
@dataclass
class ModelConfig:
"""Configuration for the AI model."""
base_url: str = "http://localhost:8000/v1"
api_key: str = "EMPTY"
model_name: str = "autoglm-phone-9b"
max_tokens: int = 3000
temperature: float = 0.0
top_p: float = 0.85
frequency_penalty: float = 0.2
extra_body: dict[str, Any] = field(default_factory=dict)
lang: str = "cn" # Language for UI messages: 'cn' or 'en'
@dataclass
class ModelResponse:
"""Response from the AI model."""
thinking: str
action: str
raw_content: str
# Performance metrics
time_to_first_token: float | None = None # Time to first token (seconds)
time_to_thinking_end: float | None = None # Time to thinking end (seconds)
total_time: float | None = None # Total inference time (seconds)
class ModelClient:
"""
Client for interacting with OpenAI-compatible vision-language models.
Args:
config: Model configuration.
"""
def __init__(self, config: ModelConfig | None = None):
self.config = config or ModelConfig()
self.client = OpenAI(base_url=self.config.base_url, api_key=self.config.api_key)
def request(self, messages: list[dict[str, Any]]) -> ModelResponse:
"""
Send a request to the model.
Args:
messages: List of message dictionaries in OpenAI format.
Returns:
ModelResponse containing thinking and action.
Raises:
ValueError: If the response cannot be parsed.
"""
# Start timing
start_time = time.time()
time_to_first_token = None
time_to_thinking_end = None
stream = self.client.chat.completions.create(
messages=messages,
model=self.config.model_name,
max_tokens=self.config.max_tokens,
temperature=self.config.temperature,
top_p=self.config.top_p,
frequency_penalty=self.config.frequency_penalty,
extra_body=self.config.extra_body,
stream=True,
)
raw_content = ""
buffer = "" # Buffer to hold content that might be part of a marker
action_markers = ["finish(message=", "do(action="]
in_action_phase = False # Track if we've entered the action phase
first_token_received = False
for chunk in stream:
if len(chunk.choices) == 0:
continue
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
raw_content += content
# Record time to first token
if not first_token_received:
time_to_first_token = time.time() - start_time
first_token_received = True
if in_action_phase:
# Already in action phase, just accumulate content without printing
continue
buffer += content
# Check if any marker is fully present in buffer
marker_found = False
for marker in action_markers:
if marker in buffer:
# Marker found, print everything before it
thinking_part = buffer.split(marker, 1)[0]
print(thinking_part, end="", flush=True)
print() # Print newline after thinking is complete
in_action_phase = True
marker_found = True
# Record time to thinking end
if time_to_thinking_end is None:
time_to_thinking_end = time.time() - start_time
break
if marker_found:
continue # Continue to collect remaining content
# Check if buffer ends with a prefix of any marker
# If so, don't print yet (wait for more content)
is_potential_marker = False
for marker in action_markers:
for i in range(1, len(marker)):
if buffer.endswith(marker[:i]):
is_potential_marker = True
break
if is_potential_marker:
break
if not is_potential_marker:
# Safe to print the buffer
print(buffer, end="", flush=True)
buffer = ""
        # Flush any leftover buffered thinking text (the stream may end while
        # the buffer still holds what looked like a marker prefix)
        if not in_action_phase and buffer:
            print(buffer, end="", flush=True)
        # Calculate total time
        total_time = time.time() - start_time
# Parse thinking and action from response
thinking, action = self._parse_response(raw_content)
# Print performance metrics
lang = self.config.lang
print()
print("=" * 50)
print(f"⏱️ {get_message('performance_metrics', lang)}:")
print("-" * 50)
if time_to_first_token is not None:
print(
f"{get_message('time_to_first_token', lang)}: {time_to_first_token:.3f}s"
)
if time_to_thinking_end is not None:
print(
f"{get_message('time_to_thinking_end', lang)}: {time_to_thinking_end:.3f}s"
)
print(
f"{get_message('total_inference_time', lang)}: {total_time:.3f}s"
)
print("=" * 50)
return ModelResponse(
thinking=thinking,
action=action,
raw_content=raw_content,
time_to_first_token=time_to_first_token,
time_to_thinking_end=time_to_thinking_end,
total_time=total_time,
)
def _parse_response(self, content: str) -> tuple[str, str]:
"""
Parse the model response into thinking and action parts.
Parsing rules:
1. If content contains 'finish(message=', everything before is thinking,
everything from 'finish(message=' onwards is action.
2. If rule 1 doesn't apply but content contains 'do(action=',
everything before is thinking, everything from 'do(action=' onwards is action.
3. Fallback: If content contains '<answer>', use legacy parsing with XML tags.
4. Otherwise, return empty thinking and full content as action.
Args:
content: Raw response content.
Returns:
Tuple of (thinking, action).
"""
# Rule 1: Check for finish(message=
if "finish(message=" in content:
parts = content.split("finish(message=", 1)
thinking = parts[0].strip()
action = "finish(message=" + parts[1]
return thinking, action
# Rule 2: Check for do(action=
if "do(action=" in content:
parts = content.split("do(action=", 1)
thinking = parts[0].strip()
action = "do(action=" + parts[1]
return thinking, action
# Rule 3: Fallback to legacy XML tag parsing
if "<answer>" in content:
parts = content.split("<answer>", 1)
thinking = parts[0].replace("<think>", "").replace("</think>", "").strip()
action = parts[1].replace("</answer>", "").strip()
return thinking, action
# Rule 4: No markers found, return content as action
return "", content
class MessageBuilder:
"""Helper class for building conversation messages."""
@staticmethod
def create_system_message(content: str) -> dict[str, Any]:
"""Create a system message."""
return {"role": "system", "content": content}
@staticmethod
def create_user_message(
text: str, image_base64: str | None = None
) -> dict[str, Any]:
"""
Create a user message with optional image.
Args:
text: Text content.
image_base64: Optional base64-encoded image.
Returns:
Message dictionary.
"""
content = []
if image_base64:
content.append(
{
"type": "image_url",
"image_url": {"url": f"data:image/png;base64,{image_base64}"},
}
)
content.append({"type": "text", "text": text})
return {"role": "user", "content": content}
@staticmethod
def create_assistant_message(content: str) -> dict[str, Any]:
"""Create an assistant message."""
return {"role": "assistant", "content": content}
@staticmethod
def remove_images_from_message(message: dict[str, Any]) -> dict[str, Any]:
"""
Remove image content from a message to save context space.
Args:
message: Message dictionary.
Returns:
Message with images removed.
"""
if isinstance(message.get("content"), list):
message["content"] = [
item for item in message["content"] if item.get("type") == "text"
]
return message
@staticmethod
def build_screen_info(current_app: str, **extra_info) -> str:
"""
Build screen info string for the model.
Args:
current_app: Current app name.
**extra_info: Additional info to include.
Returns:
JSON string with screen info.
"""
info = {"current_app": current_app, **extra_info}
return json.dumps(info, ensure_ascii=False)
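# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module's public surface): runs
# the offline pieces of this file — response parsing and message building —
# without contacting a model server. All sample strings are invented, and
# _parse_response is private; it is exercised here only for demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    client = ModelClient(ModelConfig())
    # Rule 2: free-form thinking followed by a do(action=...) call
    thinking, action = client._parse_response(
        'The login button is at the top right. do(action="Tap", element=[520, 88])'
    )
    assert thinking == "The login button is at the top right."
    assert action == 'do(action="Tap", element=[520, 88])'
    # Rule 3: legacy <think>/<answer> tag format
    thinking, action = client._parse_response(
        "<think>Nothing left to do.</think><answer>noop()</answer>"
    )
    assert thinking == "Nothing left to do." and action == "noop()"
    # Building a user message with screen info and no screenshot
    msg = MessageBuilder.create_user_message(
        text=MessageBuilder.build_screen_info("Settings"), image_base64=None
    )
    print(json.dumps(msg, ensure_ascii=False))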
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/model/client.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/memory/conversation_store.py | """
Conversation history persistence using SQLite.
Design:
- sessions table: per-session metadata (channel_type, last_active, msg_count)
- messages table: individual messages stored as JSON, append-only
- Pruning: age-based only (sessions not updated within N days are deleted)
- Thread-safe via a single in-process lock
Storage path: defaults to the shared long-term memory database at
~/cow/memory/long-term/index.db (see get_conversation_store)
"""
from __future__ import annotations
import json
import sqlite3
import threading
import time
from pathlib import Path
from typing import Any, Dict, List, Optional
from common.log import logger
# ---------------------------------------------------------------------------
# Schema
# ---------------------------------------------------------------------------
_DDL = """
CREATE TABLE IF NOT EXISTS sessions (
session_id TEXT PRIMARY KEY,
channel_type TEXT NOT NULL DEFAULT '',
created_at INTEGER NOT NULL,
last_active INTEGER NOT NULL,
msg_count INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id TEXT NOT NULL,
seq INTEGER NOT NULL,
role TEXT NOT NULL,
content TEXT NOT NULL,
created_at INTEGER NOT NULL,
UNIQUE (session_id, seq)
);
CREATE INDEX IF NOT EXISTS idx_messages_session
ON messages (session_id, seq);
CREATE INDEX IF NOT EXISTS idx_sessions_last_active
ON sessions (last_active);
"""
# Migration: add channel_type column to existing databases that predate it.
_MIGRATION_ADD_CHANNEL_TYPE = """
ALTER TABLE sessions ADD COLUMN channel_type TEXT NOT NULL DEFAULT '';
"""
DEFAULT_MAX_AGE_DAYS: int = 30
def _is_visible_user_message(content: Any) -> bool:
"""
Return True when a user-role message represents actual user input
(not an internal tool_result injected by the agent loop).
"""
if isinstance(content, str):
return bool(content.strip())
if isinstance(content, list):
return any(
isinstance(b, dict) and b.get("type") == "text"
for b in content
)
return False
def _extract_display_text(content: Any) -> str:
"""
Extract the human-readable text portion from a message content value.
Returns an empty string for tool_use / tool_result blocks.
"""
if isinstance(content, str):
return content.strip()
if isinstance(content, list):
parts = [
b.get("text", "")
for b in content
if isinstance(b, dict) and b.get("type") == "text"
]
return "\n".join(p for p in parts if p).strip()
return ""
def _extract_tool_calls(content: Any) -> List[Dict[str, Any]]:
"""
Extract tool_use blocks from an assistant message content.
Returns a list of {name, arguments} dicts (result filled in later).
"""
if not isinstance(content, list):
return []
return [
{"id": b.get("id", ""), "name": b.get("name", ""), "arguments": b.get("input", {})}
for b in content
if isinstance(b, dict) and b.get("type") == "tool_use"
]
def _extract_tool_results(content: Any) -> Dict[str, str]:
"""
Extract tool_result blocks from a user message, keyed by tool_use_id.
"""
if not isinstance(content, list):
return {}
results = {}
for b in content:
if not isinstance(b, dict) or b.get("type") != "tool_result":
continue
tool_id = b.get("tool_use_id", "")
result_content = b.get("content", "")
if isinstance(result_content, list):
result_content = "\n".join(
rb.get("text", "") for rb in result_content
if isinstance(rb, dict) and rb.get("type") == "text"
)
results[tool_id] = str(result_content)
return results
def _group_into_display_turns(
rows: List[tuple],
) -> List[Dict[str, Any]]:
"""
Convert raw (role, content_json, created_at) DB rows into display turns.
One display turn = one visible user message + one merged assistant reply.
All intermediate assistant messages (those carrying tool_use) and the final
assistant text reply produced for the same user query are collapsed into a
single assistant turn, exactly matching the live SSE rendering where tools
and the final answer appear inside the same bubble.
Grouping rules:
- A visible user message starts a new group.
- tool_result user messages are internal; their content is attached to the
matching tool_use entry via tool_use_id and they never become own turns.
- All assistant messages within a group are merged:
* tool_use blocks → tool_calls list (result filled from tool_results)
* text blocks → last non-empty text becomes the display content
"""
# ------------------------------------------------------------------ #
# Pass 1: split rows into groups, each starting with a visible user msg
# ------------------------------------------------------------------ #
# group = (user_row | None, [subsequent_rows])
# user_row: (content, created_at)
groups: List[tuple] = []
cur_user: Optional[tuple] = None
cur_rest: List[tuple] = []
started = False
for role, raw_content, created_at in rows:
try:
content = json.loads(raw_content)
except Exception:
content = raw_content
if role == "user" and _is_visible_user_message(content):
if started:
groups.append((cur_user, cur_rest))
cur_user = (content, created_at)
cur_rest = []
started = True
else:
cur_rest.append((role, content, created_at))
if started:
groups.append((cur_user, cur_rest))
# ------------------------------------------------------------------ #
# Pass 2: build display turns from each group
# ------------------------------------------------------------------ #
turns: List[Dict[str, Any]] = []
for user_row, rest in groups:
# User turn
if user_row:
content, created_at = user_row
text = _extract_display_text(content)
if text:
turns.append({"role": "user", "content": text, "created_at": created_at})
# Collect all tool_calls and tool_results from the rest of the group
all_tool_calls: List[Dict[str, Any]] = []
tool_results: Dict[str, str] = {}
final_text = ""
final_ts: Optional[int] = None
for role, content, created_at in rest:
if role == "user":
tool_results.update(_extract_tool_results(content))
elif role == "assistant":
tcs = _extract_tool_calls(content)
all_tool_calls.extend(tcs)
t = _extract_display_text(content)
if t:
final_text = t
final_ts = created_at
# Attach tool results to their matching tool_call entries
for tc in all_tool_calls:
tc["result"] = tool_results.get(tc.get("id", ""), "")
if final_text or all_tool_calls:
turns.append({
"role": "assistant",
"content": final_text,
"tool_calls": all_tool_calls,
"created_at": final_ts or (user_row[1] if user_row else 0),
})
return turns
class ConversationStore:
"""
SQLite-backed store for per-session conversation history.
Usage:
store = ConversationStore(db_path)
store.append_messages("user_123", new_messages, channel_type="feishu")
msgs = store.load_messages("user_123", max_turns=30)
"""
def __init__(self, db_path: Path):
self._db_path = db_path
self._lock = threading.Lock()
self._init_db()
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def load_messages(
self,
session_id: str,
max_turns: int = 30,
) -> List[Dict[str, Any]]:
"""
Load the most recent messages for a session, for injection into the LLM.
ALL message types (user text, assistant tool_use, tool_result) are returned
in their original JSON form so the LLM can reconstruct the full context.
max_turns is a *visible-turn* count: we count only user messages whose
content is actual user text (not tool_result blocks). This prevents
tool-heavy sessions from exhausting the turn budget prematurely.
Args:
session_id: Unique session identifier.
max_turns: Maximum number of visible user-assistant turns to keep.
Returns:
Chronologically ordered list of message dicts (role, content).
"""
with self._lock:
conn = self._connect()
try:
rows = conn.execute(
"""
SELECT seq, role, content
FROM messages
WHERE session_id = ?
ORDER BY seq DESC
""",
(session_id,),
).fetchall()
finally:
conn.close()
if not rows:
return []
# Walk newest-to-oldest counting *visible* user turns (actual user text,
# not tool_result injections). Record the seq of every visible user
# message so we can find a clean cut point later.
visible_turn_seqs: List[int] = [] # newest first
for seq, role, raw_content in rows:
if role != "user":
continue
try:
content = json.loads(raw_content)
except Exception:
content = raw_content
if _is_visible_user_message(content):
visible_turn_seqs.append(seq)
# Determine the seq of the oldest visible user message we want to keep.
# If the total turns fit within max_turns, keep everything.
if len(visible_turn_seqs) <= max_turns:
cutoff_seq = None # keep all
else:
# The Nth visible user message (0-indexed) is the oldest we keep.
cutoff_seq = visible_turn_seqs[max_turns - 1]
# Build result in chronological order, starting from cutoff.
# IMPORTANT: we start exactly at cutoff_seq (the visible user message),
# never mid-group, so tool_use / tool_result pairs are always complete.
result = []
for seq, role, raw_content in reversed(rows):
if cutoff_seq is not None and seq < cutoff_seq:
continue
try:
content = json.loads(raw_content)
except Exception:
content = raw_content
result.append({"role": role, "content": content})
return result
def append_messages(
self,
session_id: str,
messages: List[Dict[str, Any]],
channel_type: str = "",
) -> None:
"""
Append new messages to a session's history.
Seq numbers continue from the session's current maximum, so
concurrent callers on distinct sessions never collide.
Args:
session_id: Unique session identifier.
messages: List of message dicts to append.
channel_type: Source channel (e.g. "feishu", "web", "wechat").
Only written on session creation; ignored on update.
"""
if not messages:
return
now = int(time.time())
with self._lock:
conn = self._connect()
try:
with conn:
# INSERT OR IGNORE creates the row on first visit;
# the UPDATE always refreshes last_active.
# Avoids ON CONFLICT...DO UPDATE (requires SQLite >= 3.24).
conn.execute(
"""
INSERT OR IGNORE INTO sessions
(session_id, channel_type, created_at, last_active, msg_count)
VALUES (?, ?, ?, ?, 0)
""",
(session_id, channel_type, now, now),
)
conn.execute(
"UPDATE sessions SET last_active = ? WHERE session_id = ?",
(now, session_id),
)
# Determine starting seq for the new batch.
row = conn.execute(
"SELECT COALESCE(MAX(seq), -1) FROM messages WHERE session_id = ?",
(session_id,),
).fetchone()
next_seq = row[0] + 1
for msg in messages:
role = msg.get("role", "")
content = json.dumps(
msg.get("content", ""), ensure_ascii=False
)
conn.execute(
"""
INSERT OR IGNORE INTO messages
(session_id, seq, role, content, created_at)
VALUES (?, ?, ?, ?, ?)
""",
(session_id, next_seq, role, content, now),
)
next_seq += 1
conn.execute(
"""
UPDATE sessions
SET msg_count = (
SELECT COUNT(*) FROM messages WHERE session_id = ?
)
WHERE session_id = ?
""",
(session_id, session_id),
)
finally:
conn.close()
def clear_session(self, session_id: str) -> None:
"""Delete all messages and the session record for a given session_id."""
with self._lock:
conn = self._connect()
try:
with conn:
conn.execute(
"DELETE FROM messages WHERE session_id = ?", (session_id,)
)
conn.execute(
"DELETE FROM sessions WHERE session_id = ?", (session_id,)
)
finally:
conn.close()
def cleanup_old_sessions(self, max_age_days: Optional[int] = None) -> int:
"""
Delete sessions that have not been active within max_age_days.
Args:
max_age_days: Override the default retention period.
Returns:
Number of sessions deleted.
"""
try:
from config import conf
max_age = max_age_days or conf().get(
"conversation_max_age_days", DEFAULT_MAX_AGE_DAYS
)
except Exception:
max_age = max_age_days or DEFAULT_MAX_AGE_DAYS
cutoff = int(time.time()) - max_age * 86400
deleted = 0
with self._lock:
conn = self._connect()
try:
with conn:
stale = conn.execute(
"SELECT session_id FROM sessions WHERE last_active < ?",
(cutoff,),
).fetchall()
for (sid,) in stale:
conn.execute(
"DELETE FROM messages WHERE session_id = ?", (sid,)
)
conn.execute(
"DELETE FROM sessions WHERE session_id = ?", (sid,)
)
deleted += 1
finally:
conn.close()
if deleted:
logger.info(f"[ConversationStore] Pruned {deleted} expired sessions")
return deleted
def load_history_page(
self,
session_id: str,
page: int = 1,
page_size: int = 20,
) -> Dict[str, Any]:
"""
Load a page of conversation history for UI display, grouped into turns.
Each "turn" maps to one of:
- A user message (role="user", content=str)
- An assistant message (role="assistant", content=str,
tool_calls=[{name, arguments, result}] when tools were used)
Internal tool_result user messages are merged into the preceding
assistant entry's tool_calls list and never appear as standalone items.
Pages are numbered from 1 (most recent). Messages within a page are
returned in chronological order.
Returns:
{
"messages": [
{
"role": "user" | "assistant",
"content": str,
"tool_calls": [...], # assistant only, may be []
"created_at": int,
},
...
],
"total": <visible turn count>,
"page": <current page>,
"page_size": <page_size>,
"has_more": bool,
}
"""
page = max(1, page)
with self._lock:
conn = self._connect()
try:
rows = conn.execute(
"""
SELECT role, content, created_at
FROM messages
WHERE session_id = ?
ORDER BY seq ASC
""",
(session_id,),
).fetchall()
finally:
conn.close()
visible = _group_into_display_turns(rows)
total = len(visible)
offset = (page - 1) * page_size
page_items = list(reversed(visible))[offset: offset + page_size]
page_items = list(reversed(page_items))
return {
"messages": page_items,
"total": total,
"page": page,
"page_size": page_size,
"has_more": offset + page_size < total,
}
def get_stats(self) -> Dict[str, Any]:
"""Return basic stats keyed by channel_type, for monitoring."""
with self._lock:
conn = self._connect()
try:
total_sessions = conn.execute(
"SELECT COUNT(*) FROM sessions"
).fetchone()[0]
total_messages = conn.execute(
"SELECT COUNT(*) FROM messages"
).fetchone()[0]
by_channel = conn.execute(
"""
SELECT channel_type, COUNT(*) as cnt
FROM sessions
GROUP BY channel_type
ORDER BY cnt DESC
"""
).fetchall()
return {
"total_sessions": total_sessions,
"total_messages": total_messages,
"by_channel": {row[0] or "unknown": row[1] for row in by_channel},
}
finally:
conn.close()
# ------------------------------------------------------------------
# Internal helpers
# ------------------------------------------------------------------
def _init_db(self) -> None:
self._db_path.parent.mkdir(parents=True, exist_ok=True)
conn = self._connect()
try:
conn.executescript(_DDL)
conn.commit()
self._migrate(conn)
finally:
conn.close()
def _migrate(self, conn: sqlite3.Connection) -> None:
"""Apply incremental schema migrations on existing databases."""
cols = {
row[1]
for row in conn.execute("PRAGMA table_info(sessions)").fetchall()
}
if "channel_type" not in cols:
try:
conn.execute(_MIGRATION_ADD_CHANNEL_TYPE)
conn.commit()
logger.info("[ConversationStore] Migrated: added channel_type column")
except Exception as e:
logger.warning(f"[ConversationStore] Migration failed: {e}")
def _connect(self) -> sqlite3.Connection:
conn = sqlite3.connect(str(self._db_path), timeout=10)
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA synchronous=NORMAL")
return conn
# ---------------------------------------------------------------------------
# Singleton
# ---------------------------------------------------------------------------
_store_instance: Optional[ConversationStore] = None
_store_lock = threading.Lock()
def get_conversation_store() -> ConversationStore:
"""
Return the process-wide ConversationStore singleton.
Reuses the long-term memory database so the project stays with a single
SQLite file: ~/cow/memory/long-term/index.db
The conversation tables (sessions / messages) are separate from the
memory tables (memory_chunks / file_metadata) — no conflicts.
"""
global _store_instance
if _store_instance is not None:
return _store_instance
with _store_lock:
if _store_instance is not None:
return _store_instance
try:
from agent.memory.config import get_default_memory_config
db_path = get_default_memory_config().get_db_path()
except Exception:
from common.utils import expand_path
db_path = Path(expand_path("~/cow")) / "memory" / "long-term" / "index.db"
_store_instance = ConversationStore(db_path)
logger.debug(f"[ConversationStore] Using shared DB at: {db_path}")
return _store_instance
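# ---------------------------------------------------------------------------
# Usage sketch (illustrative): exercises the store against a throwaway
# database file rather than the shared singleton. The session id and message
# payloads below are invented for the demo.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        store = ConversationStore(Path(tmp) / "conversations.db")
        store.append_messages(
            "demo_session",
            [
                {"role": "user", "content": "hello"},
                {"role": "assistant", "content": "hi, how can I help?"},
            ],
            channel_type="web",
        )
        # Raw messages for LLM context injection
        print(store.load_messages("demo_session", max_turns=30))
        # Grouped display turns for UI rendering
        print(store.load_history_page("demo_session", page=1, page_size=20))
        print(store.get_stats())
        store.clear_session("demo_session")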
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/memory/conversation_store.py",
"license": "MIT License",
"lines": 533,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/chat/service.py | """
ChatService - Wraps the Agent stream execution to produce CHAT protocol chunks.
Translates agent events (message_update, message_end, tool_execution_end, etc.)
into the CHAT socket protocol format (content chunks with segment_id, tool_calls chunks).
"""
import time
from typing import Callable, Optional
from common.log import logger
class ChatService:
"""
High-level service that runs an Agent for a given query and streams
the results as CHAT protocol chunks via a callback.
Usage:
svc = ChatService(agent_bridge)
svc.run(query, session_id, send_chunk_fn)
"""
def __init__(self, agent_bridge):
"""
:param agent_bridge: AgentBridge instance (manages agent lifecycle)
"""
self.agent_bridge = agent_bridge
def run(self, query: str, session_id: str, send_chunk_fn: Callable[[dict], None]):
"""
Run the agent for *query* and stream results back via *send_chunk_fn*.
The method blocks until the agent finishes. After it returns the SDK
will automatically send the final (streaming=false) message.
:param query: user query text
:param session_id: session identifier for agent isolation
:param send_chunk_fn: callable(chunk_data: dict) to send a streaming chunk
"""
agent = self.agent_bridge.get_agent(session_id=session_id)
if agent is None:
raise RuntimeError("Failed to initialise agent for the session")
# State shared between the event callback and this method
state = _StreamState()
def on_event(event: dict):
"""Translate agent events into CHAT protocol chunks."""
event_type = event.get("type")
data = event.get("data", {})
if event_type == "message_update":
# Incremental text delta
delta = data.get("delta", "")
if delta:
send_chunk_fn({
"chunk_type": "content",
"delta": delta,
"segment_id": state.segment_id,
})
elif event_type == "message_end":
# A content segment finished.
tool_calls = data.get("tool_calls", [])
if tool_calls:
# After tool_calls are executed the next content will be
# a new segment; collect tool results until turn_end.
state.pending_tool_results = []
elif event_type == "tool_execution_end":
tool_name = data.get("tool_name", "")
arguments = data.get("arguments", {})
result = data.get("result", "")
status = data.get("status", "unknown")
execution_time = data.get("execution_time", 0)
elapsed_str = f"{execution_time:.2f}s"
# Serialise result to string if needed
if not isinstance(result, str):
import json
try:
result = json.dumps(result, ensure_ascii=False)
except Exception:
result = str(result)
tool_info = {
"name": tool_name,
"arguments": arguments,
"result": result,
"status": status,
"elapsed": elapsed_str,
}
if state.pending_tool_results is not None:
state.pending_tool_results.append(tool_info)
elif event_type == "turn_end":
has_tool_calls = data.get("has_tool_calls", False)
if has_tool_calls and state.pending_tool_results:
# Flush collected tool results as a single tool_calls chunk
send_chunk_fn({
"chunk_type": "tool_calls",
"tool_calls": state.pending_tool_results,
})
state.pending_tool_results = None
# Next content belongs to a new segment
state.segment_id += 1
# Run the agent with our event callback ---------------------------
logger.info(f"[ChatService] Starting agent run: session={session_id}, query={query[:80]}")
from config import conf
max_context_turns = conf().get("agent_max_context_turns", 20)
# Get full system prompt with skills
full_system_prompt = agent.get_full_system_prompt()
# Create a copy of messages for this execution
with agent.messages_lock:
messages_copy = agent.messages.copy()
original_length = len(agent.messages)
from agent.protocol.agent_stream import AgentStreamExecutor
executor = AgentStreamExecutor(
agent=agent,
model=agent.model,
system_prompt=full_system_prompt,
tools=agent.tools,
max_turns=agent.max_steps,
on_event=on_event,
messages=messages_copy,
max_context_turns=max_context_turns,
)
try:
response = executor.run_stream(query)
except Exception:
# If executor cleared messages (context overflow), sync back
if len(executor.messages) == 0:
with agent.messages_lock:
agent.messages.clear()
logger.info("[ChatService] Cleared agent message history after executor recovery")
raise
# Append only the NEW messages from this execution (thread-safe)
with agent.messages_lock:
new_messages = executor.messages[original_length:]
agent.messages.extend(new_messages)
# Store executor reference for files_to_send access
agent.stream_executor = executor
# Execute post-process tools
agent._execute_post_process_tools()
logger.info(f"[ChatService] Agent run completed: session={session_id}")
class _StreamState:
"""Mutable state shared between the event callback and the run method."""
def __init__(self):
self.segment_id: int = 0
# None means we are not accumulating tool results right now.
# A list means we are in the middle of a tool-execution phase.
self.pending_tool_results: Optional[list] = None
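# ---------------------------------------------------------------------------
# Protocol sketch (illustrative): the chunk shapes a send_chunk_fn receives
# over one tool-using turn, mirroring the event handling in ChatService.run
# but driven by hand instead of a live agent. All payload values are invented.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    state = _StreamState()
    chunks = []
    # message_update -> content chunk for the current segment
    chunks.append({"chunk_type": "content", "delta": "Searching...", "segment_id": state.segment_id})
    # message_end with tool_calls -> start collecting tool results
    state.pending_tool_results = []
    # tool_execution_end -> one entry per executed tool
    state.pending_tool_results.append({
        "name": "web_search",
        "arguments": {"query": "weather"},
        "result": "sunny",
        "status": "success",
        "elapsed": "0.42s",
    })
    # turn_end with tool calls -> flush as a tool_calls chunk, bump segment id
    chunks.append({"chunk_type": "tool_calls", "tool_calls": state.pending_tool_results})
    state.pending_tool_results = None
    state.segment_id += 1
    chunks.append({"chunk_type": "content", "delta": "It is sunny.", "segment_id": state.segment_id})
    for c in chunks:
        print(c)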
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/chat/service.py",
"license": "MIT License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/memory/service.py | """
Memory service for handling memory query operations via cloud protocol.
Provides a unified interface for listing and reading memory files,
callable from the cloud client (LinkAI) or a future web console.
Memory file layout (under workspace_root):
MEMORY.md -> type: global
memory/2026-02-20.md -> type: daily
"""
import os
from datetime import datetime
from typing import List, Optional
from common.log import logger
class MemoryService:
"""
High-level service for memory file queries.
Operates directly on the filesystem — no MemoryManager dependency.
"""
def __init__(self, workspace_root: str):
"""
:param workspace_root: Workspace root directory (e.g. ~/cow)
"""
self.workspace_root = workspace_root
self.memory_dir = os.path.join(workspace_root, "memory")
# ------------------------------------------------------------------
# list — paginated file metadata
# ------------------------------------------------------------------
def list_files(self, page: int = 1, page_size: int = 20) -> dict:
"""
List all memory files with metadata (without content).
Returns::
{
"page": 1,
"page_size": 20,
"total": 15,
"list": [
{"filename": "MEMORY.md", "type": "global", "size": 2048, "updated_at": "2026-02-20 10:00:00"},
{"filename": "2026-02-20.md", "type": "daily", "size": 512, "updated_at": "2026-02-20 09:30:00"},
...
]
}
"""
files: List[dict] = []
# 1. Global memory — MEMORY.md in workspace root
global_path = os.path.join(self.workspace_root, "MEMORY.md")
if os.path.isfile(global_path):
files.append(self._file_info(global_path, "MEMORY.md", "global"))
# 2. Daily memory files — memory/*.md (sorted newest first)
if os.path.isdir(self.memory_dir):
daily_files = []
for name in os.listdir(self.memory_dir):
full = os.path.join(self.memory_dir, name)
if os.path.isfile(full) and name.endswith(".md"):
daily_files.append((name, full))
# Sort by filename descending (newest date first)
daily_files.sort(key=lambda x: x[0], reverse=True)
for name, full in daily_files:
files.append(self._file_info(full, name, "daily"))
total = len(files)
# Paginate
start = (page - 1) * page_size
end = start + page_size
page_items = files[start:end]
return {
"page": page,
"page_size": page_size,
"total": total,
"list": page_items,
}
# ------------------------------------------------------------------
# content — read a single file
# ------------------------------------------------------------------
def get_content(self, filename: str) -> dict:
"""
Read the full content of a memory file.
:param filename: File name, e.g. ``MEMORY.md`` or ``2026-02-20.md``
:return: dict with ``filename`` and ``content``
:raises FileNotFoundError: if the file does not exist
"""
path = self._resolve_path(filename)
if not os.path.isfile(path):
raise FileNotFoundError(f"Memory file not found: {filename}")
with open(path, "r", encoding="utf-8") as f:
content = f.read()
return {
"filename": filename,
"content": content,
}
# ------------------------------------------------------------------
# dispatch — single entry point for protocol messages
# ------------------------------------------------------------------
def dispatch(self, action: str, payload: Optional[dict] = None) -> dict:
"""
Dispatch a memory management action.
:param action: ``list`` or ``content``
:param payload: action-specific payload
:return: protocol-compatible response dict
"""
payload = payload or {}
try:
if action == "list":
page = payload.get("page", 1)
page_size = payload.get("page_size", 20)
result_payload = self.list_files(page=page, page_size=page_size)
return {"action": action, "code": 200, "message": "success", "payload": result_payload}
elif action == "content":
filename = payload.get("filename")
if not filename:
return {"action": action, "code": 400, "message": "filename is required", "payload": None}
result_payload = self.get_content(filename)
return {"action": action, "code": 200, "message": "success", "payload": result_payload}
else:
return {"action": action, "code": 400, "message": f"unknown action: {action}", "payload": None}
except FileNotFoundError as e:
return {"action": action, "code": 404, "message": str(e), "payload": None}
except Exception as e:
logger.error(f"[MemoryService] dispatch error: action={action}, error={e}")
return {"action": action, "code": 500, "message": str(e), "payload": None}
# ------------------------------------------------------------------
# internal helpers
# ------------------------------------------------------------------
    def _resolve_path(self, filename: str) -> str:
        """
        Resolve a filename to its absolute path.
        - ``MEMORY.md`` → ``{workspace_root}/MEMORY.md``
        - ``2026-02-20.md`` → ``{workspace_root}/memory/2026-02-20.md``
        Rejects names containing path separators to prevent directory traversal.
        """
        if os.path.basename(filename) != filename:
            raise FileNotFoundError(f"Invalid memory filename: {filename}")
        if filename == "MEMORY.md":
            return os.path.join(self.workspace_root, filename)
        return os.path.join(self.memory_dir, filename)
@staticmethod
def _file_info(path: str, filename: str, file_type: str) -> dict:
"""Build a file metadata dict."""
stat = os.stat(path)
updated_at = datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S")
return {
"filename": filename,
"type": file_type,
"size": stat.st_size,
"updated_at": updated_at,
}
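# ---------------------------------------------------------------------------
# Usage sketch (illustrative): seeds a throwaway workspace with one global and
# one daily memory file, then drives the service through its dispatch entry
# point. File names and contents are invented for the demo.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, "MEMORY.md"), "w", encoding="utf-8") as f:
            f.write("# Global memory\n")
        os.makedirs(os.path.join(tmp, "memory"), exist_ok=True)
        with open(os.path.join(tmp, "memory", "2026-02-20.md"), "w", encoding="utf-8") as f:
            f.write("daily notes\n")
        svc = MemoryService(tmp)
        print(svc.dispatch("list"))                                    # 200 + file list
        print(svc.dispatch("content", {"filename": "2026-02-20.md"}))  # 200 + content
        print(svc.dispatch("content", {"filename": "missing.md"}))     # 404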
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/memory/service.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/skills/service.py | """
Skill service for handling skill CRUD operations.
This service provides a unified interface for managing skills, which can be
called from the cloud control client (LinkAI), the local web console, or any
other management entry point.
"""
import os
import shutil
from typing import List, Optional
from common.log import logger
from agent.skills.types import Skill, SkillEntry
from agent.skills.manager import SkillManager
try:
import requests
except ImportError:
requests = None
class SkillService:
"""
High-level service for skill lifecycle management.
Wraps SkillManager and provides network-aware operations such as
downloading skill files from remote URLs.
"""
def __init__(self, skill_manager: SkillManager):
"""
:param skill_manager: The SkillManager instance to operate on
"""
self.manager = skill_manager
# ------------------------------------------------------------------
# query
# ------------------------------------------------------------------
def query(self) -> List[dict]:
"""
Query all skills and return a serialisable list.
Reads from skills_config.json (refreshes from disk if needed).
:return: list of skill info dicts
"""
self.manager.refresh_skills()
config = self.manager.get_skills_config()
result = list(config.values())
logger.info(f"[SkillService] query: {len(result)} skills found")
return result
# ------------------------------------------------------------------
# add / install
# ------------------------------------------------------------------
def add(self, payload: dict) -> None:
"""
Add (install) a skill from a remote payload.
The payload follows the socket protocol::
{
"name": "web_search",
"type": "url",
"enabled": true,
"files": [
{"url": "https://...", "path": "README.md"},
{"url": "https://...", "path": "scripts/main.py"}
]
}
Files are downloaded and saved under the custom skills directory
using *name* as the sub-directory.
:param payload: skill add payload from server
"""
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
files = payload.get("files", [])
if not files:
raise ValueError("skill files list is empty")
skill_dir = os.path.join(self.manager.custom_dir, name)
os.makedirs(skill_dir, exist_ok=True)
for file_info in files:
url = file_info.get("url")
rel_path = file_info.get("path")
if not url or not rel_path:
logger.warning(f"[SkillService] add: skip invalid file entry {file_info}")
continue
            dest = os.path.join(skill_dir, rel_path)
            # Reject paths that would escape the skill directory (e.g. "../x")
            if not os.path.abspath(dest).startswith(os.path.abspath(skill_dir) + os.sep):
                logger.warning(f"[SkillService] add: skip unsafe file path {rel_path}")
                continue
            self._download_file(url, dest)
# Reload to pick up the new skill and sync config
self.manager.refresh_skills()
logger.info(f"[SkillService] add: skill '{name}' installed ({len(files)} files)")
# ------------------------------------------------------------------
# open / close (enable / disable)
# ------------------------------------------------------------------
def open(self, payload: dict) -> None:
"""
Enable a skill by name.
:param payload: {"name": "skill_name"}
"""
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
self.manager.set_skill_enabled(name, enabled=True)
logger.info(f"[SkillService] open: skill '{name}' enabled")
def close(self, payload: dict) -> None:
"""
Disable a skill by name.
:param payload: {"name": "skill_name"}
"""
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
self.manager.set_skill_enabled(name, enabled=False)
logger.info(f"[SkillService] close: skill '{name}' disabled")
# ------------------------------------------------------------------
# delete
# ------------------------------------------------------------------
def delete(self, payload: dict) -> None:
"""
Delete a skill by removing its directory entirely.
:param payload: {"name": "skill_name"}
"""
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
skill_dir = os.path.join(self.manager.custom_dir, name)
if os.path.exists(skill_dir):
shutil.rmtree(skill_dir)
logger.info(f"[SkillService] delete: removed directory {skill_dir}")
else:
logger.warning(f"[SkillService] delete: skill directory not found: {skill_dir}")
# Refresh will remove the deleted skill from config automatically
self.manager.refresh_skills()
logger.info(f"[SkillService] delete: skill '{name}' deleted")
# ------------------------------------------------------------------
# dispatch - single entry point for protocol messages
# ------------------------------------------------------------------
def dispatch(self, action: str, payload: Optional[dict] = None) -> dict:
"""
Dispatch a skill management action and return a protocol-compatible
response dict.
:param action: one of query / add / open / close / delete
:param payload: action-specific payload (may be None for query)
:return: dict with action, code, message, payload
"""
payload = payload or {}
try:
if action == "query":
result_payload = self.query()
return {"action": action, "code": 200, "message": "success", "payload": result_payload}
elif action == "add":
self.add(payload)
elif action == "open":
self.open(payload)
elif action == "close":
self.close(payload)
elif action == "delete":
self.delete(payload)
else:
return {"action": action, "code": 400, "message": f"unknown action: {action}", "payload": None}
return {"action": action, "code": 200, "message": "success", "payload": None}
except Exception as e:
logger.error(f"[SkillService] dispatch error: action={action}, error={e}")
return {"action": action, "code": 500, "message": str(e), "payload": None}
# ------------------------------------------------------------------
# internal helpers
# ------------------------------------------------------------------
@staticmethod
def _download_file(url: str, dest: str):
"""
Download a file from *url* and save to *dest*.
:param url: remote file URL
:param dest: local destination path
"""
if requests is None:
raise RuntimeError("requests library is required for downloading skill files")
dest_dir = os.path.dirname(dest)
if dest_dir:
os.makedirs(dest_dir, exist_ok=True)
resp = requests.get(url, timeout=60)
resp.raise_for_status()
with open(dest, "wb") as f:
f.write(resp.content)
logger.debug(f"[SkillService] downloaded {url} -> {dest}")
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/skills/service.py",
"license": "MIT License",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:common/cloud_client.py | """
Cloud management client for connecting to the LinkAI control console.
Handles remote configuration sync, message push, and skill management
via the LinkAI socket protocol.
"""
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from linkai import LinkAIClient, PushMsg
from config import conf, pconf, plugin_config, available_setting, write_plugin_config, get_root
from plugins import PluginManager
import threading
import time
import json
import os
chat_client: LinkAIClient
class CloudClient(LinkAIClient):
def __init__(self, api_key: str, channel, host: str = ""):
super().__init__(api_key, host)
self.channel = channel
self.client_type = channel.channel_type
self.channel_mgr = None
self._skill_service = None
self._memory_service = None
self._chat_service = None
@property
def skill_service(self):
"""Lazy-init SkillService so it is available once SkillManager exists."""
if self._skill_service is None:
try:
from agent.skills.manager import SkillManager
from agent.skills.service import SkillService
from config import conf
from common.utils import expand_path
workspace_root = expand_path(conf().get("agent_workspace", "~/cow"))
manager = SkillManager(custom_dir=os.path.join(workspace_root, "skills"))
self._skill_service = SkillService(manager)
logger.debug("[CloudClient] SkillService initialised")
except Exception as e:
logger.error(f"[CloudClient] Failed to init SkillService: {e}")
return self._skill_service
@property
def memory_service(self):
"""Lazy-init MemoryService."""
if self._memory_service is None:
try:
from agent.memory.service import MemoryService
from config import conf
from common.utils import expand_path
workspace_root = expand_path(conf().get("agent_workspace", "~/cow"))
self._memory_service = MemoryService(workspace_root)
logger.debug("[CloudClient] MemoryService initialised")
except Exception as e:
logger.error(f"[CloudClient] Failed to init MemoryService: {e}")
return self._memory_service
@property
def chat_service(self):
"""Lazy-init ChatService (requires AgentBridge via Bridge singleton)."""
if self._chat_service is None:
try:
from agent.chat.service import ChatService
from bridge.bridge import Bridge
agent_bridge = Bridge().get_agent_bridge()
self._chat_service = ChatService(agent_bridge)
logger.debug("[CloudClient] ChatService initialised")
except Exception as e:
logger.error(f"[CloudClient] Failed to init ChatService: {e}")
return self._chat_service
# ------------------------------------------------------------------
# message push callback
# ------------------------------------------------------------------
def on_message(self, push_msg: PushMsg):
session_id = push_msg.session_id
msg_content = push_msg.msg_content
logger.info(f"receive msg push, session_id={session_id}, msg_content={msg_content}")
context = Context()
context.type = ContextType.TEXT
context["receiver"] = session_id
context["isgroup"] = push_msg.is_group
self.channel.send(Reply(ReplyType.TEXT, content=msg_content), context)
# ------------------------------------------------------------------
# config callback
# ------------------------------------------------------------------
def on_config(self, config: dict):
if not self.client_id:
return
logger.info(f"[CloudClient] Loading remote config: {config}")
if config.get("enabled") != "Y":
return
local_config = conf()
need_restart_channel = False
for key in config.keys():
if key in available_setting and config.get(key) is not None:
local_config[key] = config.get(key)
# Voice settings
reply_voice_mode = config.get("reply_voice_mode")
if reply_voice_mode:
if reply_voice_mode == "voice_reply_voice":
local_config["voice_reply_voice"] = True
local_config["always_reply_voice"] = False
elif reply_voice_mode == "always_reply_voice":
local_config["always_reply_voice"] = True
local_config["voice_reply_voice"] = True
elif reply_voice_mode == "no_reply_voice":
local_config["always_reply_voice"] = False
local_config["voice_reply_voice"] = False
# Model configuration
if config.get("model"):
local_config["model"] = config.get("model")
# Channel configuration
if config.get("channelType"):
if local_config.get("channel_type") != config.get("channelType"):
local_config["channel_type"] = config.get("channelType")
need_restart_channel = True
# Channel-specific app credentials
current_channel_type = local_config.get("channel_type", "")
if config.get("app_id") is not None:
if current_channel_type == "feishu":
if local_config.get("feishu_app_id") != config.get("app_id"):
local_config["feishu_app_id"] = config.get("app_id")
need_restart_channel = True
elif current_channel_type == "dingtalk":
if local_config.get("dingtalk_client_id") != config.get("app_id"):
local_config["dingtalk_client_id"] = config.get("app_id")
need_restart_channel = True
elif current_channel_type in ("wechatmp", "wechatmp_service"):
if local_config.get("wechatmp_app_id") != config.get("app_id"):
local_config["wechatmp_app_id"] = config.get("app_id")
need_restart_channel = True
elif current_channel_type == "wechatcom_app":
if local_config.get("wechatcomapp_agent_id") != config.get("app_id"):
local_config["wechatcomapp_agent_id"] = config.get("app_id")
need_restart_channel = True
if config.get("app_secret"):
if current_channel_type == "feishu":
if local_config.get("feishu_app_secret") != config.get("app_secret"):
local_config["feishu_app_secret"] = config.get("app_secret")
need_restart_channel = True
elif current_channel_type == "dingtalk":
if local_config.get("dingtalk_client_secret") != config.get("app_secret"):
local_config["dingtalk_client_secret"] = config.get("app_secret")
need_restart_channel = True
elif current_channel_type in ("wechatmp", "wechatmp_service"):
if local_config.get("wechatmp_app_secret") != config.get("app_secret"):
local_config["wechatmp_app_secret"] = config.get("app_secret")
need_restart_channel = True
elif current_channel_type == "wechatcom_app":
if local_config.get("wechatcomapp_secret") != config.get("app_secret"):
local_config["wechatcomapp_secret"] = config.get("app_secret")
need_restart_channel = True
if config.get("admin_password"):
if not pconf("Godcmd"):
write_plugin_config({"Godcmd": {"password": config.get("admin_password"), "admin_users": []}})
else:
pconf("Godcmd")["password"] = config.get("admin_password")
PluginManager().instances["GODCMD"].reload()
if config.get("group_app_map") and pconf("linkai"):
local_group_map = {}
for mapping in config.get("group_app_map"):
local_group_map[mapping.get("group_name")] = mapping.get("app_code")
pconf("linkai")["group_app_map"] = local_group_map
PluginManager().instances["LINKAI"].reload()
if config.get("text_to_image") and config.get("text_to_image") == "midjourney" and pconf("linkai"):
if pconf("linkai")["midjourney"]:
pconf("linkai")["midjourney"]["enabled"] = True
pconf("linkai")["midjourney"]["use_image_create_prefix"] = True
elif config.get("text_to_image") and config.get("text_to_image") in ["dall-e-2", "dall-e-3"]:
if pconf("linkai")["midjourney"]:
pconf("linkai")["midjourney"]["use_image_create_prefix"] = False
# Save configuration to config.json file
self._save_config_to_file(local_config)
if need_restart_channel:
self._restart_channel(local_config.get("channel_type", ""))
# ------------------------------------------------------------------
# skill callback
# ------------------------------------------------------------------
def on_skill(self, data: dict) -> dict:
"""
Handle SKILL messages from the cloud console.
Delegates to SkillService.dispatch for the actual operations.
:param data: message data with 'action', 'clientId', 'payload'
:return: response dict
"""
action = data.get("action", "")
payload = data.get("payload")
logger.info(f"[CloudClient] on_skill: action={action}")
svc = self.skill_service
if svc is None:
return {"action": action, "code": 500, "message": "SkillService not available", "payload": None}
return svc.dispatch(action, payload)
# ------------------------------------------------------------------
# memory callback
# ------------------------------------------------------------------
def on_memory(self, data: dict) -> dict:
"""
Handle MEMORY messages from the cloud console.
Delegates to MemoryService.dispatch for the actual operations.
:param data: message data with 'action', 'clientId', 'payload'
:return: response dict
"""
action = data.get("action", "")
payload = data.get("payload")
logger.info(f"[CloudClient] on_memory: action={action}")
svc = self.memory_service
if svc is None:
return {"action": action, "code": 500, "message": "MemoryService not available", "payload": None}
return svc.dispatch(action, payload)
# ------------------------------------------------------------------
# chat callback
# ------------------------------------------------------------------
def on_chat(self, data: dict, send_chunk_fn):
"""
Handle CHAT messages from the cloud console.
Runs the agent in streaming mode and sends chunks back via send_chunk_fn.
:param data: message data with 'action' and 'payload' (query, session_id)
:param send_chunk_fn: callable(chunk_data: dict) to send one streaming chunk
"""
payload = data.get("payload", {})
query = payload.get("query", "")
session_id = payload.get("session_id", "cloud_console")
logger.info(f"[CloudClient] on_chat: session={session_id}, query={query[:80]}")
svc = self.chat_service
if svc is None:
raise RuntimeError("ChatService not available")
svc.run(query=query, session_id=session_id, send_chunk_fn=send_chunk_fn)
# ------------------------------------------------------------------
# channel restart helpers
# ------------------------------------------------------------------
def _restart_channel(self, new_channel_type: str):
"""
Restart the channel via ChannelManager when channel type changes.
"""
if self.channel_mgr:
logger.info(f"[CloudClient] Restarting channel to '{new_channel_type}'...")
threading.Thread(target=self._do_restart_channel, args=(self.channel_mgr, new_channel_type), daemon=True).start()
else:
logger.warning("[CloudClient] ChannelManager not available, please restart the application manually")
def _do_restart_channel(self, mgr, new_channel_type: str):
"""
Perform the channel restart in a separate thread to avoid blocking the config callback.
"""
try:
mgr.restart(new_channel_type)
# Update the client's channel reference
if mgr.channel:
self.channel = mgr.channel
self.client_type = mgr.channel.channel_type
logger.info(f"[CloudClient] Channel reference updated to '{new_channel_type}'")
except Exception as e:
logger.error(f"[CloudClient] Channel restart failed: {e}")
# ------------------------------------------------------------------
# config persistence
# ------------------------------------------------------------------
def _save_config_to_file(self, local_config: dict):
"""
Save configuration to config.json file.
"""
try:
config_path = os.path.join(get_root(), "config.json")
if not os.path.exists(config_path):
logger.warning(f"[CloudClient] config.json not found at {config_path}, skip saving")
return
with open(config_path, "r", encoding="utf-8") as f:
file_config = json.load(f)
file_config.update(dict(local_config))
with open(config_path, "w", encoding="utf-8") as f:
json.dump(file_config, f, indent=4, ensure_ascii=False)
logger.info("[CloudClient] Configuration saved to config.json successfully")
except Exception as e:
logger.error(f"[CloudClient] Failed to save configuration to config.json: {e}")
def start(channel, channel_mgr=None):
global chat_client
chat_client = CloudClient(api_key=conf().get("linkai_api_key"), host=conf().get("cloud_host", ""), channel=channel)
chat_client.channel_mgr = channel_mgr
chat_client.config = _build_config()
chat_client.start()
time.sleep(1.5)
if chat_client.client_id:
logger.info("[CloudClient] Console: https://link-ai.tech/console/clients")
def _build_config():
local_conf = conf()
config = {
"linkai_app_code": local_conf.get("linkai_app_code"),
"single_chat_prefix": local_conf.get("single_chat_prefix"),
"single_chat_reply_prefix": local_conf.get("single_chat_reply_prefix"),
"single_chat_reply_suffix": local_conf.get("single_chat_reply_suffix"),
"group_chat_prefix": local_conf.get("group_chat_prefix"),
"group_chat_reply_prefix": local_conf.get("group_chat_reply_prefix"),
"group_chat_reply_suffix": local_conf.get("group_chat_reply_suffix"),
"group_name_white_list": local_conf.get("group_name_white_list"),
"nick_name_black_list": local_conf.get("nick_name_black_list"),
"speech_recognition": "Y" if local_conf.get("speech_recognition") else "N",
"text_to_image": local_conf.get("text_to_image"),
"image_create_prefix": local_conf.get("image_create_prefix"),
"model": local_conf.get("model"),
"agent_max_context_turns": local_conf.get("agent_max_context_turns"),
"agent_max_context_tokens": local_conf.get("agent_max_context_tokens"),
"agent_max_steps": local_conf.get("agent_max_steps"),
"channelType": local_conf.get("channel_type"),
}
if local_conf.get("always_reply_voice"):
config["reply_voice_mode"] = "always_reply_voice"
elif local_conf.get("voice_reply_voice"):
config["reply_voice_mode"] = "voice_reply_voice"
if pconf("linkai"):
config["group_app_map"] = pconf("linkai").get("group_app_map")
if plugin_config.get("Godcmd"):
config["admin_password"] = plugin_config.get("Godcmd").get("password")
# Add channel-specific app credentials
current_channel_type = local_conf.get("channel_type", "")
if current_channel_type == "feishu":
config["app_id"] = local_conf.get("feishu_app_id")
config["app_secret"] = local_conf.get("feishu_app_secret")
elif current_channel_type == "dingtalk":
config["app_id"] = local_conf.get("dingtalk_client_id")
config["app_secret"] = local_conf.get("dingtalk_client_secret")
elif current_channel_type in ("wechatmp", "wechatmp_service"):
config["app_id"] = local_conf.get("wechatmp_app_id")
config["app_secret"] = local_conf.get("wechatmp_app_secret")
elif current_channel_type == "wechatcom_app":
config["app_id"] = local_conf.get("wechatcomapp_agent_id")
config["app_secret"] = local_conf.get("wechatcomapp_secret")
return config
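# ---------------------------------------------------------------------------
# Reference sketch (illustrative): the reply_voice_mode value exchanged with
# the console maps onto the two local voice flags as follows; this mirrors
# the encoding in _build_config and the decoding in on_config.
# ---------------------------------------------------------------------------
_VOICE_MODE_TO_FLAGS = {
    "always_reply_voice": {"always_reply_voice": True, "voice_reply_voice": True},
    "voice_reply_voice": {"always_reply_voice": False, "voice_reply_voice": True},
    "no_reply_voice": {"always_reply_voice": False, "voice_reply_voice": False},
}
if __name__ == "__main__":
    for mode, flags in _VOICE_MODE_TO_FLAGS.items():
        print(mode, "->", flags)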
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "common/cloud_client.py",
"license": "MIT License",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:models/doubao/doubao_bot.py | # encoding:utf-8
import json
import time
import requests
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .doubao_session import DoubaoSession
# Doubao (火山方舟 / Volcengine Ark) API Bot
class DoubaoBot(Bot):
def __init__(self):
super().__init__()
        model = conf().get("model") or "doubao-seed-2-0-pro-260215"
        self.sessions = SessionManager(DoubaoSession, model=model)
self.args = {
"model": model,
"temperature": conf().get("temperature", 0.8),
"top_p": conf().get("top_p", 1.0),
}
self.api_key = conf().get("ark_api_key")
self.base_url = conf().get("ark_base_url", "https://ark.cn-beijing.volces.com/api/v3")
# Ensure base_url does not end with /chat/completions
if self.base_url.endswith("/chat/completions"):
self.base_url = self.base_url.rsplit("/chat/completions", 1)[0]
if self.base_url.endswith("/"):
self.base_url = self.base_url.rstrip("/")
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[DOUBAO] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[DOUBAO] session query={}".format(session.messages))
model = context.get("doubao_model")
new_args = self.args.copy()
if model:
new_args["model"] = model
reply_content = self.reply_text(session, args=new_args)
logger.debug(
"[DOUBAO] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[DOUBAO] reply {} used 0 tokens.".format(reply_content))
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: DoubaoSession, args=None, retry_count: int = 0) -> dict:
"""
Call Doubao chat completion API to get the answer
:param session: a conversation session
:param args: model args
:param retry_count: retry count
:return: {}
"""
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.api_key
}
body = args.copy()
body["messages"] = session.messages
# Disable thinking by default for better efficiency
body["thinking"] = {"type": "disabled"}
            res = requests.post(
                f"{self.base_url}/chat/completions",
                headers=headers,
                json=body,
                timeout=120  # match the 120s timeout used by the streaming/sync helpers below
            )
if res.status_code == 200:
response = res.json()
return {
"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response["choices"][0]["message"]["content"]
}
else:
response = res.json()
error = response.get("error", {})
logger.error(f"[DOUBAO] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
need_retry = False
if res.status_code >= 500:
logger.warn(f"[DOUBAO] do retry, times={retry_count}")
need_retry = retry_count < 2
elif res.status_code == 401:
result["content"] = "授权失败,请检查API Key是否正确"
elif res.status_code == 429:
result["content"] = "请求过于频繁,请稍后再试"
need_retry = retry_count < 2
else:
need_retry = False
if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
except Exception as e:
logger.exception(e)
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text(session, args, retry_count + 1)
else:
return result
# ==================== Agent mode support ====================
def call_with_tools(self, messages, tools=None, stream: bool = False, **kwargs):
"""
Call Doubao API with tool support for agent integration.
This method handles:
1. Format conversion (Claude format -> OpenAI format)
2. System prompt injection
3. Streaming SSE response with tool_calls
4. Thinking (reasoning) is disabled by default for efficiency
Args:
messages: List of messages (may be in Claude format from agent)
tools: List of tool definitions (may be in Claude format from agent)
stream: Whether to use streaming
**kwargs: Additional parameters (max_tokens, temperature, system, model, etc.)
Returns:
Generator yielding OpenAI-format chunks (for streaming)
"""
try:
# Convert messages from Claude format to OpenAI format
converted_messages = self._convert_messages_to_openai_format(messages)
# Inject system prompt if provided
system_prompt = kwargs.pop("system", None)
if system_prompt:
if not converted_messages or converted_messages[0].get("role") != "system":
converted_messages.insert(0, {"role": "system", "content": system_prompt})
else:
converted_messages[0] = {"role": "system", "content": system_prompt}
# Convert tools from Claude format to OpenAI format
converted_tools = None
if tools:
converted_tools = self._convert_tools_to_openai_format(tools)
# Resolve model / temperature
model = kwargs.pop("model", None) or self.args["model"]
max_tokens = kwargs.pop("max_tokens", None)
# Don't pop temperature, just ignore it - let API use default
kwargs.pop("temperature", None)
# Build request body (omit temperature, let the API use its own default)
request_body = {
"model": model,
"messages": converted_messages,
"stream": stream,
}
if max_tokens is not None:
request_body["max_tokens"] = max_tokens
# Add tools
if converted_tools:
request_body["tools"] = converted_tools
request_body["tool_choice"] = "auto"
# Explicitly disable thinking to avoid reasoning_content issues
# in multi-turn tool calls
request_body["thinking"] = {"type": "disabled"}
logger.debug(f"[DOUBAO] API call: model={model}, "
f"tools={len(converted_tools) if converted_tools else 0}, stream={stream}")
if stream:
return self._handle_stream_response(request_body)
else:
return self._handle_sync_response(request_body)
except Exception as e:
logger.error(f"[DOUBAO] call_with_tools error: {e}")
import traceback
logger.error(traceback.format_exc())
def error_generator():
yield {"error": True, "message": str(e), "status_code": 500}
return error_generator()
# -------------------- streaming --------------------
def _handle_stream_response(self, request_body: dict):
"""Handle streaming SSE response from Doubao API and yield OpenAI-format chunks."""
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
url = f"{self.base_url}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, stream=True, timeout=120)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[DOUBAO] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
current_tool_calls = {}
finish_reason = None
for line in response.iter_lines():
if not line:
continue
line = line.decode("utf-8")
if not line.startswith("data: "):
continue
data_str = line[6:] # Remove "data: " prefix
if data_str.strip() == "[DONE]":
break
try:
chunk = json.loads(data_str)
except json.JSONDecodeError as e:
logger.warning(f"[DOUBAO] JSON decode error: {e}, data: {data_str[:200]}")
continue
# Check for error in chunk
if chunk.get("error"):
error_data = chunk["error"]
error_msg = error_data.get("message", "Unknown error") if isinstance(error_data, dict) else str(error_data)
logger.error(f"[DOUBAO] stream error: {error_msg}")
yield {"error": True, "message": error_msg, "status_code": 500}
return
if not chunk.get("choices"):
continue
choice = chunk["choices"][0]
delta = choice.get("delta", {})
# Skip reasoning_content (thinking) - don't log or forward
if delta.get("reasoning_content"):
continue
# Handle text content
if "content" in delta and delta["content"]:
yield {
"choices": [{
"index": 0,
"delta": {
"role": "assistant",
"content": delta["content"]
}
}]
}
# Handle tool_calls (streamed incrementally)
if "tool_calls" in delta:
for tool_call_chunk in delta["tool_calls"]:
index = tool_call_chunk.get("index", 0)
if index not in current_tool_calls:
current_tool_calls[index] = {
"id": tool_call_chunk.get("id", ""),
"type": "tool_use",
"name": tool_call_chunk.get("function", {}).get("name", ""),
"input": ""
}
# Accumulate arguments
if "function" in tool_call_chunk and "arguments" in tool_call_chunk["function"]:
current_tool_calls[index]["input"] += tool_call_chunk["function"]["arguments"]
# Yield OpenAI-format tool call delta
yield {
"choices": [{
"index": 0,
"delta": {
"tool_calls": [tool_call_chunk]
}
}]
}
# Capture finish_reason
if choice.get("finish_reason"):
finish_reason = choice["finish_reason"]
# Final chunk with finish_reason
yield {
"choices": [{
"index": 0,
"delta": {},
"finish_reason": finish_reason
}]
}
except requests.exceptions.Timeout:
logger.error("[DOUBAO] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[DOUBAO] stream response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500}
# -------------------- sync --------------------
def _handle_sync_response(self, request_body: dict):
"""Handle synchronous API response and yield a single result dict."""
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
request_body.pop("stream", None)
url = f"{self.base_url}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, timeout=120)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[DOUBAO] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
result = response.json()
message = result["choices"][0]["message"]
finish_reason = result["choices"][0]["finish_reason"]
response_data = {"role": "assistant", "content": []}
# Add text content
if message.get("content"):
response_data["content"].append({
"type": "text",
"text": message["content"]
})
# Add tool calls
if message.get("tool_calls"):
for tool_call in message["tool_calls"]:
response_data["content"].append({
"type": "tool_use",
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"input": json.loads(tool_call["function"]["arguments"])
})
# Map finish_reason
if finish_reason == "tool_calls":
response_data["stop_reason"] = "tool_use"
elif finish_reason == "stop":
response_data["stop_reason"] = "end_turn"
else:
response_data["stop_reason"] = finish_reason
yield response_data
except requests.exceptions.Timeout:
logger.error("[DOUBAO] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[DOUBAO] sync response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500}
# -------------------- format conversion --------------------
def _convert_messages_to_openai_format(self, messages):
"""
Convert messages from Claude format to OpenAI format.
Claude format uses content blocks: tool_use / tool_result / text
OpenAI format uses tool_calls in assistant, role=tool for results
"""
if not messages:
return []
converted = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
# Already a simple string - pass through
if isinstance(content, str):
converted.append(msg)
continue
if not isinstance(content, list):
converted.append(msg)
continue
if role == "user":
text_parts = []
tool_results = []
for block in content:
if not isinstance(block, dict):
continue
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_result":
tool_call_id = block.get("tool_use_id") or ""
result_content = block.get("content", "")
if not isinstance(result_content, str):
result_content = json.dumps(result_content, ensure_ascii=False)
tool_results.append({
"role": "tool",
"tool_call_id": tool_call_id,
"content": result_content
})
# Tool results first (must come right after assistant with tool_calls)
for tr in tool_results:
converted.append(tr)
if text_parts:
converted.append({"role": "user", "content": "\n".join(text_parts)})
elif role == "assistant":
openai_msg = {"role": "assistant"}
text_parts = []
tool_calls = []
for block in content:
if not isinstance(block, dict):
continue
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_use":
tool_calls.append({
"id": block.get("id"),
"type": "function",
"function": {
"name": block.get("name"),
"arguments": json.dumps(block.get("input", {}))
}
})
if text_parts:
openai_msg["content"] = "\n".join(text_parts)
elif not tool_calls:
openai_msg["content"] = ""
if tool_calls:
openai_msg["tool_calls"] = tool_calls
if not text_parts:
openai_msg["content"] = None
converted.append(openai_msg)
else:
converted.append(msg)
return converted
def _convert_tools_to_openai_format(self, tools):
"""
Convert tools from Claude format to OpenAI format.
Claude: {name, description, input_schema}
OpenAI: {type: "function", function: {name, description, parameters}}
"""
if not tools:
return None
converted = []
for tool in tools:
# Already in OpenAI format
if "type" in tool and tool["type"] == "function":
converted.append(tool)
else:
converted.append({
"type": "function",
"function": {
"name": tool.get("name"),
"description": tool.get("description"),
"parameters": tool.get("input_schema", {})
}
})
return converted
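# ---- Illustrative sketch, not part of the upstream file ----
# Exercises the Claude -> OpenAI conversion helpers above on a hypothetical
# tool-call turn. __init__ is bypassed so no chatgpt-on-wechat config is
# required; both helpers read no instance state.
if __name__ == "__main__":
    bot = object.__new__(DoubaoBot)  # skip __init__ (no config needed for this demo)
    claude_messages = [
        {"role": "user", "content": [{"type": "text", "text": "What's the weather in Beijing?"}]},
        {"role": "assistant", "content": [
            {"type": "tool_use", "id": "call_1", "name": "get_weather", "input": {"city": "Beijing"}},
        ]},
        {"role": "user", "content": [
            {"type": "tool_result", "tool_use_id": "call_1", "content": "Sunny, 25C"},
        ]},
    ]
    print(json.dumps(bot._convert_messages_to_openai_format(claude_messages), ensure_ascii=False, indent=2))
    claude_tools = [{"name": "get_weather", "description": "Query city weather", "input_schema": {"type": "object"}}]
    print(json.dumps(bot._convert_tools_to_openai_format(claude_tools), ensure_ascii=False, indent=2))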
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "models/doubao/doubao_bot.py",
"license": "MIT License",
"lines": 442,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:models/doubao/doubao_session.py | from models.session_manager import Session
from common.log import logger
class DoubaoSession(Session):
def __init__(self, session_id, system_prompt=None, model="doubao-seed-2-0-pro-260215"):
super().__init__(session_id, system_prompt)
self.model = model
self.reset()
def discard_exceeding(self, max_tokens, cur_tokens=None):
precise = True
try:
cur_tokens = self.calc_tokens()
except Exception as e:
precise = False
if cur_tokens is None:
raise e
logger.debug("Exception when counting tokens precisely for query: {}".format(e))
while cur_tokens > max_tokens:
if len(self.messages) > 2:
self.messages.pop(1)
elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
self.messages.pop(1)
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
break
elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
break
else:
logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(
max_tokens, cur_tokens, len(self.messages)))
break
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
return cur_tokens
def calc_tokens(self):
return num_tokens_from_messages(self.messages, self.model)
def num_tokens_from_messages(messages, model):
    # Rough approximation: counts characters instead of running a real
    # tokenizer, which is good enough for trimming conversation history.
    tokens = 0
    for msg in messages:
        tokens += len(msg["content"])
    return tokens
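# Illustrative only, not part of the upstream file: since the approximation
# counts characters, a two-message history "weighs" the sum of content lengths.
if __name__ == "__main__":
    demo = [{"role": "user", "content": "hello"}, {"role": "assistant", "content": "hi"}]
    assert num_tokens_from_messages(demo, "doubao-seed-2-0-pro-260215") == 7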
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "models/doubao/doubao_session.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/tools/web_search/web_search.py | """
Web Search tool - Search the web using Bocha or LinkAI search API.
Supports two backends with unified response format:
1. Bocha Search (primary, requires BOCHA_API_KEY)
2. LinkAI Search (fallback, requires LINKAI_API_KEY)
"""
import os
import json
from typing import Dict, Any, Optional
import requests
from agent.tools.base_tool import BaseTool, ToolResult
from common.log import logger
# Default timeout for API requests (seconds)
DEFAULT_TIMEOUT = 30
class WebSearch(BaseTool):
"""Tool for searching the web using Bocha or LinkAI search API"""
name: str = "web_search"
description: str = (
"Search the web for current information, news, research topics, or any real-time data. "
"Returns web page titles, URLs, snippets, and optional summaries. "
"Use this when the user asks about recent events, needs fact-checking, or wants up-to-date information."
)
params: dict = {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query string"
},
"count": {
"type": "integer",
"description": "Number of results to return (1-50, default: 10)"
},
"freshness": {
"type": "string",
"description": (
"Time range filter. Options: "
"'noLimit' (default), 'oneDay', 'oneWeek', 'oneMonth', 'oneYear', "
"or date range like '2025-01-01..2025-02-01'"
)
},
"summary": {
"type": "boolean",
"description": "Whether to include text summary for each result (default: false)"
}
},
"required": ["query"]
}
def __init__(self, config: dict = None):
self.config = config or {}
self._backend = None # Will be resolved on first execute
@staticmethod
def is_available() -> bool:
"""Check if web search is available (at least one API key is configured)"""
return bool(os.environ.get("BOCHA_API_KEY") or os.environ.get("LINKAI_API_KEY"))
def _resolve_backend(self) -> Optional[str]:
"""
Determine which search backend to use.
Priority: Bocha > LinkAI
:return: 'bocha', 'linkai', or None
"""
if os.environ.get("BOCHA_API_KEY"):
return "bocha"
if os.environ.get("LINKAI_API_KEY"):
return "linkai"
return None
def execute(self, args: Dict[str, Any]) -> ToolResult:
"""
Execute web search
:param args: Search parameters (query, count, freshness, summary)
:return: Search results
"""
query = args.get("query", "").strip()
if not query:
return ToolResult.fail("Error: 'query' parameter is required")
count = args.get("count", 10)
freshness = args.get("freshness", "noLimit")
summary = args.get("summary", False)
# Validate count
if not isinstance(count, int) or count < 1 or count > 50:
count = 10
# Resolve backend
backend = self._resolve_backend()
if not backend:
return ToolResult.fail(
"Error: No search API key configured. "
"Please set BOCHA_API_KEY or LINKAI_API_KEY using env_config tool.\n"
" - Bocha Search: https://open.bocha.cn\n"
" - LinkAI Search: https://link-ai.tech"
)
try:
if backend == "bocha":
return self._search_bocha(query, count, freshness, summary)
else:
return self._search_linkai(query, count, freshness)
except requests.Timeout:
return ToolResult.fail(f"Error: Search request timed out after {DEFAULT_TIMEOUT}s")
except requests.ConnectionError:
return ToolResult.fail("Error: Failed to connect to search API")
except Exception as e:
logger.error(f"[WebSearch] Unexpected error: {e}", exc_info=True)
return ToolResult.fail(f"Error: Search failed - {str(e)}")
def _search_bocha(self, query: str, count: int, freshness: str, summary: bool) -> ToolResult:
"""
Search using Bocha API
:param query: Search query
:param count: Number of results
:param freshness: Time range filter
:param summary: Whether to include summary
:return: Formatted search results
"""
api_key = os.environ.get("BOCHA_API_KEY", "")
url = "https://api.bocha.cn/v1/web-search"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"Accept": "application/json"
}
payload = {
"query": query,
"count": count,
"freshness": freshness,
"summary": summary
}
logger.debug(f"[WebSearch] Bocha search: query='{query}', count={count}")
response = requests.post(url, headers=headers, json=payload, timeout=DEFAULT_TIMEOUT)
if response.status_code == 401:
return ToolResult.fail("Error: Invalid BOCHA_API_KEY. Please check your API key.")
if response.status_code == 403:
return ToolResult.fail("Error: Bocha API - insufficient balance. Please top up at https://open.bocha.cn")
if response.status_code == 429:
return ToolResult.fail("Error: Bocha API rate limit reached. Please try again later.")
if response.status_code != 200:
return ToolResult.fail(f"Error: Bocha API returned HTTP {response.status_code}")
data = response.json()
# Check API-level error code
api_code = data.get("code")
if api_code is not None and api_code != 200:
msg = data.get("msg") or "Unknown error"
return ToolResult.fail(f"Error: Bocha API error (code={api_code}): {msg}")
# Extract and format results
return self._format_bocha_results(data, query)
def _format_bocha_results(self, data: dict, query: str) -> ToolResult:
"""
Format Bocha API response into unified result structure
:param data: Raw API response
:param query: Original query
:return: Formatted ToolResult
"""
search_data = data.get("data", {})
web_pages = search_data.get("webPages", {})
pages = web_pages.get("value", [])
if not pages:
return ToolResult.success({
"query": query,
"backend": "bocha",
"total": 0,
"results": [],
"message": "No results found"
})
results = []
for page in pages:
result = {
"title": page.get("name", ""),
"url": page.get("url", ""),
"snippet": page.get("snippet", ""),
"siteName": page.get("siteName", ""),
"datePublished": page.get("datePublished") or page.get("dateLastCrawled", ""),
}
# Include summary only if present
if page.get("summary"):
result["summary"] = page["summary"]
results.append(result)
total = web_pages.get("totalEstimatedMatches", len(results))
return ToolResult.success({
"query": query,
"backend": "bocha",
"total": total,
"count": len(results),
"results": results
})
def _search_linkai(self, query: str, count: int, freshness: str) -> ToolResult:
"""
Search using LinkAI plugin API
:param query: Search query
:param count: Number of results
:param freshness: Time range filter
:return: Formatted search results
"""
api_key = os.environ.get("LINKAI_API_KEY", "")
url = "https://api.link-ai.tech/v1/plugin/execute"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
payload = {
"code": "web-search",
"args": {
"query": query,
"count": count,
"freshness": freshness
}
}
logger.debug(f"[WebSearch] LinkAI search: query='{query}', count={count}")
response = requests.post(url, headers=headers, json=payload, timeout=DEFAULT_TIMEOUT)
if response.status_code == 401:
return ToolResult.fail("Error: Invalid LINKAI_API_KEY. Please check your API key.")
if response.status_code != 200:
return ToolResult.fail(f"Error: LinkAI API returned HTTP {response.status_code}")
data = response.json()
if not data.get("success"):
msg = data.get("message") or "Unknown error"
return ToolResult.fail(f"Error: LinkAI search failed: {msg}")
return self._format_linkai_results(data, query)
def _format_linkai_results(self, data: dict, query: str) -> ToolResult:
"""
Format LinkAI API response into unified result structure.
LinkAI returns the search data in data.data field, which follows
the same Bing-compatible format as Bocha.
:param data: Raw API response
:param query: Original query
:return: Formatted ToolResult
"""
raw_data = data.get("data", "")
# LinkAI may return data as a JSON string
if isinstance(raw_data, str):
try:
raw_data = json.loads(raw_data)
except (json.JSONDecodeError, TypeError):
# If data is plain text, return it as a single result
return ToolResult.success({
"query": query,
"backend": "linkai",
"total": 1,
"count": 1,
"results": [{"content": raw_data}]
})
# If the response follows Bing-compatible structure
if isinstance(raw_data, dict):
web_pages = raw_data.get("webPages", {})
pages = web_pages.get("value", [])
if pages:
results = []
for page in pages:
result = {
"title": page.get("name", ""),
"url": page.get("url", ""),
"snippet": page.get("snippet", ""),
"siteName": page.get("siteName", ""),
"datePublished": page.get("datePublished") or page.get("dateLastCrawled", ""),
}
if page.get("summary"):
result["summary"] = page["summary"]
results.append(result)
total = web_pages.get("totalEstimatedMatches", len(results))
return ToolResult.success({
"query": query,
"backend": "linkai",
"total": total,
"count": len(results),
"results": results
})
# Fallback: return raw data
return ToolResult.success({
"query": query,
"backend": "linkai",
"total": 1,
"count": 1,
"results": [{"content": str(raw_data)}]
})
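# Illustrative sketch, not part of the upstream file: direct invocation of the
# tool. Assumes BOCHA_API_KEY or LINKAI_API_KEY is exported; otherwise
# execute() returns the configuration-error ToolResult built above.
if __name__ == "__main__":
    tool = WebSearch()
    if WebSearch.is_available():
        print(tool.execute({"query": "latest Python release", "count": 3, "summary": True}))
    else:
        print("No search API key configured; execute() would return a failure ToolResult")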
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/web_search/web_search.py",
"license": "MIT License",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:bridge/agent_event_handler.py | """
Agent Event Handler - Handles agent events and thinking process output
"""
from common.log import logger
class AgentEventHandler:
"""
Handles agent events and optionally sends intermediate messages to channel
"""
def __init__(self, context=None, original_callback=None):
"""
Initialize event handler
Args:
context: COW context (for accessing channel)
original_callback: Original event callback to chain
"""
self.context = context
self.original_callback = original_callback
# Get channel for sending intermediate messages
self.channel = None
if context:
self.channel = context.kwargs.get("channel") if hasattr(context, "kwargs") else None
        # Track current thinking for channel output
        self.current_thinking = ""
        self.turn_number = 0
        self.has_tool_calls_in_turn = False
def handle_event(self, event):
"""
Main event handler
Args:
event: Event dict with type and data
"""
event_type = event.get("type")
data = event.get("data", {})
# Dispatch to specific handlers
if event_type == "turn_start":
self._handle_turn_start(data)
elif event_type == "message_update":
self._handle_message_update(data)
elif event_type == "message_end":
self._handle_message_end(data)
elif event_type == "tool_execution_start":
self._handle_tool_execution_start(data)
elif event_type == "tool_execution_end":
self._handle_tool_execution_end(data)
# Call original callback if provided
if self.original_callback:
self.original_callback(event)
def _handle_turn_start(self, data):
"""Handle turn start event"""
self.turn_number = data.get("turn", 0)
self.has_tool_calls_in_turn = False
self.current_thinking = ""
def _handle_message_update(self, data):
"""Handle message update event (streaming text)"""
delta = data.get("delta", "")
self.current_thinking += delta
def _handle_message_end(self, data):
"""Handle message end event"""
tool_calls = data.get("tool_calls", [])
# Only send thinking process if followed by tool calls
if tool_calls:
if self.current_thinking.strip():
logger.info(f"💭 {self.current_thinking.strip()[:200]}{'...' if len(self.current_thinking) > 200 else ''}")
# Send thinking process to channel
                self._send_to_channel(self.current_thinking.strip())
else:
# No tool calls = final response (logged at agent_stream level)
if self.current_thinking.strip():
logger.debug(f"💬 {self.current_thinking.strip()[:200]}{'...' if len(self.current_thinking) > 200 else ''}")
self.current_thinking = ""
def _handle_tool_execution_start(self, data):
"""Handle tool execution start event - logged by agent_stream.py"""
pass
def _handle_tool_execution_end(self, data):
"""Handle tool execution end event - logged by agent_stream.py"""
pass
def _send_to_channel(self, message):
"""
Try to send intermediate message to channel.
Skipped in SSE mode because thinking text is already streamed via on_event.
"""
if self.context and self.context.get("on_event"):
return
if self.channel:
try:
from bridge.reply import Reply, ReplyType
reply = Reply(ReplyType.TEXT, message)
self.channel._send(reply, self.context)
except Exception as e:
logger.debug(f"[AgentEventHandler] Failed to send to channel: {e}")
def log_summary(self):
"""Log execution summary - simplified"""
# Summary removed as per user request
# Real-time logging during execution is sufficient
pass
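# Illustrative sketch, not part of the upstream file: driving the handler with
# synthetic events. With no context/channel the thinking text is only logged.
if __name__ == "__main__":
    handler = AgentEventHandler()
    handler.handle_event({"type": "turn_start", "data": {"turn": 1}})
    handler.handle_event({"type": "message_update", "data": {"delta": "Looking up the answer..."}})
    handler.handle_event({"type": "message_end", "data": {"tool_calls": [{"name": "web_search"}]}})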
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "bridge/agent_event_handler.py",
"license": "MIT License",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:bridge/agent_initializer.py | """
Agent Initializer - Handles agent initialization logic
"""
import os
import asyncio
import datetime
import time
from typing import Optional, List
from agent.protocol import Agent
from agent.tools import ToolManager
from common.log import logger
from common.utils import expand_path
class AgentInitializer:
"""
Handles agent initialization including:
- Workspace setup
- Memory system initialization
- Tool loading
- System prompt building
"""
def __init__(self, bridge, agent_bridge):
"""
Initialize agent initializer
Args:
bridge: COW bridge instance
agent_bridge: AgentBridge instance (for create_agent method)
"""
self.bridge = bridge
self.agent_bridge = agent_bridge
def initialize_agent(self, session_id: Optional[str] = None) -> Agent:
"""
Initialize agent for a session
Args:
session_id: Session ID (None for default agent)
Returns:
Initialized agent instance
"""
from config import conf
# Get workspace from config
workspace_root = expand_path(conf().get("agent_workspace", "~/cow"))
# Migrate API keys
self._migrate_config_to_env(workspace_root)
# Load environment variables
self._load_env_file()
# Initialize workspace
from agent.prompt import ensure_workspace, load_context_files, PromptBuilder
workspace_files = ensure_workspace(workspace_root, create_templates=True)
if session_id is None:
logger.info(f"[AgentInitializer] Workspace initialized at: {workspace_root}")
# Setup memory system
memory_manager, memory_tools = self._setup_memory_system(workspace_root, session_id)
# Load tools
tools = self._load_tools(workspace_root, memory_manager, memory_tools, session_id)
# Initialize scheduler if needed
self._initialize_scheduler(tools, session_id)
# Load context files
context_files = load_context_files(workspace_root)
# Initialize skill manager
skill_manager = self._initialize_skill_manager(workspace_root, session_id)
# Check if first conversation
from agent.prompt.workspace import is_first_conversation, mark_conversation_started
is_first = is_first_conversation(workspace_root)
# Build system prompt
prompt_builder = PromptBuilder(workspace_dir=workspace_root, language="zh")
runtime_info = self._get_runtime_info(workspace_root)
system_prompt = prompt_builder.build(
tools=tools,
context_files=context_files,
skill_manager=skill_manager,
memory_manager=memory_manager,
runtime_info=runtime_info,
is_first_conversation=is_first
)
if is_first:
mark_conversation_started(workspace_root)
        # Get cost control parameters (conf is already imported above)
        max_steps = conf().get("agent_max_steps", 20)
        max_context_tokens = conf().get("agent_max_context_tokens", 50000)
# Create agent
agent = self.agent_bridge.create_agent(
system_prompt=system_prompt,
tools=tools,
max_steps=max_steps,
output_mode="logger",
workspace_dir=workspace_root,
skill_manager=skill_manager,
enable_skills=True,
max_context_tokens=max_context_tokens,
runtime_info=runtime_info # Pass runtime_info for dynamic time updates
)
# Attach memory manager
if memory_manager:
agent.memory_manager = memory_manager
# Restore persisted conversation history for this session
if session_id:
self._restore_conversation_history(agent, session_id)
return agent
def _restore_conversation_history(self, agent, session_id: str) -> None:
"""
Load persisted conversation messages from SQLite and inject them
into the agent's in-memory message list.
Only user text and assistant text are restored. Tool call chains
(tool_use / tool_result) are stripped out because:
1. They are intermediate process, the value is already in the final
assistant text reply.
2. They consume massive context tokens (often 80%+ of history).
3. Different models have incompatible tool message formats, so
restoring tool chains across model switches causes 400 errors.
4. Eliminates the entire class of tool_use/tool_result pairing bugs.
"""
from config import conf
if not conf().get("conversation_persistence", True):
return
try:
from agent.memory import get_conversation_store
store = get_conversation_store()
max_turns = conf().get("agent_max_context_turns", 20)
restore_turns = max(6, max_turns // 5)
saved = store.load_messages(session_id, max_turns=restore_turns)
if saved:
filtered = self._filter_text_only_messages(saved)
if filtered:
with agent.messages_lock:
agent.messages = filtered
logger.debug(
f"[AgentInitializer] Restored {len(filtered)} text messages "
f"(from {len(saved)} total, {restore_turns} turns cap) "
f"for session={session_id}"
)
except Exception as e:
logger.warning(
f"[AgentInitializer] Failed to restore conversation history for "
f"session={session_id}: {e}"
)
@staticmethod
def _filter_text_only_messages(messages: list) -> list:
"""
Extract clean user/assistant turn pairs from raw message history.
Groups messages into turns (each starting with a real user query),
then keeps only:
- The first user text in each turn (the actual user input)
- The last assistant text in each turn (the final answer)
All tool_use, tool_result, intermediate assistant thoughts, and
internal hint messages injected by the agent loop are discarded.
"""
def _extract_text(content) -> str:
if isinstance(content, str):
return content.strip()
if isinstance(content, list):
parts = [
b.get("text", "")
for b in content
if isinstance(b, dict) and b.get("type") == "text"
]
return "\n".join(p for p in parts if p).strip()
return ""
def _is_real_user_msg(msg: dict) -> bool:
"""True for actual user input, False for tool_result or internal hints."""
if msg.get("role") != "user":
return False
content = msg.get("content")
if isinstance(content, list):
has_tool_result = any(
isinstance(b, dict) and b.get("type") == "tool_result"
for b in content
)
if has_tool_result:
return False
text = _extract_text(content)
return bool(text)
# Group into turns: each turn starts with a real user message
turns = []
current_turn = None
for msg in messages:
if _is_real_user_msg(msg):
if current_turn is not None:
turns.append(current_turn)
current_turn = {"user": msg, "assistants": []}
elif current_turn is not None and msg.get("role") == "assistant":
text = _extract_text(msg.get("content"))
if text:
current_turn["assistants"].append(text)
if current_turn is not None:
turns.append(current_turn)
# Build result: one user msg + one assistant msg per turn
filtered = []
for turn in turns:
user_text = _extract_text(turn["user"].get("content"))
if not user_text:
continue
filtered.append({
"role": "user",
"content": [{"type": "text", "text": user_text}]
})
if turn["assistants"]:
final_reply = turn["assistants"][-1]
filtered.append({
"role": "assistant",
"content": [{"type": "text", "text": final_reply}]
})
return filtered
def _load_env_file(self):
"""Load environment variables from .env file"""
env_file = expand_path("~/.cow/.env")
if os.path.exists(env_file):
try:
from dotenv import load_dotenv
load_dotenv(env_file, override=True)
except ImportError:
logger.warning("[AgentInitializer] python-dotenv not installed")
except Exception as e:
logger.warning(f"[AgentInitializer] Failed to load .env file: {e}")
def _setup_memory_system(self, workspace_root: str, session_id: Optional[str] = None):
"""
Setup memory system
Returns:
(memory_manager, memory_tools) tuple
"""
memory_manager = None
memory_tools = []
try:
from agent.memory import MemoryManager, MemoryConfig, create_embedding_provider
from agent.tools import MemorySearchTool, MemoryGetTool
from config import conf
# Get OpenAI config
openai_api_key = conf().get("open_ai_api_key", "")
openai_api_base = conf().get("open_ai_api_base", "")
# Initialize embedding provider
embedding_provider = None
if openai_api_key and openai_api_key not in ["", "YOUR API KEY", "YOUR_API_KEY"]:
try:
embedding_provider = create_embedding_provider(
provider="openai",
model="text-embedding-3-small",
api_key=openai_api_key,
api_base=openai_api_base or "https://api.openai.com/v1"
)
if session_id is None:
logger.info("[AgentInitializer] OpenAI embedding initialized")
except Exception as e:
logger.warning(f"[AgentInitializer] OpenAI embedding failed: {e}")
# Create memory manager
memory_config = MemoryConfig(workspace_root=workspace_root)
memory_manager = MemoryManager(memory_config, embedding_provider=embedding_provider)
# Sync memory
self._sync_memory(memory_manager, session_id)
# Create memory tools
memory_tools = [
MemorySearchTool(memory_manager),
MemoryGetTool(memory_manager)
]
if session_id is None:
logger.info("[AgentInitializer] Memory system initialized")
except Exception as e:
logger.warning(f"[AgentInitializer] Memory system not available: {e}")
return memory_manager, memory_tools
def _sync_memory(self, memory_manager, session_id: Optional[str] = None):
"""Sync memory database"""
try:
loop = asyncio.get_event_loop()
if loop.is_closed():
raise RuntimeError("Event loop is closed")
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
if loop.is_running():
asyncio.create_task(memory_manager.sync())
else:
loop.run_until_complete(memory_manager.sync())
except Exception as e:
logger.warning(f"[AgentInitializer] Memory sync failed: {e}")
def _load_tools(self, workspace_root: str, memory_manager, memory_tools: List, session_id: Optional[str] = None):
"""Load all tools"""
tool_manager = ToolManager()
tool_manager.load_tools()
tools = []
file_config = {
"cwd": workspace_root,
"memory_manager": memory_manager
} if memory_manager else {"cwd": workspace_root}
for tool_name in tool_manager.tool_classes.keys():
try:
# Skip web_search if no API key is available
if tool_name == "web_search":
from agent.tools.web_search.web_search import WebSearch
if not WebSearch.is_available():
logger.debug("[AgentInitializer] WebSearch skipped - no BOCHA_API_KEY or LINKAI_API_KEY")
continue
# Special handling for EnvConfig tool
if tool_name == "env_config":
from agent.tools import EnvConfig
tool = EnvConfig({"agent_bridge": self.agent_bridge})
else:
tool = tool_manager.create_tool(tool_name)
if tool:
# Apply workspace config to file operation tools
if tool_name in ['read', 'write', 'edit', 'bash', 'grep', 'find', 'ls']:
tool.config = file_config
tool.cwd = file_config.get("cwd", getattr(tool, 'cwd', None))
if 'memory_manager' in file_config:
tool.memory_manager = file_config['memory_manager']
tools.append(tool)
except Exception as e:
logger.warning(f"[AgentInitializer] Failed to load tool {tool_name}: {e}")
# Add memory tools
if memory_tools:
tools.extend(memory_tools)
if session_id is None:
logger.info(f"[AgentInitializer] Added {len(memory_tools)} memory tools")
if session_id is None:
logger.info(f"[AgentInitializer] Loaded {len(tools)} tools: {[t.name for t in tools]}")
return tools
def _initialize_scheduler(self, tools: List, session_id: Optional[str] = None):
"""Initialize scheduler service if needed"""
if not self.agent_bridge.scheduler_initialized:
try:
from agent.tools.scheduler.integration import init_scheduler
if init_scheduler(self.agent_bridge):
self.agent_bridge.scheduler_initialized = True
if session_id is None:
logger.info("[AgentInitializer] Scheduler service initialized")
except Exception as e:
logger.warning(f"[AgentInitializer] Failed to initialize scheduler: {e}")
# Inject scheduler dependencies
if self.agent_bridge.scheduler_initialized:
try:
from agent.tools.scheduler.integration import get_task_store, get_scheduler_service
from agent.tools import SchedulerTool
from config import conf
task_store = get_task_store()
scheduler_service = get_scheduler_service()
for tool in tools:
if isinstance(tool, SchedulerTool):
tool.task_store = task_store
tool.scheduler_service = scheduler_service
if not tool.config:
tool.config = {}
raw_ct = conf().get("channel_type", "unknown")
if isinstance(raw_ct, list):
ct = raw_ct[0] if raw_ct else "unknown"
elif isinstance(raw_ct, str) and "," in raw_ct:
ct = raw_ct.split(",")[0].strip()
else:
ct = raw_ct
tool.config["channel_type"] = ct
except Exception as e:
logger.warning(f"[AgentInitializer] Failed to inject scheduler dependencies: {e}")
def _initialize_skill_manager(self, workspace_root: str, session_id: Optional[str] = None):
"""Initialize skill manager"""
try:
from agent.skills import SkillManager
skill_manager = SkillManager(custom_dir=os.path.join(workspace_root, "skills"))
return skill_manager
except Exception as e:
logger.warning(f"[AgentInitializer] Failed to initialize SkillManager: {e}")
return None
def _get_runtime_info(self, workspace_root: str):
"""Get runtime information with dynamic time support"""
from config import conf
def get_current_time():
"""Get current time dynamically - called each time system prompt is accessed"""
now = datetime.datetime.now()
# Get timezone info
            try:
                # Use altzone only when DST is actually in effect, not merely defined
                is_dst = time.daylight and time.localtime().tm_isdst > 0
                offset = -time.altzone if is_dst else -time.timezone
                hours = offset // 3600
                minutes = (offset % 3600) // 60
                timezone_name = f"UTC{hours:+03d}:{minutes:02d}" if minutes else f"UTC{hours:+03d}"
            except Exception:
                timezone_name = "UTC"
# Chinese weekday mapping
weekday_map = {
'Monday': '星期一', 'Tuesday': '星期二', 'Wednesday': '星期三',
'Thursday': '星期四', 'Friday': '星期五', 'Saturday': '星期六', 'Sunday': '星期日'
}
weekday_zh = weekday_map.get(now.strftime("%A"), now.strftime("%A"))
return {
'time': now.strftime("%Y-%m-%d %H:%M:%S"),
'weekday': weekday_zh,
'timezone': timezone_name
}
return {
"model": conf().get("model", "unknown"),
"workspace": workspace_root,
"channel": ", ".join(conf().get("channel_type")) if isinstance(conf().get("channel_type"), list) else conf().get("channel_type", "unknown"),
"_get_current_time": get_current_time # Dynamic time function
}
def _migrate_config_to_env(self, workspace_root: str):
"""Migrate API keys from config.json to .env file"""
from config import conf
key_mapping = {
"open_ai_api_key": "OPENAI_API_KEY",
"open_ai_api_base": "OPENAI_API_BASE",
"gemini_api_key": "GEMINI_API_KEY",
"claude_api_key": "CLAUDE_API_KEY",
"linkai_api_key": "LINKAI_API_KEY",
}
env_file = expand_path("~/.cow/.env")
# Read existing env vars
existing_env_vars = {}
if os.path.exists(env_file):
try:
with open(env_file, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line and not line.startswith('#') and '=' in line:
key, _ = line.split('=', 1)
existing_env_vars[key.strip()] = True
except Exception as e:
logger.warning(f"[AgentInitializer] Failed to read .env file: {e}")
# Check which keys need migration
keys_to_migrate = {}
for config_key, env_key in key_mapping.items():
if env_key in existing_env_vars:
continue
value = conf().get(config_key, "")
if value and value.strip():
keys_to_migrate[env_key] = value.strip()
# Write new keys
if keys_to_migrate:
try:
                os.makedirs(os.path.dirname(env_file), exist_ok=True)
with open(env_file, 'a', encoding='utf-8') as f:
f.write('\n# Auto-migrated from config.json\n')
for key, value in keys_to_migrate.items():
f.write(f'{key}={value}\n')
os.environ[key] = value
logger.info(f"[AgentInitializer] Migrated {len(keys_to_migrate)} API keys to .env: {list(keys_to_migrate.keys())}")
except Exception as e:
logger.warning(f"[AgentInitializer] Failed to migrate API keys: {e}")
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "bridge/agent_initializer.py",
"license": "MIT License",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/scheduler/integration.py | """
Integration module for scheduler with AgentBridge
"""
import os
from typing import Optional
from config import conf
from common.log import logger
from common.utils import expand_path
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
# Global scheduler service instance
_scheduler_service = None
_task_store = None
def init_scheduler(agent_bridge) -> bool:
"""
Initialize scheduler service
Args:
agent_bridge: AgentBridge instance
Returns:
True if initialized successfully
"""
global _scheduler_service, _task_store
try:
from agent.tools.scheduler.task_store import TaskStore
from agent.tools.scheduler.scheduler_service import SchedulerService
# Get workspace from config
workspace_root = expand_path(conf().get("agent_workspace", "~/cow"))
store_path = os.path.join(workspace_root, "scheduler", "tasks.json")
# Create task store
_task_store = TaskStore(store_path)
logger.debug(f"[Scheduler] Task store initialized: {store_path}")
# Create execute callback
def execute_task_callback(task: dict):
"""Callback to execute a scheduled task"""
try:
action = task.get("action", {})
action_type = action.get("type")
if action_type == "agent_task":
_execute_agent_task(task, agent_bridge)
elif action_type == "send_message":
# Legacy support for old tasks
_execute_send_message(task, agent_bridge)
elif action_type == "tool_call":
# Legacy support for old tasks
_execute_tool_call(task, agent_bridge)
elif action_type == "skill_call":
# Legacy support for old tasks
_execute_skill_call(task, agent_bridge)
else:
logger.warning(f"[Scheduler] Unknown action type: {action_type}")
except Exception as e:
logger.error(f"[Scheduler] Error executing task {task.get('id')}: {e}")
# Create scheduler service
_scheduler_service = SchedulerService(_task_store, execute_task_callback)
_scheduler_service.start()
logger.debug("[Scheduler] Scheduler service initialized and started")
return True
except Exception as e:
logger.error(f"[Scheduler] Failed to initialize scheduler: {e}")
return False
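# Illustrative only, not part of the upstream file: typical startup wiring,
# using the accessors defined below.
#   if init_scheduler(agent_bridge):
#       task_store = get_task_store()
#       scheduler_service = get_scheduler_service()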
def get_task_store():
"""Get the global task store instance"""
return _task_store
def get_scheduler_service():
"""Get the global scheduler service instance"""
return _scheduler_service
def _execute_agent_task(task: dict, agent_bridge):
"""
Execute an agent_task action - let Agent handle the task
Args:
task: Task dictionary
agent_bridge: AgentBridge instance
"""
try:
action = task.get("action", {})
task_description = action.get("task_description")
receiver = action.get("receiver")
is_group = action.get("is_group", False)
channel_type = action.get("channel_type", "unknown")
if not task_description:
logger.error(f"[Scheduler] Task {task['id']}: No task_description specified")
return
if not receiver:
logger.error(f"[Scheduler] Task {task['id']}: No receiver specified")
return
# Check for unsupported channels
if channel_type == "dingtalk":
logger.warning(f"[Scheduler] Task {task['id']}: DingTalk channel does not support scheduled messages (Stream mode limitation). Task will execute but message cannot be sent.")
logger.info(f"[Scheduler] Task {task['id']}: Executing agent task '{task_description}'")
# Create a unique session_id for this scheduled task to avoid polluting user's conversation
# Format: scheduler_<receiver>_<task_id> to ensure isolation
scheduler_session_id = f"scheduler_{receiver}_{task['id']}"
# Create context for Agent
context = Context(ContextType.TEXT, task_description)
context["receiver"] = receiver
context["isgroup"] = is_group
context["session_id"] = scheduler_session_id
# Channel-specific setup
if channel_type == "web":
import uuid
request_id = f"scheduler_{task['id']}_{uuid.uuid4().hex[:8]}"
context["request_id"] = request_id
elif channel_type == "feishu":
context["receive_id_type"] = "chat_id" if is_group else "open_id"
context["msg"] = None
elif channel_type == "dingtalk":
# DingTalk requires msg object, set to None for scheduled tasks
context["msg"] = None
            # For one-on-one chats, pass the sender_staff_id through
if not is_group:
sender_staff_id = action.get("dingtalk_sender_staff_id")
if sender_staff_id:
context["dingtalk_sender_staff_id"] = sender_staff_id
# Use Agent to execute the task
# Mark this as a scheduled task execution to prevent recursive task creation
context["is_scheduled_task"] = True
try:
# Don't clear history - scheduler tasks use isolated session_id so they won't pollute user conversations
reply = agent_bridge.agent_reply(task_description, context=context, on_event=None, clear_history=False)
if reply and reply.content:
# Send the reply via channel
from channel.channel_factory import create_channel
try:
channel = create_channel(channel_type)
if channel:
# For web channel, register request_id
if channel_type == "web" and hasattr(channel, 'request_to_session'):
request_id = context.get("request_id")
if request_id:
channel.request_to_session[request_id] = receiver
logger.debug(f"[Scheduler] Registered request_id {request_id} -> session {receiver}")
# Send the reply
channel.send(reply, context)
logger.info(f"[Scheduler] Task {task['id']} executed successfully, result sent to {receiver}")
else:
logger.error(f"[Scheduler] Failed to create channel: {channel_type}")
except Exception as e:
logger.error(f"[Scheduler] Failed to send result: {e}")
else:
logger.error(f"[Scheduler] Task {task['id']}: No result from agent execution")
except Exception as e:
logger.error(f"[Scheduler] Failed to execute task via Agent: {e}")
import traceback
logger.error(f"[Scheduler] Traceback: {traceback.format_exc()}")
except Exception as e:
logger.error(f"[Scheduler] Error in _execute_agent_task: {e}")
import traceback
logger.error(f"[Scheduler] Traceback: {traceback.format_exc()}")
def _execute_send_message(task: dict, agent_bridge):
"""
Execute a send_message action
Args:
task: Task dictionary
agent_bridge: AgentBridge instance
"""
try:
action = task.get("action", {})
content = action.get("content", "")
receiver = action.get("receiver")
is_group = action.get("is_group", False)
channel_type = action.get("channel_type", "unknown")
if not receiver:
logger.error(f"[Scheduler] Task {task['id']}: No receiver specified")
return
# Create context for sending message
context = Context(ContextType.TEXT, content)
context["receiver"] = receiver
context["isgroup"] = is_group
context["session_id"] = receiver
# Channel-specific context setup
if channel_type == "web":
# Web channel needs request_id
import uuid
request_id = f"scheduler_{task['id']}_{uuid.uuid4().hex[:8]}"
context["request_id"] = request_id
logger.debug(f"[Scheduler] Generated request_id for web channel: {request_id}")
elif channel_type == "feishu":
# Feishu channel: for scheduled tasks, send as new message (no msg_id to reply to)
# Use chat_id for groups, open_id for private chats
context["receive_id_type"] = "chat_id" if is_group else "open_id"
# Keep isgroup as is, but set msg to None (no original message to reply to)
# Feishu channel will detect this and send as new message instead of reply
context["msg"] = None
logger.debug(f"[Scheduler] Feishu: receive_id_type={context['receive_id_type']}, is_group={is_group}, receiver={receiver}")
elif channel_type == "dingtalk":
# DingTalk channel setup
context["msg"] = None
            # For one-on-one chats, pass the sender_staff_id through
if not is_group:
sender_staff_id = action.get("dingtalk_sender_staff_id")
if sender_staff_id:
context["dingtalk_sender_staff_id"] = sender_staff_id
logger.debug(f"[Scheduler] DingTalk single chat: sender_staff_id={sender_staff_id}")
else:
logger.warning(f"[Scheduler] Task {task['id']}: DingTalk single chat message missing sender_staff_id")
# Create reply
reply = Reply(ReplyType.TEXT, content)
# Get channel and send
from channel.channel_factory import create_channel
try:
channel = create_channel(channel_type)
if channel:
# For web channel, register the request_id to session mapping
if channel_type == "web" and hasattr(channel, 'request_to_session'):
channel.request_to_session[request_id] = receiver
logger.debug(f"[Scheduler] Registered request_id {request_id} -> session {receiver}")
channel.send(reply, context)
logger.info(f"[Scheduler] Task {task['id']} executed: sent message to {receiver}")
else:
logger.error(f"[Scheduler] Failed to create channel: {channel_type}")
except Exception as e:
logger.error(f"[Scheduler] Failed to send message: {e}")
import traceback
logger.error(f"[Scheduler] Traceback: {traceback.format_exc()}")
except Exception as e:
logger.error(f"[Scheduler] Error in _execute_send_message: {e}")
import traceback
logger.error(f"[Scheduler] Traceback: {traceback.format_exc()}")
def _execute_tool_call(task: dict, agent_bridge):
"""
Execute a tool_call action
Args:
task: Task dictionary
agent_bridge: AgentBridge instance
"""
try:
action = task.get("action", {})
# Support both old and new field names
tool_name = action.get("call_name") or action.get("tool_name")
tool_params = action.get("call_params") or action.get("tool_params", {})
result_prefix = action.get("result_prefix", "")
receiver = action.get("receiver")
is_group = action.get("is_group", False)
channel_type = action.get("channel_type", "unknown")
if not tool_name:
logger.error(f"[Scheduler] Task {task['id']}: No tool_name specified")
return
if not receiver:
logger.error(f"[Scheduler] Task {task['id']}: No receiver specified")
return
# Get tool manager and create tool instance
from agent.tools.tool_manager import ToolManager
tool_manager = ToolManager()
tool = tool_manager.create_tool(tool_name)
if not tool:
logger.error(f"[Scheduler] Task {task['id']}: Tool '{tool_name}' not found")
return
# Execute tool
logger.info(f"[Scheduler] Task {task['id']}: Executing tool '{tool_name}' with params {tool_params}")
result = tool.execute(tool_params)
# Get result content
if hasattr(result, 'result'):
content = result.result
else:
content = str(result)
# Add prefix if specified
if result_prefix:
content = f"{result_prefix}\n\n{content}"
# Send result as message
context = Context(ContextType.TEXT, content)
context["receiver"] = receiver
context["isgroup"] = is_group
context["session_id"] = receiver
# Channel-specific context setup
if channel_type == "web":
# Web channel needs request_id
import uuid
request_id = f"scheduler_{task['id']}_{uuid.uuid4().hex[:8]}"
context["request_id"] = request_id
logger.debug(f"[Scheduler] Generated request_id for web channel: {request_id}")
elif channel_type == "feishu":
# Feishu channel: for scheduled tasks, send as new message (no msg_id to reply to)
context["receive_id_type"] = "chat_id" if is_group else "open_id"
context["msg"] = None
logger.debug(f"[Scheduler] Feishu: receive_id_type={context['receive_id_type']}, is_group={is_group}, receiver={receiver}")
reply = Reply(ReplyType.TEXT, content)
# Get channel and send
from channel.channel_factory import create_channel
try:
channel = create_channel(channel_type)
if channel:
# For web channel, register the request_id to session mapping
if channel_type == "web" and hasattr(channel, 'request_to_session'):
channel.request_to_session[request_id] = receiver
logger.debug(f"[Scheduler] Registered request_id {request_id} -> session {receiver}")
channel.send(reply, context)
logger.info(f"[Scheduler] Task {task['id']} executed: sent tool result to {receiver}")
else:
logger.error(f"[Scheduler] Failed to create channel: {channel_type}")
except Exception as e:
logger.error(f"[Scheduler] Failed to send tool result: {e}")
except Exception as e:
logger.error(f"[Scheduler] Error in _execute_tool_call: {e}")
def _execute_skill_call(task: dict, agent_bridge):
"""
Execute a skill_call action by asking Agent to run the skill
Args:
task: Task dictionary
agent_bridge: AgentBridge instance
"""
try:
action = task.get("action", {})
# Support both old and new field names
skill_name = action.get("call_name") or action.get("skill_name")
skill_params = action.get("call_params") or action.get("skill_params", {})
result_prefix = action.get("result_prefix", "")
receiver = action.get("receiver")
is_group = action.get("isgroup", False)
channel_type = action.get("channel_type", "unknown")
if not skill_name:
logger.error(f"[Scheduler] Task {task['id']}: No skill_name specified")
return
if not receiver:
logger.error(f"[Scheduler] Task {task['id']}: No receiver specified")
return
logger.info(f"[Scheduler] Task {task['id']}: Executing skill '{skill_name}' with params {skill_params}")
# Create a unique session_id for this scheduled task to avoid polluting user's conversation
# Format: scheduler_<receiver>_<task_id> to ensure isolation
scheduler_session_id = f"scheduler_{receiver}_{task['id']}"
# Build a natural language query for the Agent to execute the skill
# Format: "Use skill-name to do something with params"
param_str = ", ".join([f"{k}={v}" for k, v in skill_params.items()])
query = f"Use {skill_name} skill"
if param_str:
query += f" with {param_str}"
# Create context for Agent
context = Context(ContextType.TEXT, query)
context["receiver"] = receiver
context["isgroup"] = is_group
context["session_id"] = scheduler_session_id
# Channel-specific setup
if channel_type == "web":
import uuid
request_id = f"scheduler_{task['id']}_{uuid.uuid4().hex[:8]}"
context["request_id"] = request_id
elif channel_type == "feishu":
context["receive_id_type"] = "chat_id" if is_group else "open_id"
context["msg"] = None
# Use Agent to execute the skill
try:
# Don't clear history - scheduler tasks use isolated session_id so they won't pollute user conversations
reply = agent_bridge.agent_reply(query, context=context, on_event=None, clear_history=False)
            if reply and reply.content:
                content = reply.content
                if result_prefix:
                    content = f"{result_prefix}\n\n{content}"
                # Deliver the result via the channel (the original only logged it as sent)
                from channel.channel_factory import create_channel
                channel = create_channel(channel_type)
                if channel:
                    channel.send(Reply(ReplyType.TEXT, content), context)
                    logger.info(f"[Scheduler] Task {task['id']} executed: skill result sent to {receiver}")
                else:
                    logger.error(f"[Scheduler] Failed to create channel: {channel_type}")
else:
logger.error(f"[Scheduler] Task {task['id']}: No result from skill execution")
except Exception as e:
logger.error(f"[Scheduler] Failed to execute skill via Agent: {e}")
import traceback
logger.error(f"[Scheduler] Traceback: {traceback.format_exc()}")
except Exception as e:
logger.error(f"[Scheduler] Error in _execute_skill_call: {e}")
import traceback
logger.error(f"[Scheduler] Traceback: {traceback.format_exc()}")
def attach_scheduler_to_tool(tool, context: Context = None):
"""
Attach scheduler components to a SchedulerTool instance
Args:
tool: SchedulerTool instance
context: Current context (optional)
"""
if _task_store:
tool.task_store = _task_store
if context:
tool.current_context = context
channel_type = context.get("channel_type") or conf().get("channel_type", "unknown")
if not tool.config:
tool.config = {}
tool.config["channel_type"] = channel_type
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/scheduler/integration.py",
"license": "MIT License",
"lines": 373,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/scheduler/scheduler_service.py | """
Background scheduler service for executing scheduled tasks
"""
import time
import threading
from datetime import datetime, timedelta
from typing import Callable, Optional
from croniter import croniter
from common.log import logger
class SchedulerService:
"""
Background service that executes scheduled tasks
"""
def __init__(self, task_store, execute_callback: Callable):
"""
Initialize scheduler service
Args:
task_store: TaskStore instance
execute_callback: Function to call when executing a task
"""
self.task_store = task_store
self.execute_callback = execute_callback
self.running = False
self.thread = None
self._lock = threading.Lock()
def start(self):
"""Start the scheduler service"""
with self._lock:
if self.running:
logger.warning("[Scheduler] Service already running")
return
self.running = True
self.thread = threading.Thread(target=self._run_loop, daemon=True)
self.thread.start()
logger.debug("[Scheduler] Service started")
def stop(self):
"""Stop the scheduler service"""
with self._lock:
if not self.running:
return
self.running = False
if self.thread:
self.thread.join(timeout=5)
logger.info("[Scheduler] Service stopped")
def _run_loop(self):
"""Main scheduler loop"""
logger.debug("[Scheduler] Scheduler loop started")
while self.running:
try:
self._check_and_execute_tasks()
except Exception as e:
logger.error(f"[Scheduler] Error in scheduler loop: {e}")
# Sleep for 30 seconds between checks
time.sleep(30)
def _check_and_execute_tasks(self):
"""Check for due tasks and execute them"""
now = datetime.now()
tasks = self.task_store.list_tasks(enabled_only=True)
for task in tasks:
try:
# Check if task is due
if self._is_task_due(task, now):
logger.info(f"[Scheduler] Executing task: {task['id']} - {task['name']}")
self._execute_task(task)
# Update next run time
next_run = self._calculate_next_run(task, now)
if next_run:
self.task_store.update_task(task['id'], {
"next_run_at": next_run.isoformat(),
"last_run_at": now.isoformat()
})
else:
# One-time task, disable it
self.task_store.update_task(task['id'], {
"enabled": False,
"last_run_at": now.isoformat()
})
logger.info(f"[Scheduler] One-time task completed and disabled: {task['id']}")
except Exception as e:
logger.error(f"[Scheduler] Error processing task {task.get('id')}: {e}")
def _is_task_due(self, task: dict, now: datetime) -> bool:
"""
Check if a task is due to run
Args:
task: Task dictionary
now: Current datetime
Returns:
True if task should run now
"""
next_run_str = task.get("next_run_at")
        if not next_run_str:
            # First time seeing this task: persist an initial next_run_at and
            # let a later scheduler tick actually run it
            next_run = self._calculate_next_run(task, now)
            if next_run:
                self.task_store.update_task(task['id'], {
                    "next_run_at": next_run.isoformat()
                })
            return False
try:
next_run = datetime.fromisoformat(next_run_str)
# Check if task is overdue (e.g., service restart)
if next_run < now:
time_diff = (now - next_run).total_seconds()
# If overdue by more than 5 minutes, skip this run and schedule next
if time_diff > 300: # 5 minutes
logger.warning(f"[Scheduler] Task {task['id']} is overdue by {int(time_diff)}s, skipping and scheduling next run")
# For one-time tasks, disable them
schedule = task.get("schedule", {})
if schedule.get("type") == "once":
self.task_store.update_task(task['id'], {
"enabled": False,
"last_run_at": now.isoformat()
})
logger.info(f"[Scheduler] One-time task {task['id']} expired, disabled")
return False
# For recurring tasks, calculate next run from now
next_next_run = self._calculate_next_run(task, now)
if next_next_run:
self.task_store.update_task(task['id'], {
"next_run_at": next_next_run.isoformat()
})
logger.info(f"[Scheduler] Rescheduled task {task['id']} to {next_next_run}")
return False
return now >= next_run
        except Exception as e:
            logger.error(f"[Scheduler] Failed to evaluate next_run_at for task {task.get('id')}: {e}")
            return False
def _calculate_next_run(self, task: dict, from_time: datetime) -> Optional[datetime]:
"""
Calculate next run time for a task
Args:
task: Task dictionary
from_time: Calculate from this time
Returns:
Next run datetime or None for one-time tasks
"""
schedule = task.get("schedule", {})
schedule_type = schedule.get("type")
if schedule_type == "cron":
# Cron expression
expression = schedule.get("expression")
if not expression:
return None
try:
cron = croniter(expression, from_time)
return cron.get_next(datetime)
except Exception as e:
logger.error(f"[Scheduler] Invalid cron expression '{expression}': {e}")
return None
elif schedule_type == "interval":
# Interval in seconds
seconds = schedule.get("seconds", 0)
if seconds <= 0:
return None
return from_time + timedelta(seconds=seconds)
elif schedule_type == "once":
# One-time task at specific time
run_at_str = schedule.get("run_at")
if not run_at_str:
return None
try:
run_at = datetime.fromisoformat(run_at_str)
# Only return if in the future
if run_at > from_time:
return run_at
except Exception:
pass
return None
return None
def _execute_task(self, task: dict):
"""
Execute a task
Args:
task: Task dictionary
"""
try:
# Call the execute callback
self.execute_callback(task)
except Exception as e:
logger.error(f"[Scheduler] Error executing task {task['id']}: {e}")
# Update task with error
self.task_store.update_task(task['id'], {
"last_error": str(e),
"last_error_at": datetime.now().isoformat()
})
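# --- Usage sketch (illustration only, not part of the original file) ---
# Demonstrates the three schedule shapes understood by _calculate_next_run.
# The fake store below is an assumption for the demo; the real TaskStore
# lives in task_store.py.
if __name__ == "__main__":
    class _FakeStore:
        def list_tasks(self, enabled_only=True):
            return []

        def update_task(self, task_id, updates):
            print(f"update {task_id}: {updates}")

    service = SchedulerService(_FakeStore(), execute_callback=print)
    now = datetime.now()
    for schedule in (
        {"type": "interval", "seconds": 3600},
        {"type": "cron", "expression": "0 9 * * *"},
        {"type": "once", "run_at": "2099-01-01T09:00:00"},
    ):
        print(schedule["type"], "->",
              service._calculate_next_run({"id": "demo", "schedule": schedule}, now))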
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/scheduler/scheduler_service.py",
"license": "MIT License",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/scheduler/task_store.py | """
Task storage management for scheduler
"""
import json
import os
import threading
from datetime import datetime
from typing import Dict, List, Optional
from pathlib import Path
from common.utils import expand_path
class TaskStore:
"""
Manages persistent storage of scheduled tasks
"""
def __init__(self, store_path: str = None):
"""
Initialize task store
Args:
store_path: Path to tasks.json file. Defaults to ~/cow/scheduler/tasks.json
"""
if store_path is None:
# Default to ~/cow/scheduler/tasks.json
home = expand_path("~")
store_path = os.path.join(home, "cow", "scheduler", "tasks.json")
self.store_path = store_path
self.lock = threading.Lock()
self._ensure_store_dir()
def _ensure_store_dir(self):
"""Ensure the storage directory exists"""
store_dir = os.path.dirname(self.store_path)
os.makedirs(store_dir, exist_ok=True)
def load_tasks(self) -> Dict[str, dict]:
"""
Load all tasks from storage
Returns:
Dictionary of task_id -> task_data
"""
with self.lock:
if not os.path.exists(self.store_path):
return {}
try:
with open(self.store_path, 'r', encoding='utf-8') as f:
data = json.load(f)
return data.get("tasks", {})
except Exception as e:
print(f"Error loading tasks: {e}")
return {}
def save_tasks(self, tasks: Dict[str, dict]):
"""
Save all tasks to storage
Args:
tasks: Dictionary of task_id -> task_data
"""
with self.lock:
try:
# Create backup
if os.path.exists(self.store_path):
backup_path = f"{self.store_path}.bak"
try:
with open(self.store_path, 'r') as src:
with open(backup_path, 'w') as dst:
dst.write(src.read())
except Exception:
pass
# Save tasks
data = {
"version": 1,
"updated_at": datetime.now().isoformat(),
"tasks": tasks
}
with open(self.store_path, 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=2)
except Exception as e:
print(f"Error saving tasks: {e}")
raise
def add_task(self, task: dict) -> bool:
"""
Add a new task
Args:
task: Task data dictionary
Returns:
True if successful
"""
tasks = self.load_tasks()
task_id = task.get("id")
if not task_id:
raise ValueError("Task must have an 'id' field")
if task_id in tasks:
raise ValueError(f"Task with id '{task_id}' already exists")
tasks[task_id] = task
self.save_tasks(tasks)
return True
def update_task(self, task_id: str, updates: dict) -> bool:
"""
Update an existing task
Args:
task_id: Task ID
updates: Dictionary of fields to update
Returns:
True if successful
"""
tasks = self.load_tasks()
if task_id not in tasks:
raise ValueError(f"Task '{task_id}' not found")
# Update fields
tasks[task_id].update(updates)
tasks[task_id]["updated_at"] = datetime.now().isoformat()
self.save_tasks(tasks)
return True
def delete_task(self, task_id: str) -> bool:
"""
Delete a task
Args:
task_id: Task ID
Returns:
True if successful
"""
tasks = self.load_tasks()
if task_id not in tasks:
raise ValueError(f"Task '{task_id}' not found")
del tasks[task_id]
self.save_tasks(tasks)
return True
def get_task(self, task_id: str) -> Optional[dict]:
"""
Get a specific task
Args:
task_id: Task ID
Returns:
Task data or None if not found
"""
tasks = self.load_tasks()
return tasks.get(task_id)
def list_tasks(self, enabled_only: bool = False) -> List[dict]:
"""
List all tasks
Args:
enabled_only: If True, only return enabled tasks
Returns:
List of task dictionaries
"""
tasks = self.load_tasks()
task_list = list(tasks.values())
if enabled_only:
task_list = [t for t in task_list if t.get("enabled", True)]
        # Sort by next_run_at; ISO-8601 strings sort chronologically, and tasks
        # without a next_run_at sort last (mixing str and float('inf') would raise TypeError)
        task_list.sort(key=lambda t: t.get("next_run_at") or "9999-12-31T00:00:00")
return task_list
def enable_task(self, task_id: str, enabled: bool = True) -> bool:
"""
Enable or disable a task
Args:
task_id: Task ID
enabled: True to enable, False to disable
Returns:
True if successful
"""
return self.update_task(task_id, {"enabled": enabled})
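# --- Usage sketch (illustration only, not part of the original file) ---
# Round-trips a one-time task through a temporary store file.
if __name__ == "__main__":
    import tempfile
    store = TaskStore(store_path=os.path.join(tempfile.mkdtemp(), "tasks.json"))
    store.add_task({
        "id": "demo-1",
        "name": "demo task",
        "enabled": True,
        "schedule": {"type": "once", "run_at": "2099-01-01T09:00:00"},
        "next_run_at": "2099-01-01T09:00:00",
    })
    store.enable_task("demo-1", enabled=False)
    print(store.list_tasks())  # one disabled task
    store.delete_task("demo-1")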
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/scheduler/task_store.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zhayujie/chatgpt-on-wechat:agent/tools/send/send.py | """
Send tool - Send files to the user
"""
import os
from typing import Dict, Any
from pathlib import Path
from agent.tools.base_tool import BaseTool, ToolResult
from common.utils import expand_path
class Send(BaseTool):
"""Tool for sending files to the user"""
name: str = "send"
description: str = "Send a file (image, video, audio, document) to the user. Use this when the user explicitly asks to send/share a file."
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file to send. Can be absolute path or relative to workspace."
},
"message": {
"type": "string",
"description": "Optional message to accompany the file"
}
},
"required": ["path"]
}
def __init__(self, config: dict = None):
self.config = config or {}
self.cwd = self.config.get("cwd", os.getcwd())
# Supported file types
self.image_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.svg', '.ico'}
self.video_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v'}
self.audio_extensions = {'.mp3', '.wav', '.ogg', '.m4a', '.flac', '.aac', '.wma'}
self.document_extensions = {'.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx', '.txt', '.md'}
def execute(self, args: Dict[str, Any]) -> ToolResult:
"""
Execute file send operation
:param args: Contains file path and optional message
:return: File metadata for channel to send
"""
path = args.get("path", "").strip()
message = args.get("message", "")
if not path:
return ToolResult.fail("Error: path parameter is required")
# Resolve path
absolute_path = self._resolve_path(path)
# Check if file exists
if not os.path.exists(absolute_path):
return ToolResult.fail(f"Error: File not found: {path}")
# Check if readable
if not os.access(absolute_path, os.R_OK):
return ToolResult.fail(f"Error: File is not readable: {path}")
# Get file info
file_ext = Path(absolute_path).suffix.lower()
file_size = os.path.getsize(absolute_path)
file_name = Path(absolute_path).name
# Determine file type
if file_ext in self.image_extensions:
file_type = "image"
mime_type = self._get_image_mime_type(file_ext)
elif file_ext in self.video_extensions:
file_type = "video"
mime_type = self._get_video_mime_type(file_ext)
elif file_ext in self.audio_extensions:
file_type = "audio"
mime_type = self._get_audio_mime_type(file_ext)
elif file_ext in self.document_extensions:
file_type = "document"
mime_type = self._get_document_mime_type(file_ext)
else:
file_type = "file"
mime_type = "application/octet-stream"
# Return file_to_send metadata
result = {
"type": "file_to_send",
"file_type": file_type,
"path": absolute_path,
"file_name": file_name,
"mime_type": mime_type,
"size": file_size,
"size_formatted": self._format_size(file_size),
"message": message or f"正在发送 {file_name}"
}
return ToolResult.success(result)
def _resolve_path(self, path: str) -> str:
"""Resolve path to absolute path"""
path = expand_path(path)
if os.path.isabs(path):
return path
return os.path.abspath(os.path.join(self.cwd, path))
def _get_image_mime_type(self, ext: str) -> str:
"""Get MIME type for image"""
mime_map = {
'.jpg': 'image/jpeg', '.jpeg': 'image/jpeg',
'.png': 'image/png', '.gif': 'image/gif',
'.webp': 'image/webp', '.bmp': 'image/bmp',
'.svg': 'image/svg+xml', '.ico': 'image/x-icon'
}
return mime_map.get(ext, 'image/jpeg')
def _get_video_mime_type(self, ext: str) -> str:
"""Get MIME type for video"""
mime_map = {
'.mp4': 'video/mp4', '.avi': 'video/x-msvideo',
'.mov': 'video/quicktime', '.mkv': 'video/x-matroska',
'.webm': 'video/webm', '.flv': 'video/x-flv'
}
return mime_map.get(ext, 'video/mp4')
def _get_audio_mime_type(self, ext: str) -> str:
"""Get MIME type for audio"""
mime_map = {
'.mp3': 'audio/mpeg', '.wav': 'audio/wav',
'.ogg': 'audio/ogg', '.m4a': 'audio/mp4',
'.flac': 'audio/flac', '.aac': 'audio/aac'
}
return mime_map.get(ext, 'audio/mpeg')
def _get_document_mime_type(self, ext: str) -> str:
"""Get MIME type for document"""
mime_map = {
'.pdf': 'application/pdf',
'.doc': 'application/msword',
'.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'.xls': 'application/vnd.ms-excel',
'.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'.ppt': 'application/vnd.ms-powerpoint',
'.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'.txt': 'text/plain',
'.md': 'text/markdown'
}
return mime_map.get(ext, 'application/octet-stream')
def _format_size(self, size_bytes: int) -> str:
"""Format file size in human-readable format"""
for unit in ['B', 'KB', 'MB', 'GB']:
if size_bytes < 1024.0:
return f"{size_bytes:.1f}{unit}"
size_bytes /= 1024.0
return f"{size_bytes:.1f}TB"
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/send/send.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/memory/chunker.py | """
Text chunking utilities for memory
Splits text into chunks with token limits and overlap
"""
from __future__ import annotations
from typing import List, Tuple
from dataclasses import dataclass
@dataclass
class TextChunk:
"""Represents a text chunk with line numbers"""
text: str
start_line: int
end_line: int
class TextChunker:
"""Chunks text by line count with token estimation"""
def __init__(self, max_tokens: int = 500, overlap_tokens: int = 50):
"""
Initialize chunker
Args:
max_tokens: Maximum tokens per chunk
overlap_tokens: Overlap tokens between chunks
"""
self.max_tokens = max_tokens
self.overlap_tokens = overlap_tokens
# Rough estimation: ~4 chars per token for English/Chinese mixed
self.chars_per_token = 4
def chunk_text(self, text: str) -> List[TextChunk]:
"""
Chunk text into overlapping segments
Args:
text: Input text to chunk
Returns:
List of TextChunk objects
"""
if not text.strip():
return []
lines = text.split('\n')
chunks = []
max_chars = self.max_tokens * self.chars_per_token
overlap_chars = self.overlap_tokens * self.chars_per_token
current_chunk = []
current_chars = 0
start_line = 1
for i, line in enumerate(lines, start=1):
line_chars = len(line)
# If single line exceeds max, split it
if line_chars > max_chars:
# Save current chunk if exists
if current_chunk:
chunks.append(TextChunk(
text='\n'.join(current_chunk),
start_line=start_line,
end_line=i - 1
))
current_chunk = []
current_chars = 0
# Split long line into multiple chunks
for sub_chunk in self._split_long_line(line, max_chars):
chunks.append(TextChunk(
text=sub_chunk,
start_line=i,
end_line=i
))
start_line = i + 1
continue
# Check if adding this line would exceed limit
if current_chars + line_chars > max_chars and current_chunk:
# Save current chunk
chunks.append(TextChunk(
text='\n'.join(current_chunk),
start_line=start_line,
end_line=i - 1
))
# Start new chunk with overlap
overlap_lines = self._get_overlap_lines(current_chunk, overlap_chars)
current_chunk = overlap_lines + [line]
current_chars = sum(len(l) for l in current_chunk)
start_line = i - len(overlap_lines)
else:
# Add line to current chunk
current_chunk.append(line)
current_chars += line_chars
# Save last chunk
if current_chunk:
chunks.append(TextChunk(
text='\n'.join(current_chunk),
start_line=start_line,
end_line=len(lines)
))
return chunks
def _split_long_line(self, line: str, max_chars: int) -> List[str]:
"""Split a single long line into multiple chunks"""
chunks = []
for i in range(0, len(line), max_chars):
chunks.append(line[i:i + max_chars])
return chunks
def _get_overlap_lines(self, lines: List[str], target_chars: int) -> List[str]:
"""Get last few lines that fit within target_chars for overlap"""
overlap = []
chars = 0
for line in reversed(lines):
line_chars = len(line)
if chars + line_chars > target_chars:
break
overlap.insert(0, line)
chars += line_chars
return overlap
def chunk_markdown(self, text: str) -> List[TextChunk]:
"""
Chunk markdown text while respecting structure
(For future enhancement: respect markdown sections)
"""
return self.chunk_text(text)
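# --- Usage sketch (illustration only, not part of the original file) ---
# Tiny limits make the chunk boundaries and line-number bookkeeping visible.
if __name__ == "__main__":
    chunker = TextChunker(max_tokens=10, overlap_tokens=5)  # ~40 / ~20 chars
    sample = "\n".join(f"line {i}: some text" for i in range(1, 9))
    for chunk in chunker.chunk_text(sample):
        print(f"lines {chunk.start_line}-{chunk.end_line}: {chunk.text!r}")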
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/memory/chunker.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/memory/config.py | """
Memory configuration module
Provides global memory configuration with simplified workspace structure
"""
from __future__ import annotations
import os
from dataclasses import dataclass, field
from typing import Optional, List
from pathlib import Path
def _default_workspace():
"""Get default workspace path with proper Windows support"""
from common.utils import expand_path
return expand_path("~/cow")
@dataclass
class MemoryConfig:
"""Configuration for memory storage and search"""
# Storage paths (default: ~/cow)
workspace_root: str = field(default_factory=_default_workspace)
# Embedding config
embedding_provider: str = "openai" # "openai" | "local"
embedding_model: str = "text-embedding-3-small"
embedding_dim: int = 1536
# Chunking config
chunk_max_tokens: int = 500
chunk_overlap_tokens: int = 50
# Search config
max_results: int = 10
min_score: float = 0.1
# Hybrid search weights
vector_weight: float = 0.7
keyword_weight: float = 0.3
# Memory sources
sources: List[str] = field(default_factory=lambda: ["memory", "session"])
# Sync config
enable_auto_sync: bool = True
sync_on_search: bool = True
    # Memory flush config (independent of the model's context window)
    flush_token_threshold: int = 50000  # flush once ~50K tokens accumulate
    flush_turn_threshold: int = 20  # flush after 20 turns (one user + one AI message per turn)
def get_workspace(self) -> Path:
"""Get workspace root directory"""
return Path(self.workspace_root)
def get_memory_dir(self) -> Path:
"""Get memory files directory"""
return self.get_workspace() / "memory"
def get_db_path(self) -> Path:
"""Get SQLite database path for long-term memory index"""
index_dir = self.get_memory_dir() / "long-term"
index_dir.mkdir(parents=True, exist_ok=True)
return index_dir / "index.db"
def get_skills_dir(self) -> Path:
"""Get skills directory"""
return self.get_workspace() / "skills"
def get_agent_workspace(self, agent_name: Optional[str] = None) -> Path:
"""
Get workspace directory for an agent
Args:
agent_name: Optional agent name (not used in current implementation)
Returns:
Path to workspace directory
"""
workspace = self.get_workspace()
# Ensure workspace directory exists
workspace.mkdir(parents=True, exist_ok=True)
return workspace
# Global memory configuration
_global_memory_config: Optional[MemoryConfig] = None
def get_default_memory_config() -> MemoryConfig:
"""
Get the global memory configuration.
If not set, returns a default configuration.
Returns:
MemoryConfig instance
"""
global _global_memory_config
if _global_memory_config is None:
_global_memory_config = MemoryConfig()
return _global_memory_config
def set_global_memory_config(config: MemoryConfig):
"""
Set the global memory configuration.
This should be called before creating any MemoryManager instances.
Args:
config: MemoryConfig instance to use globally
Example:
>>> from agent.memory import MemoryConfig, set_global_memory_config
>>> config = MemoryConfig(
... workspace_root="~/my_agents",
... embedding_provider="openai",
... vector_weight=0.8
... )
>>> set_global_memory_config(config)
"""
global _global_memory_config
_global_memory_config = config
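# --- Usage sketch (illustration only, not part of the original file) ---
# Shows the directory layout derived from a custom workspace root.
if __name__ == "__main__":
    import tempfile
    cfg = MemoryConfig(workspace_root=tempfile.mkdtemp())
    set_global_memory_config(cfg)
    print(get_default_memory_config().get_memory_dir())  # <root>/memory
    print(cfg.get_skills_dir())                          # <root>/skills
    print(cfg.get_db_path())                             # <root>/memory/long-term/index.db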
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/memory/config.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/memory/embedding.py | """
Embedding providers for memory
Supports OpenAI and local embedding models
"""
import hashlib
from abc import ABC, abstractmethod
from typing import List, Optional
class EmbeddingProvider(ABC):
"""Base class for embedding providers"""
@abstractmethod
def embed(self, text: str) -> List[float]:
"""Generate embedding for text"""
pass
@abstractmethod
def embed_batch(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings for multiple texts"""
pass
@property
@abstractmethod
def dimensions(self) -> int:
"""Get embedding dimensions"""
pass
class OpenAIEmbeddingProvider(EmbeddingProvider):
"""OpenAI embedding provider using REST API"""
def __init__(self, model: str = "text-embedding-3-small", api_key: Optional[str] = None, api_base: Optional[str] = None):
"""
Initialize OpenAI embedding provider
Args:
model: Model name (text-embedding-3-small or text-embedding-3-large)
api_key: OpenAI API key
api_base: Optional API base URL
"""
self.model = model
self.api_key = api_key
self.api_base = api_base or "https://api.openai.com/v1"
# Validate API key
if not self.api_key or self.api_key in ["", "YOUR API KEY", "YOUR_API_KEY"]:
raise ValueError("OpenAI API key is not configured. Please set 'open_ai_api_key' in config.json")
# Set dimensions based on model
self._dimensions = 1536 if "small" in model else 3072
def _call_api(self, input_data):
"""Call OpenAI embedding API using requests"""
import requests
url = f"{self.api_base}/embeddings"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
data = {
"input": input_data,
"model": self.model
}
try:
response = requests.post(url, headers=headers, json=data, timeout=5)
response.raise_for_status()
return response.json()
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"Failed to connect to OpenAI API at {url}. Please check your network connection and api_base configuration. Error: {str(e)}")
        except requests.exceptions.Timeout as e:
            raise TimeoutError(f"OpenAI API request timed out after 5s. Please check your network connection. Error: {str(e)}")
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 401:
                raise ValueError("Invalid OpenAI API key. Please check your 'open_ai_api_key' in config.json")
            elif e.response.status_code == 429:
                raise ValueError("OpenAI API rate limit exceeded. Please try again later.")
            else:
                raise ValueError(f"OpenAI API request failed: {e.response.status_code} - {e.response.text}")
def embed(self, text: str) -> List[float]:
"""Generate embedding for text"""
result = self._call_api(text)
return result["data"][0]["embedding"]
def embed_batch(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings for multiple texts"""
if not texts:
return []
result = self._call_api(texts)
return [item["embedding"] for item in result["data"]]
@property
def dimensions(self) -> int:
return self._dimensions
# LocalEmbeddingProvider removed - only use OpenAI embedding or keyword search
class EmbeddingCache:
"""Cache for embeddings to avoid recomputation"""
def __init__(self):
self.cache = {}
def get(self, text: str, provider: str, model: str) -> Optional[List[float]]:
"""Get cached embedding"""
key = self._compute_key(text, provider, model)
return self.cache.get(key)
def put(self, text: str, provider: str, model: str, embedding: List[float]):
"""Cache embedding"""
key = self._compute_key(text, provider, model)
self.cache[key] = embedding
@staticmethod
def _compute_key(text: str, provider: str, model: str) -> str:
"""Compute cache key"""
content = f"{provider}:{model}:{text}"
return hashlib.md5(content.encode('utf-8')).hexdigest()
def clear(self):
"""Clear cache"""
self.cache.clear()
def create_embedding_provider(
provider: str = "openai",
model: Optional[str] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None
) -> EmbeddingProvider:
"""
Factory function to create embedding provider
Only supports OpenAI embedding via REST API.
If initialization fails, caller should fall back to keyword-only search.
Args:
provider: Provider name (only "openai" is supported)
model: Model name (default: text-embedding-3-small)
api_key: OpenAI API key (required)
api_base: API base URL (default: https://api.openai.com/v1)
Returns:
EmbeddingProvider instance
Raises:
ValueError: If provider is not "openai" or api_key is missing
"""
if provider != "openai":
raise ValueError(f"Only 'openai' provider is supported, got: {provider}")
model = model or "text-embedding-3-small"
return OpenAIEmbeddingProvider(model=model, api_key=api_key, api_base=api_base)
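# --- Usage sketch (illustration only, not part of the original file) ---
# Pairs the provider with EmbeddingCache so repeated texts hit the API once.
# Reading the key from OPENAI_API_KEY is an assumption for the demo.
if __name__ == "__main__":
    import os
    provider = create_embedding_provider("openai", api_key=os.environ.get("OPENAI_API_KEY"))
    cache = EmbeddingCache()
    for _ in range(2):
        vector = cache.get("hello world", "openai", provider.model)
        if vector is None:
            vector = provider.embed("hello world")  # network call on first pass only
            cache.put("hello world", "openai", provider.model, vector)
    print(f"{len(vector)} dims (provider reports {provider.dimensions})")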
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/memory/embedding.py",
"license": "MIT License",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/protocol/agent.py | import json
import os
import time
import threading
from common.log import logger
from agent.protocol.models import LLMRequest, LLMModel
from agent.protocol.agent_stream import AgentStreamExecutor
from agent.protocol.result import AgentAction, AgentActionType, ToolResult, AgentResult
from agent.tools.base_tool import BaseTool, ToolStage
class Agent:
def __init__(self, system_prompt: str, description: str = "AI Agent", model: LLMModel = None,
tools=None, output_mode="print", max_steps=100, max_context_tokens=None,
context_reserve_tokens=None, memory_manager=None, name: str = None,
workspace_dir: str = None, skill_manager=None, enable_skills: bool = True,
runtime_info: dict = None):
"""
Initialize the Agent with system prompt, model, description.
:param system_prompt: The system prompt for the agent.
:param description: A description of the agent.
:param model: An instance of LLMModel to be used by the agent.
:param tools: Optional list of tools for the agent to use.
:param output_mode: Control how execution progress is displayed:
"print" for console output or "logger" for using logger
:param max_steps: Maximum number of steps the agent can take (default: 100)
:param max_context_tokens: Maximum tokens to keep in context (default: None, auto-calculated based on model)
:param context_reserve_tokens: Reserve tokens for new requests (default: None, auto-calculated)
:param memory_manager: Optional MemoryManager instance for memory operations
:param name: [Deprecated] The name of the agent (no longer used in single-agent system)
:param workspace_dir: Optional workspace directory for workspace-specific skills
:param skill_manager: Optional SkillManager instance (will be created if None and enable_skills=True)
:param enable_skills: Whether to enable skills support (default: True)
:param runtime_info: Optional runtime info dict (with _get_current_time callable for dynamic time)
"""
self.name = name or "Agent"
self.system_prompt = system_prompt
self.model: LLMModel = model # Instance of LLMModel
self.description = description
self.tools: list = []
self.max_steps = max_steps # max tool-call steps, default 100
self.max_context_tokens = max_context_tokens # max tokens in context
self.context_reserve_tokens = context_reserve_tokens # reserve tokens for new requests
self.captured_actions = [] # Initialize captured actions list
self.output_mode = output_mode
self.last_usage = None # Store last API response usage info
self.messages = [] # Unified message history for stream mode
self.messages_lock = threading.Lock() # Lock for thread-safe message operations
self.memory_manager = memory_manager # Memory manager for auto memory flush
self.workspace_dir = workspace_dir # Workspace directory
self.enable_skills = enable_skills # Skills enabled flag
self.runtime_info = runtime_info # Runtime info for dynamic time update
# Initialize skill manager
self.skill_manager = None
if enable_skills:
if skill_manager:
self.skill_manager = skill_manager
else:
# Auto-create skill manager
try:
from agent.skills import SkillManager
custom_dir = os.path.join(workspace_dir, "skills") if workspace_dir else None
self.skill_manager = SkillManager(custom_dir=custom_dir)
logger.debug(f"Initialized SkillManager with {len(self.skill_manager.skills)} skills")
except Exception as e:
logger.warning(f"Failed to initialize SkillManager: {e}")
if tools:
for tool in tools:
self.add_tool(tool)
def add_tool(self, tool: BaseTool):
"""
Add a tool to the agent.
:param tool: The tool to add (either a tool instance or a tool name)
"""
# If tool is already an instance, use it directly
tool.model = self.model
self.tools.append(tool)
def get_skills_prompt(self, skill_filter=None) -> str:
"""
Get the skills prompt to append to system prompt.
:param skill_filter: Optional list of skill names to include
:return: Formatted skills prompt or empty string
"""
if not self.skill_manager:
return ""
try:
return self.skill_manager.build_skills_prompt(skill_filter=skill_filter)
except Exception as e:
logger.warning(f"Failed to build skills prompt: {e}")
return ""
def get_full_system_prompt(self, skill_filter=None) -> str:
"""
Get the full system prompt including skills.
Note: Skills are now built into the system prompt by PromptBuilder,
so we just return the base prompt directly. This method is kept for
backward compatibility.
:param skill_filter: Optional list of skill names to include (deprecated)
:return: Complete system prompt
"""
prompt = self.system_prompt
# Rebuild tool list section to reflect current self.tools
prompt = self._rebuild_tool_list_section(prompt)
# If runtime_info contains dynamic time function, rebuild runtime section
if self.runtime_info and callable(self.runtime_info.get('_get_current_time')):
prompt = self._rebuild_runtime_section(prompt)
return prompt
def _rebuild_runtime_section(self, prompt: str) -> str:
"""
Rebuild runtime info section with current time.
This method dynamically updates the runtime info section by calling
the _get_current_time function from runtime_info.
:param prompt: Original system prompt
:return: Updated system prompt with current runtime info
"""
try:
# Get current time dynamically
time_info = self.runtime_info['_get_current_time']()
# Build new runtime section
runtime_lines = [
"\n## 运行时信息\n",
"\n",
f"当前时间: {time_info['time']} {time_info['weekday']} ({time_info['timezone']})\n",
"\n"
]
# Add other runtime info
runtime_parts = []
if self.runtime_info.get("model"):
runtime_parts.append(f"模型={self.runtime_info['model']}")
if self.runtime_info.get("workspace"):
# Replace backslashes with forward slashes for Windows paths
workspace_path = str(self.runtime_info['workspace']).replace('\\', '/')
runtime_parts.append(f"工作空间={workspace_path}")
if self.runtime_info.get("channel") and self.runtime_info.get("channel") != "web":
runtime_parts.append(f"渠道={self.runtime_info['channel']}")
if runtime_parts:
runtime_lines.append("运行时: " + " | ".join(runtime_parts) + "\n")
runtime_lines.append("\n")
new_runtime_section = "".join(runtime_lines)
# Find and replace the runtime section
import re
pattern = r'\n## 运行时信息\s*\n.*?(?=\n##|\Z)'
updated_prompt = re.sub(pattern, new_runtime_section.rstrip('\n'), prompt, flags=re.DOTALL)
return updated_prompt
except Exception as e:
logger.warning(f"Failed to rebuild runtime section: {e}")
return prompt
def _rebuild_tool_list_section(self, prompt: str) -> str:
"""
Rebuild the tool list inside the '## 工具系统' section so that it
always reflects the current ``self.tools`` (handles dynamic add/remove
of conditional tools like web_search).
"""
import re
from agent.prompt.builder import _build_tooling_section
try:
if not self.tools:
return prompt
new_lines = _build_tooling_section(self.tools, "zh")
new_section = "\n".join(new_lines).rstrip("\n")
# Replace existing tooling section
pattern = r'## 工具系统\s*\n.*?(?=\n## |\Z)'
updated = re.sub(pattern, new_section, prompt, count=1, flags=re.DOTALL)
return updated
except Exception as e:
logger.warning(f"Failed to rebuild tool list section: {e}")
return prompt
def refresh_skills(self):
"""Refresh the loaded skills."""
if self.skill_manager:
self.skill_manager.refresh_skills()
logger.info(f"Refreshed skills: {len(self.skill_manager.skills)} skills loaded")
def list_skills(self):
"""
List all loaded skills.
:return: List of skill entries or empty list
"""
if not self.skill_manager:
return []
return self.skill_manager.list_skills()
def _get_model_context_window(self) -> int:
"""
Get the model's context window size in tokens.
Auto-detect based on model name.
Model context windows:
- Claude 3.5/3.7 Sonnet: 200K tokens
- Claude 3 Opus: 200K tokens
- GPT-4 Turbo/128K: 128K tokens
- GPT-4: 8K-32K tokens
        - GPT-3.5: 4K-16K tokens
- DeepSeek: 64K tokens
:return: Context window size in tokens
"""
if self.model and hasattr(self.model, 'model'):
model_name = self.model.model.lower()
# Claude models - 200K context
if 'claude-3' in model_name or 'claude-sonnet' in model_name:
return 200000
# GPT-4 models
elif 'gpt-4' in model_name:
if 'turbo' in model_name or '128k' in model_name:
return 128000
elif '32k' in model_name:
return 32000
else:
return 8000
# GPT-3.5
elif 'gpt-3.5' in model_name:
if '16k' in model_name:
return 16000
else:
return 4000
# DeepSeek
elif 'deepseek' in model_name:
return 64000
# Gemini models
elif 'gemini' in model_name:
if '2.0' in model_name or 'exp' in model_name:
return 2000000 # Gemini 2.0: 2M tokens
else:
return 1000000 # Gemini 1.5: 1M tokens
# Default conservative value
return 128000
def _get_context_reserve_tokens(self) -> int:
"""
Get the number of tokens to reserve for new requests.
This prevents context overflow by keeping a buffer.
:return: Number of tokens to reserve
"""
if self.context_reserve_tokens is not None:
return self.context_reserve_tokens
# Reserve ~10% of context window, with min 10K and max 200K
context_window = self._get_model_context_window()
reserve = int(context_window * 0.1)
return max(10000, min(200000, reserve))
def _estimate_message_tokens(self, message: dict) -> int:
"""
Estimate token count for a message.
Uses chars/3 for Chinese-heavy content and chars/4 for ASCII-heavy content,
plus per-block overhead for tool_use / tool_result structures.
:param message: Message dict with 'role' and 'content'
:return: Estimated token count
"""
content = message.get('content', '')
if isinstance(content, str):
return max(1, self._estimate_text_tokens(content))
elif isinstance(content, list):
total_tokens = 0
for part in content:
if not isinstance(part, dict):
continue
block_type = part.get('type', '')
if block_type == 'text':
total_tokens += self._estimate_text_tokens(part.get('text', ''))
elif block_type == 'image':
total_tokens += 1200
elif block_type == 'tool_use':
# tool_use has id + name + input (JSON-encoded)
total_tokens += 50 # overhead for structure
input_data = part.get('input', {})
if isinstance(input_data, dict):
input_str = json.dumps(input_data, ensure_ascii=False)
total_tokens += self._estimate_text_tokens(input_str)
elif block_type == 'tool_result':
# tool_result has tool_use_id + content
total_tokens += 30 # overhead for structure
result_content = part.get('content', '')
if isinstance(result_content, str):
total_tokens += self._estimate_text_tokens(result_content)
else:
# Unknown block type, estimate conservatively
total_tokens += 10
return max(1, total_tokens)
return 1
@staticmethod
def _estimate_text_tokens(text: str) -> int:
"""
Estimate token count for a text string.
Chinese / CJK characters typically use ~1.5 tokens each,
while ASCII uses ~0.25 tokens per char (4 chars/token).
We use a weighted average based on the character mix.
:param text: Input text
:return: Estimated token count
"""
if not text:
return 0
# Count non-ASCII characters (CJK, emoji, etc.)
non_ascii = sum(1 for c in text if ord(c) > 127)
ascii_count = len(text) - non_ascii
# CJK chars: ~1.5 tokens each; ASCII: ~0.25 tokens per char
return int(non_ascii * 1.5 + ascii_count * 0.25) + 1
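    # Worked example (illustration only): "hello 世界" has 6 ASCII chars and
    # 2 CJK chars, so the estimate is int(2 * 1.5 + 6 * 0.25) + 1 = 5 tokens.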
def _find_tool(self, tool_name: str):
"""Find and return a tool with the specified name"""
for tool in self.tools:
if tool.name == tool_name:
# Only pre-process stage tools can be actively called
if tool.stage == ToolStage.PRE_PROCESS:
tool.model = self.model
tool.context = self # Set tool context
return tool
else:
# If it's a post-process tool, return None to prevent direct calling
logger.warning(f"Tool {tool_name} is a post-process tool and cannot be called directly.")
return None
return None
# output function based on mode
def output(self, message="", end="\n"):
if self.output_mode == "print":
print(message, end=end)
elif message:
logger.info(message)
def _execute_post_process_tools(self):
"""Execute all post-process stage tools"""
# Get all post-process stage tools
post_process_tools = [tool for tool in self.tools if tool.stage == ToolStage.POST_PROCESS]
# Execute each tool
for tool in post_process_tools:
# Set tool context
tool.context = self
# Record start time for execution timing
start_time = time.time()
# Execute tool (with empty parameters, tool will extract needed info from context)
result = tool.execute({})
# Calculate execution time
execution_time = time.time() - start_time
# Capture tool use for tracking
self.capture_tool_use(
tool_name=tool.name,
input_params={}, # Post-process tools typically don't take parameters
output=result.result,
status=result.status,
error_message=str(result.result) if result.status == "error" else None,
execution_time=execution_time
)
# Log result
if result.status == "success":
# Print tool execution result in the desired format
self.output(f"\n🛠️ {tool.name}: {json.dumps(result.result)}")
else:
# Print failure in print mode
self.output(f"\n🛠️ {tool.name}: {json.dumps({'status': 'error', 'message': str(result.result)})}")
def capture_tool_use(self, tool_name, input_params, output, status, thought=None, error_message=None,
execution_time=0.0):
"""
Capture a tool use action.
:param thought: thought content
:param tool_name: Name of the tool used
:param input_params: Parameters passed to the tool
:param output: Output from the tool
:param status: Status of the tool execution
:param error_message: Error message if the tool execution failed
:param execution_time: Time taken to execute the tool
"""
tool_result = ToolResult(
tool_name=tool_name,
input_params=input_params,
output=output,
status=status,
error_message=error_message,
execution_time=execution_time
)
action = AgentAction(
agent_id=self.id if hasattr(self, 'id') else str(id(self)),
agent_name=self.name,
action_type=AgentActionType.TOOL_USE,
tool_result=tool_result,
thought=thought
)
self.captured_actions.append(action)
return action
def run_stream(self, user_message: str, on_event=None, clear_history: bool = False, skill_filter=None) -> str:
"""
Execute single agent task with streaming (based on tool-call)
This method supports:
- Streaming output
- Multi-turn reasoning based on tool-call
- Event callbacks
- Persistent conversation history across calls
Args:
user_message: User message
on_event: Event callback function callback(event: dict)
event = {"type": str, "timestamp": float, "data": dict}
clear_history: If True, clear conversation history before this call (default: False)
skill_filter: Optional list of skill names to include in this run
Returns:
Final response text
Example:
# Multi-turn conversation with memory
response1 = agent.run_stream("My name is Alice")
response2 = agent.run_stream("What's my name?") # Will remember Alice
# Single-turn without memory
response = agent.run_stream("Hello", clear_history=True)
"""
# Clear history if requested
if clear_history:
with self.messages_lock:
self.messages = []
# Get model to use
if not self.model:
raise ValueError("No model available for agent")
# Get full system prompt with skills
full_system_prompt = self.get_full_system_prompt(skill_filter=skill_filter)
# Create a copy of messages for this execution to avoid concurrent modification
# Record the original length to track which messages are new
with self.messages_lock:
messages_copy = self.messages.copy()
original_length = len(self.messages)
# Get max_context_turns from config
from config import conf
max_context_turns = conf().get("agent_max_context_turns", 20)
# Create stream executor with copied message history
executor = AgentStreamExecutor(
agent=self,
model=self.model,
system_prompt=full_system_prompt,
tools=self.tools,
max_turns=self.max_steps,
on_event=on_event,
messages=messages_copy, # Pass copied message history
max_context_turns=max_context_turns
)
# Execute
try:
response = executor.run_stream(user_message)
except Exception:
# If executor cleared its messages (context overflow / message format error),
# sync that back to the Agent's own message list so the next request
# starts fresh instead of hitting the same overflow forever.
if len(executor.messages) == 0:
with self.messages_lock:
self.messages.clear()
logger.info("[Agent] Cleared Agent message history after executor recovery")
raise
# Append only the NEW messages from this execution (thread-safe)
# This allows concurrent requests to both contribute to history
with self.messages_lock:
new_messages = executor.messages[original_length:]
self.messages.extend(new_messages)
# Store executor reference for agent_bridge to access files_to_send
self.stream_executor = executor
# Execute all post-process tools
self._execute_post_process_tools()
return response
def clear_history(self):
"""Clear conversation history and captured actions"""
self.messages = []
self.captured_actions = [] | {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/protocol/agent.py",
"license": "MIT License",
"lines": 445,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/protocol/context.py | class TeamContext:
def __init__(self, name: str, description: str, rule: str, agents: list, max_steps: int = 100):
"""
Initialize the TeamContext with a name, description, rules, a list of agents, and a user question.
:param name: The name of the group context.
:param description: A description of the group context.
:param rule: The rules governing the group context.
:param agents: A list of agents in the context.
"""
self.name = name
self.description = description
self.rule = rule
self.agents = agents
self.user_task = "" # For backward compatibility
self.task = None # Will be a Task instance
self.model = None # Will be an instance of LLMModel
self.task_short_name = None # Store the task directory name
# List of agents that have been executed
self.agent_outputs: list = []
self.current_steps = 0
self.max_steps = max_steps
class AgentOutput:
def __init__(self, agent_name: str, output: str):
self.agent_name = agent_name
self.output = output | {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/protocol/context.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/protocol/models.py | """
Models module for agent system.
Provides basic model classes needed by tools and bridge integration.
"""
from typing import Any, Dict, List, Optional
class LLMRequest:
"""Request model for LLM operations"""
def __init__(self, messages: List[Dict[str, str]] = None, model: Optional[str] = None,
temperature: float = 0.7, max_tokens: Optional[int] = None,
stream: bool = False, tools: Optional[List] = None, **kwargs):
self.messages = messages or []
self.model = model
self.temperature = temperature
self.max_tokens = max_tokens
self.stream = stream
self.tools = tools
# Allow extra attributes
for key, value in kwargs.items():
setattr(self, key, value)
class LLMModel:
"""Base class for LLM models"""
def __init__(self, model: str = None, **kwargs):
self.model = model
self.config = kwargs
def call(self, request: LLMRequest):
"""
Call the model with a request.
This is a placeholder implementation.
"""
raise NotImplementedError("LLMModel.call not implemented in this context")
def call_stream(self, request: LLMRequest):
"""
Call the model with streaming.
This is a placeholder implementation.
"""
raise NotImplementedError("LLMModel.call_stream not implemented in this context")
class ModelFactory:
"""Factory for creating model instances"""
@staticmethod
def create_model(model_type: str, **kwargs):
"""
Create a model instance based on type.
This is a placeholder implementation.
"""
raise NotImplementedError("ModelFactory.create_model not implemented in this context") | {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/protocol/models.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zhayujie/chatgpt-on-wechat:agent/protocol/result.py | from __future__ import annotations
import time
import uuid
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Dict, Any, Optional
from agent.protocol.task import Task, TaskStatus
class AgentActionType(Enum):
"""Enum representing different types of agent actions."""
TOOL_USE = "tool_use"
THINKING = "thinking"
FINAL_ANSWER = "final_answer"
@dataclass
class ToolResult:
"""
Represents the result of a tool use.
Attributes:
tool_name: Name of the tool used
input_params: Parameters passed to the tool
output: Output from the tool
status: Status of the tool execution (success/error)
error_message: Error message if the tool execution failed
execution_time: Time taken to execute the tool
"""
tool_name: str
input_params: Dict[str, Any]
output: Any
status: str
error_message: Optional[str] = None
execution_time: float = 0.0
@dataclass
class AgentAction:
"""
Represents an action taken by an agent.
Attributes:
id: Unique identifier for the action
agent_id: ID of the agent that performed the action
agent_name: Name of the agent that performed the action
action_type: Type of action (tool use, thinking, final answer)
content: Content of the action (thought content, final answer content)
tool_result: Tool use details if action_type is TOOL_USE
timestamp: When the action was performed
"""
agent_id: str
agent_name: str
action_type: AgentActionType
id: str = field(default_factory=lambda: str(uuid.uuid4()))
content: str = ""
tool_result: Optional[ToolResult] = None
thought: Optional[str] = None
timestamp: float = field(default_factory=time.time)
@dataclass
class AgentResult:
"""
Represents the result of an agent's execution.
Attributes:
final_answer: The final answer provided by the agent
step_count: Number of steps taken by the agent
status: Status of the execution (success/error)
error_message: Error message if execution failed
"""
final_answer: str
step_count: int
status: str = "success"
error_message: Optional[str] = None
@classmethod
def success(cls, final_answer: str, step_count: int) -> "AgentResult":
"""Create a successful result"""
return cls(final_answer=final_answer, step_count=step_count)
@classmethod
def error(cls, error_message: str, step_count: int = 0) -> "AgentResult":
"""Create an error result"""
return cls(
final_answer=f"Error: {error_message}",
step_count=step_count,
status="error",
error_message=error_message
)
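    # Usage sketch (illustration only):
    #   AgentResult.success("done", step_count=3).is_error  -> False
    #   AgentResult.error("tool crashed").is_error          -> True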
@property
def is_error(self) -> bool:
"""Check if the result represents an error"""
return self.status == "error" | {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/protocol/result.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zhayujie/chatgpt-on-wechat:agent/protocol/task.py | from __future__ import annotations
import time
import uuid
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Any, List
class TaskType(Enum):
"""Enum representing different types of tasks."""
TEXT = "text"
IMAGE = "image"
VIDEO = "video"
AUDIO = "audio"
FILE = "file"
MIXED = "mixed"
class TaskStatus(Enum):
"""Enum representing the status of a task."""
INIT = "init" # Initial state
PROCESSING = "processing" # In progress
COMPLETED = "completed" # Completed
FAILED = "failed" # Failed
@dataclass
class Task:
"""
Represents a task to be processed by an agent.
Attributes:
id: Unique identifier for the task
content: The primary text content of the task
type: Type of the task
status: Current status of the task
created_at: Timestamp when the task was created
updated_at: Timestamp when the task was last updated
metadata: Additional metadata for the task
images: List of image URLs or base64 encoded images
videos: List of video URLs
audios: List of audio URLs or base64 encoded audios
files: List of file URLs or paths
"""
id: str = field(default_factory=lambda: str(uuid.uuid4()))
content: str = ""
type: TaskType = TaskType.TEXT
status: TaskStatus = TaskStatus.INIT
created_at: float = field(default_factory=time.time)
updated_at: float = field(default_factory=time.time)
metadata: Dict[str, Any] = field(default_factory=dict)
# Media content
images: List[str] = field(default_factory=list)
videos: List[str] = field(default_factory=list)
audios: List[str] = field(default_factory=list)
files: List[str] = field(default_factory=list)
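    # Note: because __init__ is defined explicitly below, @dataclass does not
    # generate one (dataclasses skip __init__ when the class already defines it)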
def __init__(self, content: str = "", **kwargs):
"""
Initialize a Task with content and optional keyword arguments.
Args:
content: The text content of the task
**kwargs: Additional attributes to set
"""
self.id = kwargs.get('id', str(uuid.uuid4()))
self.content = content
self.type = kwargs.get('type', TaskType.TEXT)
self.status = kwargs.get('status', TaskStatus.INIT)
self.created_at = kwargs.get('created_at', time.time())
self.updated_at = kwargs.get('updated_at', time.time())
self.metadata = kwargs.get('metadata', {})
self.images = kwargs.get('images', [])
self.videos = kwargs.get('videos', [])
self.audios = kwargs.get('audios', [])
self.files = kwargs.get('files', [])
def get_text(self) -> str:
"""
Get the text content of the task.
Returns:
The text content
"""
return self.content
def update_status(self, status: TaskStatus) -> None:
"""
Update the status of the task.
Args:
status: The new status
"""
self.status = status
self.updated_at = time.time() | {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/protocol/task.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zhayujie/chatgpt-on-wechat:agent/skills/config.py | """
Configuration support for skills.
"""
import os
import platform
from typing import Dict, Optional, List
from agent.skills.types import SkillEntry
def resolve_runtime_platform() -> str:
"""Get the current runtime platform."""
return platform.system().lower()
def has_binary(bin_name: str) -> bool:
"""
Check if a binary is available in PATH.
:param bin_name: Binary name to check
:return: True if binary is available
"""
import shutil
return shutil.which(bin_name) is not None
def has_any_binary(bin_names: List[str]) -> bool:
"""
Check if any of the given binaries is available.
:param bin_names: List of binary names to check
:return: True if at least one binary is available
"""
return any(has_binary(bin_name) for bin_name in bin_names)
def has_env_var(env_name: str) -> bool:
"""
Check if an environment variable is set.
:param env_name: Environment variable name
:return: True if environment variable is set
"""
return env_name in os.environ and bool(os.environ[env_name].strip())
def get_skill_config(config: Optional[Dict], skill_name: str) -> Optional[Dict]:
"""
Get skill-specific configuration.
:param config: Global configuration dictionary
:param skill_name: Name of the skill
:return: Skill configuration or None
"""
if not config:
return None
skills_config = config.get('skills', {})
if not isinstance(skills_config, dict):
return None
entries = skills_config.get('entries', {})
if not isinstance(entries, dict):
return None
return entries.get(skill_name)
def should_include_skill(
entry: SkillEntry,
config: Optional[Dict] = None,
current_platform: Optional[str] = None,
) -> bool:
"""
Determine if a skill should be included based on requirements.
Simple rule: Skills are auto-enabled if their requirements are met.
- Has required API keys → enabled
- Missing API keys → disabled
- Wrong keys → enabled but will fail at runtime (LLM will handle error)
:param entry: SkillEntry to check
:param config: Configuration dictionary (currently unused, reserved for future)
:param current_platform: Current platform (default: auto-detect)
:return: True if skill should be included
"""
metadata = entry.metadata
# No metadata = always include (no requirements)
if not metadata:
return True
# Check platform requirements (can't work on wrong platform)
if metadata.os:
platform_name = current_platform or resolve_runtime_platform()
# Map common platform names
platform_map = {
'darwin': 'darwin',
'linux': 'linux',
'windows': 'win32',
}
normalized_platform = platform_map.get(platform_name, platform_name)
if normalized_platform not in metadata.os:
return False
# If skill has 'always: true', include it regardless of other requirements
if metadata.always:
return True
# Check requirements
if metadata.requires:
# Check required binaries (all must be present)
required_bins = metadata.requires.get('bins', [])
if required_bins:
if not all(has_binary(bin_name) for bin_name in required_bins):
return False
# Check anyBins (at least one must be present)
any_bins = metadata.requires.get('anyBins', [])
if any_bins:
if not has_any_binary(any_bins):
return False
# Check environment variables (API keys)
# Simple rule: All required env vars must be set
required_env = metadata.requires.get('env', [])
if required_env:
for env_name in required_env:
if not has_env_var(env_name):
# Missing required API key → disable skill
return False
return True
def is_config_path_truthy(config: Dict, path: str) -> bool:
"""
Check if a config path resolves to a truthy value.
:param config: Configuration dictionary
:param path: Dot-separated path (e.g., 'skills.enabled')
:return: True if path resolves to truthy value
"""
parts = path.split('.')
current = config
for part in parts:
if not isinstance(current, dict):
return False
current = current.get(part)
if current is None:
return False
# Check if value is truthy
if isinstance(current, bool):
return current
if isinstance(current, (int, float)):
return current != 0
if isinstance(current, str):
return bool(current.strip())
return bool(current)
def resolve_config_path(config: Dict, path: str):
"""
Resolve a dot-separated config path to its value.
:param config: Configuration dictionary
:param path: Dot-separated path
:return: Value at path or None
"""
parts = path.split('.')
current = config
for part in parts:
if not isinstance(current, dict):
return None
current = current.get(part)
if current is None:
return None
return current
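# --- Usage sketch (illustration only, not part of the original file) ---
# Dotted-path lookups over a nested config dict.
if __name__ == "__main__":
    demo_conf = {"skills": {"enabled": True, "entries": {"weather": {"units": "metric"}}}}
    print(is_config_path_truthy(demo_conf, "skills.enabled"))        # True
    print(resolve_config_path(demo_conf, "skills.entries.weather"))  # {'units': 'metric'}
    print(resolve_config_path(demo_conf, "skills.missing"))          # None
    print(get_skill_config(demo_conf, "weather"))                    # {'units': 'metric'}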
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/skills/config.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/skills/formatter.py | """
Skill formatter for generating prompts from skills.
"""
from typing import List
from agent.skills.types import Skill, SkillEntry
def format_skills_for_prompt(skills: List[Skill]) -> str:
"""
Format skills for inclusion in a system prompt.
Uses XML format per Agent Skills standard.
Skills with disable_model_invocation=True are excluded.
:param skills: List of skills to format
:return: Formatted prompt text
"""
# Filter out skills that should not be invoked by the model
visible_skills = [s for s in skills if not s.disable_model_invocation]
if not visible_skills:
return ""
lines = [
"",
"<available_skills>",
]
for skill in visible_skills:
lines.append(" <skill>")
lines.append(f" <name>{_escape_xml(skill.name)}</name>")
lines.append(f" <description>{_escape_xml(skill.description)}</description>")
lines.append(f" <location>{_escape_xml(skill.file_path)}</location>")
lines.append(" </skill>")
lines.append("</available_skills>")
return "\n".join(lines)
def format_skill_entries_for_prompt(entries: List[SkillEntry]) -> str:
"""
Format skill entries for inclusion in a system prompt.
:param entries: List of skill entries to format
:return: Formatted prompt text
"""
skills = [entry.skill for entry in entries]
return format_skills_for_prompt(skills)
def _escape_xml(text: str) -> str:
    """Escape XML special characters."""
    return (text
            .replace('&', '&amp;')
            .replace('<', '&lt;')
            .replace('>', '&gt;')
            .replace('"', '&quot;')
            .replace("'", '&#39;'))
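if __name__ == "__main__":
    # Hedged sketch (not part of the original file): print the XML block that
    # a single skill contributes to the system prompt. The skill values here
    # are made up for illustration.
    _demo = Skill(
        name="web-fetch",
        description="Fetch a URL & return text",
        file_path="skills/web-fetch/SKILL.md",
        base_dir="skills/web-fetch",
        source="builtin",
        content="",
    )
    print(format_skills_for_prompt([_demo]))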
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/skills/formatter.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/skills/frontmatter.py | """
Frontmatter parsing for skills.
"""
import re
import json
from typing import Dict, Any, Optional, List
from agent.skills.types import SkillMetadata, SkillInstallSpec
def parse_frontmatter(content: str) -> Dict[str, Any]:
"""
Parse YAML-style frontmatter from markdown content.
Returns a dictionary of frontmatter fields.
"""
frontmatter = {}
# Match frontmatter block between --- markers
match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL)
if not match:
return frontmatter
frontmatter_text = match.group(1)
# Try to use PyYAML for proper YAML parsing
try:
import yaml
frontmatter = yaml.safe_load(frontmatter_text)
if not isinstance(frontmatter, dict):
frontmatter = {}
return frontmatter
except ImportError:
# Fallback to simple parsing if PyYAML not available
pass
except Exception:
# If YAML parsing fails, fall back to simple parsing
pass
# Simple YAML-like parsing (supports key: value format only)
# This is a fallback for when PyYAML is not available
for line in frontmatter_text.split('\n'):
line = line.strip()
if not line or line.startswith('#'):
continue
if ':' in line:
key, value = line.split(':', 1)
key = key.strip()
value = value.strip()
# Try to parse as JSON if it looks like JSON
if value.startswith('{') or value.startswith('['):
try:
value = json.loads(value)
except json.JSONDecodeError:
pass
# Parse boolean values
elif value.lower() in ('true', 'false'):
value = value.lower() == 'true'
# Parse numbers
elif value.isdigit():
value = int(value)
frontmatter[key] = value
return frontmatter
def parse_metadata(frontmatter: Dict[str, Any]) -> Optional[SkillMetadata]:
"""
Parse skill metadata from frontmatter.
Looks for 'metadata' field containing JSON with skill configuration.
"""
metadata_raw = frontmatter.get('metadata')
if not metadata_raw:
return None
# If it's a string, try to parse as JSON
if isinstance(metadata_raw, str):
try:
metadata_raw = json.loads(metadata_raw)
except json.JSONDecodeError:
return None
if not isinstance(metadata_raw, dict):
return None
# Use metadata_raw directly (COW format)
meta_obj = metadata_raw
# Parse install specs
install_specs = []
install_raw = meta_obj.get('install', [])
if isinstance(install_raw, list):
for spec_raw in install_raw:
if not isinstance(spec_raw, dict):
continue
kind = spec_raw.get('kind', spec_raw.get('type', '')).lower()
if not kind:
continue
spec = SkillInstallSpec(
kind=kind,
id=spec_raw.get('id'),
label=spec_raw.get('label'),
bins=_normalize_string_list(spec_raw.get('bins')),
os=_normalize_string_list(spec_raw.get('os')),
formula=spec_raw.get('formula'),
package=spec_raw.get('package'),
module=spec_raw.get('module'),
url=spec_raw.get('url'),
archive=spec_raw.get('archive'),
extract=spec_raw.get('extract', False),
strip_components=spec_raw.get('stripComponents'),
target_dir=spec_raw.get('targetDir'),
)
install_specs.append(spec)
# Parse requires
requires = {}
requires_raw = meta_obj.get('requires', {})
if isinstance(requires_raw, dict):
for key, value in requires_raw.items():
requires[key] = _normalize_string_list(value)
return SkillMetadata(
always=meta_obj.get('always', False),
skill_key=meta_obj.get('skillKey'),
primary_env=meta_obj.get('primaryEnv'),
emoji=meta_obj.get('emoji'),
homepage=meta_obj.get('homepage'),
os=_normalize_string_list(meta_obj.get('os')),
requires=requires,
install=install_specs,
)
def _normalize_string_list(value: Any) -> List[str]:
"""Normalize a value to a list of strings."""
if not value:
return []
if isinstance(value, list):
return [str(v).strip() for v in value if v]
if isinstance(value, str):
return [v.strip() for v in value.split(',') if v.strip()]
return []
def parse_boolean_value(value: Optional[str], default: bool = False) -> bool:
"""Parse a boolean value from frontmatter."""
if value is None:
return default
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.lower() in ('true', '1', 'yes', 'on')
return default
def get_frontmatter_value(frontmatter: Dict[str, Any], key: str) -> Optional[str]:
"""Get a frontmatter value as a string."""
value = frontmatter.get(key)
return str(value) if value is not None else None
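if __name__ == "__main__":
    # Hedged sketch (not part of the original file): round-trip a minimal
    # SKILL.md frontmatter block through the parsers above. DEMO_KEY is a
    # made-up variable name for illustration.
    _md = (
        "---\n"
        "name: demo\n"
        'metadata: {"always": true, "requires": {"env": ["DEMO_KEY"]}}\n'
        "---\n"
        "Body text.\n"
    )
    _fm = parse_frontmatter(_md)
    _meta = parse_metadata(_fm)
    print(_fm.get("name"))
    if _meta:
        print(_meta.always, _meta.requires)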
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/skills/frontmatter.py",
"license": "MIT License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/skills/loader.py | """
Skill loader for discovering and loading skills from directories.
"""
import os
from pathlib import Path
from typing import List, Optional, Dict
from common.log import logger
from agent.skills.types import Skill, SkillEntry, LoadSkillsResult, SkillMetadata
from agent.skills.frontmatter import parse_frontmatter, parse_metadata, parse_boolean_value, get_frontmatter_value
class SkillLoader:
"""Loads skills from various directories."""
def __init__(self):
pass
def load_skills_from_dir(self, dir_path: str, source: str) -> LoadSkillsResult:
"""
Load skills from a directory.
Discovery rules:
- Direct .md files in the root directory
- Recursive SKILL.md files under subdirectories
:param dir_path: Directory path to scan
:param source: Source identifier ('builtin' or 'custom')
:return: LoadSkillsResult with skills and diagnostics
"""
skills = []
diagnostics = []
if not os.path.exists(dir_path):
diagnostics.append(f"Directory does not exist: {dir_path}")
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
if not os.path.isdir(dir_path):
diagnostics.append(f"Path is not a directory: {dir_path}")
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
# Load skills from root-level .md files and subdirectories
result = self._load_skills_recursive(dir_path, source, include_root_files=True)
return result
def _load_skills_recursive(
self,
dir_path: str,
source: str,
include_root_files: bool = False
) -> LoadSkillsResult:
"""
Recursively load skills from a directory.
:param dir_path: Directory to scan
:param source: Source identifier
:param include_root_files: Whether to include root-level .md files
:return: LoadSkillsResult
"""
skills = []
diagnostics = []
try:
entries = os.listdir(dir_path)
except Exception as e:
diagnostics.append(f"Failed to list directory {dir_path}: {e}")
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
for entry in entries:
# Skip hidden files and directories
if entry.startswith('.'):
continue
# Skip common non-skill directories
if entry in ('node_modules', '__pycache__', 'venv', '.git'):
continue
full_path = os.path.join(dir_path, entry)
# Handle directories
if os.path.isdir(full_path):
# Recursively scan subdirectories
sub_result = self._load_skills_recursive(full_path, source, include_root_files=False)
skills.extend(sub_result.skills)
diagnostics.extend(sub_result.diagnostics)
continue
# Handle files
if not os.path.isfile(full_path):
continue
# Check if this is a skill file
is_root_md = include_root_files and entry.endswith('.md')
is_skill_md = not include_root_files and entry == 'SKILL.md'
if not (is_root_md or is_skill_md):
continue
# Load the skill
skill_result = self._load_skill_from_file(full_path, source)
if skill_result.skills:
skills.extend(skill_result.skills)
diagnostics.extend(skill_result.diagnostics)
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
def _load_skill_from_file(self, file_path: str, source: str) -> LoadSkillsResult:
"""
Load a single skill from a markdown file.
:param file_path: Path to the skill markdown file
:param source: Source identifier
:return: LoadSkillsResult
"""
diagnostics = []
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
diagnostics.append(f"Failed to read skill file {file_path}: {e}")
return LoadSkillsResult(skills=[], diagnostics=diagnostics)
# Parse frontmatter
frontmatter = parse_frontmatter(content)
# Get skill name and description
skill_dir = os.path.dirname(file_path)
parent_dir_name = os.path.basename(skill_dir)
name = frontmatter.get('name', parent_dir_name)
description = frontmatter.get('description', '')
# Normalize name (handle both string and list)
if isinstance(name, list):
name = name[0] if name else parent_dir_name
elif not isinstance(name, str):
name = str(name) if name else parent_dir_name
# Normalize description (handle both string and list)
if isinstance(description, list):
description = ' '.join(str(d) for d in description if d)
elif not isinstance(description, str):
description = str(description) if description else ''
# Special handling for linkai-agent: dynamically load apps from config.json
if name == 'linkai-agent':
description = self._load_linkai_agent_description(skill_dir, description)
if not description or not description.strip():
diagnostics.append(f"Skill {name} has no description: {file_path}")
return LoadSkillsResult(skills=[], diagnostics=diagnostics)
# Parse disable-model-invocation flag
disable_model_invocation = parse_boolean_value(
get_frontmatter_value(frontmatter, 'disable-model-invocation'),
default=False
)
# Create skill object
skill = Skill(
name=name,
description=description,
file_path=file_path,
base_dir=skill_dir,
source=source,
content=content,
disable_model_invocation=disable_model_invocation,
frontmatter=frontmatter,
)
return LoadSkillsResult(skills=[skill], diagnostics=diagnostics)
def _load_linkai_agent_description(self, skill_dir: str, default_description: str) -> str:
"""
Dynamically load LinkAI agent description from config.json
:param skill_dir: Skill directory
:param default_description: Default description from SKILL.md
:return: Dynamic description with app list
"""
import json
config_path = os.path.join(skill_dir, "config.json")
# Without config.json, skip this skill entirely (return empty to trigger exclusion)
if not os.path.exists(config_path):
logger.debug(f"[SkillLoader] linkai-agent skipped: no config.json found")
return ""
try:
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
apps = config.get("apps", [])
if not apps:
return default_description
# Build dynamic description with app details
app_descriptions = "; ".join([
f"{app['app_name']}({app['app_code']}: {app['app_description']})"
for app in apps
])
return f"Call LinkAI apps/workflows. {app_descriptions}"
except Exception as e:
logger.warning(f"[SkillLoader] Failed to load linkai-agent config: {e}")
return default_description
def load_all_skills(
self,
builtin_dir: Optional[str] = None,
custom_dir: Optional[str] = None,
) -> Dict[str, SkillEntry]:
"""
Load skills from builtin and custom directories.
Precedence (lowest to highest):
1. builtin — project root ``skills/``, shipped with the codebase
2. custom — workspace ``skills/``, installed via cloud console or skill creator
Same-name custom skills override builtin ones.
:param builtin_dir: Built-in skills directory
:param custom_dir: Custom skills directory
:return: Dictionary mapping skill name to SkillEntry
"""
skill_map: Dict[str, SkillEntry] = {}
all_diagnostics = []
# Load builtin skills (lower precedence)
if builtin_dir and os.path.exists(builtin_dir):
result = self.load_skills_from_dir(builtin_dir, source='builtin')
all_diagnostics.extend(result.diagnostics)
for skill in result.skills:
entry = self._create_skill_entry(skill)
skill_map[skill.name] = entry
# Load custom skills (higher precedence, overrides builtin)
if custom_dir and os.path.exists(custom_dir):
result = self.load_skills_from_dir(custom_dir, source='custom')
all_diagnostics.extend(result.diagnostics)
for skill in result.skills:
entry = self._create_skill_entry(skill)
skill_map[skill.name] = entry
# Log diagnostics
if all_diagnostics:
logger.debug(f"Skill loading diagnostics: {len(all_diagnostics)} issues")
for diag in all_diagnostics[:5]:
logger.debug(f" - {diag}")
logger.debug(f"Loaded {len(skill_map)} skills total")
return skill_map
def _create_skill_entry(self, skill: Skill) -> SkillEntry:
"""
Create a SkillEntry from a Skill with parsed metadata.
:param skill: The skill to create an entry for
:return: SkillEntry with metadata
"""
metadata = parse_metadata(skill.frontmatter)
# Parse user-invocable flag
user_invocable = parse_boolean_value(
get_frontmatter_value(skill.frontmatter, 'user-invocable'),
default=True
)
return SkillEntry(
skill=skill,
metadata=metadata,
user_invocable=user_invocable,
)
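if __name__ == "__main__":
    # Hedged sketch (not part of the original file): scan a skills directory
    # and print what the loader discovered, plus any diagnostics.
    _loader = SkillLoader()
    _result = _loader.load_skills_from_dir("skills", source="builtin")
    for _skill in _result.skills:
        print(f"{_skill.name}: {_skill.description[:60]}")
    for _diag in _result.diagnostics:
        print("diag:", _diag)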
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/skills/loader.py",
"license": "MIT License",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/skills/manager.py | """
Skill manager for managing skill lifecycle and operations.
"""
import os
import json
from typing import Dict, List, Optional
from pathlib import Path
from common.log import logger
from agent.skills.types import Skill, SkillEntry, SkillSnapshot
from agent.skills.loader import SkillLoader
from agent.skills.formatter import format_skill_entries_for_prompt
SKILLS_CONFIG_FILE = "skills_config.json"
class SkillManager:
"""Manages skills for an agent."""
def __init__(
self,
builtin_dir: Optional[str] = None,
custom_dir: Optional[str] = None,
config: Optional[Dict] = None,
):
"""
Initialize the skill manager.
:param builtin_dir: Built-in skills directory (project root ``skills/``)
:param custom_dir: Custom skills directory (workspace ``skills/``)
:param config: Configuration dictionary
"""
project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self.builtin_dir = builtin_dir or os.path.join(project_root, 'skills')
self.custom_dir = custom_dir or os.path.join(project_root, 'workspace', 'skills')
self.config = config or {}
self._skills_config_path = os.path.join(self.custom_dir, SKILLS_CONFIG_FILE)
# skills_config: full skill metadata keyed by name
# { "web-fetch": {"name": ..., "description": ..., "source": ..., "enabled": true}, ... }
self.skills_config: Dict[str, dict] = {}
self.loader = SkillLoader()
self.skills: Dict[str, SkillEntry] = {}
# Load skills on initialization
self.refresh_skills()
def refresh_skills(self):
"""Reload all skills from builtin and custom directories, then sync config."""
self.skills = self.loader.load_all_skills(
builtin_dir=self.builtin_dir,
custom_dir=self.custom_dir,
)
self._sync_skills_config()
logger.debug(f"SkillManager: Loaded {len(self.skills)} skills")
# ------------------------------------------------------------------
# skills_config.json management
# ------------------------------------------------------------------
def _load_skills_config(self) -> Dict[str, dict]:
"""Load skills_config.json from custom_dir. Returns empty dict if not found."""
if not os.path.exists(self._skills_config_path):
return {}
try:
with open(self._skills_config_path, "r", encoding="utf-8") as f:
data = json.load(f)
if isinstance(data, dict):
return data
except Exception as e:
logger.warning(f"[SkillManager] Failed to load {SKILLS_CONFIG_FILE}: {e}")
return {}
def _save_skills_config(self):
"""Persist skills_config to custom_dir/skills_config.json."""
os.makedirs(self.custom_dir, exist_ok=True)
try:
with open(self._skills_config_path, "w", encoding="utf-8") as f:
json.dump(self.skills_config, f, indent=4, ensure_ascii=False)
except Exception as e:
logger.error(f"[SkillManager] Failed to save {SKILLS_CONFIG_FILE}: {e}")
def _sync_skills_config(self):
"""
Merge directory-scanned skills with the persisted config file.
- New skills discovered on disk are added with enabled=True.
- Skills that no longer exist on disk are removed.
- Existing entries preserve their enabled state; name/description/source
are refreshed from the latest scan.
"""
saved = self._load_skills_config()
merged: Dict[str, dict] = {}
for name, entry in self.skills.items():
skill = entry.skill
prev = saved.get(name, {})
merged[name] = {
"name": name,
"description": skill.description,
"source": skill.source,
"enabled": prev.get("enabled", True),
}
self.skills_config = merged
self._save_skills_config()
def is_skill_enabled(self, name: str) -> bool:
"""
Check if a skill is enabled according to skills_config.
:param name: skill name
:return: True if enabled (default True if not in config)
"""
entry = self.skills_config.get(name)
if entry is None:
return True
return entry.get("enabled", True)
def set_skill_enabled(self, name: str, enabled: bool):
"""
Set a skill's enabled state and persist.
:param name: skill name
:param enabled: True to enable, False to disable
"""
if name not in self.skills_config:
raise ValueError(f"skill '{name}' not found in config")
self.skills_config[name]["enabled"] = enabled
self._save_skills_config()
def get_skills_config(self) -> Dict[str, dict]:
"""
Return the full skills_config dict (for query API).
:return: copy of skills_config
"""
return dict(self.skills_config)
def get_skill(self, name: str) -> Optional[SkillEntry]:
"""
Get a skill by name.
:param name: Skill name
:return: SkillEntry or None if not found
"""
return self.skills.get(name)
def list_skills(self) -> List[SkillEntry]:
"""
Get all loaded skills.
:return: List of all skill entries
"""
return list(self.skills.values())
def filter_skills(
self,
skill_filter: Optional[List[str]] = None,
include_disabled: bool = False,
) -> List[SkillEntry]:
"""
Filter skills based on criteria.
Simple rule: Skills are auto-enabled if requirements are met.
- Has required API keys -> included
- Missing API keys -> excluded
:param skill_filter: List of skill names to include (None = all)
:param include_disabled: Whether to include disabled skills
:return: Filtered list of skill entries
"""
from agent.skills.config import should_include_skill
entries = list(self.skills.values())
# Check requirements (platform, binaries, env vars)
entries = [e for e in entries if should_include_skill(e, self.config)]
# Apply skill filter
if skill_filter is not None:
normalized = []
for item in skill_filter:
if isinstance(item, str):
name = item.strip()
if name:
normalized.append(name)
elif isinstance(item, list):
for subitem in item:
if isinstance(subitem, str):
name = subitem.strip()
if name:
normalized.append(name)
if normalized:
entries = [e for e in entries if e.skill.name in normalized]
# Filter out disabled skills based on skills_config.json
if not include_disabled:
entries = [e for e in entries if self.is_skill_enabled(e.skill.name)]
return entries
def build_skills_prompt(
self,
skill_filter: Optional[List[str]] = None,
) -> str:
"""
Build a formatted prompt containing available skills.
:param skill_filter: Optional list of skill names to include
:return: Formatted skills prompt
"""
from common.log import logger
entries = self.filter_skills(skill_filter=skill_filter, include_disabled=False)
logger.debug(f"[SkillManager] Filtered {len(entries)} skills for prompt (total: {len(self.skills)})")
if entries:
skill_names = [e.skill.name for e in entries]
logger.debug(f"[SkillManager] Skills to include: {skill_names}")
result = format_skill_entries_for_prompt(entries)
logger.debug(f"[SkillManager] Generated prompt length: {len(result)}")
return result
def build_skill_snapshot(
self,
skill_filter: Optional[List[str]] = None,
version: Optional[int] = None,
) -> SkillSnapshot:
"""
Build a snapshot of skills for a specific run.
:param skill_filter: Optional list of skill names to include
:param version: Optional version number for the snapshot
:return: SkillSnapshot
"""
entries = self.filter_skills(skill_filter=skill_filter, include_disabled=False)
prompt = format_skill_entries_for_prompt(entries)
skills_info = []
resolved_skills = []
for entry in entries:
skills_info.append({
'name': entry.skill.name,
'primary_env': entry.metadata.primary_env if entry.metadata else None,
})
resolved_skills.append(entry.skill)
return SkillSnapshot(
prompt=prompt,
skills=skills_info,
resolved_skills=resolved_skills,
version=version,
)
def sync_skills_to_workspace(self, target_workspace_dir: str):
"""
Sync all loaded skills to a target workspace directory.
This is useful for sandbox environments where skills need to be copied.
:param target_workspace_dir: Target workspace directory
"""
import shutil
target_skills_dir = os.path.join(target_workspace_dir, 'skills')
# Remove existing skills directory
if os.path.exists(target_skills_dir):
shutil.rmtree(target_skills_dir)
# Create new skills directory
os.makedirs(target_skills_dir, exist_ok=True)
# Copy each skill
for entry in self.skills.values():
skill_name = entry.skill.name
source_dir = entry.skill.base_dir
target_dir = os.path.join(target_skills_dir, skill_name)
try:
shutil.copytree(source_dir, target_dir)
logger.debug(f"Synced skill '{skill_name}' to {target_dir}")
except Exception as e:
logger.warning(f"Failed to sync skill '{skill_name}': {e}")
logger.info(f"Synced {len(self.skills)} skills to {target_skills_dir}")
def get_skill_by_key(self, skill_key: str) -> Optional[SkillEntry]:
"""
Get a skill by its skill key (which may differ from name).
:param skill_key: Skill key to look up
:return: SkillEntry or None
"""
for entry in self.skills.values():
if entry.metadata and entry.metadata.skill_key == skill_key:
return entry
if entry.skill.name == skill_key:
return entry
return None
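if __name__ == "__main__":
    # Hedged sketch (not part of the original file): build the skills prompt
    # an agent run would receive, using whatever skills exist on disk.
    _mgr = SkillManager()
    print(f"loaded {len(_mgr.list_skills())} skills")
    print(_mgr.build_skills_prompt() or "(no skills enabled)")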
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/skills/manager.py",
"license": "MIT License",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/skills/types.py | """
Type definitions for skills system.
"""
from __future__ import annotations
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, field
@dataclass
class SkillInstallSpec:
"""Specification for installing skill dependencies."""
kind: str # brew, pip, npm, download, etc.
id: Optional[str] = None
label: Optional[str] = None
bins: List[str] = field(default_factory=list)
os: List[str] = field(default_factory=list)
formula: Optional[str] = None # for brew
package: Optional[str] = None # for pip/npm
module: Optional[str] = None
url: Optional[str] = None # for download
archive: Optional[str] = None
extract: bool = False
strip_components: Optional[int] = None
target_dir: Optional[str] = None
@dataclass
class SkillMetadata:
"""Metadata for a skill from frontmatter."""
always: bool = False # Always include this skill
skill_key: Optional[str] = None # Override skill key
primary_env: Optional[str] = None # Primary environment variable
emoji: Optional[str] = None
homepage: Optional[str] = None
os: List[str] = field(default_factory=list) # Supported OS platforms
requires: Dict[str, List[str]] = field(default_factory=dict) # Requirements
install: List[SkillInstallSpec] = field(default_factory=list)
@dataclass
class Skill:
"""Represents a skill loaded from a markdown file."""
name: str
description: str
file_path: str
base_dir: str
source: str # builtin or custom
content: str # Full markdown content
disable_model_invocation: bool = False
frontmatter: Dict[str, Any] = field(default_factory=dict)
@dataclass
class SkillEntry:
"""A skill with parsed metadata."""
skill: Skill
metadata: Optional[SkillMetadata] = None
user_invocable: bool = True # Can users invoke this skill directly
@dataclass
class LoadSkillsResult:
"""Result of loading skills from a directory."""
skills: List[Skill]
diagnostics: List[str] = field(default_factory=list)
@dataclass
class SkillSnapshot:
"""Snapshot of skills for a specific run."""
prompt: str # Formatted prompt text
skills: List[Dict[str, str]] # List of skill info (name, primary_env)
resolved_skills: List[Skill] = field(default_factory=list)
version: Optional[int] = None
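if __name__ == "__main__":
    # Hedged sketch (not part of the original file): the dataclasses compose
    # like this when a loader produces an entry. All values are illustrative.
    _skill = Skill(
        name="demo",
        description="A demo skill",
        file_path="skills/demo/SKILL.md",
        base_dir="skills/demo",
        source="builtin",
        content="",
    )
    _entry = SkillEntry(skill=_skill, metadata=SkillMetadata(always=True))
    print(_entry.skill.name, _entry.metadata.always, _entry.user_invocable)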
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/skills/types.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/tools/base_tool.py | from enum import Enum
from typing import Any, Optional
from common.log import logger
import copy
class ToolStage(Enum):
"""Enum representing tool decision stages"""
PRE_PROCESS = "pre_process" # Tools that need to be actively selected by the agent
POST_PROCESS = "post_process" # Tools that automatically execute after final_answer
class ToolResult:
"""Tool execution result"""
def __init__(self, status: str = None, result: Any = None, ext_data: Any = None):
self.status = status
self.result = result
self.ext_data = ext_data
@staticmethod
def success(result, ext_data: Any = None):
return ToolResult(status="success", result=result, ext_data=ext_data)
@staticmethod
def fail(result, ext_data: Any = None):
return ToolResult(status="error", result=result, ext_data=ext_data)
class BaseTool:
"""Base class for all tools."""
# Default decision stage is pre-process
stage = ToolStage.PRE_PROCESS
# Class attributes must be inherited
name: str = "base_tool"
description: str = "Base tool"
params: dict = {} # Store JSON Schema
model: Optional[Any] = None # LLM model instance, type depends on bot implementation
@classmethod
def get_json_schema(cls) -> dict:
"""Get the standard description of the tool"""
return {
"name": cls.name,
"description": cls.description,
"parameters": cls.params
}
    def execute_tool(self, params: dict) -> ToolResult:
        try:
            return self.execute(params)
        except Exception as e:
            logger.error(e)
            # Return a failed result instead of None so callers always get a ToolResult
            return ToolResult.fail(str(e))
def execute(self, params: dict) -> ToolResult:
"""Specific logic to be implemented by subclasses"""
raise NotImplementedError
@classmethod
def _parse_schema(cls) -> dict:
"""Convert JSON Schema to Pydantic fields"""
fields = {}
for name, prop in cls.params["properties"].items():
# Convert JSON Schema types to Python types
type_map = {
"string": str,
"number": float,
"integer": int,
"boolean": bool,
"array": list,
"object": dict
}
fields[name] = (
type_map[prop["type"]],
prop.get("default", ...)
)
return fields
def should_auto_execute(self, context) -> bool:
"""
Determine if this tool should be automatically executed based on context.
:param context: The agent context
:return: True if the tool should be executed, False otherwise
"""
# Only tools in post-process stage will be automatically executed
return self.stage == ToolStage.POST_PROCESS
def close(self):
"""
Close any resources used by the tool.
This method should be overridden by tools that need to clean up resources
such as browser connections, file handles, etc.
By default, this method does nothing.
"""
pass
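if __name__ == "__main__":
    # Hedged sketch (not part of the original file): the minimal subclass
    # contract. EchoTool is a made-up example tool.
    class EchoTool(BaseTool):
        name = "echo"
        description = "Echo back the input text"
        params = {
            "type": "object",
            "properties": {"text": {"type": "string", "description": "Text to echo"}},
            "required": ["text"],
        }

        def execute(self, params: dict) -> ToolResult:
            return ToolResult.success(params.get("text", ""))

    _tool = EchoTool()
    print(_tool.get_json_schema())
    print(_tool.execute_tool({"text": "hello"}).result)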
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/base_tool.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/tools/bash/bash.py | """
Bash tool - Execute bash commands
"""
import os
import re
import sys
import subprocess
import tempfile
from typing import Dict, Any
from agent.tools.base_tool import BaseTool, ToolResult
from agent.tools.utils.truncate import truncate_tail, format_size, DEFAULT_MAX_LINES, DEFAULT_MAX_BYTES
from common.log import logger
from common.utils import expand_path
class Bash(BaseTool):
"""Tool for executing bash commands"""
name: str = "bash"
description: str = f"""Execute a bash command in the current working directory. Returns stdout and stderr. Output is truncated to last {DEFAULT_MAX_LINES} lines or {DEFAULT_MAX_BYTES // 1024}KB (whichever is hit first). If truncated, full output is saved to a temp file.
ENVIRONMENT: All API keys from env_config are auto-injected. Use $VAR_NAME directly.
SAFETY:
- Freely create/modify/delete files within the workspace
- For destructive and out-of-workspace commands, explain and confirm first"""
params: dict = {
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "Bash command to execute"
},
"timeout": {
"type": "integer",
"description": "Timeout in seconds (optional, default: 30)"
}
},
"required": ["command"]
}
def __init__(self, config: dict = None):
self.config = config or {}
self.cwd = self.config.get("cwd", os.getcwd())
# Ensure working directory exists
if not os.path.exists(self.cwd):
os.makedirs(self.cwd, exist_ok=True)
self.default_timeout = self.config.get("timeout", 30)
# Enable safety mode by default (can be disabled in config)
self.safety_mode = self.config.get("safety_mode", True)
def execute(self, args: Dict[str, Any]) -> ToolResult:
"""
Execute a bash command
:param args: Dictionary containing the command and optional timeout
:return: Command output or error
"""
command = args.get("command", "").strip()
timeout = args.get("timeout", self.default_timeout)
if not command:
return ToolResult.fail("Error: command parameter is required")
# Security check: Prevent accessing sensitive config files
if "~/.cow/.env" in command or "~/.cow" in command:
return ToolResult.fail(
"Error: Access denied. API keys and credentials must be accessed through the env_config tool only."
)
# Optional safety check - only warn about extremely dangerous commands
if self.safety_mode:
warning = self._get_safety_warning(command)
if warning:
return ToolResult.fail(
f"Safety Warning: {warning}\n\nIf you believe this command is safe and necessary, please ask the user for confirmation first, explaining what the command does and why it's needed.")
try:
# Prepare environment with .env file variables
env = os.environ.copy()
# Load environment variables from ~/.cow/.env if it exists
env_file = expand_path("~/.cow/.env")
dotenv_vars = {}
if os.path.exists(env_file):
try:
from dotenv import dotenv_values
dotenv_vars = dotenv_values(env_file)
env.update(dotenv_vars)
logger.debug(f"[Bash] Loaded {len(dotenv_vars)} variables from {env_file}")
except ImportError:
logger.debug("[Bash] python-dotenv not installed, skipping .env loading")
except Exception as e:
logger.debug(f"[Bash] Failed to load .env: {e}")
# getuid() only exists on Unix-like systems
if hasattr(os, 'getuid'):
logger.debug(f"[Bash] Process UID: {os.getuid()}")
else:
logger.debug(f"[Bash] Process User: {os.environ.get('USERNAME', os.environ.get('USER', 'unknown'))}")
# On Windows, convert $VAR references to %VAR% for cmd.exe
if sys.platform == "win32":
env["PYTHONIOENCODING"] = "utf-8"
command = self._convert_env_vars_for_windows(command, dotenv_vars)
if command and not command.strip().lower().startswith("chcp"):
command = f"chcp 65001 >nul 2>&1 && {command}"
# Execute command with inherited environment variables
result = subprocess.run(
command,
shell=True,
cwd=self.cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
encoding="utf-8",
errors="replace",
timeout=timeout,
env=env
)
logger.debug(f"[Bash] Exit code: {result.returncode}")
logger.debug(f"[Bash] Stdout length: {len(result.stdout)}")
logger.debug(f"[Bash] Stderr length: {len(result.stderr)}")
# Workaround for exit code 126 with no output
if result.returncode == 126 and not result.stdout and not result.stderr:
logger.warning(f"[Bash] Exit 126 with no output - trying alternative execution method")
# Try using argument list instead of shell=True
import shlex
try:
parts = shlex.split(command)
if len(parts) > 0:
logger.info(f"[Bash] Retrying with argument list: {parts[:3]}...")
retry_result = subprocess.run(
parts,
cwd=self.cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
encoding="utf-8",
errors="replace",
timeout=timeout,
env=env
)
logger.debug(f"[Bash] Retry exit code: {retry_result.returncode}, stdout: {len(retry_result.stdout)}, stderr: {len(retry_result.stderr)}")
# If retry succeeded, use retry result
if retry_result.returncode == 0 or retry_result.stdout or retry_result.stderr:
result = retry_result
else:
# Both attempts failed - check if this is openai-image-vision skill
if 'openai-image-vision' in command or 'vision.sh' in command:
# Create a mock result with helpful error message
from types import SimpleNamespace
result = SimpleNamespace(
returncode=1,
                                    stdout='{"error": "Image could not be parsed", "reason": "The image format may be unsupported, or the image file may be corrupted", "suggestion": "Please try a different image"}',
stderr=''
)
logger.info(f"[Bash] Converted exit 126 to user-friendly image error message for vision skill")
except Exception as retry_err:
logger.warning(f"[Bash] Retry failed: {retry_err}")
# Combine stdout and stderr
output = result.stdout
if result.stderr:
output += "\n" + result.stderr
# Check if we need to save full output to temp file
temp_file_path = None
total_bytes = len(output.encode('utf-8'))
if total_bytes > DEFAULT_MAX_BYTES:
# Save full output to temp file
with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.log', prefix='bash-') as f:
f.write(output)
temp_file_path = f.name
# Apply tail truncation
truncation = truncate_tail(output)
output_text = truncation.content or "(no output)"
# Build result
details = {}
if truncation.truncated:
details["truncation"] = truncation.to_dict()
if temp_file_path:
details["full_output_path"] = temp_file_path
# Build notice
start_line = truncation.total_lines - truncation.output_lines + 1
end_line = truncation.total_lines
if truncation.last_line_partial:
# Edge case: last line alone > 30KB
last_line = output.split('\n')[-1] if output else ""
last_line_size = format_size(len(last_line.encode('utf-8')))
output_text += f"\n\n[Showing last {format_size(truncation.output_bytes)} of line {end_line} (line is {last_line_size}). Full output: {temp_file_path}]"
elif truncation.truncated_by == "lines":
output_text += f"\n\n[Showing lines {start_line}-{end_line} of {truncation.total_lines}. Full output: {temp_file_path}]"
else:
output_text += f"\n\n[Showing lines {start_line}-{end_line} of {truncation.total_lines} ({format_size(DEFAULT_MAX_BYTES)} limit). Full output: {temp_file_path}]"
# Check exit code
if result.returncode != 0:
output_text += f"\n\nCommand exited with code {result.returncode}"
return ToolResult.fail({
"output": output_text,
"exit_code": result.returncode,
"details": details if details else None
})
return ToolResult.success({
"output": output_text,
"exit_code": result.returncode,
"details": details if details else None
})
except subprocess.TimeoutExpired:
return ToolResult.fail(f"Error: Command timed out after {timeout} seconds")
except Exception as e:
return ToolResult.fail(f"Error executing command: {str(e)}")
def _get_safety_warning(self, command: str) -> str:
"""
Get safety warning for potentially dangerous commands
Only warns about extremely dangerous system-level operations
:param command: Command to check
:return: Warning message if dangerous, empty string if safe
"""
cmd_lower = command.lower().strip()
# Only block extremely dangerous system operations
dangerous_patterns = [
# System shutdown/reboot
("shutdown", "This command will shut down the system"),
("reboot", "This command will reboot the system"),
("halt", "This command will halt the system"),
("poweroff", "This command will power off the system"),
# Critical system modifications
("rm -rf /", "This command will delete the entire filesystem"),
("rm -rf /*", "This command will delete the entire filesystem"),
("dd if=/dev/zero", "This command can destroy disk data"),
("mkfs", "This command will format a filesystem, destroying all data"),
("fdisk", "This command modifies disk partitions"),
# User/system management (only if targeting system users)
("userdel root", "This command will delete the root user"),
("passwd root", "This command will change the root password"),
]
for pattern, warning in dangerous_patterns:
if pattern in cmd_lower:
return warning
# Check for recursive deletion outside workspace
if "rm" in cmd_lower and "-rf" in cmd_lower:
# Allow deletion within current workspace
if not any(path in cmd_lower for path in ["./", self.cwd.lower()]):
# Check if targeting system directories
system_dirs = ["/bin", "/usr", "/etc", "/var", "/home", "/root", "/sys", "/proc"]
if any(sysdir in cmd_lower for sysdir in system_dirs):
return "This command will recursively delete system directories"
return "" # No warning needed
@staticmethod
def _convert_env_vars_for_windows(command: str, dotenv_vars: dict) -> str:
"""
Convert bash-style $VAR / ${VAR} references to cmd.exe %VAR% syntax.
Only converts variables loaded from .env (user-configured API keys etc.)
to avoid breaking $PATH, jq expressions, regex, etc.
"""
if not dotenv_vars:
return command
def replace_match(m):
var_name = m.group(1) or m.group(2)
if var_name in dotenv_vars:
return f"%{var_name}%"
return m.group(0)
return re.sub(r'\$\{(\w+)\}|\$(\w+)', replace_match, command)
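if __name__ == "__main__":
    # Hedged sketch (not part of the original file): run a harmless command
    # and inspect the structured result the tool returns.
    _bash = Bash(config={"timeout": 10})
    _res = _bash.execute({"command": "echo hello"})
    print(_res.status, _res.result["output"])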
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/bash/bash.py",
"license": "MIT License",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/browser_tool.py | def copy(self):
"""
Special copy method for browser tool to avoid recreating browser instance.
:return: A new instance with shared browser reference but unique model
"""
new_tool = self.__class__()
# Copy essential attributes
new_tool.model = self.model
new_tool.context = getattr(self, 'context', None)
new_tool.config = getattr(self, 'config', None)
# Share the browser instance instead of creating a new one
if hasattr(self, 'browser'):
new_tool.browser = self.browser
return new_tool | {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/browser_tool.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/tools/edit/edit.py | """
Edit tool - Precise file editing
Edit files through exact text replacement
"""
import os
from typing import Dict, Any
from agent.tools.base_tool import BaseTool, ToolResult
from common.utils import expand_path
from agent.tools.utils.diff import (
strip_bom,
detect_line_ending,
normalize_to_lf,
restore_line_endings,
normalize_for_fuzzy_match,
fuzzy_find_text,
generate_diff_string
)
class Edit(BaseTool):
"""Tool for precise file editing"""
name: str = "edit"
description: str = "Edit a file by replacing exact text, or append to end if oldText is empty. For append: use empty oldText. For replace: oldText must match exactly (including whitespace)."
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file to edit (relative or absolute)"
},
"oldText": {
"type": "string",
"description": "Text to find and replace. Use empty string to append to end of file. For replacement: must match exactly including whitespace."
},
"newText": {
"type": "string",
"description": "New text to replace the old text with"
}
},
"required": ["path", "oldText", "newText"]
}
def __init__(self, config: dict = None):
self.config = config or {}
self.cwd = self.config.get("cwd", os.getcwd())
self.memory_manager = self.config.get("memory_manager", None)
def execute(self, args: Dict[str, Any]) -> ToolResult:
"""
Execute file edit operation
:param args: Contains file path, old text and new text
:return: Operation result
"""
path = args.get("path", "").strip()
old_text = args.get("oldText", "")
new_text = args.get("newText", "")
if not path:
return ToolResult.fail("Error: path parameter is required")
# Resolve path
absolute_path = self._resolve_path(path)
# Check if file exists
if not os.path.exists(absolute_path):
return ToolResult.fail(f"Error: File not found: {path}")
# Check if readable/writable
if not os.access(absolute_path, os.R_OK | os.W_OK):
return ToolResult.fail(f"Error: File is not readable/writable: {path}")
try:
# Read file
with open(absolute_path, 'r', encoding='utf-8') as f:
raw_content = f.read()
# Remove BOM (LLM won't include invisible BOM in oldText)
bom, content = strip_bom(raw_content)
# Detect original line ending
original_ending = detect_line_ending(content)
# Normalize to LF
normalized_content = normalize_to_lf(content)
normalized_old_text = normalize_to_lf(old_text)
normalized_new_text = normalize_to_lf(new_text)
# Special case: empty oldText means append to end of file
if not old_text or not old_text.strip():
# Append mode: add newText to the end
# Add newline before newText if file doesn't end with one
if normalized_content and not normalized_content.endswith('\n'):
new_content = normalized_content + '\n' + normalized_new_text
else:
new_content = normalized_content + normalized_new_text
base_content = normalized_content # For verification
else:
# Normal edit mode: find and replace
# Use fuzzy matching to find old text (try exact match first, then fuzzy match)
match_result = fuzzy_find_text(normalized_content, normalized_old_text)
if not match_result.found:
return ToolResult.fail(
f"Error: Could not find the exact text in {path}. "
"The old text must match exactly including all whitespace and newlines."
)
# Calculate occurrence count (use fuzzy normalized content for consistency)
fuzzy_content = normalize_for_fuzzy_match(normalized_content)
fuzzy_old_text = normalize_for_fuzzy_match(normalized_old_text)
occurrences = fuzzy_content.count(fuzzy_old_text)
if occurrences > 1:
return ToolResult.fail(
f"Error: Found {occurrences} occurrences of the text in {path}. "
"The text must be unique. Please provide more context to make it unique."
)
# Execute replacement (use matched text position)
base_content = match_result.content_for_replacement
new_content = (
base_content[:match_result.index] +
normalized_new_text +
base_content[match_result.index + match_result.match_length:]
)
# Verify replacement actually changed content
if base_content == new_content:
return ToolResult.fail(
f"Error: No changes made to {path}. "
"The replacement produced identical content. "
"This might indicate an issue with special characters or the text not existing as expected."
)
# Restore original line endings
final_content = bom + restore_line_endings(new_content, original_ending)
# Write file
with open(absolute_path, 'w', encoding='utf-8') as f:
f.write(final_content)
# Generate diff
diff_result = generate_diff_string(base_content, new_content)
result = {
"message": f"Successfully replaced text in {path}",
"path": path,
"diff": diff_result['diff'],
"first_changed_line": diff_result['first_changed_line']
}
# Notify memory manager if file is in memory directory
if self.memory_manager and "memory/" in path:
try:
self.memory_manager.mark_dirty()
                except Exception:
                    # Don't fail the edit if memory notification fails
                    pass
return ToolResult.success(result)
except UnicodeDecodeError:
return ToolResult.fail(f"Error: File is not a valid text file (encoding error): {path}")
except PermissionError:
return ToolResult.fail(f"Error: Permission denied accessing {path}")
except Exception as e:
return ToolResult.fail(f"Error editing file: {str(e)}")
def _resolve_path(self, path: str) -> str:
"""
Resolve path to absolute path
:param path: Relative or absolute path
:return: Absolute path
"""
# Expand ~ to user home directory
path = expand_path(path)
if os.path.isabs(path):
return path
return os.path.abspath(os.path.join(self.cwd, path))
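if __name__ == "__main__":
    # Hedged sketch (not part of the original file): append to a temp file,
    # then replace the appended text, printing the result dict each time.
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as _f:
        _f.write("line one\n")
        _path = _f.name
    _edit = Edit()
    # Empty oldText triggers append mode
    print(_edit.execute({"path": _path, "oldText": "", "newText": "line two"}).result)
    # Exact-match replacement of the appended text
    print(_edit.execute({"path": _path, "oldText": "line two", "newText": "line 2"}).result)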
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/edit/edit.py",
"license": "MIT License",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/ls/ls.py | """
Ls tool - List directory contents
"""
import os
from typing import Dict, Any
from agent.tools.base_tool import BaseTool, ToolResult
from agent.tools.utils.truncate import truncate_head, format_size, DEFAULT_MAX_BYTES
from common.utils import expand_path
DEFAULT_LIMIT = 500
class Ls(BaseTool):
"""Tool for listing directory contents"""
name: str = "ls"
description: str = f"List directory contents. Returns entries sorted alphabetically, with '/' suffix for directories. Includes dotfiles. Output is truncated to {DEFAULT_LIMIT} entries or {DEFAULT_MAX_BYTES // 1024}KB (whichever is hit first)."
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Directory to list. IMPORTANT: Relative paths are based on workspace directory. To access directories outside workspace, use absolute paths starting with ~ or /."
},
"limit": {
"type": "integer",
"description": f"Maximum number of entries to return (default: {DEFAULT_LIMIT})"
}
},
"required": []
}
def __init__(self, config: dict = None):
self.config = config or {}
self.cwd = self.config.get("cwd", os.getcwd())
def execute(self, args: Dict[str, Any]) -> ToolResult:
"""
Execute directory listing
:param args: Listing parameters
:return: Directory contents or error
"""
path = args.get("path", ".").strip()
limit = args.get("limit", DEFAULT_LIMIT)
# Resolve path
absolute_path = self._resolve_path(path)
# Security check: Prevent accessing sensitive config directory
env_config_dir = expand_path("~/.cow")
if os.path.abspath(absolute_path) == os.path.abspath(env_config_dir):
return ToolResult.fail(
"Error: Access denied. API keys and credentials must be accessed through the env_config tool only."
)
if not os.path.exists(absolute_path):
# Provide helpful hint if using relative path
if not os.path.isabs(path) and not path.startswith('~'):
return ToolResult.fail(
f"Error: Path not found: {path}\n"
f"Resolved to: {absolute_path}\n"
f"Hint: Relative paths are based on workspace ({self.cwd}). For files outside workspace, use absolute paths."
)
return ToolResult.fail(f"Error: Path not found: {path}")
if not os.path.isdir(absolute_path):
return ToolResult.fail(f"Error: Not a directory: {path}")
try:
# Read directory entries
entries = os.listdir(absolute_path)
# Sort alphabetically (case-insensitive)
entries.sort(key=lambda x: x.lower())
# Format entries with directory indicators
results = []
entry_limit_reached = False
for entry in entries:
if len(results) >= limit:
entry_limit_reached = True
break
full_path = os.path.join(absolute_path, entry)
try:
if os.path.isdir(full_path):
results.append(entry + '/')
else:
results.append(entry)
except Exception:
# Skip entries we can't stat
continue
if not results:
return ToolResult.success({"message": "(empty directory)", "entries": []})
# Format output
raw_output = '\n'.join(results)
truncation = truncate_head(raw_output, max_lines=999999) # Only limit by bytes
output = truncation.content
details = {}
notices = []
if entry_limit_reached:
notices.append(f"{limit} entries limit reached. Use limit={limit * 2} for more")
details["entry_limit_reached"] = limit
if truncation.truncated:
notices.append(f"{format_size(DEFAULT_MAX_BYTES)} limit reached")
details["truncation"] = truncation.to_dict()
if notices:
output += f"\n\n[{'. '.join(notices)}]"
return ToolResult.success({
"output": output,
"entry_count": len(results),
"details": details if details else None
})
except PermissionError:
return ToolResult.fail(f"Error: Permission denied reading directory: {path}")
except Exception as e:
return ToolResult.fail(f"Error listing directory: {str(e)}")
def _resolve_path(self, path: str) -> str:
"""Resolve path to absolute path"""
# Expand ~ to user home directory
path = expand_path(path)
if os.path.isabs(path):
return path
return os.path.abspath(os.path.join(self.cwd, path))
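if __name__ == "__main__":
    # Hedged sketch (not part of the original file): list the workspace root.
    _ls = Ls()
    _res = _ls.execute({"path": ".", "limit": 20})
    if _res.status == "success":
        print(_res.result.get("output") or _res.result.get("message"))
    else:
        print(_res.result)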
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/ls/ls.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/memory/memory_get.py | """
Memory get tool
Allows agents to read specific sections from memory files
"""
from agent.tools.base_tool import BaseTool
class MemoryGetTool(BaseTool):
"""Tool for reading memory file contents"""
name: str = "memory_get"
description: str = (
"Read specific content from memory files. "
"Use this to get full context from a memory file or specific line range."
)
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Relative path to the memory file (e.g. 'MEMORY.md', 'memory/2026-01-01.md')"
},
"start_line": {
"type": "integer",
"description": "Starting line number (optional, default: 1)",
"default": 1
},
"num_lines": {
"type": "integer",
"description": "Number of lines to read (optional, reads all if not specified)"
}
},
"required": ["path"]
}
def __init__(self, memory_manager):
"""
Initialize memory get tool
Args:
memory_manager: MemoryManager instance
"""
super().__init__()
self.memory_manager = memory_manager
def execute(self, args: dict):
"""
Execute memory file read
Args:
args: Dictionary with path, start_line, num_lines
Returns:
ToolResult with file content
"""
from agent.tools.base_tool import ToolResult
path = args.get("path")
start_line = args.get("start_line", 1)
num_lines = args.get("num_lines")
if not path:
return ToolResult.fail("Error: path parameter is required")
try:
workspace_dir = self.memory_manager.config.get_workspace()
# Auto-prepend memory/ if not present and not absolute path
# Exception: MEMORY.md is in the root directory
if not path.startswith('memory/') and not path.startswith('/') and path != 'MEMORY.md':
path = f'memory/{path}'
file_path = workspace_dir / path
if not file_path.exists():
return ToolResult.fail(f"Error: File not found: {path}")
content = file_path.read_text(encoding='utf-8')
lines = content.split('\n')
# Handle line range
if start_line < 1:
start_line = 1
start_idx = start_line - 1
if num_lines:
end_idx = start_idx + num_lines
selected_lines = lines[start_idx:end_idx]
else:
selected_lines = lines[start_idx:]
result = '\n'.join(selected_lines)
# Add metadata
total_lines = len(lines)
shown_lines = len(selected_lines)
output = [
f"File: {path}",
f"Lines: {start_line}-{start_line + shown_lines - 1} (total: {total_lines})",
"",
result
]
return ToolResult.success('\n'.join(output))
except Exception as e:
return ToolResult.fail(f"Error reading memory file: {str(e)}")
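# Hedged usage sketch (not part of the original file): the tool wraps a
# MemoryManager instance and takes plain dict args, e.g.
#
#   tool = MemoryGetTool(memory_manager)
#   result = tool.execute({"path": "MEMORY.md", "start_line": 1, "num_lines": 20})
#   print(result.status, result.result)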
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/memory/memory_get.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/tools/memory/memory_search.py | """
Memory search tool
Allows agents to search their memory using semantic and keyword search
"""
from typing import Dict, Any, Optional
from agent.tools.base_tool import BaseTool
class MemorySearchTool(BaseTool):
"""Tool for searching agent memory"""
name: str = "memory_search"
description: str = (
"Search agent's long-term memory using semantic and keyword search. "
"Use this to recall past conversations, preferences, and knowledge."
)
params: dict = {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query (can be natural language question or keywords)"
},
"max_results": {
"type": "integer",
"description": "Maximum number of results to return (default: 10)",
"default": 10
},
"min_score": {
"type": "number",
"description": "Minimum relevance score (0-1, default: 0.1)",
"default": 0.1
}
},
"required": ["query"]
}
def __init__(self, memory_manager, user_id: Optional[str] = None):
"""
Initialize memory search tool
Args:
memory_manager: MemoryManager instance
user_id: Optional user ID for scoped search
"""
super().__init__()
self.memory_manager = memory_manager
self.user_id = user_id
def execute(self, args: dict):
"""
Execute memory search
Args:
args: Dictionary with query, max_results, min_score
Returns:
ToolResult with formatted search results
"""
from agent.tools.base_tool import ToolResult
import asyncio
query = args.get("query")
max_results = args.get("max_results", 10)
min_score = args.get("min_score", 0.1)
if not query:
return ToolResult.fail("Error: query parameter is required")
try:
# Run async search in sync context
results = asyncio.run(self.memory_manager.search(
query=query,
user_id=self.user_id,
max_results=max_results,
min_score=min_score,
include_shared=True
))
if not results:
# Return clear message that no memories exist yet
# This prevents infinite retry loops
return ToolResult.success(
f"No memories found for '{query}'. "
f"This is normal if no memories have been stored yet. "
f"You can store new memories by writing to MEMORY.md or memory/YYYY-MM-DD.md files."
)
# Format results
output = [f"Found {len(results)} relevant memories:\n"]
for i, result in enumerate(results, 1):
output.append(f"\n{i}. {result.path} (lines {result.start_line}-{result.end_line})")
output.append(f" Score: {result.score:.3f}")
output.append(f" Snippet: {result.snippet}")
return ToolResult.success("\n".join(output))
except Exception as e:
return ToolResult.fail(f"Error searching memory: {str(e)}")
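# Hedged usage sketch (not part of the original file): construct with a
# MemoryManager and search with natural-language queries, e.g.
#
#   tool = MemorySearchTool(memory_manager, user_id="user-1")
#   result = tool.execute({"query": "user's favorite color", "max_results": 5})
#   print(result.status, result.result)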
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/memory/memory_search.py",
"license": "MIT License",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zhayujie/chatgpt-on-wechat:agent/tools/read/read.py | """
Read tool - Read file contents
Supports text files, images (jpg, png, gif, webp), and PDF files
"""
import os
from typing import Dict, Any
from pathlib import Path
from agent.tools.base_tool import BaseTool, ToolResult
from agent.tools.utils.truncate import truncate_head, format_size, DEFAULT_MAX_LINES, DEFAULT_MAX_BYTES
from common.utils import expand_path
class Read(BaseTool):
"""Tool for reading file contents"""
name: str = "read"
description: str = f"Read or inspect file contents. For text/PDF files, returns content (truncated to {DEFAULT_MAX_LINES} lines or {DEFAULT_MAX_BYTES // 1024}KB). For images/videos/audio, returns metadata only (file info, size, type). Use offset/limit for large text files."
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file to read. IMPORTANT: Relative paths are based on workspace directory. To access files outside workspace, use absolute paths starting with ~ or /."
},
"offset": {
"type": "integer",
"description": "Line number to start reading from (1-indexed, optional). Use negative values to read from end (e.g. -20 for last 20 lines)"
},
"limit": {
"type": "integer",
"description": "Maximum number of lines to read (optional)"
}
},
"required": ["path"]
}
def __init__(self, config: dict = None):
self.config = config or {}
self.cwd = self.config.get("cwd", os.getcwd())
# File type categories
self.image_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.svg', '.ico'}
self.video_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v'}
self.audio_extensions = {'.mp3', '.wav', '.ogg', '.m4a', '.flac', '.aac', '.wma'}
self.binary_extensions = {'.exe', '.dll', '.so', '.dylib', '.bin', '.dat', '.db', '.sqlite'}
self.archive_extensions = {'.zip', '.tar', '.gz', '.rar', '.7z', '.bz2', '.xz'}
self.pdf_extensions = {'.pdf'}
# Readable text formats (will be read with truncation)
self.text_extensions = {
'.txt', '.md', '.markdown', '.rst', '.log', '.csv', '.tsv', '.json', '.xml', '.yaml', '.yml',
'.py', '.js', '.ts', '.java', '.c', '.cpp', '.h', '.hpp', '.go', '.rs', '.rb', '.php',
'.html', '.css', '.scss', '.sass', '.less', '.vue', '.jsx', '.tsx',
'.sh', '.bash', '.zsh', '.fish', '.ps1', '.bat', '.cmd',
'.sql', '.r', '.m', '.swift', '.kt', '.scala', '.clj', '.erl', '.ex',
'.dockerfile', '.makefile', '.cmake', '.gradle', '.properties', '.ini', '.conf', '.cfg',
'.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx' # Office documents
}
def execute(self, args: Dict[str, Any]) -> ToolResult:
"""
Execute file read operation
:param args: Contains file path and optional offset/limit parameters
:return: File content or error message
"""
# Support 'location' as alias for 'path' (LLM may use it from skill listing)
path = args.get("path", "") or args.get("location", "")
path = path.strip() if isinstance(path, str) else ""
offset = args.get("offset")
limit = args.get("limit")
if not path:
return ToolResult.fail("Error: path parameter is required")
# Resolve path
absolute_path = self._resolve_path(path)
# Security check: Prevent reading sensitive config files
env_config_path = expand_path("~/.cow/.env")
if os.path.abspath(absolute_path) == os.path.abspath(env_config_path):
return ToolResult.fail(
"Error: Access denied. API keys and credentials must be accessed through the env_config tool only."
)
# Check if file exists
if not os.path.exists(absolute_path):
# Provide helpful hint if using relative path
if not os.path.isabs(path) and not path.startswith('~'):
return ToolResult.fail(
f"Error: File not found: {path}\n"
f"Resolved to: {absolute_path}\n"
f"Hint: Relative paths are based on workspace ({self.cwd}). For files outside workspace, use absolute paths."
)
return ToolResult.fail(f"Error: File not found: {path}")
# Check if readable
if not os.access(absolute_path, os.R_OK):
return ToolResult.fail(f"Error: File is not readable: {path}")
# Check file type
file_ext = Path(absolute_path).suffix.lower()
file_size = os.path.getsize(absolute_path)
# Check if image - return metadata for sending
if file_ext in self.image_extensions:
return self._read_image(absolute_path, file_ext)
# Check if video/audio/binary/archive - return metadata only
if file_ext in self.video_extensions:
return self._return_file_metadata(absolute_path, "video", file_size)
if file_ext in self.audio_extensions:
return self._return_file_metadata(absolute_path, "audio", file_size)
if file_ext in self.binary_extensions or file_ext in self.archive_extensions:
return self._return_file_metadata(absolute_path, "binary", file_size)
# Check if PDF
if file_ext in self.pdf_extensions:
return self._read_pdf(absolute_path, path, offset, limit)
# Read text file (with truncation for large files)
return self._read_text(absolute_path, path, offset, limit)
def _resolve_path(self, path: str) -> str:
"""
Resolve path to absolute path
:param path: Relative or absolute path
:return: Absolute path
"""
# Expand ~ to user home directory
path = expand_path(path)
if os.path.isabs(path):
return path
return os.path.abspath(os.path.join(self.cwd, path))
def _return_file_metadata(self, absolute_path: str, file_type: str, file_size: int) -> ToolResult:
"""
Return file metadata for non-readable files (video, audio, binary, etc.)
:param absolute_path: Absolute path to the file
:param file_type: Type of file (video, audio, binary, etc.)
:param file_size: File size in bytes
:return: File metadata
"""
file_name = Path(absolute_path).name
file_ext = Path(absolute_path).suffix.lower()
# Determine MIME type
mime_types = {
# Video
'.mp4': 'video/mp4', '.avi': 'video/x-msvideo', '.mov': 'video/quicktime',
'.mkv': 'video/x-matroska', '.webm': 'video/webm',
# Audio
'.mp3': 'audio/mpeg', '.wav': 'audio/wav', '.ogg': 'audio/ogg',
'.m4a': 'audio/mp4', '.flac': 'audio/flac',
# Binary
'.zip': 'application/zip', '.tar': 'application/x-tar',
'.gz': 'application/gzip', '.rar': 'application/x-rar-compressed',
}
mime_type = mime_types.get(file_ext, 'application/octet-stream')
result = {
"type": f"{file_type}_metadata",
"file_type": file_type,
"path": absolute_path,
"file_name": file_name,
"mime_type": mime_type,
"size": file_size,
"size_formatted": format_size(file_size),
"message": f"{file_type.capitalize()} 文件: {file_name} ({format_size(file_size)})\n提示: 如果需要发送此文件,请使用 send 工具。"
}
return ToolResult.success(result)
def _read_image(self, absolute_path: str, file_ext: str) -> ToolResult:
"""
Read image file - always return metadata only (images should be sent, not read into context)
:param absolute_path: Absolute path to the image file
:param file_ext: File extension
:return: Result containing image metadata for sending
"""
try:
# Get file size
file_size = os.path.getsize(absolute_path)
# Determine MIME type
mime_type_map = {
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.png': 'image/png',
'.gif': 'image/gif',
'.webp': 'image/webp'
}
mime_type = mime_type_map.get(file_ext, 'image/jpeg')
# Return metadata for images (NOT file_to_send - use send tool to actually send)
result = {
"type": "image_metadata",
"file_type": "image",
"path": absolute_path,
"mime_type": mime_type,
"size": file_size,
"size_formatted": format_size(file_size),
"message": f"图片文件: {Path(absolute_path).name} ({format_size(file_size)})\n提示: 如果需要发送此图片,请使用 send 工具。"
}
return ToolResult.success(result)
except Exception as e:
return ToolResult.fail(f"Error reading image file: {str(e)}")
def _read_text(self, absolute_path: str, display_path: str, offset: Optional[int] = None, limit: Optional[int] = None) -> ToolResult:
"""
Read text file
:param absolute_path: Absolute path to the file
:param display_path: Path to display
:param offset: Starting line number (1-indexed)
:param limit: Maximum number of lines to read
:return: File content or error message
"""
try:
# Check file size first
file_size = os.path.getsize(absolute_path)
MAX_FILE_SIZE = 50 * 1024 * 1024 # 50MB
if file_size > MAX_FILE_SIZE:
# File too large, return metadata only
return ToolResult.success({
"type": "file_to_send",
"file_type": "document",
"path": absolute_path,
"size": file_size,
"size_formatted": format_size(file_size),
"message": f"文件过大 ({format_size(file_size)} > 50MB),无法读取内容。文件路径: {absolute_path}"
})
# Read file (utf-8-sig strips BOM automatically on Windows)
with open(absolute_path, 'r', encoding='utf-8-sig') as f:
content = f.read()
# Truncate content if too long (20K characters max for model context)
MAX_CONTENT_CHARS = 20 * 1024 # 20K characters
content_truncated = False
if len(content) > MAX_CONTENT_CHARS:
content = content[:MAX_CONTENT_CHARS]
content_truncated = True
all_lines = content.split('\n')
total_file_lines = len(all_lines)
# Apply offset (if specified)
start_line = 0
if offset is not None:
if offset < 0:
# Negative offset: read from end
# -20 means "last 20 lines" → start from (total - 20)
start_line = max(0, total_file_lines + offset)
else:
# Positive offset: read from start (1-indexed)
start_line = max(0, offset - 1) # Convert to 0-indexed
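# e.g. with a 100-line file: offset=81 -> start_line=80 (lines 81-100 shown),
# and offset=-20 -> start_line = max(0, 100 - 20) = 80 as well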
if start_line >= total_file_lines:
return ToolResult.fail(
f"Error: Offset {offset} is beyond end of file ({total_file_lines} lines total)"
)
start_line_display = start_line + 1 # For display (1-indexed)
# If user specified limit, use it
selected_content = content
user_limited_lines = None
if limit is not None:
end_line = min(start_line + limit, total_file_lines)
selected_content = '\n'.join(all_lines[start_line:end_line])
user_limited_lines = end_line - start_line
elif offset is not None:
selected_content = '\n'.join(all_lines[start_line:])
# Apply truncation (considering line count and byte limits)
truncation = truncate_head(selected_content)
output_text = ""
details = {}
# Add truncation warning if content was truncated
if content_truncated:
output_text = f"[文件内容已截断到前 {format_size(MAX_CONTENT_CHARS)},完整文件大小: {format_size(file_size)}]\n\n"
if truncation.first_line_exceeds_limit:
# First line alone exceeds the byte limit (DEFAULT_MAX_BYTES)
first_line_size = format_size(len(all_lines[start_line].encode('utf-8')))
output_text = f"[Line {start_line_display} is {first_line_size}, exceeds {format_size(DEFAULT_MAX_BYTES)} limit. Use bash tool to read: head -c {DEFAULT_MAX_BYTES} {display_path} | tail -n +{start_line_display}]"
details["truncation"] = truncation.to_dict()
elif truncation.truncated:
# Truncation occurred
end_line_display = start_line_display + truncation.output_lines - 1
next_offset = end_line_display + 1
output_text = truncation.content
if truncation.truncated_by == "lines":
output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_file_lines}. Use offset={next_offset} to continue.]"
else:
output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_file_lines} ({format_size(DEFAULT_MAX_BYTES)} limit). Use offset={next_offset} to continue.]"
details["truncation"] = truncation.to_dict()
elif user_limited_lines is not None and start_line + user_limited_lines < total_file_lines:
# User specified limit, more content available, but no truncation
remaining = total_file_lines - (start_line + user_limited_lines)
next_offset = start_line + user_limited_lines + 1
output_text = truncation.content
output_text += f"\n\n[{remaining} more lines in file. Use offset={next_offset} to continue.]"
else:
# No truncation, no exceeding user limit
output_text = truncation.content
result = {
"content": output_text,
"total_lines": total_file_lines,
"start_line": start_line_display,
"output_lines": truncation.output_lines
}
if details:
result["details"] = details
return ToolResult.success(result)
except UnicodeDecodeError:
return ToolResult.fail(f"Error: File is not a valid text file (encoding error): {display_path}")
except Exception as e:
return ToolResult.fail(f"Error reading file: {str(e)}")
def _read_pdf(self, absolute_path: str, display_path: str, offset: Optional[int] = None, limit: Optional[int] = None) -> ToolResult:
"""
Read PDF file content
:param absolute_path: Absolute path to the file
:param display_path: Path to display
:param offset: Starting line number (1-indexed)
:param limit: Maximum number of lines to read
:return: PDF text content or error message
"""
try:
# Try to import pypdf
try:
from pypdf import PdfReader
except ImportError:
return ToolResult.fail(
"Error: pypdf library not installed. Install with: pip install pypdf"
)
# Read PDF
reader = PdfReader(absolute_path)
total_pages = len(reader.pages)
# Extract text from all pages
text_parts = []
for page_num, page in enumerate(reader.pages, 1):
page_text = page.extract_text()
if page_text.strip():
text_parts.append(f"--- Page {page_num} ---\n{page_text}")
if not text_parts:
return ToolResult.success({
"content": f"[PDF file with {total_pages} pages, but no text content could be extracted]",
"total_pages": total_pages,
"message": "PDF may contain only images or be encrypted"
})
# Merge all text
full_content = "\n\n".join(text_parts)
all_lines = full_content.split('\n')
total_lines = len(all_lines)
# Apply offset and limit (same logic as text files)
start_line = 0
if offset is not None:
start_line = max(0, offset - 1)
if start_line >= total_lines:
return ToolResult.fail(
f"Error: Offset {offset} is beyond end of content ({total_lines} lines total)"
)
start_line_display = start_line + 1
selected_content = full_content
user_limited_lines = None
if limit is not None:
end_line = min(start_line + limit, total_lines)
selected_content = '\n'.join(all_lines[start_line:end_line])
user_limited_lines = end_line - start_line
elif offset is not None:
selected_content = '\n'.join(all_lines[start_line:])
# Apply truncation
truncation = truncate_head(selected_content)
output_text = ""
details = {}
if truncation.truncated:
end_line_display = start_line_display + truncation.output_lines - 1
next_offset = end_line_display + 1
output_text = truncation.content
if truncation.truncated_by == "lines":
output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_lines}. Use offset={next_offset} to continue.]"
else:
output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_lines} ({format_size(DEFAULT_MAX_BYTES)} limit). Use offset={next_offset} to continue.]"
details["truncation"] = truncation.to_dict()
elif user_limited_lines is not None and start_line + user_limited_lines < total_lines:
remaining = total_lines - (start_line + user_limited_lines)
next_offset = start_line + user_limited_lines + 1
output_text = truncation.content
output_text += f"\n\n[{remaining} more lines in file. Use offset={next_offset} to continue.]"
else:
output_text = truncation.content
result = {
"content": output_text,
"total_pages": total_pages,
"total_lines": total_lines,
"start_line": start_line_display,
"output_lines": truncation.output_lines
}
if details:
result["details"] = details
return ToolResult.success(result)
except Exception as e:
return ToolResult.fail(f"Error reading PDF file: {str(e)}")
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/read/read.py",
"license": "MIT License",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/tool_manager.py | import importlib
import importlib.util
from pathlib import Path
from typing import Optional
from agent.tools.base_tool import BaseTool
from common.log import logger
from config import conf
class ToolManager:
"""
Tool manager for managing tools.
"""
_instance = None
def __new__(cls):
"""Singleton pattern to ensure only one instance of ToolManager exists."""
if cls._instance is None:
cls._instance = super(ToolManager, cls).__new__(cls)
cls._instance.tool_classes = {} # Store tool classes instead of instances
cls._instance._initialized = False
return cls._instance
def __init__(self):
# Initialize only once
if not hasattr(self, 'tool_classes'):
self.tool_classes = {} # Dictionary to store tool classes
def load_tools(self, tools_dir: str = "", config_dict=None):
"""
Load tools from both directory and configuration.
:param tools_dir: Directory to scan for tool modules
"""
if tools_dir:
self._load_tools_from_directory(tools_dir)
self._configure_tools_from_config()
else:
self._load_tools_from_init()
self._configure_tools_from_config(config_dict)
def _load_tools_from_init(self) -> bool:
"""
Load tool classes from tools.__init__.__all__
:return: True if tools were loaded, False otherwise
"""
try:
# Try to import the tools package
tools_package = importlib.import_module("agent.tools")
# Check if __all__ is defined
if hasattr(tools_package, "__all__"):
tool_classes = tools_package.__all__
# Import each tool class directly from the tools package
for class_name in tool_classes:
try:
# Skip base classes
if class_name in ["BaseTool", "ToolManager"]:
continue
# Get the class directly from the tools package
if hasattr(tools_package, class_name):
cls = getattr(tools_package, class_name)
if (
isinstance(cls, type)
and issubclass(cls, BaseTool)
and cls != BaseTool
):
try:
# Skip memory tools (they need special initialization with memory_manager)
if class_name in ["MemorySearchTool", "MemoryGetTool"]:
logger.debug(f"Skipped tool {class_name} (requires memory_manager)")
continue
# Create a temporary instance to get the name
temp_instance = cls()
tool_name = temp_instance.name
# Store the class, not the instance
self.tool_classes[tool_name] = cls
logger.debug(f"Loaded tool: {tool_name} from class {class_name}")
except ImportError as e:
# Handle missing dependencies with helpful messages
error_msg = str(e)
if "browser-use" in error_msg or "browser_use" in error_msg:
logger.warning(
f"[ToolManager] Browser tool not loaded - missing dependencies.\n"
f" To enable browser tool, run:\n"
f" pip install browser-use markdownify playwright\n"
f" playwright install chromium"
)
elif "markdownify" in error_msg:
logger.warning(
f"[ToolManager] {cls.__name__} not loaded - missing markdownify.\n"
f" Install with: pip install markdownify"
)
else:
logger.warning(f"[ToolManager] {cls.__name__} not loaded due to missing dependency: {error_msg}")
except Exception as e:
logger.error(f"Error initializing tool class {cls.__name__}: {e}")
except Exception as e:
logger.error(f"Error importing class {class_name}: {e}")
return len(self.tool_classes) > 0
return False
except ImportError:
logger.warning("Could not import agent.tools package")
return False
except Exception as e:
logger.error(f"Error loading tools from __init__.__all__: {e}")
return False
def _load_tools_from_directory(self, tools_dir: str):
"""Dynamically load tool classes from directory"""
tools_path = Path(tools_dir)
# Traverse all .py files
for py_file in tools_path.rglob("*.py"):
# Skip initialization files and base tool files
if py_file.name in ["__init__.py", "base_tool.py", "tool_manager.py"]:
continue
# Get module name
module_name = py_file.stem
try:
# Load module directly from file
spec = importlib.util.spec_from_file_location(module_name, py_file)
if spec and spec.loader:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# Find tool classes in the module
for attr_name in dir(module):
cls = getattr(module, attr_name)
if (
isinstance(cls, type)
and issubclass(cls, BaseTool)
and cls != BaseTool
):
try:
# Skip memory tools (they need special initialization with memory_manager)
if attr_name in ["MemorySearchTool", "MemoryGetTool"]:
logger.debug(f"Skipped tool {attr_name} (requires memory_manager)")
continue
# Create a temporary instance to get the name
temp_instance = cls()
tool_name = temp_instance.name
# Store the class, not the instance
self.tool_classes[tool_name] = cls
except ImportError as e:
# Handle missing dependencies with helpful messages
error_msg = str(e)
if "browser-use" in error_msg or "browser_use" in error_msg:
logger.warning(
f"[ToolManager] Browser tool not loaded - missing dependencies.\n"
f" To enable browser tool, run:\n"
f" pip install browser-use markdownify playwright\n"
f" playwright install chromium"
)
elif "markdownify" in error_msg:
logger.warning(
f"[ToolManager] {cls.__name__} not loaded - missing markdownify.\n"
f" Install with: pip install markdownify"
)
else:
logger.warning(f"[ToolManager] {cls.__name__} not loaded due to missing dependency: {error_msg}")
except Exception as e:
logger.error(f"Error initializing tool class {cls.__name__}: {e}")
except Exception as e:
print(f"Error importing module {py_file}: {e}")
def _configure_tools_from_config(self, config_dict=None):
"""Configure tool classes based on configuration file"""
try:
# Get tools configuration
tools_config = config_dict or conf().get("tools", {})
# Record tools that are configured but not loaded
missing_tools = []
# Store configurations for later use when instantiating
self.tool_configs = tools_config
# Check which configured tools are missing
for tool_name in tools_config:
if tool_name not in self.tool_classes:
missing_tools.append(tool_name)
# If there are missing tools, record warnings
if missing_tools:
for tool_name in missing_tools:
if tool_name == "browser":
logger.warning(
f"[ToolManager] Browser tool is configured but not loaded.\n"
f" To enable browser tool, run:\n"
f" pip install browser-use markdownify playwright\n"
f" playwright install chromium"
)
elif tool_name == "google_search":
logger.warning(
f"[ToolManager] Google Search tool is configured but may need API key.\n"
f" Get API key from: https://serper.dev\n"
f" Configure in config.json: tools.google_search.api_key"
)
else:
logger.warning(f"[ToolManager] Tool '{tool_name}' is configured but could not be loaded.")
except Exception as e:
logger.error(f"Error configuring tools from config: {e}")
def create_tool(self, name: str) -> Optional[BaseTool]:
"""
Get a new instance of a tool by name.
:param name: The name of the tool to get.
:return: A new instance of the tool or None if not found.
"""
tool_class = self.tool_classes.get(name)
if tool_class:
# Create a new instance
tool_instance = tool_class()
# Apply configuration if available
if hasattr(self, 'tool_configs') and name in self.tool_configs:
tool_instance.config = self.tool_configs[name]
return tool_instance
return None
def list_tools(self) -> dict:
"""
Get information about all loaded tools.
:return: A dictionary with tool information.
"""
result = {}
for name, tool_class in self.tool_classes.items():
# Create a temporary instance to get schema
temp_instance = tool_class()
result[name] = {
"description": temp_instance.description,
"parameters": temp_instance.get_json_schema()
}
return result
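# Example usage (illustrative; ToolManager is a singleton, so repeated
# constructions share the same loaded classes):
#   manager = ToolManager()
#   manager.load_tools()                    # discovers classes via agent.tools.__all__
#   read_tool = manager.create_tool("read") # fresh instance, config applied if present
#   schemas = manager.list_tools()          # {name: {"description": ..., "parameters": ...}}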
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/tool_manager.py",
"license": "MIT License",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/utils/truncate.py | """
Shared truncation utilities for tool outputs.
Truncation is based on two independent limits - whichever is hit first wins:
- Line limit (default: 2000 lines)
- Byte limit (default: 50KB)
Never returns partial lines (except bash tail truncation edge case).
"""
from typing import Dict, Any, Optional, Literal, Tuple
DEFAULT_MAX_LINES = 2000
DEFAULT_MAX_BYTES = 50 * 1024 # 50KB
GREP_MAX_LINE_LENGTH = 500 # Max chars per grep match line
class TruncationResult:
"""Truncation result"""
def __init__(
self,
content: str,
truncated: bool,
truncated_by: Optional[Literal["lines", "bytes"]],
total_lines: int,
total_bytes: int,
output_lines: int,
output_bytes: int,
last_line_partial: bool = False,
first_line_exceeds_limit: bool = False,
max_lines: int = DEFAULT_MAX_LINES,
max_bytes: int = DEFAULT_MAX_BYTES
):
self.content = content
self.truncated = truncated
self.truncated_by = truncated_by
self.total_lines = total_lines
self.total_bytes = total_bytes
self.output_lines = output_lines
self.output_bytes = output_bytes
self.last_line_partial = last_line_partial
self.first_line_exceeds_limit = first_line_exceeds_limit
self.max_lines = max_lines
self.max_bytes = max_bytes
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary"""
return {
"content": self.content,
"truncated": self.truncated,
"truncated_by": self.truncated_by,
"total_lines": self.total_lines,
"total_bytes": self.total_bytes,
"output_lines": self.output_lines,
"output_bytes": self.output_bytes,
"last_line_partial": self.last_line_partial,
"first_line_exceeds_limit": self.first_line_exceeds_limit,
"max_lines": self.max_lines,
"max_bytes": self.max_bytes
}
def format_size(bytes_count: int) -> str:
"""Format bytes as human-readable size"""
if bytes_count < 1024:
return f"{bytes_count}B"
elif bytes_count < 1024 * 1024:
return f"{bytes_count / 1024:.1f}KB"
else:
return f"{bytes_count / (1024 * 1024):.1f}MB"
def truncate_head(content: str, max_lines: Optional[int] = None, max_bytes: Optional[int] = None) -> TruncationResult:
"""
Truncate content from the head (keep first N lines/bytes).
Suitable for file reads where you want to see the beginning.
Never returns partial lines. If first line exceeds byte limit,
returns empty content with first_line_exceeds_limit=True.
:param content: Content to truncate
:param max_lines: Maximum number of lines (default: 2000)
:param max_bytes: Maximum number of bytes (default: 50KB)
:return: Truncation result
"""
if max_lines is None:
max_lines = DEFAULT_MAX_LINES
if max_bytes is None:
max_bytes = DEFAULT_MAX_BYTES
total_bytes = len(content.encode('utf-8'))
lines = content.split('\n')
total_lines = len(lines)
# Check if no truncation is needed
if total_lines <= max_lines and total_bytes <= max_bytes:
return TruncationResult(
content=content,
truncated=False,
truncated_by=None,
total_lines=total_lines,
total_bytes=total_bytes,
output_lines=total_lines,
output_bytes=total_bytes,
last_line_partial=False,
first_line_exceeds_limit=False,
max_lines=max_lines,
max_bytes=max_bytes
)
# Check if first line alone exceeds byte limit
first_line_bytes = len(lines[0].encode('utf-8'))
if first_line_bytes > max_bytes:
return TruncationResult(
content="",
truncated=True,
truncated_by="bytes",
total_lines=total_lines,
total_bytes=total_bytes,
output_lines=0,
output_bytes=0,
last_line_partial=False,
first_line_exceeds_limit=True,
max_lines=max_lines,
max_bytes=max_bytes
)
# Collect complete lines that fit
output_lines_arr = []
output_bytes_count = 0
truncated_by = "lines"
for i, line in enumerate(lines):
if i >= max_lines:
break
# Calculate line bytes (add 1 for newline if not first line)
line_bytes = len(line.encode('utf-8')) + (1 if i > 0 else 0)
if output_bytes_count + line_bytes > max_bytes:
truncated_by = "bytes"
break
output_lines_arr.append(line)
output_bytes_count += line_bytes
# If exited due to line limit
if len(output_lines_arr) >= max_lines and output_bytes_count <= max_bytes:
truncated_by = "lines"
output_content = '\n'.join(output_lines_arr)
final_output_bytes = len(output_content.encode('utf-8'))
return TruncationResult(
content=output_content,
truncated=True,
truncated_by=truncated_by,
total_lines=total_lines,
total_bytes=total_bytes,
output_lines=len(output_lines_arr),
output_bytes=final_output_bytes,
last_line_partial=False,
first_line_exceeds_limit=False,
max_lines=max_lines,
max_bytes=max_bytes
)
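# Example (illustrative): a 5000-line input hits the line limit first.
#   r = truncate_head("x\n" * 5000)   # ~10KB, but 5001 split entries
#   r.truncated -> True; r.truncated_by -> "lines"; r.output_lines -> 2000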
def truncate_tail(content: str, max_lines: Optional[int] = None, max_bytes: Optional[int] = None) -> TruncationResult:
"""
Truncate content from tail (keep last N lines/bytes).
Suitable for bash output where you want to see the ending content (errors, final results).
If the last line of original content exceeds byte limit, may return partial first line.
:param content: Content to truncate
:param max_lines: Maximum lines (default: 2000)
:param max_bytes: Maximum bytes (default: 50KB)
:return: Truncation result
"""
if max_lines is None:
max_lines = DEFAULT_MAX_LINES
if max_bytes is None:
max_bytes = DEFAULT_MAX_BYTES
total_bytes = len(content.encode('utf-8'))
lines = content.split('\n')
total_lines = len(lines)
# Check if no truncation is needed
if total_lines <= max_lines and total_bytes <= max_bytes:
return TruncationResult(
content=content,
truncated=False,
truncated_by=None,
total_lines=total_lines,
total_bytes=total_bytes,
output_lines=total_lines,
output_bytes=total_bytes,
last_line_partial=False,
first_line_exceeds_limit=False,
max_lines=max_lines,
max_bytes=max_bytes
)
# Work backwards from the end
output_lines_arr = []
output_bytes_count = 0
truncated_by = "lines"
last_line_partial = False
for i in range(len(lines) - 1, -1, -1):
if len(output_lines_arr) >= max_lines:
break
line = lines[i]
# Calculate line bytes (add newline if not the first added line)
line_bytes = len(line.encode('utf-8')) + (1 if len(output_lines_arr) > 0 else 0)
if output_bytes_count + line_bytes > max_bytes:
truncated_by = "bytes"
# Edge case: if we haven't added any lines yet and this single line exceeds max_bytes,
# take the end portion of this line
if len(output_lines_arr) == 0:
truncated_line = _truncate_string_to_bytes_from_end(line, max_bytes)
output_lines_arr.insert(0, truncated_line)
output_bytes_count = len(truncated_line.encode('utf-8'))
last_line_partial = True
break
output_lines_arr.insert(0, line)
output_bytes_count += line_bytes
# If exited due to line limit
if len(output_lines_arr) >= max_lines and output_bytes_count <= max_bytes:
truncated_by = "lines"
output_content = '\n'.join(output_lines_arr)
final_output_bytes = len(output_content.encode('utf-8'))
return TruncationResult(
content=output_content,
truncated=True,
truncated_by=truncated_by,
total_lines=total_lines,
total_bytes=total_bytes,
output_lines=len(output_lines_arr),
output_bytes=final_output_bytes,
last_line_partial=last_line_partial,
first_line_exceeds_limit=False,
max_lines=max_lines,
max_bytes=max_bytes
)
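# Example (illustrative): tail truncation keeps the most recent lines.
#   r = truncate_tail("\n".join(str(i) for i in range(5000)))   # ~24KB
#   r.truncated -> True; r.truncated_by -> "lines"
#   r.content.split("\n")[0] -> "3000"   # last 2000 of 5000 lines survive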
def _truncate_string_to_bytes_from_end(text: str, max_bytes: int) -> str:
"""
Truncate string to fit byte limit (from end).
Properly handles multi-byte UTF-8 characters.
:param text: String to truncate
:param max_bytes: Maximum bytes
:return: Truncated string
"""
encoded = text.encode('utf-8')
if len(encoded) <= max_bytes:
return text
# Keep only the last max_bytes bytes of the encoded string
start = len(encoded) - max_bytes
# Find valid UTF-8 boundary (character start)
while start < len(encoded) and (encoded[start] & 0xC0) == 0x80:
start += 1
return encoded[start:].decode('utf-8', errors='ignore')
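# Example (illustrative): 6 CJK chars = 18 UTF-8 bytes; asking for the last 7
# bytes lands mid-character, so the boundary scan keeps the last 2 chars (6 bytes):
#   _truncate_string_to_bytes_from_end("日本語テスト", 7) -> "スト"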
def truncate_line(line: str, max_chars: int = GREP_MAX_LINE_LENGTH) -> Tuple[str, bool]:
"""
Truncate single line to max characters, add [truncated] suffix.
Used for grep match lines.
:param line: Line to truncate
:param max_chars: Maximum characters
:return: (truncated text, whether truncated)
"""
if len(line) <= max_chars:
return line, False
return f"{line[:max_chars]}... [truncated]", True
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/utils/truncate.py",
"license": "MIT License",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:agent/tools/write/write.py | """
Write tool - Write file content
Creates or overwrites files, automatically creates parent directories
"""
import os
from typing import Dict, Any, Optional
from agent.tools.base_tool import BaseTool, ToolResult
from common.utils import expand_path
class Write(BaseTool):
"""Tool for writing file content"""
name: str = "write"
description: str = "Write content to a file. Creates the file if it doesn't exist, overwrites if it does. Automatically creates parent directories. IMPORTANT: Single write should not exceed 10KB. For large files, create a skeleton first, then use edit to add content in chunks."
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file to write (relative or absolute)"
},
"content": {
"type": "string",
"description": "Content to write to the file"
}
},
"required": ["path", "content"]
}
def __init__(self, config: Optional[dict] = None):
self.config = config or {}
self.cwd = self.config.get("cwd", os.getcwd())
self.memory_manager = self.config.get("memory_manager", None)
def execute(self, args: Dict[str, Any]) -> ToolResult:
"""
Execute file write operation
:param args: Contains file path and content
:return: Operation result
"""
path = args.get("path", "").strip()
content = args.get("content", "")
if not path:
return ToolResult.fail("Error: path parameter is required")
# Resolve path
absolute_path = self._resolve_path(path)
try:
# Create parent directory (if needed)
parent_dir = os.path.dirname(absolute_path)
if parent_dir:
os.makedirs(parent_dir, exist_ok=True)
# Write file
with open(absolute_path, 'w', encoding='utf-8') as f:
f.write(content)
# Get bytes written
bytes_written = len(content.encode('utf-8'))
# Auto-sync to memory database if this is a memory file
if self.memory_manager and 'memory/' in path:
self.memory_manager.mark_dirty()
result = {
"message": f"Successfully wrote {bytes_written} bytes to {path}",
"path": path,
"bytes_written": bytes_written
}
return ToolResult.success(result)
except PermissionError:
return ToolResult.fail(f"Error: Permission denied writing to {path}")
except Exception as e:
return ToolResult.fail(f"Error writing file: {str(e)}")
def _resolve_path(self, path: str) -> str:
"""
Resolve path to absolute path
:param path: Relative or absolute path
:return: Absolute path
"""
# Expand ~ to user home directory
path = expand_path(path)
if os.path.isabs(path):
return path
return os.path.abspath(os.path.join(self.cwd, path))
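# Example usage (illustrative sketch; paths are hypothetical):
#   tool = Write({"cwd": "/tmp/workspace"})
#   tool.execute({"path": "notes/todo.md", "content": "- buy milk\n"})
#   # -> creates /tmp/workspace/notes/todo.md, parent dirs made automatically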
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "agent/tools/write/write.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:bridge/agent_bridge.py | """
Agent Bridge - Integrates Agent system with existing COW bridge
"""
import os
from typing import Optional, List
from agent.protocol import Agent, LLMModel, LLMRequest
from bridge.agent_event_handler import AgentEventHandler
from bridge.agent_initializer import AgentInitializer
from bridge.bridge import Bridge
from bridge.context import Context
from bridge.reply import Reply, ReplyType
from common import const
from common.log import logger
from common.utils import expand_path
from models.openai_compatible_bot import OpenAICompatibleBot
def add_openai_compatible_support(bot_instance):
"""
Dynamically add OpenAI-compatible tool calling support to a bot instance.
This allows any bot to gain tool calling capability without modifying its code,
as long as it uses OpenAI-compatible API format.
Note: Some bots like ZHIPUAIBot have native tool calling support and don't need enhancement.
"""
if hasattr(bot_instance, 'call_with_tools'):
# Bot already has tool calling support (e.g., ZHIPUAIBot)
logger.debug(f"[AgentBridge] {type(bot_instance).__name__} already has native tool calling support")
return bot_instance
# Create a temporary mixin class that combines the bot with OpenAI compatibility
class EnhancedBot(bot_instance.__class__, OpenAICompatibleBot):
"""Dynamically enhanced bot with OpenAI-compatible tool calling"""
def get_api_config(self):
"""
Infer API config from common configuration patterns.
Most OpenAI-compatible bots use similar configuration.
"""
from config import conf
return {
'api_key': conf().get("open_ai_api_key"),
'api_base': conf().get("open_ai_api_base"),
'model': conf().get("model", "gpt-3.5-turbo"),
'default_temperature': conf().get("temperature", 0.9),
'default_top_p': conf().get("top_p", 1.0),
'default_frequency_penalty': conf().get("frequency_penalty", 0.0),
'default_presence_penalty': conf().get("presence_penalty", 0.0),
}
# Change the bot's class to the enhanced version
bot_instance.__class__ = EnhancedBot
logger.info(
f"[AgentBridge] Enhanced {bot_instance.__class__.__bases__[0].__name__} with OpenAI-compatible tool calling")
return bot_instance
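# Example (illustrative sketch; assumes the configured bot speaks the OpenAI API):
#   from models.bot_factory import create_bot
#   bot = create_bot(const.CHATGPT)
#   bot = add_openai_compatible_support(bot)   # bot.__class__ is now EnhancedBot
#   # bot.call_with_tools(...) becomes available without touching the bot's source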
class AgentLLMModel(LLMModel):
"""
LLM Model adapter that uses COW's existing bot infrastructure
"""
_MODEL_BOT_TYPE_MAP = {
"wenxin": const.BAIDU, "wenxin-4": const.BAIDU,
"xunfei": const.XUNFEI, const.QWEN: const.QWEN,
const.MODELSCOPE: const.MODELSCOPE,
}
_MODEL_PREFIX_MAP = [
("qwen", const.QWEN_DASHSCOPE), ("qwq", const.QWEN_DASHSCOPE), ("qvq", const.QWEN_DASHSCOPE),
("gemini", const.GEMINI), ("glm", const.ZHIPU_AI), ("claude", const.CLAUDEAPI),
("moonshot", const.MOONSHOT), ("kimi", const.MOONSHOT),
("doubao", const.DOUBAO),
]
def __init__(self, bridge: Bridge, bot_type: str = "chat"):
from config import conf
super().__init__(model=conf().get("model", const.GPT_41))
self.bridge = bridge
self.bot_type = bot_type
self._bot = None
self._bot_model = None
@property
def model(self):
from config import conf
return conf().get("model", const.GPT_41)
@model.setter
def model(self, value):
pass
def _resolve_bot_type(self, model_name: str) -> str:
"""Resolve bot type from model name, matching Bridge.__init__ logic."""
from config import conf
if conf().get("use_linkai", False) and conf().get("linkai_api_key"):
return const.LINKAI
if not model_name or not isinstance(model_name, str):
return const.CHATGPT
if model_name in self._MODEL_BOT_TYPE_MAP:
return self._MODEL_BOT_TYPE_MAP[model_name]
if model_name.lower().startswith("minimax") or model_name in ["abab6.5-chat"]:
return const.MiniMax
if model_name in [const.QWEN_TURBO, const.QWEN_PLUS, const.QWEN_MAX]:
return const.QWEN_DASHSCOPE
if model_name in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
return const.MOONSHOT
if model_name in [const.DEEPSEEK_CHAT, const.DEEPSEEK_REASONER]:
return const.CHATGPT
for prefix, btype in self._MODEL_PREFIX_MAP:
if model_name.startswith(prefix):
return btype
return const.CHATGPT
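# Example resolutions (illustrative, assuming use_linkai is disabled):
#   "glm-4"          -> const.ZHIPU_AI   (prefix "glm")
#   "moonshot-v1-8k" -> const.MOONSHOT   (explicit list)
#   "some-new-model" -> const.CHATGPT    (fallback)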
@property
def bot(self):
"""Lazy load the bot, re-create when model changes"""
from models.bot_factory import create_bot
cur_model = self.model
if self._bot is None or self._bot_model != cur_model:
bot_type = self._resolve_bot_type(cur_model)
self._bot = create_bot(bot_type)
self._bot = add_openai_compatible_support(self._bot)
self._bot_model = cur_model
return self._bot
def call(self, request: LLMRequest):
"""
Call the model using COW's bot infrastructure
"""
try:
# Non-streaming path: use the tool-enabled call when the bot supports it
if hasattr(self.bot, 'call_with_tools'):
# Use tool-enabled call if available
kwargs = {
'messages': request.messages,
'tools': getattr(request, 'tools', None),
'stream': False,
'model': self.model # Pass model parameter
}
# Only pass max_tokens if it's explicitly set
if request.max_tokens is not None:
kwargs['max_tokens'] = request.max_tokens
# Extract system prompt if present
system_prompt = getattr(request, 'system', None)
if system_prompt:
kwargs['system'] = system_prompt
response = self.bot.call_with_tools(**kwargs)
return self._format_response(response)
else:
# Fallback to regular call
# This would need to be implemented based on your specific needs
raise NotImplementedError("Regular call not implemented yet")
except Exception as e:
logger.error(f"AgentLLMModel call error: {e}")
raise
def call_stream(self, request: LLMRequest):
"""
Call the model with streaming using COW's bot infrastructure
"""
try:
if hasattr(self.bot, 'call_with_tools'):
# Use tool-enabled streaming call if available
# Extract system prompt if present
system_prompt = getattr(request, 'system', None)
# Build kwargs for call_with_tools
kwargs = {
'messages': request.messages,
'tools': getattr(request, 'tools', None),
'stream': True,
'model': self.model # Pass model parameter
}
# Only pass max_tokens if explicitly set, let the bot use its default
if request.max_tokens is not None:
kwargs['max_tokens'] = request.max_tokens
# Add system prompt if present
if system_prompt:
kwargs['system'] = system_prompt
# Pass channel_type for linkai tracking
channel_type = getattr(self, 'channel_type', None)
if channel_type:
kwargs['channel_type'] = channel_type
stream = self.bot.call_with_tools(**kwargs)
# Convert stream format to our expected format
for chunk in stream:
yield self._format_stream_chunk(chunk)
else:
bot_type = type(self.bot).__name__
raise NotImplementedError(f"Bot {bot_type} does not support call_with_tools. Please add the method.")
except Exception as e:
logger.error(f"AgentLLMModel call_stream error: {e}", exc_info=True)
raise
def _format_response(self, response):
"""Format the underlying bot's response into the agent's expected format"""
# Currently a pass-through; adapt here if a bot returns a different schema
return response
def _format_stream_chunk(self, chunk):
"""Format a raw stream chunk into the agent's expected format"""
# Currently a pass-through; adapt here if a bot streams a different schema
return chunk
class AgentBridge:
"""
Bridge class that integrates super Agent with COW
Manages multiple agent instances per session for conversation isolation
"""
def __init__(self, bridge: Bridge):
self.bridge = bridge
self.agents = {} # session_id -> Agent instance mapping
self.default_agent = None # For backward compatibility (no session_id)
self.agent: Optional[Agent] = None
self.scheduler_initialized = False
# Create helper instances
self.initializer = AgentInitializer(bridge, self)
def create_agent(self, system_prompt: str, tools: List = None, **kwargs) -> Agent:
"""
Create the super agent with COW integration
Args:
system_prompt: System prompt
tools: List of tools (optional)
**kwargs: Additional agent parameters
Returns:
Agent instance
"""
# Create LLM model that uses COW's bot infrastructure
model = AgentLLMModel(self.bridge)
# Default tools if none provided
if tools is None:
# Use ToolManager to load all available tools
from agent.tools import ToolManager
tool_manager = ToolManager()
tool_manager.load_tools()
tools = []
for tool_name in tool_manager.tool_classes.keys():
try:
tool = tool_manager.create_tool(tool_name)
if tool:
tools.append(tool)
except Exception as e:
logger.warning(f"[AgentBridge] Failed to load tool {tool_name}: {e}")
# Create agent instance
agent = Agent(
system_prompt=system_prompt,
description=kwargs.get("description", "AI Super Agent"),
model=model,
tools=tools,
max_steps=kwargs.get("max_steps", 15),
output_mode=kwargs.get("output_mode", "logger"),
workspace_dir=kwargs.get("workspace_dir"), # Pass workspace for skills loading
enable_skills=kwargs.get("enable_skills", True), # Enable skills by default
memory_manager=kwargs.get("memory_manager"), # Pass memory manager
max_context_tokens=kwargs.get("max_context_tokens"),
context_reserve_tokens=kwargs.get("context_reserve_tokens"),
runtime_info=kwargs.get("runtime_info") # Pass runtime_info for dynamic time updates
)
# Log skill loading details
if agent.skill_manager:
logger.debug(f"[AgentBridge] SkillManager initialized with {len(agent.skill_manager.skills)} skills")
return agent
def get_agent(self, session_id: str = None) -> Optional[Agent]:
"""
Get agent instance for the given session
Args:
session_id: Session identifier (e.g., user_id). If None, returns default agent.
Returns:
Agent instance for this session
"""
# If no session_id, use default agent (backward compatibility)
if session_id is None:
if self.default_agent is None:
self._init_default_agent()
return self.default_agent
# Check if agent exists for this session
if session_id not in self.agents:
self._init_agent_for_session(session_id)
return self.agents[session_id]
def _init_default_agent(self):
"""Initialize default super agent"""
agent = self.initializer.initialize_agent(session_id=None)
self.default_agent = agent
def _init_agent_for_session(self, session_id: str):
"""Initialize agent for a specific session"""
agent = self.initializer.initialize_agent(session_id=session_id)
self.agents[session_id] = agent
def agent_reply(self, query: str, context: Context = None,
on_event=None, clear_history: bool = False) -> Reply:
"""
Use super agent to reply to a query
Args:
query: User query
context: COW context (optional, contains session_id for user isolation)
on_event: Event callback (optional)
clear_history: Whether to clear conversation history
Returns:
Reply object
"""
session_id = None
agent = None
try:
# Extract session_id from context for user isolation
if context:
session_id = context.kwargs.get("session_id") or context.get("session_id")
# Get agent for this session (will auto-initialize if needed)
agent = self.get_agent(session_id=session_id)
if not agent:
return Reply(ReplyType.ERROR, "Failed to initialize super agent")
# Create event handler for logging and channel communication
event_handler = AgentEventHandler(context=context, original_callback=on_event)
# Filter tools based on context
original_tools = agent.tools
filtered_tools = original_tools
# If this is a scheduled task execution, exclude scheduler tool to prevent recursion
if context and context.get("is_scheduled_task"):
filtered_tools = [tool for tool in agent.tools if tool.name != "scheduler"]
agent.tools = filtered_tools
logger.info(f"[AgentBridge] Scheduled task execution: excluded scheduler tool ({len(filtered_tools)}/{len(original_tools)} tools)")
else:
# Attach context to scheduler tool if present
if context and agent.tools:
for tool in agent.tools:
if tool.name == "scheduler":
try:
from agent.tools.scheduler.integration import attach_scheduler_to_tool
attach_scheduler_to_tool(tool, context)
except Exception as e:
logger.warning(f"[AgentBridge] Failed to attach context to scheduler: {e}")
break
# Pass channel_type to model so linkai requests carry it
if context and hasattr(agent, 'model'):
agent.model.channel_type = context.get("channel_type", "")
# Store session_id on agent so executor can clear DB on fatal errors
agent._current_session_id = session_id
# Record message count before execution so we can diff new messages
with agent.messages_lock:
pre_run_len = len(agent.messages)
try:
# Use agent's run_stream method with event handler
response = agent.run_stream(
user_message=query,
on_event=event_handler.handle_event,
clear_history=clear_history
)
finally:
# Restore original tools
if context and context.get("is_scheduled_task"):
agent.tools = original_tools
# Log execution summary
event_handler.log_summary()
# Persist new messages generated during this run
if session_id:
channel_type = (context.get("channel_type") or "") if context else ""
with agent.messages_lock:
new_messages = agent.messages[pre_run_len:]
if new_messages:
self._persist_messages(session_id, list(new_messages), channel_type)
elif pre_run_len > 0 and len(agent.messages) == 0:
# Agent cleared its messages (recovery from format error / overflow)
# Also clear the DB to prevent reloading dirty data
try:
from agent.memory import get_conversation_store
get_conversation_store().clear_session(session_id)
logger.info(f"[AgentBridge] Cleared DB for recovered session: {session_id}")
except Exception as e:
logger.warning(f"[AgentBridge] Failed to clear DB after recovery: {e}")
# Check if there are files to send (from read tool)
if hasattr(agent, 'stream_executor') and hasattr(agent.stream_executor, 'files_to_send'):
files_to_send = agent.stream_executor.files_to_send
if files_to_send:
# Send the first file (for now, handle one file at a time)
file_info = files_to_send[0]
logger.info(f"[AgentBridge] Sending file: {file_info.get('path')}")
# Clear files_to_send for next request
agent.stream_executor.files_to_send = []
# Return file reply based on file type
return self._create_file_reply(file_info, response, context)
return Reply(ReplyType.TEXT, response)
except Exception as e:
logger.error(f"Agent reply error: {e}")
# If the agent cleared its messages due to format error / overflow,
# also purge the DB so the next request starts clean.
if session_id and agent:
try:
with agent.messages_lock:
msg_count = len(agent.messages)
if msg_count == 0:
from agent.memory import get_conversation_store
get_conversation_store().clear_session(session_id)
logger.info(f"[AgentBridge] Cleared DB for session after error: {session_id}")
except Exception as db_err:
logger.warning(f"[AgentBridge] Failed to clear DB after error: {db_err}")
return Reply(ReplyType.ERROR, f"Agent error: {str(e)}")
def _create_file_reply(self, file_info: dict, text_response: str, context: Context = None) -> Reply:
"""
Create a reply for sending files
Args:
file_info: File metadata from read tool
text_response: Text response from agent
context: Context object
Returns:
Reply object for file sending
"""
file_type = file_info.get("file_type", "file")
file_path = file_info.get("path")
# For images, use IMAGE_URL type (channel will handle upload)
if file_type == "image":
# Convert local path to file:// URL for channel processing
file_url = f"file://{file_path}"
logger.info(f"[AgentBridge] Sending image: {file_url}")
reply = Reply(ReplyType.IMAGE_URL, file_url)
# Attach text message if present (for channels that support text+image)
if text_response:
reply.text_content = text_response # Store accompanying text
return reply
# For all file types (document, video, audio), use FILE type
if file_type in ["document", "video", "audio"]:
file_url = f"file://{file_path}"
logger.info(f"[AgentBridge] Sending {file_type}: {file_url}")
reply = Reply(ReplyType.FILE, file_url)
reply.file_name = file_info.get("file_name", os.path.basename(file_path))
# Attach text message if present
if text_response:
reply.text_content = text_response
return reply
# For other unknown file types, return text with file info
message = text_response or file_info.get("message", "File is ready")
message += f"\n\n[File: {file_info.get('file_name', file_path)}]"
return Reply(ReplyType.TEXT, message)
def _migrate_config_to_env(self, workspace_root: str):
"""
Migrate API keys from config.json to .env file if not already set
Args:
workspace_root: Workspace directory path (not used, kept for compatibility)
"""
from config import conf
# Mapping from config.json keys to environment variable names
key_mapping = {
"open_ai_api_key": "OPENAI_API_KEY",
"open_ai_api_base": "OPENAI_API_BASE",
"gemini_api_key": "GEMINI_API_KEY",
"claude_api_key": "CLAUDE_API_KEY",
"linkai_api_key": "LINKAI_API_KEY",
}
# Use fixed secure location for .env file
env_file = expand_path("~/.cow/.env")
# Read existing env vars from .env file
existing_env_vars = {}
if os.path.exists(env_file):
try:
with open(env_file, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line and not line.startswith('#') and '=' in line:
key, _ = line.split('=', 1)
existing_env_vars[key.strip()] = True
except Exception as e:
logger.warning(f"[AgentBridge] Failed to read .env file: {e}")
# Check which keys need to be migrated
keys_to_migrate = {}
for config_key, env_key in key_mapping.items():
# Skip if already in .env file
if env_key in existing_env_vars:
continue
# Get value from config.json
value = conf().get(config_key, "")
if value and value.strip(): # Only migrate non-empty values
keys_to_migrate[env_key] = value.strip()
# Log summary if there are keys to skip
if existing_env_vars:
logger.debug(f"[AgentBridge] {len(existing_env_vars)} env vars already in .env")
# Write new keys to .env file
if keys_to_migrate:
try:
# Ensure ~/.cow directory and .env file exist
env_dir = os.path.dirname(env_file)
if not os.path.exists(env_dir):
os.makedirs(env_dir, exist_ok=True)
if not os.path.exists(env_file):
open(env_file, 'a').close()
# Append new keys
with open(env_file, 'a', encoding='utf-8') as f:
f.write('\n# Auto-migrated from config.json\n')
for key, value in keys_to_migrate.items():
f.write(f'{key}={value}\n')
# Also set in current process
os.environ[key] = value
logger.info(f"[AgentBridge] Migrated {len(keys_to_migrate)} API keys from config.json to .env: {list(keys_to_migrate.keys())}")
except Exception as e:
logger.warning(f"[AgentBridge] Failed to migrate API keys: {e}")
def _persist_messages(
self, session_id: str, new_messages: list, channel_type: str = ""
) -> None:
"""
Persist new messages to the conversation store after each agent run.
Failures are logged but never propagate — they must not interrupt replies.
"""
if not new_messages:
return
try:
from config import conf
if not conf().get("conversation_persistence", True):
return
except Exception:
pass
try:
from agent.memory import get_conversation_store
get_conversation_store().append_messages(
session_id, new_messages, channel_type=channel_type
)
except Exception as e:
logger.warning(
f"[AgentBridge] Failed to persist messages for session={session_id}: {e}"
)
def clear_session(self, session_id: str):
"""
Clear a specific session's agent and conversation history
Args:
session_id: Session identifier to clear
"""
if session_id in self.agents:
logger.info(f"[AgentBridge] Clearing session: {session_id}")
del self.agents[session_id]
def clear_all_sessions(self):
"""Clear all agent sessions"""
logger.info(f"[AgentBridge] Clearing all sessions ({len(self.agents)} total)")
self.agents.clear()
self.default_agent = None
def refresh_all_skills(self) -> int:
"""
Refresh skills and conditional tools in all agent instances after
environment variable changes. This allows hot-reload without restarting.
Returns:
Number of agent instances refreshed
"""
from dotenv import load_dotenv
from config import conf
# Reload environment variables from .env file
workspace_root = expand_path(conf().get("agent_workspace", "~/cow"))
env_file = os.path.join(workspace_root, '.env')
if os.path.exists(env_file):
load_dotenv(env_file, override=True)
logger.info(f"[AgentBridge] Reloaded environment variables from {env_file}")
refreshed_count = 0
# Collect all agent instances to refresh
agents_to_refresh = []
if self.default_agent:
agents_to_refresh.append(("default", self.default_agent))
for session_id, agent in self.agents.items():
agents_to_refresh.append((session_id, agent))
for label, agent in agents_to_refresh:
# Refresh skills
if hasattr(agent, 'skill_manager') and agent.skill_manager:
agent.skill_manager.refresh_skills()
# Refresh conditional tools (e.g. web_search depends on API keys)
self._refresh_conditional_tools(agent)
refreshed_count += 1
if refreshed_count > 0:
logger.info(f"[AgentBridge] Refreshed skills & tools in {refreshed_count} agent instance(s)")
return refreshed_count
@staticmethod
def _refresh_conditional_tools(agent):
"""
Add or remove conditional tools based on current environment variables.
For example, web_search should only be present when BOCHA_API_KEY or
LINKAI_API_KEY is set.
"""
try:
from agent.tools.web_search.web_search import WebSearch
has_tool = any(t.name == "web_search" for t in agent.tools)
available = WebSearch.is_available()
if available and not has_tool:
# API key was added - inject the tool
tool = WebSearch()
tool.model = agent.model
agent.tools.append(tool)
logger.info("[AgentBridge] web_search tool added (API key now available)")
elif not available and has_tool:
# API key was removed - remove the tool
agent.tools = [t for t in agent.tools if t.name != "web_search"]
logger.info("[AgentBridge] web_search tool removed (API key no longer available)")
except Exception as e:
logger.debug(f"[AgentBridge] Failed to refresh conditional tools: {e}") | {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "bridge/agent_bridge.py",
"license": "MIT License",
"lines": 567,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zhayujie/chatgpt-on-wechat:skills/skill-creator/scripts/init_skill.py | #!/usr/bin/env python3
"""
Skill Initializer - Creates a new skill from template
Usage:
init_skill.py <skill-name> --path <path>
Examples:
init_skill.py my-new-skill --path skills/public
init_skill.py my-api-helper --path skills/private
init_skill.py custom-skill --path /custom/location
"""
import sys
from pathlib import Path
SKILL_TEMPLATE = """---
name: {skill_name}
description: [TODO: Complete and informative explanation of what the skill does and when to use it. Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.]
---
# {skill_title}
## Overview
[TODO: 1-2 sentences explaining what this skill enables]
## Structuring This Skill
[TODO: Choose the structure that best fits this skill's purpose. Common patterns:
**1. Workflow-Based** (best for sequential processes)
- Works well when there are clear step-by-step procedures
- Example: DOCX skill with "Workflow Decision Tree" → "Reading" → "Creating" → "Editing"
- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2...
**2. Task-Based** (best for tool collections)
- Works well when the skill offers different operations/capabilities
- Example: PDF skill with "Quick Start" → "Merge PDFs" → "Split PDFs" → "Extract Text"
- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2...
**3. Reference/Guidelines** (best for standards or specifications)
- Works well for brand guidelines, coding standards, or requirements
- Example: Brand styling with "Brand Guidelines" → "Colors" → "Typography" → "Features"
- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage...
**4. Capabilities-Based** (best for integrated systems)
- Works well when the skill provides multiple interrelated features
- Example: Product Management with "Core Capabilities" → numbered capability list
- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature...
Patterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations).
Delete this entire "Structuring This Skill" section when done - it's just guidance.]
## [TODO: Replace with the first main section based on chosen structure]
[TODO: Add content here. See examples in existing skills:
- Code samples for technical skills
- Decision trees for complex workflows
- Concrete examples with realistic user requests
- References to scripts/templates/references as needed]
## Resources
This skill includes example resource directories that demonstrate how to organize different types of bundled resources:
### scripts/
Executable code (Python/Bash/etc.) that can be run directly to perform specific operations.
**Examples from other skills:**
- PDF skill: `fill_fillable_fields.py`, `extract_form_field_info.py` - utilities for PDF manipulation
- DOCX skill: `document.py`, `utilities.py` - Python modules for document processing
**Appropriate for:** Python scripts, shell scripts, or any executable code that performs automation, data processing, or specific operations.
**Note:** Scripts may be executed without loading into context, but can still be read by Claude for patching or environment adjustments.
### references/
Documentation and reference material intended to be loaded into context to inform Claude's process and thinking.
**Examples from other skills:**
- Product management: `communication.md`, `context_building.md` - detailed workflow guides
- BigQuery: API reference documentation and query examples
- Finance: Schema documentation, company policies
**Appropriate for:** In-depth documentation, API references, database schemas, comprehensive guides, or any detailed information that Claude should reference while working.
### assets/
Files not intended to be loaded into context, but rather used within the output Claude produces.
**Examples from other skills:**
- Brand styling: PowerPoint template files (.pptx), logo files
- Frontend builder: HTML/React boilerplate project directories
- Typography: Font files (.ttf, .woff2)
**Appropriate for:** Templates, boilerplate code, document templates, images, icons, fonts, or any files meant to be copied or used in the final output.
---
**Any unneeded directories can be deleted.** Not every skill requires all three types of resources.
"""
EXAMPLE_SCRIPT = '''#!/usr/bin/env python3
"""
Example helper script for {skill_name}
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for {skill_name}")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
'''
EXAMPLE_REFERENCE = """# Reference Documentation for {skill_title}
This is a placeholder for detailed reference documentation.
Replace with actual reference content or delete if not needed.
Example real reference docs from other skills:
- product-management/references/communication.md - Comprehensive guide for status updates
- product-management/references/context_building.md - Deep-dive on gathering context
- bigquery/references/ - API references and query examples
## When Reference Docs Are Useful
Reference docs are ideal for:
- Comprehensive API documentation
- Detailed workflow guides
- Complex multi-step processes
- Information too lengthy for main SKILL.md
- Content that's only needed for specific use cases
## Structure Suggestions
### API Reference Example
- Overview
- Authentication
- Endpoints with examples
- Error codes
- Rate limits
### Workflow Guide Example
- Prerequisites
- Step-by-step instructions
- Common patterns
- Troubleshooting
- Best practices
"""
EXAMPLE_ASSET = """# Example Asset File
This placeholder represents where asset files would be stored.
Replace with actual asset files (templates, images, fonts, etc.) or delete if not needed.
Asset files are NOT intended to be loaded into context, but rather used within
the output Claude produces.
Example asset files from other skills:
- Brand guidelines: logo.png, slides_template.pptx
- Frontend builder: hello-world/ directory with HTML/React boilerplate
- Typography: custom-font.ttf, font-family.woff2
- Data: sample_data.csv, test_dataset.json
## Common Asset Types
- Templates: .pptx, .docx, boilerplate directories
- Images: .png, .jpg, .svg, .gif
- Fonts: .ttf, .otf, .woff, .woff2
- Boilerplate code: Project directories, starter files
- Icons: .ico, .svg
- Data files: .csv, .json, .xml, .yaml
Note: This is a text placeholder. Actual assets can be any file type.
"""
def title_case_skill_name(skill_name):
"""Convert hyphenated skill name to Title Case for display."""
return ' '.join(word.capitalize() for word in skill_name.split('-'))
def init_skill(skill_name, path):
"""
Initialize a new skill directory with template SKILL.md.
Args:
skill_name: Name of the skill
path: Path where the skill directory should be created
Returns:
Path to created skill directory, or None if error
"""
# Determine skill directory path
skill_dir = Path(path).resolve() / skill_name
# Check if directory already exists
if skill_dir.exists():
print(f"❌ Error: Skill directory already exists: {skill_dir}")
return None
# Create skill directory
try:
skill_dir.mkdir(parents=True, exist_ok=False)
print(f"✅ Created skill directory: {skill_dir}")
except Exception as e:
print(f"❌ Error creating directory: {e}")
return None
# Create SKILL.md from template
skill_title = title_case_skill_name(skill_name)
skill_content = SKILL_TEMPLATE.format(
skill_name=skill_name,
skill_title=skill_title
)
skill_md_path = skill_dir / 'SKILL.md'
try:
skill_md_path.write_text(skill_content)
print("✅ Created SKILL.md")
except Exception as e:
print(f"❌ Error creating SKILL.md: {e}")
return None
# Create resource directories with example files
try:
# Create scripts/ directory with example script
scripts_dir = skill_dir / 'scripts'
scripts_dir.mkdir(exist_ok=True)
example_script = scripts_dir / 'example.py'
example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name))
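        # 0o755 (rwxr-xr-x) marks the example script as directly executable.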
example_script.chmod(0o755)
print("✅ Created scripts/example.py")
# Create references/ directory with example reference doc
references_dir = skill_dir / 'references'
references_dir.mkdir(exist_ok=True)
example_reference = references_dir / 'api_reference.md'
example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title))
print("✅ Created references/api_reference.md")
# Create assets/ directory with example asset placeholder
assets_dir = skill_dir / 'assets'
assets_dir.mkdir(exist_ok=True)
example_asset = assets_dir / 'example_asset.txt'
example_asset.write_text(EXAMPLE_ASSET)
print("✅ Created assets/example_asset.txt")
except Exception as e:
print(f"❌ Error creating resource directories: {e}")
return None
# Print next steps
print(f"\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}")
print("\nNext steps:")
print("1. Edit SKILL.md to complete the TODO items and update the description")
print("2. Customize or delete the example files in scripts/, references/, and assets/")
print("3. Run the validator when ready to check the skill structure")
return skill_dir
def main():
if len(sys.argv) < 4 or sys.argv[2] != '--path':
print("Usage: init_skill.py <skill-name> --path <path>")
print("\nSkill name requirements:")
print(" - Hyphen-case identifier (e.g., 'data-analyzer')")
print(" - Lowercase letters, digits, and hyphens only")
print(" - Max 40 characters")
print(" - Must match directory name exactly")
print("\nExamples:")
print(" init_skill.py my-new-skill --path workspace/skills")
print(" init_skill.py my-api-helper --path /path/to/skills")
print(" init_skill.py custom-skill --path /custom/location")
sys.exit(1)
skill_name = sys.argv[1]
path = sys.argv[3]
print(f"🚀 Initializing skill: {skill_name}")
print(f" Location: {path}")
print()
result = init_skill(skill_name, path)
if result:
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main()
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "skills/skill-creator/scripts/init_skill.py",
"license": "MIT License",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zhayujie/chatgpt-on-wechat:skills/skill-creator/scripts/package_skill.py | #!/usr/bin/env python3
"""
Skill Packager - Creates a distributable .skill file of a skill folder
Usage:
    python scripts/package_skill.py <path/to/skill-folder> [output-directory]
Example:
    python scripts/package_skill.py skills/public/my-skill
    python scripts/package_skill.py skills/public/my-skill ./dist
"""
import sys
import os
import zipfile
from pathlib import Path
# Add script directory to path for imports
script_dir = Path(__file__).parent
sys.path.insert(0, str(script_dir))
from quick_validate import validate_skill
def package_skill(skill_path, output_dir=None):
"""
Package a skill folder into a .skill file.
Args:
skill_path: Path to the skill folder
output_dir: Optional output directory for the .skill file (defaults to current directory)
Returns:
Path to the created .skill file, or None if error
"""
skill_path = Path(skill_path).resolve()
# Validate skill folder exists
if not skill_path.exists():
print(f"❌ Error: Skill folder not found: {skill_path}")
return None
if not skill_path.is_dir():
print(f"❌ Error: Path is not a directory: {skill_path}")
return None
# Validate SKILL.md exists
skill_md = skill_path / "SKILL.md"
if not skill_md.exists():
print(f"❌ Error: SKILL.md not found in {skill_path}")
return None
# Run validation before packaging
print("🔍 Validating skill...")
valid, message = validate_skill(skill_path)
if not valid:
print(f"❌ Validation failed: {message}")
print(" Please fix the validation errors before packaging.")
return None
print(f"✅ {message}\n")
# Determine output location
skill_name = skill_path.name
if output_dir:
output_path = Path(output_dir).resolve()
output_path.mkdir(parents=True, exist_ok=True)
else:
output_path = Path.cwd()
skill_filename = output_path / f"{skill_name}.skill"
# Create the .skill file (zip format)
try:
with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
# Walk through the skill directory
for file_path in skill_path.rglob('*'):
if file_path.is_file():
# Calculate the relative path within the zip
arcname = file_path.relative_to(skill_path.parent)
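                    # e.g. for skill_path "skills/public/my-skill", SKILL.md is
                    # archived as "my-skill/SKILL.md", so the .skill file
                    # unpacks into a single top-level skill folder.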
zipf.write(file_path, arcname)
print(f" Added: {arcname}")
print(f"\n✅ Successfully packaged skill to: {skill_filename}")
return skill_filename
except Exception as e:
print(f"❌ Error creating .skill file: {e}")
return None
def main():
if len(sys.argv) < 2:
print("Usage: python utils/package_skill.py <path/to/skill-folder> [output-directory]")
print("\nExample:")
print(" python utils/package_skill.py skills/public/my-skill")
print(" python utils/package_skill.py skills/public/my-skill ./dist")
sys.exit(1)
skill_path = sys.argv[1]
output_dir = sys.argv[2] if len(sys.argv) > 2 else None
print(f"📦 Packaging skill: {skill_path}")
if output_dir:
print(f" Output directory: {output_dir}")
print()
result = package_skill(skill_path, output_dir)
if result:
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main()
| {
"repo_id": "zhayujie/chatgpt-on-wechat",
"file_path": "skills/skill-creator/scripts/package_skill.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:zerver/lib/workplace_users.py | from django.utils.translation import gettext as _
from zerver.lib.exceptions import JsonableError
from zerver.lib.types import UserGroupMembersData
from zerver.lib.user_groups import (
check_group_membership_management_permissions_with_admins_only,
get_recursive_subgroups_for_groups,
get_role_based_system_groups_dict,
get_user_group_by_id_in_realm,
)
from zerver.models import NamedUserGroup, Realm
def validate_workplace_users_group(
workplace_users_group: int | UserGroupMembersData, realm: Realm
) -> None:
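    # `workplace_users_group` is either the ID of an existing NamedUserGroup
    # or a UserGroupMembersData describing an anonymous group's direct
    # members and subgroups.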
system_groups_name_dict = get_role_based_system_groups_dict(realm)
if isinstance(workplace_users_group, int):
group = get_user_group_by_id_in_realm(workplace_users_group, realm, for_read=True)
if group.is_system_group:
# System group memberships can only change when a user's role
# changes, which only admins can do.
return
workplace_users_group_subgroups = get_recursive_subgroups_for_groups(
[workplace_users_group], realm
)
else:
subgroup_ids = workplace_users_group.direct_subgroups
subgroups = NamedUserGroup.objects.filter(id__in=subgroup_ids, realm_for_sharding=realm)
non_system_group_subgroup_ids = [
subgroup.id for subgroup in subgroups if not subgroup.is_system_group
]
if len(non_system_group_subgroup_ids) == 0:
# All direct subgroups are system groups, whose memberships can
# only change when a user's role changes, which only admins can do.
return
workplace_users_group_subgroups = get_recursive_subgroups_for_groups(
non_system_group_subgroup_ids, realm
)
if not check_group_membership_management_permissions_with_admins_only(
list(workplace_users_group_subgroups), realm, system_groups_name_dict
):
raise JsonableError(
_(
"'workplace_users_group' must be a group whose membership can only be managed by organization administrators."
)
)
def realm_eligible_for_non_workplace_pricing(realm: Realm) -> bool:
if realm.plan_type == Realm.PLAN_TYPE_SELF_HOSTED:
# Non-workplace plan pricing is not yet implemented
# for self-hosted plans.
return False
if realm.plan_type == Realm.PLAN_TYPE_STANDARD_FREE:
# Fully sponsored plans are completely free, so it
# would be distracting to offer menu options for
# discounted pricing.
return False
if realm.plan_type == Realm.PLAN_TYPE_LIMITED:
# We want to allow organizations to enable discounted
        # pricing for non-workplace users before they upgrade.
return True
from corporate.models.plans import get_current_plan_by_realm
customer_plan = get_current_plan_by_realm(realm)
assert customer_plan is not None
if customer_plan.fixed_price is not None:
# Discounted pricing for non-workplace users is
# currently incompatible with a fixed-price plan.
return False
return True
def realm_on_discounted_cloud_plan(realm: Realm) -> bool:
if realm.plan_type == Realm.PLAN_TYPE_SELF_HOSTED:
return False
if realm.plan_type in {Realm.PLAN_TYPE_LIMITED, Realm.PLAN_TYPE_STANDARD_FREE}:
# Realm is on free plan or is fully sponsored.
return False
from corporate.models.customers import get_customer_by_realm
    # We can assume that an active plan will be present
    # if the realm is not on a free or fully sponsored plan.
customer = get_customer_by_realm(realm)
assert customer is not None
return customer.monthly_discounted_price > 0 or customer.annual_discounted_price > 0
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/lib/workplace_users.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:corporate/lib/billing_management.py | from argparse import ArgumentParser
from typing import Any
from django.conf import settings
from django.core.management.base import CommandError
from zerver.lib.management import ZulipBaseCommand
from zerver.models.realms import Realm
from zilencer.models import RemoteRealm, RemoteZulipServer
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
BillingSession,
RealmBillingSession,
RemoteRealmBillingSession,
RemoteServerBillingSession,
)
class BillingSessionCommand(ZulipBaseCommand):
def add_billing_entity_args(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--remote-server",
dest="remote_server_uuid",
required=False,
help="The UUID of the registered remote Zulip server to modify.",
)
parser.add_argument(
"--remote-realm",
dest="remote_realm_uuid",
required=False,
help="The UUID of the remote realm to modify.",
)
self.add_realm_args(parser)
def get_billing_session_from_args(self, options: dict[str, Any]) -> BillingSession:
realm: Realm | None = None
remote_realm: RemoteRealm | None = None
remote_server: RemoteZulipServer | None = None
billing_session: BillingSession | None = None
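        # The if/elif chain below gives a realm ID precedence over
        # --remote-realm, which in turn takes precedence over --remote-server.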
if options["realm_id"]:
realm = self.get_realm(options)
if realm is None:
raise CommandError("No realm found.")
billing_session = RealmBillingSession(user=None, realm=realm)
elif options["remote_realm_uuid"]:
remote_realm_uuid = options["remote_realm_uuid"]
try:
remote_realm = RemoteRealm.objects.get(uuid=remote_realm_uuid)
billing_session = RemoteRealmBillingSession(remote_realm=remote_realm)
except RemoteRealm.DoesNotExist:
raise CommandError(
"There is no remote realm with uuid '{}'. Aborting.".format(
options["remote_realm_uuid"]
)
)
elif options["remote_server_uuid"]:
remote_server_uuid = options["remote_server_uuid"]
try:
remote_server = RemoteZulipServer.objects.get(uuid=remote_server_uuid)
billing_session = RemoteServerBillingSession(remote_server=remote_server)
except RemoteZulipServer.DoesNotExist:
raise CommandError(
"There is no remote server with uuid '{}'. Aborting.".format(
options["remote_server_uuid"]
)
)
if realm is None and remote_realm is None and remote_server is None:
raise CommandError(
"No billing entity (Realm, RemoteRealm or RemoteZulipServer) specified."
)
assert billing_session is not None
return billing_session
| {
"repo_id": "zulip/zulip",
"file_path": "corporate/lib/billing_management.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:zerver/actions/devices.py | from zerver.lib.devices import check_device_id
from zerver.models.devices import Device
from zerver.models.users import UserProfile
from zerver.tornado.django_api import send_event_on_commit
def do_register_device(user_profile: UserProfile) -> int:
device = Device.objects.create(user=user_profile)
event = dict(
type="device",
op="add",
device_id=device.id,
)
send_event_on_commit(user_profile.realm, event, [user_profile.id])
return device.id
def do_remove_device(user_profile: UserProfile, device_id: int) -> None:
device = check_device_id(device_id, user_profile.id)
device.delete()
event = dict(
type="device",
op="remove",
device_id=device_id,
)
send_event_on_commit(user_profile.realm, event, [user_profile.id])
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/actions/devices.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/lib/devices.py | import base64
import binascii
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from zerver.lib.exceptions import JsonableError
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models.devices import Device
from zerver.models.users import UserProfile
class DeviceInfoDict(TypedDict):
push_key_id: int | None
push_token_id: str | None
pending_push_token_id: str | None
push_token_last_updated_timestamp: int | None
push_registration_error_code: str | None
def b64encode_token_id_int(token_id_int: int) -> str:
token_id_bytes = token_id_int.to_bytes(8, byteorder="big", signed=True)
return base64.b64encode(token_id_bytes).decode()
def b64decode_token_id_base64(token_id_base64: str) -> int:
try:
token_id_bytes = base64.b64decode(token_id_base64, validate=True)
except binascii.Error:
raise JsonableError(_("{var_name} is not Base64 encoded").format(var_name="`token_id`"))
return int.from_bytes(token_id_bytes, byteorder="big", signed=True)
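    # Round-trip sketch: b64encode_token_id_int(1) == "AAAAAAAAAAE="
    # and b64decode_token_id_base64("AAAAAAAAAAE=") == 1.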
def get_devices(user_profile: UserProfile) -> dict[str, DeviceInfoDict]:
devices = Device.objects.filter(user=user_profile)
devices_dict: dict[str, DeviceInfoDict] = dict()
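    # Resulting shape (illustrative values only):
    #   {"42": {"push_key_id": 7, "push_token_id": "AAAAAAAAAAE=",
    #           "pending_push_token_id": None,
    #           "push_token_last_updated_timestamp": 1700000000,
    #           "push_registration_error_code": None}}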
for device in devices:
push_token_id_base64 = None
pending_push_token_id_base64 = None
push_token_last_updated_timestamp = None
if device.push_token_id is not None:
push_token_id_base64 = b64encode_token_id_int(device.push_token_id)
if device.pending_push_token_id is not None:
pending_push_token_id_base64 = b64encode_token_id_int(device.pending_push_token_id)
if device.push_token_last_updated_timestamp is not None:
push_token_last_updated_timestamp = datetime_to_timestamp(
device.push_token_last_updated_timestamp
)
devices_dict[str(device.id)] = DeviceInfoDict(
push_key_id=device.push_key_id,
push_token_id=push_token_id_base64,
pending_push_token_id=pending_push_token_id_base64,
push_token_last_updated_timestamp=push_token_last_updated_timestamp,
push_registration_error_code=device.push_registration_error_code,
)
return devices_dict
def check_device_id(device_id: int, user_id: int) -> Device:
try:
device = Device.objects.get(id=device_id, user_id=user_id)
except Device.DoesNotExist:
raise JsonableError(_("Invalid `device_id`"))
return device
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/lib/devices.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/tests/test_devices.py | from zerver.lib.test_classes import ZulipTestCase
from zerver.models.devices import Device
class TestDeviceRegistration(ZulipTestCase):
def test_register_device(self) -> None:
user = self.example_user("hamlet")
self.assertEqual(Device.objects.count(), 0)
result = self.api_post(user, "/api/v1/register_client_device")
data = self.assert_json_success(result)
self.assertIn("device_id", data)
device = Device.objects.get(id=data["device_id"])
self.assertEqual(device.user_id, user.id)
def test_remove_device(self) -> None:
user = self.example_user("hamlet")
self.assertEqual(Device.objects.count(), 0)
result = self.api_post(user, "/api/v1/register_client_device")
data = self.assert_json_success(result)
device = Device.objects.get(id=data["device_id"])
result = self.api_post(user, "/api/v1/remove_client_device", {"device_id": device.id})
self.assert_json_success(result)
self.assertEqual(Device.objects.count(), 0)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_devices.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zerver/views/devices.py | from django.http import HttpRequest, HttpResponse
from pydantic import Json
from zerver.actions.devices import do_register_device, do_remove_device
from zerver.lib.response import json_success
from zerver.lib.typed_endpoint import typed_endpoint, typed_endpoint_without_parameters
from zerver.models.users import UserProfile
@typed_endpoint_without_parameters
def register_device(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
device_id = do_register_device(user_profile)
return json_success(request, data={"device_id": device_id})
@typed_endpoint
def remove_device(
request: HttpRequest,
user_profile: UserProfile,
*,
device_id: Json[int],
) -> HttpResponse:
do_remove_device(user_profile, device_id)
return json_success(request)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/views/devices.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/models/devices.py | from django.db import models
from django.db.models import Q
from zerver.lib.exceptions import (
InvalidBouncerPublicKeyError,
InvalidEncryptedPushRegistrationError,
RequestExpiredError,
)
from zerver.models.users import UserProfile
class Device(models.Model):
"""Core zulip server table storing logged-in devices.
Currently, only used by mobile apps for E2EE push notifications.
"""
# The user on this server to whom this Device belongs.
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
# Key to use to encrypt notifications for delivery to this device.
# Consists of a 1-byte prefix identifying the symmetric cryptosystem
# in use, followed by the secret key.
# Prefix Cryptosystem
# 0x31 libsodium's `crypto_secretbox_easy`
push_key = models.BinaryField(null=True)
# ID to reference the `push_key` - unsigned 32-bit integer.
push_key_id = models.PositiveBigIntegerField(null=True)
# ID to reference the token provided by FCM/APNs, registered to bouncer.
push_token_id = models.BigIntegerField(null=True)
# ID to reference the token provided by FCM/APNs, registration in progress to bouncer.
pending_push_token_id = models.BigIntegerField(null=True)
# The last time when `pending_push_token_id` was set to a new value.
push_token_last_updated_timestamp = models.DateTimeField(null=True)
class PushTokenKind(models.TextChoices):
APNS = "apns", "APNs"
FCM = "fcm", "FCM"
push_token_kind = models.CharField(max_length=4, choices=PushTokenKind.choices, null=True)
class PushRegistrationErrorCode(models.TextChoices):
INVALID_BOUNCER_PUBLIC_KEY = InvalidBouncerPublicKeyError.code.name
INVALID_ENCRYPTED_PUSH_REGISTRATION = InvalidEncryptedPushRegistrationError.code.name
REQUEST_EXPIRED = RequestExpiredError.code.name
# The error code returned when registration to bouncer fails.
push_registration_error_code = models.CharField(
max_length=100, choices=PushRegistrationErrorCode.choices, null=True
)
class Meta:
constraints = [
models.CheckConstraint(
condition=Q(push_key_id__lte=2**32 - 1),
name="push_key_id_lte_max_push_key_id",
)
]
indexes = [
models.Index(
# Used in 'get_recipient_info', `do_clear_mobile_push_notifications_for_ids`,
# `prepare_payload_and_send_push_notifications`, `send_push_notifications`,
# and `send_e2ee_test_push_notification_api`.
fields=["user", "push_token_id"],
condition=Q(push_token_id__isnull=False),
name="zerver_device_user_push_token_id_idx",
),
]
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/models/devices.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:corporate/management/commands/initialize_fixed_price_plan.py | import argparse
from typing import Any
from django.conf import settings
from django.core.management.base import CommandError, CommandParser
from typing_extensions import override
from zerver.lib.timestamp import timestamp_to_datetime
if settings.BILLING_ENABLED:
from corporate.lib.billing_management import BillingSessionCommand
from corporate.lib.stripe import BillingError, get_configured_fixed_price_plan_offer
from corporate.models.plans import CustomerPlan
class Command(BillingSessionCommand):
help = """
Initialize a paid fixed-price plan for a billing customer (Realm, RemoteRealm or RemoteZulipServer).
Defaults to `--dry-run=True` so that the billing changes are run in preview mode first.
"""
@override
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument(
"--plan-tier",
dest="plan_tier",
type=int,
required=True,
help="The CustomerPlan tier for the fixed-price plan.",
)
parser.add_argument(
"--billing-anchor",
dest="billing_cycle_anchor",
type=float,
required=False,
help="Adjusted billing cycle anchor timestamp. Must be in the past.",
)
self.add_billing_entity_args(parser)
        parser.add_argument(
            "--dry-run",
            dest="dry_run",
            action=argparse.BooleanOptionalAction,
            default=True,
            help="Check for errors before initializing the paid fixed-price plan. "
            "Defaults to True; pass --no-dry-run to apply the changes.",
        )
@override
def handle(self, *args: Any, **options: Any) -> None:
if not settings.BILLING_ENABLED:
raise CommandError("Billing system not enabled.")
plan_tier = options["plan_tier"]
if plan_tier not in CustomerPlan.PAID_PLAN_TIERS:
raise CommandError("Invalid tier for paid plan.")
billing_cycle_anchor = None
if options["billing_cycle_anchor"]:
anchor_timestamp = options["billing_cycle_anchor"]
billing_cycle_anchor = timestamp_to_datetime(anchor_timestamp)
billing_session = self.get_billing_session_from_args(options)
if options["dry_run"]:
try:
billing_session.check_can_configure_prepaid_fixed_price_plan(plan_tier)
customer = billing_session.get_customer()
assert customer is not None
fixed_price_plan_offer = get_configured_fixed_price_plan_offer(customer, plan_tier)
assert fixed_price_plan_offer is not None
anchor_date_string = (
billing_cycle_anchor.strftime("%B %d, %Y") if billing_cycle_anchor else "today"
)
print(
f"Will initialize {fixed_price_plan_offer} with anchor date of {anchor_date_string}."
)
return
except BillingError as e:
raise CommandError(e.msg)
except AssertionError as e:
raise CommandError(e)
else:
try:
billing_session.initialize_prepaid_fixed_price_plan(plan_tier, billing_cycle_anchor)
print("Done! Check support panel for customer to review active fixed-price plan.")
except BillingError as e:
raise CommandError(e.msg)
except AssertionError as e:
raise CommandError(e)
| {
"repo_id": "zulip/zulip",
"file_path": "corporate/management/commands/initialize_fixed_price_plan.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:corporate/management/commands/link_customer_to_stripe_id.py | from typing import Any
from django.conf import settings
from django.core.management.base import CommandError, CommandParser
from typing_extensions import override
if settings.BILLING_ENABLED:
from corporate.lib.billing_management import BillingSessionCommand
from corporate.lib.stripe import BillingError, stripe_get_customer
from corporate.models.plans import get_current_plan_by_customer
class Command(BillingSessionCommand):
help = """Link a Customer object to a Stripe customer ID."""
@override
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument(
"--stripe-id",
dest="stripe_id",
required=True,
help="The ID of the customer in Stripe.",
)
self.add_billing_entity_args(parser)
@override
def handle(self, *args: Any, **options: Any) -> None:
if not settings.BILLING_ENABLED:
raise CommandError("Billing system not enabled.")
stripe_id = options["stripe_id"]
try:
stripe_get_customer(stripe_id)
except BillingError:
raise CommandError(f"Error checking for Stripe Customer with ID {stripe_id}. Aborting.")
billing_session = self.get_billing_session_from_args(options)
customer = billing_session.get_customer()
if customer is None:
print(f"No Customer object for {billing_session.billing_entity_display_name}.")
no_customer_object_prompt = input(
f"Do you want to create one and link it to Stripe customer with ID {stripe_id}? [Y/n]"
)
print()
            if no_customer_object_prompt.lower() not in ("y", "", "yes"):
return
print("Creating Customer object...")
customer = billing_session.update_or_create_customer()
print(f"Linking {customer} to Stripe customer with ID {stripe_id}...")
billing_session.link_stripe_customer_id(stripe_id)
print("Done!")
return
plan = get_current_plan_by_customer(customer)
if plan is not None and plan.is_a_paid_plan():
raise CommandError(f"{customer} has an active paid plan! Aborting.")
if customer.stripe_customer_id is not None:
existing_id_prompt = input(
f"Do you want to overwrite the current stripe_customer_id for {customer}? [Y/n]"
)
print()
            if existing_id_prompt.lower() not in ("y", "", "yes"):
return
print(f"Linking {customer} to Stripe customer with ID {stripe_id}...")
billing_session.link_stripe_customer_id(stripe_id)
print("Done!")
| {
"repo_id": "zulip/zulip",
"file_path": "corporate/management/commands/link_customer_to_stripe_id.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:zerver/webhooks/notion/tests.py | from zerver.lib.test_classes import WebhookTestCase
class NotionWebhookTest(WebhookTestCase):
def test_verification_request(self) -> None:
expected_topic = "Verification"
expected_message = """
Notion webhook has been successfully configured.
Your verification token is: `secret_tMrlL1qK5vuQAh1b6cZGhFChZTSYJlce98V0pYn7yBl`
Please copy this token and paste it into your Notion webhook configuration to complete the setup.
""".strip()
self.check_webhook("verification", expected_topic, expected_message)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/webhooks/notion/tests.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zulip/zulip:zerver/webhooks/notion/view.py | from collections.abc import Callable
from django.http import HttpRequest
from django.http.response import HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventTypeError
from zerver.lib.response import json_success
from zerver.lib.typed_endpoint import JsonBodyPayload, typed_endpoint
from zerver.lib.validator import WildValue, check_none_or, check_string
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
NOTION_VERIFICATION_TOKEN_MESSAGE = """
{setup_message}
Your verification token is: `{token}`
Please copy this token and paste it into your Notion webhook configuration to complete the setup.
""".strip()
def handle_verification_request(payload: WildValue) -> tuple[str, str]:
verification_token = payload["verification_token"].tame(check_string)
setup_message = get_setup_webhook_message("Notion")
body = NOTION_VERIFICATION_TOKEN_MESSAGE.format(
setup_message=setup_message, token=verification_token
)
return ("Verification", body)
EVENT_TO_FUNCTION_MAPPER: dict[str, Callable[[WildValue], tuple[str, str]]] = {
"verification": handle_verification_request,
}
def is_verification(payload: WildValue) -> bool:
return payload.get("verification_token").tame(check_none_or(check_string)) is not None
ALL_EVENT_TYPES = list(EVENT_TO_FUNCTION_MAPPER.keys())
@webhook_view("Notion", all_event_types=ALL_EVENT_TYPES)
@typed_endpoint
def api_notion_webhook(
request: HttpRequest,
user_profile: UserProfile,
*,
payload: JsonBodyPayload[WildValue],
) -> HttpResponse:
if is_verification(payload):
event_type = "verification"
else:
event_type = payload.get("type").tame(check_string) # nocoverage
handler = EVENT_TO_FUNCTION_MAPPER.get(event_type)
if handler is None:
raise UnsupportedWebhookEventTypeError(event_type)
topic_name, body = handler(payload)
check_send_webhook_message(request, user_profile, topic_name, body, event_type)
return json_success(request)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/webhooks/notion/view.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/actions/push_notifications.py | from django.utils.timezone import now as timezone_now
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import UserProfile
from zerver.models.devices import Device
from zerver.tornado.django_api import send_event_on_commit
def do_register_push_device(
user_profile: UserProfile,
device: Device,
*,
token_kind: str,
push_key_bytes: bytes,
push_key_id: int,
token_id_int: int,
token_id_base64: str,
) -> None:
registered_at = timezone_now()
device.push_key = push_key_bytes
device.push_key_id = push_key_id
device.pending_push_token_id = token_id_int
device.push_token_kind = token_kind
device.push_token_last_updated_timestamp = registered_at
device.push_registration_error_code = None
device.save(
update_fields=[
"push_key",
"push_key_id",
"pending_push_token_id",
"push_token_kind",
"push_token_last_updated_timestamp",
"push_registration_error_code",
]
)
event = dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=token_id_base64,
push_token_last_updated_timestamp=datetime_to_timestamp(registered_at),
push_registration_error_code=None,
)
send_event_on_commit(user_profile.realm, event, [user_profile.id])
def do_rotate_push_key(
user_profile: UserProfile, device: Device, push_key_bytes: bytes, push_key_id: int
) -> None:
if (
device.push_key_id == push_key_id
and device.push_key is not None
and bytes(device.push_key) == push_key_bytes
):
return
device.push_key = push_key_bytes
device.push_key_id = push_key_id
device.save(update_fields=["push_key", "push_key_id"])
event = dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
)
send_event_on_commit(user_profile.realm, event, [user_profile.id])
def do_rotate_token(
user_profile: UserProfile, device: Device, token_id_int: int, token_id_base64: str
) -> None:
token_updated_at = timezone_now()
device.pending_push_token_id = token_id_int
device.push_token_last_updated_timestamp = token_updated_at
device.push_registration_error_code = None
device.save(
update_fields=[
"pending_push_token_id",
"push_token_last_updated_timestamp",
"push_registration_error_code",
]
)
event = dict(
type="device",
op="update",
device_id=device.id,
pending_push_token_id=token_id_base64,
push_token_last_updated_timestamp=datetime_to_timestamp(token_updated_at),
push_registration_error_code=None,
)
send_event_on_commit(user_profile.realm, event, [user_profile.id])
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/actions/push_notifications.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/webhooks/redmine/tests.py | from zerver.lib.test_classes import WebhookTestCase
class RedmineHookTests(WebhookTestCase):
CHANNEL_NAME = "redmine"
URL_TEMPLATE = "/api/v1/external/redmine?&api_key={api_key}&stream={stream}"
WEBHOOK_DIR_NAME = "redmine"
TOPIC_NAME = "Issue #191 Found a bug"
def test_issue_opened(self) -> None:
expected_message = """**test user** opened [#191 Found a bug](https://example.com) for **test user**.
~~~ quote
I'm having a problem with this.
~~~"""
self.check_webhook("issue_opened", self.TOPIC_NAME, expected_message)
def test_issue_opened_without_assignee(self) -> None:
expected_message = """**test user** opened [#191 Found a bug](https://example.com).
~~~ quote
I'm having a problem with this.
~~~"""
self.check_webhook("issue_opened_without_assignee", self.TOPIC_NAME, expected_message)
def test_issue_updated(self) -> None:
expected_message = """**test user** updated [#191 Found a bug](https://example.com).
~~~ quote
I've started working on this issue. The problem seems to be in the authentication module.
~~~"""
self.check_webhook("issue_updated", self.TOPIC_NAME, expected_message)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/webhooks/redmine/tests.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
zulip/zulip:zerver/webhooks/redmine/view.py | from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventTypeError
from zerver.lib.response import json_success
from zerver.lib.typed_endpoint import JsonBodyPayload, typed_endpoint
from zerver.lib.validator import WildValue, check_int, check_string
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
ISSUE_OPENED_MESSAGE_TEMPLATE = (
"**{author_name}** opened {issue_link}{assignee_info}.{issue_description}"
)
ISSUE_UPDATED_MESSAGE_TEMPLATE = "**{author_name}** updated {issue_link}.{journal_notes}"
CONTENT_MESSAGE_TEMPLATE = "\n\n~~~ quote\n{message}\n~~~"
def _extract_issue_label(issue: WildValue) -> str:
id = issue["id"].tame(check_int)
subject = issue["subject"].tame(check_string)
return f"#{id} {subject}"
def get_issue_topic(payload: WildValue) -> str:
issue_label = _extract_issue_label(payload["issue"])
return f"Issue {issue_label}"
def _get_issue_link(payload: WildValue) -> str:
url = payload["url"].tame(check_string)
issue = payload["issue"]
issue_label = _extract_issue_label(issue)
return f"[{issue_label}]({url})"
def _get_user_name(user: WildValue) -> str:
return f"{user['firstname'].tame(check_string)} {user['lastname'].tame(check_string)}"
def _get_assignee_string(issue: WildValue) -> str:
if assignee := issue.get("assignee"):
assignee_name = _get_user_name(assignee)
return f" for **{assignee_name}**"
return ""
def handle_issue_opened(payload: WildValue) -> str:
issue = payload["issue"]
author_name = _get_user_name(issue["author"])
issue_link = _get_issue_link(payload)
assignee_info = _get_assignee_string(issue)
issue_description = ""
if issue.get("description") and (
description := issue["description"].tame(check_string).strip()
):
issue_description = CONTENT_MESSAGE_TEMPLATE.format(message=description)
return ISSUE_OPENED_MESSAGE_TEMPLATE.format(
author_name=author_name,
issue_link=issue_link,
assignee_info=assignee_info,
issue_description=issue_description,
)
def handle_issue_updated(payload: WildValue) -> str:
issue = payload["issue"]
author_name = _get_user_name(issue["author"])
issue_link = _get_issue_link(payload)
journal_notes = ""
if (
(journal := payload.get("journal"))
and (notes := journal.get("notes"))
and (tamed_notes := notes.tame(check_string).strip())
):
journal_notes = CONTENT_MESSAGE_TEMPLATE.format(message=tamed_notes)
return ISSUE_UPDATED_MESSAGE_TEMPLATE.format(
author_name=author_name,
issue_link=issue_link,
journal_notes=journal_notes,
)
REDMINE_EVENT_FUNCTION_MAPPER = {
"opened": handle_issue_opened,
"updated": handle_issue_updated,
}
ALL_EVENT_TYPES = list(REDMINE_EVENT_FUNCTION_MAPPER.keys())
@webhook_view("Redmine", notify_bot_owner_on_invalid_json=True, all_event_types=ALL_EVENT_TYPES)
@typed_endpoint
def api_redmine_webhook(
request: HttpRequest, user_profile: UserProfile, *, payload: JsonBodyPayload[WildValue]
) -> HttpResponse:
redmine_payload = payload["payload"]
event = redmine_payload["action"].tame(check_string)
if event not in REDMINE_EVENT_FUNCTION_MAPPER:
raise UnsupportedWebhookEventTypeError(event)
topic_name = get_issue_topic(redmine_payload)
content_func = REDMINE_EVENT_FUNCTION_MAPPER[event]
content = content_func(redmine_payload)
check_send_webhook_message(request, user_profile, topic_name, content)
return json_success(request)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/webhooks/redmine/view.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/tests/test_exception_filter.py | from __future__ import annotations
import sys
from django.test import RequestFactory, override_settings
from django.views.debug import ExceptionReporter
from zerver.filters import ZulipExceptionReporterFilter
from zerver.lib.test_classes import ZulipTestCase
class TestExceptionFilter(ZulipTestCase):
def test_zulip_filter_masks_sensitive_post_data(self) -> None:
"""
Verifies that specific sensitive POST parameters are masked.
"""
rf = RequestFactory()
request = rf.post(
"/test",
{
"password": "sneaky",
"api_key": "abc123",
"content": "secret msg",
"realm_counts": "private",
"installation_counts": "private",
"normal_field": "safe",
},
)
filt = ZulipExceptionReporterFilter()
cleaned = filt.get_post_parameters(request)
for var in [
"password",
"api_key",
"content",
"realm_counts",
"installation_counts",
]:
self.assertEqual(cleaned.get(var), "**********")
self.assertEqual(cleaned.get("normal_field"), "safe")
def test_exception_reporter_returns_settings_in_dev(self) -> None:
"""
In non-production, settings should be present and non-empty.
"""
rf = RequestFactory()
request = rf.get("/")
try:
raise ValueError("test error")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(
request,
exc_type,
exc_value,
tb,
is_email=True,
)
reporter.filter = ZulipExceptionReporterFilter()
data = reporter.get_traceback_data()
self.assertIn("settings", data)
self.assertIsInstance(data["settings"], dict)
self.assertNotEqual(data["settings"], {})
@override_settings(
PRODUCTION=True,
DEPLOY_ROOT="/home/zulip/deployments/2024-01-01-00-00-00",
)
def test_exception_reporter_omits_settings_in_production(self) -> None:
"""
In production, settings must be omitted (empty dict).
"""
rf = RequestFactory()
request = rf.get("/")
try:
raise RuntimeError("production error")
except RuntimeError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(
request,
exc_type,
exc_value,
tb,
is_email=True,
)
reporter.filter = ZulipExceptionReporterFilter()
data = reporter.get_traceback_data()
self.assertEqual(data.get("settings"), {})
html = reporter.get_traceback_html()
self.assertNotIn("LANGUAGE_CODE", html)
self.assertNotIn("SECRET_KEY", html)
self.assertNotIn("DATABASES", html)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_exception_filter.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zerver/data_import/microsoft_teams.py | import logging
import os
from collections import defaultdict
from collections.abc import Iterable
from dataclasses import dataclass
from datetime import datetime
from email.headerregistry import Address
from typing import Any, Literal, TypeAlias
from urllib.parse import SplitResult
import requests
from django.conf import settings
from django.utils.timezone import now as timezone_now
from zerver.data_import.import_util import (
ZerverFieldsT,
build_message,
build_realm,
build_recipient,
build_stream,
build_subscription,
build_user_profile,
build_usermessages,
build_zerver_realm,
convert_html_to_text,
create_converted_data_files,
get_data_file,
make_subscriber_map,
validate_user_emails_for_import,
)
from zerver.data_import.sequencer import NEXT_ID
from zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE, do_common_export_processes
from zerver.models.recipients import Recipient
from zerver.models.users import UserProfile
@dataclass
class TeamMetadata:
"""
"team" is equivalent to a Zulip channel
"""
description: str
display_name: str
visibility: Literal["public", "private"]
is_archived: bool
zulip_channel_id: int
zulip_recipient_id: int
@dataclass
class ChannelMetadata:
"""
"channel" is equivalent to Zulip topics.
"""
display_name: str
is_favourite_by_default: bool
is_archived: bool
is_favorite_by_default: bool
membership_type: str
team_id: str
AddedTeamsT: TypeAlias = dict[str, TeamMetadata]
TeamIdToZulipRecipientIdT: TypeAlias = dict[str, int]
MicrosoftTeamsUserIdToZulipUserIdT: TypeAlias = dict[str, int]
MicrosoftTeamsFieldsT: TypeAlias = dict[str, Any]
MICROSOFT_TEAMS_DEFAULT_ANNOUNCEMENTS_CHANNEL_NAME = "All company"
def convert_teams_to_channels(
microsoft_teams_user_id_to_zulip_user_id: MicrosoftTeamsUserIdToZulipUserIdT,
realm: dict[str, Any],
realm_id: int,
teams_data_dir: str,
) -> AddedTeamsT:
team_data_folders = []
for f in os.listdir(teams_data_dir):
path = os.path.join(teams_data_dir, f)
if os.path.isdir(path):
team_data_folders.append(f)
logging.info("######### IMPORTING TEAMS STARTED #########\n")
# Build teams subscription map
team_id_to_zulip_subscriber_ids: dict[str, set[int]] = defaultdict(set)
for team_id in team_data_folders:
team_members_file_name = f"teamMembers_{team_id}.json"
team_members_file_path = os.path.join(teams_data_dir, team_id, team_members_file_name)
team_members: list[MicrosoftTeamsFieldsT] = get_data_file(team_members_file_path)
for member in team_members:
zulip_user_id = microsoft_teams_user_id_to_zulip_user_id[member["UserId"]]
team_id_to_zulip_subscriber_ids[team_id].add(zulip_user_id)
# Compile teamsSettings.json and teamsList.json and convert
# teams to Zulip channels.
teams_list = get_data_file(os.path.join(teams_data_dir, "teamsList.json"))
teams_settings = get_data_file(os.path.join(teams_data_dir, "teamsSettings.json"))
team_dict: dict[str, Any] = {team["GroupsId"]: team for team in teams_list}
teams_metadata: AddedTeamsT = {}
for team_settings in teams_settings:
team_id = team_settings.get("Id")
assert team_id and team_id in team_dict, (
f"Team {team_id} appears in teamsSettings.json but not teamsList.json!"
)
compiled_team_data: MicrosoftTeamsFieldsT = {**team_dict[team_id], **team_settings}
channel_id = NEXT_ID("channel")
recipient_id = NEXT_ID("recipient")
channel = build_stream(
# Microsoft Teams export doesn't include teams creation date.
date_created=float(timezone_now().timestamp()),
realm_id=realm_id,
name=compiled_team_data["Name"],
description=compiled_team_data["Description"] or "",
stream_id=channel_id,
deactivated=compiled_team_data["IsArchived"],
invite_only=compiled_team_data["Visibility"] == "private",
)
realm["zerver_stream"].append(channel)
recipient = build_recipient(channel_id, recipient_id, Recipient.STREAM)
realm["zerver_recipient"].append(recipient)
for zulip_user_id in team_id_to_zulip_subscriber_ids[team_id]:
sub = build_subscription(
recipient_id=recipient_id,
user_id=zulip_user_id,
subscription_id=NEXT_ID("subscription"),
)
realm["zerver_subscription"].append(sub)
# If the org uses the "All company" team, set it as the announcements channel.
if (
compiled_team_data["Name"] == MICROSOFT_TEAMS_DEFAULT_ANNOUNCEMENTS_CHANNEL_NAME
): # nocoverage
realm["zerver_realm"][0]["new_stream_announcements_stream"] = channel_id
realm["zerver_realm"][0]["zulip_update_announcements_stream"] = channel_id
realm["zerver_realm"][0]["signup_announcements_stream"] = channel_id
logging.info("Using the channel 'All company' as default announcements channel.")
teams_metadata[team_id] = TeamMetadata(
description=compiled_team_data["Description"],
display_name=compiled_team_data["DisplayName"],
visibility=compiled_team_data["Visibility"],
is_archived=compiled_team_data["IsArchived"],
zulip_channel_id=channel_id,
zulip_recipient_id=recipient_id,
)
return teams_metadata
@dataclass
class MicrosoftTeamsUserRoleData:
global_administrator_user_ids: set[str]
guest_user_ids: set[str]
@dataclass
class ODataQueryParameter:
# This is used to compute a request's OData query. It specifies the
# amount, type, and order of the data returned for the resource
# identified by the URL.
# https://learn.microsoft.com/en-us/graph/query-parameters?tabs=http
parameter: Literal["$filter", "$search", "$select", "$top"]
expression: str
MICROSOFT_GRAPH_API_URL = "https://graph.microsoft.com/v1.0{endpoint}"
def get_microsoft_graph_api_data(
api_url: str,
odata_parameters: list[ODataQueryParameter] | None = None,
**kwargs: Any,
) -> Any:
if not kwargs.get("token"):
raise AssertionError("Microsoft authorization token missing in kwargs")
token = kwargs.pop("token")
accumulated_result = []
parameters = {}
if odata_parameters is not None:
for parameter in odata_parameters:
assert parameter.parameter not in parameters
parameters[parameter.parameter] = parameter.expression
# If a request is paginated, "@odata.nextLink" will be included in the response,
# it points to the next page of result. Even if the `$top` query is not specified,
# depending on the endpoint and the result size, it may be paged by the server.
# https://learn.microsoft.com/en-us/graph/paging?tabs=http#server-side-paging
next_link: str | None = api_url
while next_link is not None:
response = requests.get(
next_link, headers={"Authorization": f"Bearer {token}"}, params=parameters
)
if response.status_code != requests.codes.ok:
logging.info("HTTP error: %s, Response: %s", response.status_code, response.text)
raise Exception("HTTP error accessing the Microsoft Graph API.")
response_data = response.json()
accumulated_result.extend(response_data["value"])
next_link = response_data.get("@odata.nextLink")
# Don't apply initial request's parameters to subsequent paginated requests.
if next_link is not None:
parameters = {}
return accumulated_result
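# Usage sketch (hypothetical token; the endpoint helpers below wrap this):
#   get_microsoft_graph_api_data(
#       MICROSOFT_GRAPH_API_URL.format(endpoint="/users"), token="...")
# returns the concatenated "value" arrays from every page of results.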
def get_directory_roles(api_token: str) -> list[MicrosoftTeamsFieldsT]:
"""
https://learn.microsoft.com/en-us/graph/api/directoryrole-list?view=graph-rest-1.0
"""
return get_microsoft_graph_api_data(
MICROSOFT_GRAPH_API_URL.format(endpoint="/directoryRoles"), token=api_token
)
def get_users_with_directory_role_id(
directory_role_id: str, api_token: str
) -> list[MicrosoftTeamsFieldsT]:
"""
https://learn.microsoft.com/en-us/graph/api/directoryrole-list-members?view=graph-rest-1.0
"""
return get_microsoft_graph_api_data(
MICROSOFT_GRAPH_API_URL.format(endpoint=f"/directoryRoles/{directory_role_id}/members"),
token=api_token,
)
def get_user_ids_with_member_type(
member_type: Literal["Member", "Guest"], api_token: str
) -> list[MicrosoftTeamsFieldsT]:
"""
https://learn.microsoft.com/en-us/graph/api/user-list?view=graph-rest-1.0
"""
odata_parameter = [
ODataQueryParameter(parameter="$filter", expression=f"userType eq '{member_type}'"),
ODataQueryParameter(parameter="$select", expression="id"),
]
return get_microsoft_graph_api_data(
MICROSOFT_GRAPH_API_URL.format(endpoint="/users"),
odata_parameter,
token=api_token,
)
def get_user_roles(api_token: str) -> MicrosoftTeamsUserRoleData:
"""
User roles are not included in the export file, so this calls
to Microsoft Graph API endpoints for that data. We mainly
want to find out who the admins and guests are.
"""
directory_roles: list[MicrosoftTeamsFieldsT] = get_directory_roles(api_token)
global_administrator_role_id = None
for role in directory_roles:
if role["displayName"] == "Global Administrator":
global_administrator_role_id = role["id"]
if global_administrator_role_id is None:
raise AssertionError("Could not find Microsoft Teams organization owners/administrators.")
admin_users_data = get_users_with_directory_role_id(global_administrator_role_id, api_token)
guest_users_data = get_user_ids_with_member_type("Guest", api_token)
return MicrosoftTeamsUserRoleData(
global_administrator_user_ids={user_data["id"] for user_data in admin_users_data},
guest_user_ids={user_data["id"] for user_data in guest_users_data},
)
def get_user_email(user: MicrosoftTeamsFieldsT) -> str:
if user["Mail"]:
return user["Mail"]
else:
raise AssertionError(f"Could not find email address for Microsoft Teams user {user}")
def create_is_mirror_dummy_user(
microsoft_team_user_id: str,
microsoft_teams_user_id_to_zulip_user_id: MicrosoftTeamsUserIdToZulipUserIdT,
realm: dict[str, Any],
realm_id: int,
domain_name: str,
) -> None:
zulip_user_id = NEXT_ID("user")
user_full_name = f"Deleted Teams user {microsoft_team_user_id}"
email = Address(username=microsoft_team_user_id, domain=domain_name).addr_spec
user_profile_dict = build_user_profile(
avatar_source=UserProfile.DEFAULT_AVATAR_SOURCE,
date_joined=int(timezone_now().timestamp()),
delivery_email=email,
email=email,
full_name=user_full_name,
id=zulip_user_id,
is_active=False,
role=UserProfile.ROLE_MEMBER,
is_mirror_dummy=True,
realm_id=realm_id,
short_name=user_full_name,
timezone="UTC",
)
realm["zerver_userprofile"].append(user_profile_dict)
microsoft_teams_user_id_to_zulip_user_id[microsoft_team_user_id] = zulip_user_id
recipient_id = NEXT_ID("recipient")
subscription_id = NEXT_ID("subscription")
recipient = build_recipient(zulip_user_id, recipient_id, Recipient.PERSONAL)
sub = build_subscription(recipient_id, zulip_user_id, subscription_id)
realm["zerver_recipient"].append(recipient)
realm["zerver_subscription"].append(sub)
def convert_users(
microsoft_teams_user_role_data: MicrosoftTeamsUserRoleData,
realm: dict[str, Any],
realm_id: int,
timestamp: int,
users_list: list[MicrosoftTeamsFieldsT],
) -> MicrosoftTeamsUserIdToZulipUserIdT:
zerver_user_profile: list[ZerverFieldsT] = []
microsoft_teams_user_id_to_zulip_user_id: MicrosoftTeamsUserIdToZulipUserIdT = defaultdict(int)
found_emails: dict[str, int] = {}
has_owner = False
logging.info("######### IMPORTING USERS STARTED #########\n")
for user in users_list:
microsoft_teams_user_id = user["Id"]
user_full_name = user["DisplayName"]
if microsoft_teams_user_id in microsoft_teams_user_role_data.global_administrator_user_ids:
user_role = UserProfile.ROLE_REALM_OWNER
elif microsoft_teams_user_id in microsoft_teams_user_role_data.guest_user_ids:
user_role = UserProfile.ROLE_GUEST
else:
user_role = UserProfile.ROLE_MEMBER
microsoft_teams_user_email = get_user_email(user)
zulip_user_id = NEXT_ID("user")
found_emails[microsoft_teams_user_email.lower()] = zulip_user_id
user_profile_dict = build_user_profile(
avatar_source=UserProfile.DEFAULT_AVATAR_SOURCE,
date_joined=timestamp,
delivery_email=microsoft_teams_user_email,
email=microsoft_teams_user_email,
full_name=user_full_name,
id=zulip_user_id,
# This function only processes user data from `users/usersList.json` which only
# lists active users -- no bot or deleted user accounts.
is_active=True,
role=user_role,
is_mirror_dummy=False,
realm_id=realm_id,
short_name=user_full_name,
timezone="UTC",
)
user_profile_dict["realm"] = realm_id
zerver_user_profile.append(user_profile_dict)
microsoft_teams_user_id_to_zulip_user_id[microsoft_teams_user_id] = zulip_user_id
if user_role == UserProfile.ROLE_REALM_OWNER:
has_owner = True
recipient_id = NEXT_ID("recipient")
subscription_id = NEXT_ID("subscription")
recipient = build_recipient(zulip_user_id, recipient_id, Recipient.PERSONAL)
sub = build_subscription(recipient_id, zulip_user_id, subscription_id)
realm["zerver_recipient"].append(recipient)
realm["zerver_subscription"].append(sub)
logging.info(
"%s: %s -> %s",
microsoft_teams_user_id,
user_full_name,
microsoft_teams_user_email,
)
if not has_owner:
logging.warning("Converted realm has no owners!")
validate_user_emails_for_import(list(found_emails))
realm["zerver_userprofile"] = zerver_user_profile
logging.info("######### IMPORTING USERS FINISHED #########\n")
return microsoft_teams_user_id_to_zulip_user_id
def get_timestamp_from_message(message: MicrosoftTeamsFieldsT) -> float:
return datetime.fromisoformat(message["CreatedDateTime"]).timestamp()
def get_microsoft_teams_sender_id_from_message(message: MicrosoftTeamsFieldsT) -> str:
return message["From"]["User"]["Id"]
def is_microsoft_teams_event_message(message: MicrosoftTeamsFieldsT) -> bool:
return message["MessageType"] == "unknownFutureValue" and message["From"] is None
def process_messages(
added_teams: dict[str, TeamMetadata],
domain_name: str,
channel_metadata: None | dict[str, ChannelMetadata],
is_private: bool,
messages: list[MicrosoftTeamsFieldsT],
microsoft_teams_user_id_to_zulip_user_id: MicrosoftTeamsUserIdToZulipUserIdT,
realm: dict[str, Any],
realm_id: int,
subscriber_map: dict[int, set[int]],
) -> tuple[list[ZerverFieldsT], list[ZerverFieldsT]]:
zerver_usermessage: list[ZerverFieldsT] = []
zerver_messages: list[ZerverFieldsT] = []
for message in messages:
if is_microsoft_teams_event_message(message):
continue
message_content_type = message["Body"]["ContentType"]
if message_content_type == "html":
try:
content = convert_html_to_text(message["Body"]["Content"])
except Exception: # nocoverage
            logging.warning(
                "Error converting HTML to text for message: '%s'; continuing",
                message["Body"]["Content"],
            )
logging.warning(str(message))
continue
else: # nocoverage
logging.warning("Unable to convert this message content type: %s", message_content_type)
continue
# Determine message type, private or channel.
if message["ChannelIdentity"] is not None:
if channel_metadata is None:
raise AssertionError("Failed to build channel data.")
current_channel = channel_metadata[message["ChannelIdentity"]["ChannelId"]]
if current_channel.membership_type == "private":
# Don't include private channel messages.
continue
topic_name = current_channel.display_name
is_direct_message_type = False
recipient_id = added_teams[message["ChannelIdentity"]["TeamId"]].zulip_recipient_id
else: # nocoverage
assert message["ChatId"] is not None
# TODO: Converting direct messages is not yet supported. Since
# subscription list and recipient map of direct message conversations
# are not listed, we have to manually build them as we iterate over
# the user messages.
continue
microsoft_teams_sender_id: str = get_microsoft_teams_sender_id_from_message(message)
if microsoft_teams_sender_id not in microsoft_teams_user_id_to_zulip_user_id:
create_is_mirror_dummy_user(
microsoft_teams_sender_id,
microsoft_teams_user_id_to_zulip_user_id,
realm,
realm_id,
domain_name,
)
message_id = NEXT_ID("message")
zulip_message = build_message(
topic_name=topic_name,
date_sent=get_timestamp_from_message(message),
message_id=message_id,
content=content,
rendered_content=None,
user_id=microsoft_teams_user_id_to_zulip_user_id[microsoft_teams_sender_id],
recipient_id=recipient_id,
realm_id=realm_id,
is_channel_message=not is_direct_message_type,
# TODO: Process links and attachments
has_image=False,
has_link=False,
has_attachment=False,
is_direct_message_type=is_direct_message_type,
)
zerver_messages.append(zulip_message)
(num_created, num_skipped) = build_usermessages(
zerver_usermessage=zerver_usermessage,
subscriber_map=subscriber_map,
recipient_id=recipient_id,
mentioned_user_ids=[],
message_id=message_id,
is_private=is_direct_message_type,
)
logging.debug(
"Created %s UserMessages; deferred %s due to long-term idle",
num_created,
num_skipped,
)
return (
zerver_messages,
zerver_usermessage,
)
def get_batched_export_message_data(
message_data_paths: list[str], chunk_size: int = MESSAGE_BATCH_CHUNK_SIZE
) -> Iterable[list[MicrosoftTeamsFieldsT]]:
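    """
    Stream messages from the given export files in chunks of at most
    chunk_size messages, sorting each file's messages chronologically by ID
    first; a single chunk may span multiple files.
    """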
batched_messages: list[MicrosoftTeamsFieldsT] = []
for path in message_data_paths:
messages = get_data_file(path)
# Teams export tool doesn't sort messages in chronological order.
# Sort Microsoft Teams messages by their ID, which is their date
# sent in unix time.
for message in sorted(messages, key=lambda m: int(m["Id"])):
if len(batched_messages) == chunk_size:
yield batched_messages
                # Reassign rather than clear() so that a previously yielded
                # chunk is not mutated while the consumer may still hold it.
                batched_messages = []
batched_messages.append(message)
if batched_messages:
yield batched_messages
def convert_messages(
added_teams: dict[str, TeamMetadata],
domain_name: str,
microsoft_teams_user_id_to_zulip_user_id: MicrosoftTeamsUserIdToZulipUserIdT,
output_dir: str,
realm_id: int,
realm: dict[str, Any],
teams_data_dir: str,
chunk_size: int = MESSAGE_BATCH_CHUNK_SIZE,
) -> None:
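    """
    Walk each team's data folder to collect channel metadata and message
    files, then convert the messages in chunks, writing each chunk to a
    messages-NNNNNN.json file in the output directory.
    """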
microsoft_teams_channel_metadata: dict[str, ChannelMetadata] = {}
subscriber_map = make_subscriber_map(
zerver_subscription=realm["zerver_subscription"],
)
team_data_folders = []
for f in os.listdir(teams_data_dir):
path = os.path.join(teams_data_dir, f)
if os.path.isdir(path):
team_data_folders.append(f)
message_file_paths = []
for team_id in team_data_folders:
team_data_folder = os.path.join(teams_data_dir, team_id)
team_messages_file_path = os.path.join(team_data_folder, f"messages_{team_id}.json")
message_file_paths.append(team_messages_file_path)
team_channels_list = get_data_file(
os.path.join(team_data_folder, f"channels_{team_id}.json")
)
for team_channel in team_channels_list:
microsoft_teams_channel_metadata[team_channel["Id"]] = ChannelMetadata(
display_name=team_channel["DisplayName"],
                is_archived=team_channel["IsArchived"],
                is_favorite_by_default=team_channel["IsFavoriteByDefault"],
membership_type=team_channel["MembershipType"],
team_id=team_id,
)
dump_file_id = 1
for message_chunk in get_batched_export_message_data(message_file_paths, chunk_size):
(zerver_messages, zerver_usermessage) = process_messages(
added_teams=added_teams,
channel_metadata=microsoft_teams_channel_metadata,
domain_name=domain_name,
is_private=False,
messages=message_chunk,
microsoft_teams_user_id_to_zulip_user_id=microsoft_teams_user_id_to_zulip_user_id,
subscriber_map=subscriber_map,
realm=realm,
realm_id=realm_id,
)
create_converted_data_files(
dict(zerver_message=zerver_messages, zerver_usermessage=zerver_usermessage),
output_dir,
f"/messages-{dump_file_id:06}.json",
)
dump_file_id += 1
def do_convert_directory(
microsoft_teams_dir: str,
output_dir: str,
microsoft_graph_api_token: str,
threads: int = 6,
) -> None:
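    """
    Top-level entry point: convert an extracted Microsoft Teams export
    directory into the Zulip data import format, writing realm.json, the
    batched message files, and the (currently empty) asset records.
    """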
os.makedirs(output_dir, exist_ok=True)
if os.listdir(output_dir): # nocoverage
raise Exception("Output directory should be empty!")
users_data_dir = os.path.join(microsoft_teams_dir, "users")
if not os.path.isdir(users_data_dir): # nocoverage
raise ValueError("Import does not have the layout we expect from a Microsoft Teams export!")
realm_id = 0
domain_name = SplitResult("", settings.EXTERNAL_HOST, "", "", "").hostname
assert isinstance(domain_name, str)
NOW = float(timezone_now().timestamp())
zerver_realm: list[ZerverFieldsT] = build_zerver_realm(realm_id, "", NOW, "Microsoft Teams")
realm = build_realm(zerver_realm, realm_id, domain_name, import_source="microsoft_teams")
realm["zerver_stream"] = []
realm["zerver_defaultstream"] = []
realm["zerver_recipient"] = []
realm["zerver_subscription"] = []
microsoft_teams_user_id_to_zulip_user_id = convert_users(
microsoft_teams_user_role_data=get_user_roles(microsoft_graph_api_token),
realm=realm,
realm_id=realm_id,
timestamp=int(NOW),
users_list=get_data_file(os.path.join(users_data_dir, "usersList.json")),
)
teams_data_dir = os.path.join(microsoft_teams_dir, "teams")
added_teams = convert_teams_to_channels(
microsoft_teams_user_id_to_zulip_user_id=microsoft_teams_user_id_to_zulip_user_id,
realm=realm,
realm_id=realm_id,
teams_data_dir=teams_data_dir,
)
convert_messages(
added_teams=added_teams,
domain_name=domain_name,
microsoft_teams_user_id_to_zulip_user_id=microsoft_teams_user_id_to_zulip_user_id,
output_dir=output_dir,
realm_id=realm_id,
realm=realm,
teams_data_dir=teams_data_dir,
)
create_converted_data_files(realm, output_dir, "/realm.json")
    # TODO: Convert emoji, avatar, upload, and attachment data; for now,
    # write empty records files.
create_converted_data_files([], output_dir, "/emoji/records.json")
create_converted_data_files([], output_dir, "/avatars/records.json")
create_converted_data_files([], output_dir, "/uploads/records.json")
attachment: dict[str, list[Any]] = {"zerver_attachment": []}
create_converted_data_files(attachment, output_dir, "/attachment.json")
create_converted_data_files([], output_dir, "/realm_icons/records.json")
do_common_export_processes(output_dir)
logging.info("######### DATA CONVERSION FINISHED #########\n")
logging.info("Zulip data dump created at %s", output_dir)

zulip/zulip:zerver/management/commands/convert_microsoft_teams_data.py

import argparse
import os
import tempfile
from typing import Any
from django.conf import settings
from django.core.management.base import CommandError, CommandParser
from typing_extensions import override
from zerver.data_import.microsoft_teams import do_convert_directory
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Convert the Microsoft Teams data into Zulip data format."""
@override
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument(
"microsoft_teams_data_path",
nargs="+",
metavar="<Microsoft Teams data path>",
help="Zipped Microsoft Teams data or directory",
)
parser.add_argument(
"--output", dest="output_dir", help="Directory to write exported data to."
)
parser.add_argument(
"--token",
metavar="<microsoft_graph_api_token>",
help="Microsoft Graph API token, see https://learn.microsoft.com/en-us/graph/auth-v2-service?tabs=http",
)
parser.add_argument(
"--threads",
default=settings.DEFAULT_DATA_EXPORT_IMPORT_PARALLELISM,
help="Threads to use in exporting UserMessage objects in parallel",
)
parser.formatter_class = argparse.RawTextHelpFormatter
@override
def handle(self, *args: Any, **options: Any) -> None:
output_dir = options["output_dir"]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="converted-ms-teams-data-")
else:
output_dir = os.path.realpath(output_dir)
token = options["token"]
if token is None:
raise CommandError("Enter Microsoft Graph API token!")
num_threads = int(options["threads"])
if num_threads < 1:
raise CommandError("You must have at least one thread.")
for path in options["microsoft_teams_data_path"]:
if not os.path.exists(path):
raise CommandError(f"Microsoft Teams data file or directory not found: '{path}'")
print("Converting data ...")
if os.path.isdir(path):
print(path)
do_convert_directory(
path,
output_dir,
token,
threads=num_threads,
)
elif os.path.isfile(path) and path.endswith(".zip"):
raise ValueError(
"Importing .zip Microsoft Teams data is not yet supported, please try again with the extracted data."
)
else:
raise ValueError(f"Don't know how to import Microsoft Teams data from {path}")

zulip/zulip:zerver/tests/test_microsoft_teams_importer.py

import json
import math
import os
from collections import defaultdict
from collections.abc import Callable
from functools import wraps
from typing import Any, Concatenate, TypeAlias
from urllib.parse import parse_qs, urlsplit
import responses
from django.utils.timezone import now as timezone_now
from requests import PreparedRequest
from typing_extensions import ParamSpec
from zerver.data_import.import_util import get_data_file
from zerver.data_import.microsoft_teams import (
MICROSOFT_GRAPH_API_URL,
ChannelMetadata,
MicrosoftTeamsFieldsT,
MicrosoftTeamsUserIdToZulipUserIdT,
MicrosoftTeamsUserRoleData,
ODataQueryParameter,
convert_users,
do_convert_directory,
get_batched_export_message_data,
get_microsoft_graph_api_data,
get_microsoft_teams_sender_id_from_message,
get_timestamp_from_message,
get_user_roles,
is_microsoft_teams_event_message,
)
from zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE
from zerver.lib.import_realm import do_import_realm
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.topic import messages_for_topic
from zerver.models.messages import Message
from zerver.models.realms import Realm, get_realm
from zerver.models.recipients import Recipient
from zerver.models.streams import Stream, Subscription
from zerver.models.users import UserProfile, get_system_bot
from zerver.tests.test_import_export import make_export_output_dir
from zproject import settings
ParamT = ParamSpec("ParamT")
ResponseTuple: TypeAlias = tuple[int, dict[str, str], str]
EXPORTED_MICROSOFT_TEAMS_USER_EMAIL = dict(
aaron="aaron@ZulipChat.onmicrosoft.com",
alya="alya@ZulipChat.onmicrosoft.com",
cordelia="cordelia@ZulipChat.onmicrosoft.com",
guest="guest@example.com",
pieter="pieterk@ZulipChat.onmicrosoft.com",
zoe="zoe@ZulipChat.onmicrosoft.com",
)
EXPORTED_REALM_OWNER_EMAILS = [
EXPORTED_MICROSOFT_TEAMS_USER_EMAIL["pieter"],
EXPORTED_MICROSOFT_TEAMS_USER_EMAIL["alya"],
]
GUEST_USER_EMAILS = [EXPORTED_MICROSOFT_TEAMS_USER_EMAIL["guest"]]
MICROSOFT_TEAMS_EXPORT_USER_ROLE_DATA = MicrosoftTeamsUserRoleData(
global_administrator_user_ids={
"88cbf3c2-0810-4d32-aa19-863c12bf7be9",
"3c6ee395-529d-4681-b5f7-582c707570f6",
},
guest_user_ids={"16741626-4cd8-46cc-bf36-42ecc2b5fdce"},
)
DELETED_MICROSOFT_TEAMS_USERS = ["9bd4aca7-99cf-4b1b-a16d-e25717dc9414"]
PRIVATE_MICROSOFT_TEAMS_CHANNELS = ["19:42c4944387224bf79bcad3cb6809a335@thread.tacv2"]
EXPORTED_MICROSOFT_TEAMS_TEAM_ID: dict[str, str] = {
"Core team": "7c050abd-3cbb-448b-a9de-405f54cc14b2",
"Community": "002145f2-eaba-4962-997d-6d841a9f50af",
"Contributors": "2a00a70a-00f5-4da5-8618-8281194f0de0",
"Feedback & support": "5e5f1988-3216-4ca0-83e9-18c04ecc7533",
"Kandra Labs": "1d513e46-d8cd-41db-b84f-381fe5730794",
}
def get_exported_microsoft_teams_user_data() -> list[MicrosoftTeamsFieldsT]:
test_class = ZulipTestCase()
return json.loads(
test_class.fixture_data(
"usersList.json", "microsoft_teams_fixtures/TeamsData_ZulipChat/users"
)
)
def get_exported_team_data(team_id: str) -> MicrosoftTeamsFieldsT:
test_class = ZulipTestCase()
team_list = json.loads(
test_class.fixture_data(
"teamsList.json",
"microsoft_teams_fixtures/TeamsData_ZulipChat/teams",
)
)
team_list_data = next(team_data for team_data in team_list if team_id == team_data["GroupsId"])
team_settings = json.loads(
test_class.fixture_data(
"teamsSettings.json",
"microsoft_teams_fixtures/TeamsData_ZulipChat/teams",
)
)
team_settings_data = next(
team_data for team_data in team_settings if team_data["Id"] == team_id
)
return {**team_list_data, **team_settings_data}
def get_exported_team_subscription_list(team_id: str) -> list[MicrosoftTeamsFieldsT]:
test_class = ZulipTestCase()
return json.loads(
test_class.fixture_data(
f"teamMembers_{team_id}.json",
f"microsoft_teams_fixtures/TeamsData_ZulipChat/teams/{team_id}",
)
)
def get_exported_team_message_list(team_id: str) -> list[MicrosoftTeamsFieldsT]:
test_class = ZulipTestCase()
return json.loads(
test_class.fixture_data(
f"messages_{team_id}.json",
f"microsoft_teams_fixtures/TeamsData_ZulipChat/teams/{team_id}",
)
)
def get_exported_team_channel_metadata(team_id: str) -> dict[str, ChannelMetadata]:
test_class = ZulipTestCase()
microsoft_teams_channel_metadata = {}
team_channels = json.loads(
test_class.fixture_data(
f"channels_{team_id}.json",
f"microsoft_teams_fixtures/TeamsData_ZulipChat/teams/{team_id}",
)
)
for team_channel in team_channels:
microsoft_teams_channel_metadata[team_channel["Id"]] = ChannelMetadata(
display_name=team_channel["DisplayName"],
            is_archived=team_channel["IsArchived"],
            is_favorite_by_default=team_channel["IsFavoriteByDefault"],
membership_type=team_channel["MembershipType"],
team_id=team_id,
)
return microsoft_teams_channel_metadata
def graph_api_users_callback(request: PreparedRequest) -> ResponseTuple:
assert request.url is not None
parsed = urlsplit(request.url)
query_params = parse_qs(parsed.query)
if query_params.get("$filter") == ["userType eq 'Guest'"]:
test_class = ZulipTestCase()
body = test_class.fixture_data(
"users_guest.json",
"microsoft_graph_api_response_fixtures",
)
else:
raise AssertionError("There are no response fixture for this request.")
# https://learn.microsoft.com/en-us/graph/query-parameters?tabs=http#select
selected_fields = query_params.get("$select")
if selected_fields:
trimmed_values = []
response = json.loads(body)
for data in response["value"]:
trimmed_data = {}
for field in selected_fields:
trimmed_data[field] = data[field]
trimmed_values.append(trimmed_data)
response["value"] = trimmed_values
body = json.dumps(response)
headers = {"Content-Type": "application/json"}
return 200, headers, body
def mock_microsoft_graph_api_calls(
test_func: Callable[Concatenate["MicrosoftTeamsImporterIntegrationTest", ParamT], None],
) -> Callable[Concatenate["MicrosoftTeamsImporterIntegrationTest", ParamT], None]:
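    """
    Test decorator that registers mocked Microsoft Graph API responses
    (users, directory roles, and role members) before running the test.
    """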
@wraps(test_func)
@responses.activate
def _wrapped(
self: "MicrosoftTeamsImporterIntegrationTest",
/,
*args: ParamT.args,
**kwargs: ParamT.kwargs,
) -> None:
responses.add_callback(
responses.GET,
"https://graph.microsoft.com/v1.0/users",
callback=graph_api_users_callback,
content_type="application/json",
)
responses.add(
responses.GET,
"https://graph.microsoft.com/v1.0/directoryRoles",
self.fixture_data("directory_roles.json", "microsoft_graph_api_response_fixtures"),
)
responses.add(
responses.GET,
"https://graph.microsoft.com/v1.0/directoryRoles/240d3723-f4d5-4e70-aa3b-2e574c4f6ea3/members",
self.fixture_data(
"directory_roles_global_administrator_members.json",
"microsoft_graph_api_response_fixtures",
),
)
test_func(self, *args, **kwargs)
return _wrapped
def get_channel_subscriber_emails(realm: Realm, channel: str | Stream) -> set[str]:
if isinstance(channel, str):
imported_channel = Stream.objects.get(
name=channel,
realm=realm,
)
else:
imported_channel = channel # nocoverage
subscriptions = Subscription.objects.filter(recipient=imported_channel.recipient)
users = {sub.user_profile.email for sub in subscriptions}
return users
class MicrosoftTeamsImportTestCase(ZulipTestCase):
def get_exported_microsoft_teams_user_data(self) -> list[MicrosoftTeamsFieldsT]:
return json.loads(
self.fixture_data(
"usersList.json", "microsoft_teams_fixtures/TeamsData_ZulipChat/users"
)
)
class MicrosoftTeamsImporterIntegrationTest(MicrosoftTeamsImportTestCase):
def convert_microsoft_teams_export_fixture(self, fixture_folder: str) -> None:
fixture_file_path = self.fixture_file_name(fixture_folder, "microsoft_teams_fixtures")
if not os.path.isdir(fixture_file_path):
raise AssertionError(f"Fixture file not found: {fixture_file_path}")
with self.assertLogs(level="INFO"), self.settings(EXTERNAL_HOST="zulip.example.com"):
do_convert_directory(
fixture_file_path, self.converted_file_output_dir, "MICROSOFT_GRAPH_API_TOKEN"
)
def import_microsoft_teams_export_fixture(self, fixture_folder: str) -> None:
self.convert_microsoft_teams_export_fixture(fixture_folder)
with self.settings(BILLING_ENABLED=False), self.assertLogs(level="INFO"):
do_import_realm(self.converted_file_output_dir, self.test_realm_subdomain)
@mock_microsoft_graph_api_calls
def do_import_realm_fixture(self, fixture: str = "TeamsData_ZulipChat/") -> None:
self.converted_file_output_dir = make_export_output_dir()
self.test_realm_subdomain = "test-import-teams-realm"
self.import_microsoft_teams_export_fixture(fixture)
exported_user_data = get_exported_microsoft_teams_user_data()
self.exported_user_data_map = {u["Id"]: u for u in exported_user_data}
def get_imported_realm_user_field_values(self, field: str, **kwargs: Any) -> list[str | int]:
return list(
UserProfile.objects.filter(
realm=get_realm(self.test_realm_subdomain),
**kwargs,
).values_list(field, flat=True)
)
def test_imported_users(self) -> None:
self.do_import_realm_fixture()
imported_user_emails = set(
self.get_imported_realm_user_field_values(
"email", is_mirror_dummy=False, is_active=True
)
)
self.assertSetEqual(imported_user_emails, set(EXPORTED_MICROSOFT_TEAMS_USER_EMAIL.values()))
mirror_dummy_accounts = self.get_imported_realm_user_field_values(
"id", is_mirror_dummy=True, is_active=False
)
self.assert_length(mirror_dummy_accounts, 1)
imported_realm_owner_emails = set(
self.get_imported_realm_user_field_values("email", role=UserProfile.ROLE_REALM_OWNER)
)
self.assertSetEqual(imported_realm_owner_emails, set(EXPORTED_REALM_OWNER_EMAILS))
imported_guest_user_emails = set(
self.get_imported_realm_user_field_values("email", role=UserProfile.ROLE_GUEST)
)
self.assertSetEqual(imported_guest_user_emails, set(GUEST_USER_EMAILS))
raw_exported_users_data = self.get_exported_microsoft_teams_user_data()
raw_exported_user_full_names = [user["DisplayName"] for user in raw_exported_users_data]
deleted_user_full_names = [
f"Deleted Teams user {user_id}" for user_id in DELETED_MICROSOFT_TEAMS_USERS
]
imported_user_full_names = self.get_imported_realm_user_field_values("full_name")
self.assertEqual(
sorted(raw_exported_user_full_names + deleted_user_full_names),
sorted(imported_user_full_names),
)
def test_imported_channels(self) -> None:
self.do_import_realm_fixture()
all_imported_channels = Stream.objects.filter(
realm=get_realm(self.test_realm_subdomain),
)
self.assert_length(all_imported_channels, len(EXPORTED_MICROSOFT_TEAMS_TEAM_ID))
for channel in all_imported_channels:
channel_name = channel.name
            # Teams data is imported correctly.
raw_team_data = get_exported_team_data(EXPORTED_MICROSOFT_TEAMS_TEAM_ID[channel_name])
self.assertEqual(channel_name, raw_team_data["Name"])
self.assertEqual(channel.description, raw_team_data["Description"] or "")
self.assertEqual(channel.deactivated, raw_team_data["IsArchived"])
self.assertEqual(channel.invite_only, raw_team_data["Visibility"] == "private")
            # Teams subscriptions are imported correctly.
imported_channel_subscriber_emails = get_channel_subscriber_emails(
get_realm(self.test_realm_subdomain), channel_name
)
raw_subscription_list = get_exported_team_subscription_list(
EXPORTED_MICROSOFT_TEAMS_TEAM_ID[channel_name]
)
expected_subscriber_emails: set[str] = {
self.exported_user_data_map[sub["UserId"]]["Mail"] for sub in raw_subscription_list
}
self.assertSetEqual(expected_subscriber_emails, imported_channel_subscriber_emails)
def test_imported_channel_messages(self) -> None:
self.do_import_realm_fixture()
channel_name = "Core team"
exported_team_messages = get_exported_team_message_list(
EXPORTED_MICROSOFT_TEAMS_TEAM_ID[channel_name]
)
test_realm = get_realm(self.test_realm_subdomain)
channel = Stream.objects.get(
name=channel_name,
realm=test_realm,
)
assert channel.recipient is not None
convertable_exported_messages: list[MicrosoftTeamsFieldsT] = []
convertable_exported_message_datetimes: list[float] = []
exported_sender_messages_map: dict[str, list[float]] = defaultdict(list)
        private_channel_message_exists = False
        deleted_user_message_exists = False
for message in exported_team_messages:
if is_microsoft_teams_event_message(message):
continue
if message["ChannelIdentity"]["ChannelId"] in PRIVATE_MICROSOFT_TEAMS_CHANNELS:
private_channel_message_exists = True
continue
sender_id = get_microsoft_teams_sender_id_from_message(message)
if sender_id not in self.exported_user_data_map:
assert sender_id in DELETED_MICROSOFT_TEAMS_USERS
sender_email = f"{sender_id}@zulip.example.com"
deleted_user_message_exists = True
else:
sender_email = self.exported_user_data_map[sender_id]["Mail"]
convertable_exported_messages.append(message)
message_datetime = get_timestamp_from_message(message)
convertable_exported_message_datetimes.append(message_datetime)
exported_sender_messages_map[sender_email].append(message_datetime)
self.assertTrue(private_channel_message_exists)
self.assertTrue(deleted_user_message_exists)
imported_channel_messages = (
Message.objects.filter(
recipient=channel.recipient,
realm=test_realm,
)
.exclude(sender_id=get_system_bot(settings.WELCOME_BOT, test_realm.id).id)
.order_by("id")
)
self.assertTrue(imported_channel_messages.exists())
imported_message_datetimes: list[float] = []
imported_sender_messages_map: dict[str, list[float]] = defaultdict(list)
last_date_sent: float = float("-inf")
for imported_message in imported_channel_messages:
message_date_sent = imported_message.date_sent.timestamp()
# Imported messages are sorted chronologically.
self.assertLessEqual(last_date_sent, message_date_sent)
last_date_sent = max(last_date_sent, message_date_sent)
# Message content is not empty.
self.assertIsNotNone(imported_message.content)
self.assertIsNotNone(imported_message.rendered_content)
imported_message_datetimes.append(message_date_sent)
imported_sender_messages_map[imported_message.sender.email].append(message_date_sent)
self.assertListEqual(
sorted(imported_message_datetimes),
sorted(convertable_exported_message_datetimes),
)
# Message sender is correct.
for sender_email, exported_message_datetimes in exported_sender_messages_map.items():
self.assertListEqual(
sorted(exported_message_datetimes),
sorted(imported_sender_messages_map[sender_email]),
)
microsoft_team_channel_metadata = get_exported_team_channel_metadata(
EXPORTED_MICROSOFT_TEAMS_TEAM_ID[channel_name]
)
# Microsoft Teams channels are correctly converted and imported
# as Zulip topics.
for (
microsoft_team_channel_id,
microsoft_team_channel_data,
) in microsoft_team_channel_metadata.items():
messages_in_a_microsoft_team_channel = [
m
for m in convertable_exported_messages
if m["ChannelIdentity"]["ChannelId"] == microsoft_team_channel_id
]
topic_name = microsoft_team_channel_data.display_name
imported_messages_in_a_zulip_topic = messages_for_topic(
test_realm.id, channel.recipient.id, topic_name
)
self.assertEqual(
len(messages_in_a_microsoft_team_channel), len(imported_messages_in_a_zulip_topic)
)
class MicrosoftTeamsImporterUnitTest(MicrosoftTeamsImportTestCase):
def convert_users_handler(
self,
realm: dict[str, Any] | None = None,
realm_id: int = 0,
users_list: list[MicrosoftTeamsFieldsT] | None = None,
user_data_fixture_name: str | None = None,
microsoft_teams_user_role_data: MicrosoftTeamsUserRoleData = MICROSOFT_TEAMS_EXPORT_USER_ROLE_DATA,
) -> MicrosoftTeamsUserIdToZulipUserIdT:
if users_list is None:
users_list = self.get_exported_microsoft_teams_user_data()
if realm is None:
realm = {}
realm["zerver_stream"] = []
realm["zerver_defaultstream"] = []
realm["zerver_recipient"] = []
realm["zerver_subscription"] = []
if user_data_fixture_name is not None:
users_list = json.loads(
self.fixture_data(user_data_fixture_name, "microsoft_teams_fixtures/test_fixtures")
)
return convert_users(
realm=realm,
realm_id=realm_id,
users_list=users_list,
microsoft_teams_user_role_data=microsoft_teams_user_role_data,
timestamp=int(float(timezone_now().timestamp())),
)
@responses.activate
def get_user_roles_handler(
self,
directory_roles_response_fixture: str | None = "directory_roles.json",
global_administrators_response_fixture: str
| None = "directory_roles_global_administrator_members.json",
guest_users_response_fixture: str | None = "users_guest.json",
) -> MicrosoftTeamsUserRoleData:
        # TODO: For simplicity, this test assumes we only query for the guest users.
# This can be updated to use `add_callback` and call something like
# `graph_api_users_callback` if the importer performs other types of queries.
responses.add(
responses.GET,
"https://graph.microsoft.com/v1.0/users?%24filter=userType+eq+%27Guest%27&%24select=id",
self.fixture_data(
guest_users_response_fixture,
"microsoft_graph_api_response_fixtures",
)
if guest_users_response_fixture
else json.dumps({"value": []}),
)
responses.add(
responses.GET,
"https://graph.microsoft.com/v1.0/directoryRoles",
self.fixture_data(
directory_roles_response_fixture, "microsoft_graph_api_response_fixtures"
)
if directory_roles_response_fixture
else json.dumps({"value": []}),
)
responses.add(
responses.GET,
"https://graph.microsoft.com/v1.0/directoryRoles/240d3723-f4d5-4e70-aa3b-2e574c4f6ea3/members",
self.fixture_data(
global_administrators_response_fixture,
"microsoft_graph_api_response_fixtures",
)
if global_administrators_response_fixture
else json.dumps({"value": []}),
)
return get_user_roles("MICROSOFT_GRAPH_API_TOKEN")
def test_convert_users_with_no_admin(self) -> None:
microsoft_teams_user_role_data = MicrosoftTeamsUserRoleData(
global_administrator_user_ids=set(), guest_user_ids=set()
)
with self.assertLogs(level="INFO") as info_logs:
self.convert_users_handler(
microsoft_teams_user_role_data=microsoft_teams_user_role_data
)
self.assertIn(
"WARNING:root:Converted realm has no owners!",
info_logs.output,
)
    def test_convert_users_with_missing_email(self) -> None:
with self.assertLogs(level="INFO"), self.assertRaises(AssertionError) as e:
self.convert_users_handler(user_data_fixture_name="user_list_with_missing_email.json")
self.assertEqual(
"Could not find email address for Microsoft Teams user {'BusinessPhones': [], 'JobTitle': None, 'Mail': None, 'MobilePhone': None, 'OfficeLocation': None, 'PreferredLanguage': None, 'UserPrincipalName': None, 'Id': '5dbe468a-1e96-4aaa-856d-cdf825081e11', 'UserId': None, 'DisplayName': 'zoe', 'UserName': None, 'PhoneNumber': None, 'Location': None, 'InterpretedUserType': None, 'DirectoryStatus': None, 'AudioConferencing': None, 'PhoneSystems': None, 'CallingPlan': None, 'AssignedPlans': None, 'OnlineDialinConferencingPolicy': None, 'FeatureTypes': None, 'State': None, 'City': None, 'Surname': None, 'GivenName': 'zoe'}",
str(e.exception),
)
def test_at_least_one_recipient_per_user(self) -> None:
"""
        Make sure each user has at least one recipient record. This ensures
        that onboarding messages run smoothly even for users without any
        personal messages.
"""
realm: dict[str, Any] = {}
realm["zerver_stream"] = []
realm["zerver_defaultstream"] = []
realm["zerver_recipient"] = []
realm["zerver_subscription"] = []
with self.assertLogs(level="INFO"):
microsoft_teams_user_id_to_zulip_user_id = self.convert_users_handler(
realm=realm, microsoft_teams_user_role_data=MICROSOFT_TEAMS_EXPORT_USER_ROLE_DATA
)
self.assert_length(
realm["zerver_recipient"], len(self.get_exported_microsoft_teams_user_data())
)
zulip_user_ids = set(microsoft_teams_user_id_to_zulip_user_id.values())
for recipient in realm["zerver_recipient"]:
self.assertTrue(recipient["type_id"] in zulip_user_ids)
self.assertTrue(recipient["type"] == Recipient.PERSONAL)
@responses.activate
def test_failed_get_microsoft_graph_api_data(self) -> None:
responses.add(
method=responses.GET,
url="https://graph.microsoft.com/v1.0/directoryRoles",
status=403,
)
with self.assertRaises(Exception) as e, self.assertLogs(level="INFO"):
get_microsoft_graph_api_data(
MICROSOFT_GRAPH_API_URL.format(endpoint="/directoryRoles"),
token="MICROSOFT_GRAPH_API_TOKEN",
)
self.assertEqual("HTTP error accessing the Microsoft Graph API.", str(e.exception))
def test_get_user_roles(self) -> None:
with (
self.subTest("No global administrator role found"),
self.assertRaises(AssertionError) as e,
):
# This is primarily only for test coverage, it's likely a very rare case since
# this role is one of the built-in roles.
self.get_user_roles_handler(directory_roles_response_fixture=None)
self.assertEqual( # nocoverage
"Could not find Microsoft Teams organization owners/administrators.",
str(e.exception),
)
microsoft_teams_user_role_data: MicrosoftTeamsUserRoleData = self.get_user_roles_handler()
self.assertSetEqual(
microsoft_teams_user_role_data.global_administrator_user_ids,
MICROSOFT_TEAMS_EXPORT_USER_ROLE_DATA.global_administrator_user_ids,
)
self.assertSetEqual(
microsoft_teams_user_role_data.guest_user_ids,
MICROSOFT_TEAMS_EXPORT_USER_ROLE_DATA.guest_user_ids,
)
@responses.activate
def test_paginated_get_microsoft_graph_api_data(self) -> None:
def paginated_graph_api_users_callback(request: PreparedRequest) -> ResponseTuple:
assert request.url is not None
parsed = urlsplit(request.url)
query_params = parse_qs(parsed.query)
queries = set(query_params.keys())
if queries == {"$filter", "$top"}:
body = self.fixture_data(
"paginated_users_member.json",
"microsoft_graph_api_response_fixtures",
)
elif queries == {"$filter", "$skiptoken", "$top"}:
body = self.fixture_data(
"paginated_users_member_2.json",
"microsoft_graph_api_response_fixtures",
)
else:
raise AssertionError("There is no response fixture for this request.")
headers = {"Content-Type": "application/json"}
return 200, headers, body
responses.add_callback(
responses.GET,
"https://graph.microsoft.com/v1.0/users",
callback=paginated_graph_api_users_callback,
content_type="application/json",
)
odata_parameter = [
ODataQueryParameter(parameter="$filter", expression="userType eq 'Member'"),
ODataQueryParameter(parameter="$top", expression="3"),
]
result = get_microsoft_graph_api_data(
MICROSOFT_GRAPH_API_URL.format(endpoint="/users"),
odata_parameter,
token="MICROSOFT_GRAPH_API_TOKEN",
)
result_user_ids = {user["id"] for user in result}
expected_member_users = {
user["Id"]
for user in self.get_exported_microsoft_teams_user_data()
if user["Mail"] not in GUEST_USER_EMAILS
}
self.assertSetEqual(result_user_ids, expected_member_users)
def test_get_batched_export_message_data(self) -> None:
        # Load a couple of separate files to see how the batching combines
        # messages from multiple files.
message_file_paths = [
self.fixture_file_name(
"TeamsData_ZulipChat/teams/7c050abd-3cbb-448b-a9de-405f54cc14b2/messages_7c050abd-3cbb-448b-a9de-405f54cc14b2.json",
"microsoft_teams_fixtures",
),
self.fixture_file_name(
"TeamsData_ZulipChat/teams/2a00a70a-00f5-4da5-8618-8281194f0de0/messages_2a00a70a-00f5-4da5-8618-8281194f0de0.json",
"microsoft_teams_fixtures",
),
]
total_messages = 0
for path in message_file_paths:
total_messages += len(get_data_file(path))
chunk_sizes = [5, 10, MESSAGE_BATCH_CHUNK_SIZE]
for chunk_size in chunk_sizes:
message_batches = 0
expected_batch_amount = math.ceil(total_messages / chunk_size)
messages_left = total_messages
for batched_messages in get_batched_export_message_data(message_file_paths, chunk_size):
if chunk_size <= messages_left:
self.assertGreaterEqual(chunk_size, len(batched_messages))
messages_left -= len(batched_messages)
else:
self.assertGreaterEqual(messages_left, len(batched_messages))
message_batches += 1
            self.assertEqual(message_batches, expected_batch_amount)

zulip/zulip:zerver/webhooks/dbt/tests.py

from zerver.lib.test_classes import WebhookTestCase
class DBTHookTests(WebhookTestCase):
def test_dbt_webhook_when_job_started(self) -> None:
expected_message = """:yellow_circle: Daily Job (dbt build) deployment started in **Production**.\n
Job #123 was kicked off from the UI by bwilliams@example.com at <time:2023-01-31T19:28:07Z>."""
self.check_webhook("job_run_started", "Example Project", expected_message)
def test_dbt_webhook_when_job_completed_success(self) -> None:
expected_message = """:green_circle: Daily Job (dbt build) deployment succeeded in **Production**.\n
Job #123 was kicked off from the UI by bwilliams@example.com at <time:2023-01-31T19:28:07Z>."""
self.check_webhook("job_run_completed_success", "Example Project", expected_message)
def test_dbt_webhook_when_job_completed_errored(self) -> None:
expected_message = """:cross_mark: Daily Job (dbt build) deployment completed with errors in **Production**.\n
Job #123 was kicked off from the UI by bwilliams@example.com at <time:2025-10-05T19:15:56Z>."""
self.check_webhook("job_run_completed_errored", "Example Project", expected_message)
def test_dbt_webhook_when_job_errored(self) -> None:
expected_message = """:cross_mark: Daily Job (dbt build) deployment failed in **Production**.\n
Job #123 was kicked off from the UI by bwilliams@example.com at <time:2023-01-31T21:14:41Z>."""
self.check_webhook("job_run_errored", "Example Project", expected_message)
class DBTHookWithAccessUrlTests(WebhookTestCase):
URL_TEMPLATE = "/api/v1/external/dbt?&api_key={api_key}&stream={stream}&access_url=https%3A%2F%2Fexample.us1.dbt.com"
def test_dbt_webhook_with_valid_access_url(self) -> None:
expected_message = """:yellow_circle: Daily Job (dbt build) [deployment](https://example.us1.dbt.com/deploy/1/projects/167194/runs/12345) started in **Production**.\n
[Job #123](https://example.us1.dbt.com/deploy/1/projects/167194/jobs/123) was kicked off from the UI by bwilliams@example.com at <time:2023-01-31T19:28:07Z>."""
self.check_webhook("job_run_started", "Example Project", expected_message)

zulip/zulip:zerver/webhooks/dbt/view.py

from urllib.parse import urljoin
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.response import json_success
from zerver.lib.typed_endpoint import JsonBodyPayload, typed_endpoint
from zerver.lib.validator import WildValue, check_int, check_string
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
DBT_NOTIFICATION_TEMPLATE = """
{emoji} {job_name} {run_text} {status} in **{environment}**.
{job_text} was {run_reason} at <time:{start_time}>.
"""
DBT_EVENT_TYPE_MAPPER = {
"job.run.started": {
"running": (":yellow_circle:", "started"),
},
"job.run.completed": {
"success": (":green_circle:", "succeeded"),
"errored": (":cross_mark:", "completed with errors"),
},
"job.run.errored": {
"errored": (":cross_mark:", "failed"),
},
}
ALL_EVENT_TYPES = list(DBT_EVENT_TYPE_MAPPER.keys())
def extract_data_from_payload(payload: JsonBodyPayload[WildValue]) -> dict[str, str]:
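    """
    Tame the relevant fields of the dbt webhook payload into a plain string
    dict for the notification template.
    """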
data: dict[str, str] = {
"account_id": str(payload["accountId"].tame(check_int)),
"event_type": payload["eventType"].tame(check_string),
"job_id": payload["data"]["jobId"].tame(check_string),
"job_name": payload["data"]["jobName"].tame(check_string),
"project_name": payload["data"]["projectName"].tame(check_string),
"project_id": payload["data"]["projectId"].tame(check_string),
"environment": payload["data"]["environmentName"].tame(check_string),
"run_id": payload["data"]["runId"].tame(check_string),
"start_time": payload["data"]["runStartedAt"].tame(check_string),
"run_status": payload["data"]["runStatus"].tame(check_string).lower(),
}
# We only change the capitalization of the first letter in this
# string for the formatting of our notification template.
run_reason = payload["data"]["runReason"].tame(check_string)
data["run_reason"] = run_reason[:1].lower() + run_reason[1:]
return data
def get_job_run_body(data: dict[str, str], access_url: str | None) -> str:
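    """
    Build the notification body, linking the job and run to the dbt UI when
    the integration is configured with an access_url.
    """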
emoji, status = DBT_EVENT_TYPE_MAPPER[data["event_type"]][data["run_status"]]
project_url = (
urljoin(
access_url,
f"/deploy/{data['account_id']}/projects/{data['project_id']}",
)
if access_url
else None
)
job_text = (
f"[Job #{data['job_id']}]({project_url}/jobs/{data['job_id']})"
if project_url
else f"Job #{data['job_id']}"
)
run_text = f"[deployment]({project_url}/runs/{data['run_id']})" if project_url else "deployment"
body = DBT_NOTIFICATION_TEMPLATE.format(
emoji=emoji,
status=status,
run_text=run_text,
job_text=job_text,
**data,
)
return body
@webhook_view("DBT", all_event_types=ALL_EVENT_TYPES)
@typed_endpoint
def api_dbt_webhook(
request: HttpRequest,
user_profile: UserProfile,
*,
payload: JsonBodyPayload[WildValue],
access_url: str | None = None,
) -> HttpResponse:
data = extract_data_from_payload(payload)
body = get_job_run_body(data, access_url)
topic_name = data["project_name"]
event = data["event_type"]
check_send_webhook_message(request, user_profile, topic_name, body, event)
return json_success(request)

zulip/zulip:zerver/management/commands/set_owner_full_content_access.py

from argparse import ArgumentParser
from typing import Any
from typing_extensions import override
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = (
"Set the owner_full_content_access flag for a realm. The flag determines "
"whether the organization's owner will have the ability to access private "
"content in the organization in the Zulip UI."
)
@override
def add_arguments(self, parser: ArgumentParser) -> None:
toggle_group = parser.add_mutually_exclusive_group(required=True)
toggle_group.add_argument(
"--enable",
action="store_true",
help="Set owner_full_content_access to True.",
)
        toggle_group.add_argument(
            "--disable",
            action="store_false",
            dest="enable",
            help="Set owner_full_content_access to False.",
        )
self.add_realm_args(parser, required=True)
@override
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
owner_full_content_access = options["enable"]
realm.owner_full_content_access = owner_full_content_access
realm.save(update_fields=["owner_full_content_access"])
print(
f"owner_full_content_access set to {owner_full_content_access} for realm {realm.name} (id={realm.id})."
)

zulip/zulip:zerver/tests/test_realm_creation.py

import base64
import time
from datetime import timedelta
from unittest.mock import patch
from urllib.parse import quote, quote_plus
import orjson
from django.conf import settings
from django.core.exceptions import ValidationError
from django.test import override_settings
from confirmation import settings as confirmation_settings
from zerver.actions.create_realm import do_change_realm_subdomain
from zerver.actions.create_user import do_create_user
from zerver.forms import check_subdomain_available
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.subdomains import is_root_domain_available
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import find_key_by_email, ratelimit_rule
from zerver.models import (
Message,
OnboardingUserMessage,
PreregistrationRealm,
Realm,
RealmAuditLog,
Recipient,
ScheduledEmail,
UserProfile,
)
from zerver.models.realm_audit_logs import AuditLogEventType
from zerver.models.realms import get_realm
from zerver.models.scheduled_jobs import ScheduledMessage
from zerver.models.streams import get_stream
from zerver.models.users import get_system_bot, get_user
class DemoCreationTest(ZulipTestCase):
@override_settings(OPEN_REALM_CREATION=True, DEMO_ORG_DEADLINE_DAYS=30)
def test_create_demo_organization(self) -> None:
internal_realm = get_realm(settings.SYSTEM_BOT_REALM)
notification_bot = get_system_bot(settings.NOTIFICATION_BOT, internal_realm.id)
signups_channel, _ = create_stream_if_needed(notification_bot.realm, "signups")
result = self.submit_demo_creation_form()
realm = Realm.objects.filter(
demo_organization_scheduled_deletion_date__isnull=False
).latest("date_created")
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].startswith(
f"http://{realm.string_id}.testserver/accounts/login/subdomain"
)
)
assert settings.DEMO_ORG_DEADLINE_DAYS is not None
expected_deletion_date = realm.date_created + timedelta(
days=settings.DEMO_ORG_DEADLINE_DAYS
)
self.assertEqual(realm.demo_organization_scheduled_deletion_date, expected_deletion_date)
result = self.client_get(result["Location"], subdomain=realm.string_id)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], f"http://{realm.string_id}.testserver")
user_profile = UserProfile.objects.all().order_by("id").last()
assert user_profile is not None
self.assert_logged_in_user_id(user_profile.id)
# Demo organizations are created without setting an email address for the owner.
self.assertEqual(user_profile.delivery_email, "")
scheduled_email = ScheduledEmail.objects.filter(users=user_profile).last()
assert scheduled_email is None
self.assertIn(realm.string_id, user_profile.email)
self.assertEqual(
user_profile.email_address_visibility, UserProfile.EMAIL_ADDRESS_VISIBILITY_NOBODY
)
# Make sure the correct Welcome Bot direct message is sent.
welcome_msg = Message.objects.filter(
realm_id=realm.id,
sender__email="welcome-bot@zulip.com",
recipient__type=Recipient.PERSONAL,
).latest("id")
self.assertTrue(welcome_msg.content.startswith("Hello, and welcome to Zulip!"))
self.assertIn("getting started guide", welcome_msg.content)
self.assertNotIn("using Zulip for a class guide", welcome_msg.content)
self.assertIn("demo organization", welcome_msg.content)
# Confirm we have the expected audit log data.
realm_creation_audit_log = RealmAuditLog.objects.get(
realm=realm, event_type=AuditLogEventType.REALM_CREATED
)
self.assertEqual(realm_creation_audit_log.acting_user, user_profile)
self.assertEqual(realm_creation_audit_log.event_time, realm.date_created)
audit_log_extra_data = realm_creation_audit_log.extra_data
self.assertEqual(
audit_log_extra_data["how_realm_creator_found_zulip"],
RealmAuditLog.HOW_REALM_CREATOR_FOUND_ZULIP_OPTIONS["ai_chatbot"],
)
self.assertEqual(
audit_log_extra_data["how_realm_creator_found_zulip_extra_context"],
"I don't remember.",
)
# Confirm there is a scheduled message reminder about automated
# demo organization deletion.
demo_deletion_reminder = ScheduledMessage.objects.filter(
realm_id=realm.id,
sender__email="notification-bot@zulip.com",
).latest("id")
self.assertTrue(
demo_deletion_reminder.content.startswith("As a reminder, this [demo organization]")
)
self.assertIn("will be automatically deleted on <time", demo_deletion_reminder.content)
# Check admin organization's signups channel messages
recipient = signups_channel.recipient
messages = Message.objects.filter(realm_id=internal_realm.id, recipient=recipient).order_by(
"id"
)
self.assert_length(messages, 1)
# Check organization name, subdomain and organization type are in message content
self.assertIn(realm.name, messages[0].content)
self.assertIn(realm.string_id, messages[0].content)
self.assertEqual("business demos", messages[0].topic_name())
@ratelimit_rule(10, 2, domain="demo_realm_creation_by_ip")
def test_demo_creation_rate_limiter(self) -> None:
start_time = time.time()
with patch("time.time", return_value=start_time):
self.submit_demo_creation_form()
self.submit_demo_creation_form()
result = self.submit_demo_creation_form()
self.assertEqual(result.status_code, 429)
self.assert_in_response("Rate limit exceeded.", result)
result = self.client_get("/new/demo/")
self.assertEqual(result.status_code, 200)
with patch("time.time", return_value=start_time + 11):
self.submit_demo_creation_form()
@override_settings(OPEN_REALM_CREATION=True, USING_CAPTCHA=True, ALTCHA_HMAC_KEY="secret")
def test_create_demo_with_captcha(self) -> None:
result = self.client_get("/new/demo/")
self.assert_not_in_success_response(["Validation failed"], result)
# Without the CAPTCHA value, we get an error
result = self.submit_demo_creation_form()
self.assert_in_success_response(["Validation failed, please try again."], result)
# With an invalid value, we also get an error
with self.assertLogs(level="WARNING") as logs:
result = self.submit_demo_creation_form(captcha="moose")
self.assert_in_success_response(["Validation failed, please try again."], result)
self.assert_length(logs.output, 1)
self.assertIn("Invalid altcha solution: Invalid altcha payload", logs.output[0])
# With something which raises an exception, we also get the same error
with self.assertLogs(level="WARNING") as logs:
result = self.submit_demo_creation_form(
captcha=base64.b64encode(
orjson.dumps(["algorithm", "challenge", "number", "salt", "signature"])
).decode(),
)
self.assert_in_success_response(["Validation failed, please try again."], result)
self.assert_length(logs.output, 1)
self.assertIn(
"TypeError: list indices must be integers or slices, not str", logs.output[0]
)
# If we override the validation, we get an error because it's not in the session
payload = base64.b64encode(orjson.dumps({"challenge": "moose"})).decode()
with (
patch("zerver.forms.verify_solution", return_value=(True, None)) as verify,
self.assertLogs(level="WARNING") as logs,
):
result = self.submit_demo_creation_form(captcha=payload)
self.assert_in_success_response(["Validation failed, please try again."], result)
verify.assert_called_once_with(payload, "secret", check_expires=True)
self.assert_length(logs.output, 1)
self.assertIn("Expired or replayed altcha solution", logs.output[0])
self.assertEqual(self.client.session.get("altcha_challenges"), None)
result = self.client_get("/json/antispam_challenge")
data = self.assert_json_success(result)
self.assertEqual(data["algorithm"], "SHA-256")
self.assertEqual(data["max_number"], 500000)
self.assertIn("signature", data)
self.assertIn("challenge", data)
self.assertIn("salt", data)
self.assert_length(self.client.session["altcha_challenges"], 1)
self.assertEqual(self.client.session["altcha_challenges"][0][0], data["challenge"])
# Update the payload so the challenge matches what is in the
# session. The real payload would have other keys.
payload = base64.b64encode(orjson.dumps({"challenge": data["challenge"]})).decode()
with patch("zerver.forms.verify_solution", return_value=(True, None)) as verify:
result = self.submit_demo_creation_form(captcha=payload)
self.assertEqual(result.status_code, 302)
verify.assert_called_once_with(payload, "secret", check_expires=True)
# And the challenge has been stripped out of the session
self.assertEqual(self.client.session["altcha_challenges"], [])
def test_demo_organizations_disabled(self) -> None:
with self.settings(OPEN_REALM_CREATION=False):
result = self.submit_demo_creation_form()
self.assertEqual(result.status_code, 200)
self.assert_in_response("Demo organizations are not enabled on this server.", result)
with self.settings(DEMO_ORG_DEADLINE_DAYS=None):
result = self.submit_demo_creation_form()
self.assertEqual(result.status_code, 200)
self.assert_in_response("Demo organizations are not enabled on this server.", result)
class RealmCreationTest(ZulipTestCase):
@override_settings(OPEN_REALM_CREATION=True)
def check_able_to_create_realm(self, email: str, password: str = "test") -> None:
internal_realm = get_realm(settings.SYSTEM_BOT_REALM)
notification_bot = get_system_bot(settings.NOTIFICATION_BOT, internal_realm.id)
signups_stream, _ = create_stream_if_needed(notification_bot.realm, "signups")
string_id = "custom-test"
org_name = "Zulip Test"
# Make sure the realm does not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
# Create new realm with the email
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=org_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(org_name)}&realm_type=10&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
prereg_realm = PreregistrationRealm.objects.get(email=email)
self.assertEqual(prereg_realm.name, "Zulip Test")
self.assertEqual(prereg_realm.org_type, Realm.ORG_TYPES["business"]["id"])
self.assertEqual(prereg_realm.default_language, "en")
self.assertEqual(prereg_realm.string_id, string_id)
# Check confirmation email has the correct subject and body, extract
# confirmation link and visit it
confirmation_url = self.get_confirmation_url_from_outbox(
email,
email_subject_contains="Create your Zulip organization",
email_body_contains="You have requested a new Zulip organization",
)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email, password, realm_subdomain=string_id, realm_name=org_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].startswith("http://custom-test.testserver/accounts/login/subdomain/")
)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertEqual(realm.string_id, string_id)
user = get_user(email, realm)
self.assertEqual(user.realm, realm)
# Check that user is the owner.
self.assertEqual(user.role, UserProfile.ROLE_REALM_OWNER)
# Check defaults
self.assertEqual(realm.org_type, Realm.ORG_TYPES["business"]["id"])
self.assertEqual(realm.default_language, "en")
self.assertEqual(realm.emails_restricted_to_domains, False)
self.assertEqual(realm.invite_required, True)
prereg_realm = PreregistrationRealm.objects.get(email=email)
# Check created_realm and created_user field of PreregistrationRealm object
self.assertEqual(prereg_realm.created_realm, realm)
self.assertEqual(prereg_realm.created_user, user)
self.assertEqual(prereg_realm.status, confirmation_settings.STATUS_USED)
# Check initial realm messages for onboarding
greetings_message_content = "a great place to say “hi”"
experiments_message_content = "Use this topic to try out"
for stream_name, topic, text, message_count in [
(
str(Realm.DEFAULT_NOTIFICATION_STREAM_NAME),
"greetings",
greetings_message_content,
2,
),
(
str(Realm.ZULIP_SANDBOX_CHANNEL_NAME),
"experiments",
experiments_message_content,
5,
),
]:
stream = get_stream(stream_name, realm)
recipient = stream.recipient
messages = Message.objects.filter(realm_id=realm.id, recipient=recipient).order_by(
"date_sent"
)
self.assert_length(messages, message_count)
self.assertEqual(topic, messages[0].topic_name())
self.assertIn(text, messages[0].content)
# Check admin organization's signups stream messages
recipient = signups_stream.recipient
messages = Message.objects.filter(realm_id=internal_realm.id, recipient=recipient).order_by(
"id"
)
self.assert_length(messages, 1)
# Check organization name, subdomain and organization type are in message content
self.assertIn("Zulip Test", messages[0].content)
self.assertIn("custom-test", messages[0].content)
self.assertEqual("business signups", messages[0].topic_name())
realm_creation_audit_log = RealmAuditLog.objects.get(
realm=realm, event_type=AuditLogEventType.REALM_CREATED
)
self.assertEqual(realm_creation_audit_log.acting_user, user)
self.assertEqual(realm_creation_audit_log.event_time, realm.date_created)
audit_log_extra_data = realm_creation_audit_log.extra_data
self.assertEqual(
audit_log_extra_data["how_realm_creator_found_zulip"],
RealmAuditLog.HOW_REALM_CREATOR_FOUND_ZULIP_OPTIONS["other"],
)
self.assertEqual(
audit_log_extra_data["how_realm_creator_found_zulip_extra_context"],
"I found it on the internet.",
)
# Piggyback a little check for how we handle
# empty string_ids.
realm.string_id = ""
self.assertEqual(realm.display_subdomain, ".")
def test_create_realm_non_existing_email(self) -> None:
self.check_able_to_create_realm("user1@test.com")
def test_create_realm_existing_email(self) -> None:
self.check_able_to_create_realm("hamlet@zulip.com")
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_create_realm_ldap_email(self) -> None:
self.init_default_ldap_database()
with self.settings(LDAP_EMAIL_ATTR="mail"):
self.check_able_to_create_realm(
"newuser_email@zulip.com", self.ldap_password("newuser_with_email")
)
def test_create_realm_as_system_bot(self) -> None:
result = self.submit_realm_creation_form(
email="notification-bot@zulip.com",
realm_subdomain="custom-test",
realm_name="Zulip test",
)
self.assertEqual(result.status_code, 200)
self.assert_in_response("notification-bot@zulip.com is reserved for system bots", result)
def test_create_realm_no_creation_key(self) -> None:
"""
Trying to create a realm without a creation_key should fail when
OPEN_REALM_CREATION is false.
"""
email = "user1@test.com"
with self.settings(OPEN_REALM_CREATION=False):
# Create new realm with the email, but no creation key.
result = self.submit_realm_creation_form(
email, realm_subdomain="custom-test", realm_name="Zulip test"
)
self.assertEqual(result.status_code, 200)
self.assert_in_response("Organization creation link required", result)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_without_password_backend_enabled(self) -> None:
email = "user@example.com"
with self.settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.SAMLAuthBackend",
"zproject.backends.ZulipDummyBackend",
)
):
result = self.submit_realm_creation_form(
email, realm_subdomain="custom-test", realm_name="Zulip test"
)
self.assertEqual(result.status_code, 200)
self.assert_in_response("Organization creation link required", result)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_subdomain(self) -> None:
password = "test"
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
# Create new realm with the email
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(
email, email_body_contains="Organization URL"
)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email, password, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=string_id)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://custom-test.testserver")
# Make sure the realm is created
realm = get_realm(string_id)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user(email, realm).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_marketing_emails_enabled(self) -> None:
password = "test"
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
# Create new realm with the email
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=string_id,
realm_name=realm_name,
enable_marketing_emails=True,
)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=string_id)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://custom-test.testserver")
# Make sure the realm is created
realm = get_realm(string_id)
self.assertEqual(realm.string_id, string_id)
user = get_user(email, realm)
self.assertEqual(user.realm, realm)
self.assertTrue(user.enable_marketing_emails)
@override_settings(OPEN_REALM_CREATION=True, CORPORATE_ENABLED=False)
def test_create_realm_without_prompting_for_marketing_emails(self) -> None:
password = "test"
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
# Create new realm with the email
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Simulate the initial POST that is made by redirect-to-post.ts
# by triggering submit on confirm_preregistration.html.
payload = {
"full_name": "",
"key": find_key_by_email(email),
"from_confirmation": "1",
}
result = self.client_post("/realm/register/", payload)
# Assert that the form did not prompt the user for enabling
# marketing emails.
self.assert_not_in_success_response(['input id="id_enable_marketing_emails"'], result)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=string_id,
realm_name=realm_name,
)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=string_id)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://custom-test.testserver")
# Make sure the realm is created
realm = get_realm(string_id)
self.assertEqual(realm.string_id, string_id)
user = get_user(email, realm)
self.assertEqual(user.realm, realm)
self.assertFalse(user.enable_marketing_emails)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_marketing_emails_disabled(self) -> None:
password = "test"
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Zulip test"
# Make sure the realm does not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
# Create new realm with the email
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=string_id,
realm_name=realm_name,
enable_marketing_emails=False,
)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=string_id)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://custom-test.testserver")
# Make sure the realm is created
realm = get_realm(string_id)
self.assertEqual(realm.string_id, string_id)
user = get_user(email, realm)
self.assertEqual(user.realm, realm)
self.assertFalse(user.enable_marketing_emails)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_regular_realm_welcome_bot_direct_message(self) -> None:
password = "test"
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Test"
# Create new realm with the email.
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=string_id,
realm_name=realm_name,
enable_marketing_emails=False,
)
self.assertEqual(result.status_code, 302)
# Make sure the correct Welcome Bot direct message is sent.
realm = get_realm(string_id)
welcome_msg = Message.objects.filter(
realm_id=realm.id,
sender__email="welcome-bot@zulip.com",
recipient__type=Recipient.PERSONAL,
).latest("id")
self.assertTrue(welcome_msg.content.startswith("Hello, and welcome to Zulip!"))
# Organization type is not education or education_nonprofit,
# and organization is not a demo organization.
self.assertIn("getting started guide", welcome_msg.content)
self.assertNotIn("using Zulip for a class guide", welcome_msg.content)
self.assertNotIn("demo organization", welcome_msg.content)
# Organization has tracked onboarding messages.
self.assertTrue(OnboardingUserMessage.objects.filter(realm_id=realm.id).exists())
self.assertIn("I've kicked off some conversations", welcome_msg.content)
# Verify that an organization without 'OnboardingUserMessage' records
# doesn't include the "I've kicked off..." text in welcome_msg content.
OnboardingUserMessage.objects.filter(realm_id=realm.id).delete()
do_create_user("hamlet", "password", realm, "hamlet", acting_user=None)
welcome_msg = Message.objects.filter(
realm_id=realm.id,
sender__email="welcome-bot@zulip.com",
recipient__type=Recipient.PERSONAL,
).latest("id")
self.assertTrue(welcome_msg.content.startswith("Hello, and welcome to Zulip!"))
self.assertNotIn("I've kicked off some conversations", welcome_msg.content)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_education_organization_welcome_bot_direct_message(self) -> None:
password = "test"
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Test"
# Create new realm with the email.
result = self.submit_realm_creation_form(
email,
realm_subdomain=string_id,
realm_name=realm_name,
realm_type=Realm.ORG_TYPES["education"]["id"],
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=35&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=string_id,
realm_name=realm_name,
enable_marketing_emails=False,
realm_type=Realm.ORG_TYPES["education"]["id"],
)
self.assertEqual(result.status_code, 302)
# Make sure the correct Welcome Bot direct message is sent.
welcome_msg = Message.objects.filter(
realm_id=get_realm(string_id).id,
sender__email="welcome-bot@zulip.com",
recipient__type=Recipient.PERSONAL,
).latest("id")
self.assertTrue(welcome_msg.content.startswith("Hello, and welcome to Zulip!"))
# Organization type is education.
self.assertNotIn("getting started guide", welcome_msg.content)
self.assertIn("using Zulip for a class guide", welcome_msg.content)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_custom_language(self) -> None:
email = "user1@test.com"
password = "test"
string_id = "custom-test"
realm_name = "Zulip Test"
realm_language = "de"
# Make sure the realm does not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
# Create new realm with the email
result = self.submit_realm_creation_form(
email,
realm_subdomain=string_id,
realm_name=realm_name,
realm_default_language=realm_language,
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=10&realm_default_language={realm_language}&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
prereg_realm = PreregistrationRealm.objects.get(email=email)
# Check default_language field of PreregistrationRealm object
self.assertEqual(prereg_realm.default_language, realm_language)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=string_id,
realm_name=realm_name,
realm_default_language=realm_language,
)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=string_id)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://custom-test.testserver")
# Make sure the realm is created and check default_language field
realm = get_realm(string_id)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(realm.default_language, realm_language)
# Check initial realm messages for onboarding
greetings_channel = "allgemein"
greetings_topic = "Grüße"
greetings_message_content = "Thema ist ein toller Ort um “hi”"
experiments_channel = "Sandbox"
experiments_topic = "Experimente"
experiments_message_content = "Verwende dieses Thema um"
for stream_name, topic, text, message_count in [
(greetings_channel, greetings_topic, greetings_message_content, 2),
(experiments_channel, experiments_topic, experiments_message_content, 5),
]:
stream = get_stream(stream_name, realm)
recipient = stream.recipient
messages = Message.objects.filter(realm_id=realm.id, recipient=recipient).order_by(
"date_sent"
)
self.assert_length(messages, message_count)
self.assertEqual(topic, messages[0].topic_name())
self.assertIn(text, messages[0].content)
@override_settings(OPEN_REALM_CREATION=True, CLOUD_FREE_TRIAL_DAYS=30)
def test_create_realm_during_free_trial(self) -> None:
password = "test"
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Test"
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
confirmation_url = self.get_confirmation_url_from_outbox(
email, email_body_contains="Organization URL"
)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email, password, realm_subdomain=string_id, realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=string_id)
self.assertEqual(result["Location"], "http://custom-test.testserver/upgrade/")
result = self.client_get(result["Location"], subdomain=string_id)
self.assert_in_success_response(["Your card will not be charged", "free trial"], result)
realm = get_realm(string_id)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user(email, realm).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_two_realms(self) -> None:
"""
Verify correct behavior and PreregistrationRealm handling when using
two pre-generated realm creation links to create two different realms.
"""
password = "test"
first_string_id = "custom-test"
second_string_id = "custom-test2"
email = "user1@test.com"
first_realm_name = "Test"
second_realm_name = "Test"
# Make sure the realms do not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(first_string_id)
with self.assertRaises(Realm.DoesNotExist):
get_realm(second_string_id)
# Now we pre-generate two realm creation links
result = self.submit_realm_creation_form(
email, realm_subdomain=first_string_id, realm_name=first_realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(first_realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={first_string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
first_confirmation_url = self.get_confirmation_url_from_outbox(
email, email_body_contains="Organization URL"
)
self.assertEqual(PreregistrationRealm.objects.filter(email=email, status=0).count(), 1)
result = self.submit_realm_creation_form(
email, realm_subdomain=second_string_id, realm_name=second_realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].endswith(
f"/accounts/new/send_confirm/?email={quote(email)}&realm_name={quote_plus(second_realm_name)}&realm_type=10&realm_default_language=en&realm_subdomain={second_string_id}"
)
)
result = self.client_get(result["Location"])
self.assert_in_response("check your email", result)
second_confirmation_url = self.get_confirmation_url_from_outbox(
email, email_body_contains="Organization URL"
)
self.assertNotEqual(first_confirmation_url, second_confirmation_url)
self.assertEqual(PreregistrationRealm.objects.filter(email=email, status=0).count(), 2)
# Create and verify the first realm
result = self.client_get(first_confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=first_string_id,
realm_name=first_realm_name,
key=first_confirmation_url.split("/")[-1],
)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(first_string_id)
self.assertEqual(realm.string_id, first_string_id)
self.assertEqual(realm.name, first_realm_name)
# One of the PreregistrationRealm objects should have been used up:
self.assertEqual(PreregistrationRealm.objects.filter(email=email, status=0).count(), 1)
# Create and verify the second realm
result = self.client_get(second_confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain=second_string_id,
realm_name=second_realm_name,
key=second_confirmation_url.split("/")[-1],
)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(second_string_id)
self.assertEqual(realm.string_id, second_string_id)
self.assertEqual(realm.name, second_realm_name)
# The remaining PreregistrationRealm object should have been used up:
self.assertEqual(PreregistrationRealm.objects.filter(email=email, status=0).count(), 0)
@override_settings(OPEN_REALM_CREATION=True)
def test_invalid_email_signup(self) -> None:
result = self.submit_realm_creation_form(
email="<foo", realm_subdomain="custom-test", realm_name="Zulip test"
)
self.assert_in_response("Please use your real email address.", result)
self.assert_in_response("Enter a valid email address.", result)
result = self.submit_realm_creation_form(
email="foo\x00bar", realm_subdomain="custom-test", realm_name="Zulip test"
)
self.assert_in_response("Please use your real email address.", result)
self.assert_in_response("Null characters are not allowed.", result)
self.assert_in_response("Enter a valid email address.", result)
@override_settings(OPEN_REALM_CREATION=True)
def test_mailinator_signup(self) -> None:
result = self.client_post("/new/", {"email": "hi@mailinator.com"})
self.assert_in_response("Please use your real email address.", result)
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions(self) -> None:
password = "test"
email = "user1@test.com"
realm_name = "Test"
errors = {
"id": "length 3 or greater",
"-id": "cannot start or end with a",
"string-ID": "lowercase letters",
"string_id": "lowercase letters",
"stream": "reserved",
"streams": "reserved",
"about": "reserved",
"abouts": "reserved",
"zephyr": "already in use",
}
for string_id, error_msg in errors.items():
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_realm_creation_form(
email, realm_subdomain="a-0", realm_name=realm_name
)
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
result = self.submit_reg_form_for_user(
email, password, realm_subdomain="a-0", realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].startswith("http://a-0.testserver/accounts/login/subdomain/")
)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_using_old_subdomain_of_a_realm(self) -> None:
realm = get_realm("zulip")
do_change_realm_subdomain(realm, "new-name", acting_user=None)
email = "user1@test.com"
result = self.submit_realm_creation_form(email, realm_subdomain="test", realm_name="Test")
self.assert_in_response("Subdomain reserved. Please choose a different one.", result)
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions_root_domain(self) -> None:
password = "test"
email = "user1@test.com"
realm_name = "Test"
# test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.submit_realm_creation_form(
email, realm_subdomain="", realm_name=realm_name
)
self.assert_in_response("already in use", result)
# test valid use of root domain
result = self.submit_realm_creation_form(email, realm_subdomain="", realm_name=realm_name)
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
result = self.submit_reg_form_for_user(
email, password, realm_subdomain="", realm_name=realm_name
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].startswith("http://testserver/accounts/login/subdomain/")
)
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions_root_domain_option(self) -> None:
password = "test"
email = "user1@test.com"
realm_name = "Test"
# test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.submit_realm_creation_form(
email, realm_subdomain="abcdef", realm_name=realm_name, realm_in_root_domain="true"
)
self.assert_in_response("already in use", result)
# test valid use of root domain
result = self.submit_realm_creation_form(
email, realm_subdomain="abcdef", realm_name=realm_name, realm_in_root_domain="true"
)
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
result = self.submit_reg_form_for_user(
email,
password,
realm_subdomain="abcdef",
realm_in_root_domain="true",
realm_name=realm_name,
)
self.assertEqual(result.status_code, 302)
self.assertTrue(
result["Location"].startswith("http://testserver/accounts/login/subdomain/")
)
def test_is_root_domain_available(self) -> None:
self.assertTrue(is_root_domain_available())
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
self.assertFalse(is_root_domain_available())
realm = get_realm("zulip")
realm.string_id = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
realm.save()
self.assertFalse(is_root_domain_available())
def test_subdomain_check_api(self) -> None:
result = self.client_get("/json/realm/subdomain/zulip")
self.assert_in_success_response(
["Subdomain is already in use. Please choose a different one."], result
)
result = self.client_get("/json/realm/subdomain/zu_lip")
self.assert_in_success_response(
["Subdomain can only have lowercase letters, numbers, and '-'s."], result
)
with self.settings(SOCIAL_AUTH_SUBDOMAIN="zulipauth"):
result = self.client_get("/json/realm/subdomain/zulipauth")
self.assert_in_success_response(
["Subdomain reserved. Please choose a different one."], result
)
with self.settings(SELF_HOSTING_MANAGEMENT_SUBDOMAIN="zulipselfhosting"):
result = self.client_get("/json/realm/subdomain/zulipselfhosting")
self.assert_in_success_response(
["Subdomain reserved. Please choose a different one."], result
)
result = self.client_get("/json/realm/subdomain/hufflepuff")
self.assert_in_success_response(["available"], result)
self.assert_not_in_success_response(["already in use"], result)
self.assert_not_in_success_response(["reserved"], result)
def test_subdomain_check_management_command(self) -> None:
# Short names should not work, even with the flag
with self.assertRaises(ValidationError):
check_subdomain_available("aa")
with self.assertRaises(ValidationError):
check_subdomain_available("aa", allow_reserved_subdomain=True)
# Malformed names should never work
with self.assertRaises(ValidationError):
check_subdomain_available("-ba_d-")
with self.assertRaises(ValidationError):
check_subdomain_available("-ba_d-", allow_reserved_subdomain=True)
with patch("zerver.lib.name_restrictions.is_reserved_subdomain", return_value=False):
# Existing realms should never work even if they are not reserved keywords
with self.assertRaises(ValidationError):
check_subdomain_available("zulip")
with self.assertRaises(ValidationError):
check_subdomain_available("zulip", allow_reserved_subdomain=True)
# Reserved ones should only work with the flag
with self.assertRaises(ValidationError):
check_subdomain_available("stream")
check_subdomain_available("stream", allow_reserved_subdomain=True)
# "zulip" and "kandra" are allowed if not CORPORATE_ENABLED or with the flag
with self.settings(CORPORATE_ENABLED=False):
check_subdomain_available("we-are-zulip-team")
with self.settings(CORPORATE_ENABLED=True):
with self.assertRaises(ValidationError):
check_subdomain_available("we-are-zulip-team")
check_subdomain_available("we-are-zulip-team", allow_reserved_subdomain=True)
@override_settings(OPEN_REALM_CREATION=True, USING_CAPTCHA=True, ALTCHA_HMAC_KEY="secret")
def test_create_realm_with_captcha(self) -> None:
string_id = "custom-test"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
with self.assertRaises(Realm.DoesNotExist):
get_realm(string_id)
result = self.client_get("/new/")
self.assert_not_in_success_response(["Validation failed"], result)
# Without the CAPTCHA value, we get an error
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name
)
self.assert_in_success_response(["Validation failed, please try again."], result)
# With an invalid value, we also get an error
with self.assertLogs(level="WARNING") as logs:
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name, captcha="moose"
)
self.assert_in_success_response(["Validation failed, please try again."], result)
self.assert_length(logs.output, 1)
self.assertIn("Invalid altcha solution: Invalid altcha payload", logs.output[0])
# With something which raises an exception, we also get the same error
with self.assertLogs(level="WARNING") as logs:
result = self.submit_realm_creation_form(
email,
realm_subdomain=string_id,
realm_name=realm_name,
captcha=base64.b64encode(
orjson.dumps(["algorithm", "challenge", "number", "salt", "signature"])
).decode(),
)
self.assert_in_success_response(["Validation failed, please try again."], result)
self.assert_length(logs.output, 1)
self.assertIn(
"TypeError: list indices must be integers or slices, not str", logs.output[0]
)
# If we override the validation, we get an error because it's not in the session
payload = base64.b64encode(orjson.dumps({"challenge": "moose"})).decode()
with (
patch("zerver.forms.verify_solution", return_value=(True, None)) as verify,
self.assertLogs(level="WARNING") as logs,
):
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name, captcha=payload
)
self.assert_in_success_response(["Validation failed, please try again."], result)
verify.assert_called_once_with(payload, "secret", check_expires=True)
self.assert_length(logs.output, 1)
self.assertIn("Expired or replayed altcha solution", logs.output[0])
self.assertEqual(self.client.session.get("altcha_challenges"), None)
result = self.client_get("/json/antispam_challenge")
data = self.assert_json_success(result)
self.assertEqual(data["algorithm"], "SHA-256")
self.assertEqual(data["max_number"], 500000)
self.assertIn("signature", data)
self.assertIn("challenge", data)
self.assertIn("salt", data)
self.assert_length(self.client.session["altcha_challenges"], 1)
self.assertEqual(self.client.session["altcha_challenges"][0][0], data["challenge"])
# Update the payload so the challenge matches what is in the
# session. The real payload would have other keys.
payload = base64.b64encode(orjson.dumps({"challenge": data["challenge"]})).decode()
with patch("zerver.forms.verify_solution", return_value=(True, None)) as verify:
result = self.submit_realm_creation_form(
email, realm_subdomain=string_id, realm_name=realm_name, captcha=payload
)
self.assertEqual(result.status_code, 302)
verify.assert_called_once_with(payload, "secret", check_expires=True)
# And the challenge has been stripped out of the session
self.assertEqual(self.client.session["altcha_challenges"], [])
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_realm_creation.py",
"license": "Apache License 2.0",
"lines": 1054,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zerver/checks.py | import os
import re
from collections.abc import Iterable, Sequence
from typing import Any
from django.apps.config import AppConfig
from django.conf import settings
from django.core import checks
def check_required_settings(
app_configs: Sequence[AppConfig] | None,
databases: Sequence[str] | None,
**kwargs: Any,
) -> Iterable[checks.CheckMessage]:
# These are the settings that we check the user has filled in for
# production deployments before starting the app. The list consists of
# pairs of (setting name, default value that it must be changed from).
required_settings = [
("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "zulip-admin@example.com"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
]
errors = []
for setting_name, default in required_settings:
if (
hasattr(settings, setting_name)
and getattr(settings, setting_name) != default
and getattr(settings, setting_name)
):
continue
if settings.RUNNING_IN_DOCKER:
settings_location = "your Docker environment configuration"
setting_display_name = "SETTING_" + setting_name
else:
settings_location = "/etc/zulip/settings.py"
setting_display_name = setting_name
errors.append(
checks.Error(
f"You must set {setting_display_name} in {settings_location}",
obj=f"settings.{setting_name}",
id="zulip.E001",
)
)
return errors
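# A minimal sketch of how these check functions get wired up, assuming the
# standard django.core.checks registration API (the actual registration lives
# elsewhere in the codebase, e.g. in an AppConfig.ready() hook):
#
#     from django.core import checks
#
#     checks.register(check_required_settings, deploy=True)
#     checks.register(check_external_host_setting, deploy=True)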
def check_external_host_setting(
app_configs: Sequence[AppConfig] | None,
databases: Sequence[str] | None,
**kwargs: Any,
) -> Iterable[checks.CheckMessage]:
if not hasattr(settings, "EXTERNAL_HOST"): # nocoverage
return []
errors = []
scheme = settings.EXTERNAL_URI_SCHEME
if scheme != "https://" and not settings.DEVELOPMENT:
errors.append(
checks.Error(
"Zulip does not support a non-HTTPS external scheme in production",
obj="settings.EXTERNAL_URI_SCHEME",
hint="Do not override EXTERNAL_URI_SCHEME in production",
id="zulip.E004",
)
)
hostname = settings.EXTERNAL_HOST
if "." not in hostname and os.environ.get("ZULIP_TEST_SUITE") != "true" and settings.PRODUCTION:
suggest = ".localdomain" if hostname == "localhost" else ".local"
errors.append(
checks.Error(
f"EXTERNAL_HOST ({hostname}) does not contain a domain part",
obj="settings.EXTERNAL_HOST",
hint=f"Add {suggest} to the end",
id="zulip.E002",
)
)
if ":" in hostname:
hostname = hostname.split(":")[0]
if len(hostname) > 255:
errors.append(
checks.Error(
f"EXTERNAL_HOST ({hostname}) is too long to be a valid hostname",
obj="settings.EXTERNAL_HOST",
id="zulip.E002",
)
)
domain_part = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
if not hostname.isascii():
suggestion = ".".join(
"xn--" + part.encode("punycode").decode() if not part.isascii() else part
for part in hostname.split(".")
)
errors.append(
checks.Error(
f"EXTERNAL_HOST ({hostname}) contains non-ASCII characters",
hint=f"Switch to punycode: {suggestion}",
obj="settings.EXTERNAL_HOST",
id="zulip.E002",
)
)
elif not all(domain_part.match(x) for x in hostname.split(".")):
errors.append(
checks.Error(
f"EXTERNAL_HOST ({hostname}) does not validate as a hostname",
obj="settings.EXTERNAL_HOST",
id="zulip.E002",
)
)
return errors
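# Worked example of the punycode suggestion above (mirrored in the test
# suite): EXTERNAL_HOST = "日本語.example.com" produces the hint
# "Switch to punycode: xn--wgv71a119e.example.com".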
def check_auth_settings(
app_configs: Sequence[AppConfig] | None,
databases: Sequence[str] | None,
**kwargs: Any,
) -> Iterable[checks.CheckMessage]:
errors = []
for idp_name, idp_dict in settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.items():
if "zulip_groups" in idp_dict.get("extra_attrs", []):
errors.append(
checks.Error(
"zulip_groups can't be listed in extra_attrs",
obj=f'settings.SOCIAL_AUTH_SAML_ENABLED_IDPS["{idp_name}"]["extra_attrs"]',
id="zulip.E003",
)
)
for subdomain, config_dict in settings.SOCIAL_AUTH_SYNC_ATTRS_DICT.items():
for auth_name, attrs_map in config_dict.items():
for attr_key, attr_value in attrs_map.items():
if attr_value == "zulip_groups":
errors.append(
checks.Error(
"zulip_groups can't be listed as a SAML attribute",
obj=f'settings.SOCIAL_AUTH_SYNC_ATTRS_DICT["{subdomain}"]["{auth_name}"]["{attr_key}"]',
id="zulip.E004",
)
)
return errors
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/checks.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:zerver/tests/test_checks.py | import os
import re
from contextlib import ExitStack
from typing import Any
from django.core.management import call_command
from django.core.management.base import SystemCheckError
from django.test import override_settings
from zerver.lib.test_classes import ZulipTestCase
class TestChecks(ZulipTestCase):
def assert_check_with_error(self, test: re.Pattern[str] | str | None, **kwargs: Any) -> None:
with open(os.devnull, "w") as DEVNULL, override_settings(**kwargs), ExitStack() as stack:
if isinstance(test, str):
stack.enter_context(self.assertRaisesMessage(SystemCheckError, test))
elif isinstance(test, re.Pattern):
stack.enter_context(self.assertRaisesRegex(SystemCheckError, test))
call_command("check", stdout=DEVNULL)
@override_settings(RUNNING_IN_DOCKER=False)
def test_checks_required_setting(self) -> None:
self.assert_check_with_error(
"(zulip.E001) You must set ZULIP_ADMINISTRATOR in /etc/zulip/settings.py",
ZULIP_ADMINISTRATOR="zulip-admin@example.com",
)
self.assert_check_with_error(
"(zulip.E001) You must set ZULIP_ADMINISTRATOR in /etc/zulip/settings.py",
ZULIP_ADMINISTRATOR="",
)
self.assert_check_with_error(
"(zulip.E001) You must set ZULIP_ADMINISTRATOR in /etc/zulip/settings.py",
ZULIP_ADMINISTRATOR=None,
)
@override_settings(RUNNING_IN_DOCKER=True)
def test_checks_required_setting_docker(self) -> None:
self.assert_check_with_error(
"(zulip.E001) You must set SETTING_ZULIP_ADMINISTRATOR in your Docker environment configuration",
ZULIP_ADMINISTRATOR="zulip-admin@example.com",
)
self.assert_check_with_error(
"(zulip.E001) You must set SETTING_ZULIP_ADMINISTRATOR in your Docker environment configuration",
ZULIP_ADMINISTRATOR="",
)
self.assert_check_with_error(
"(zulip.E001) You must set SETTING_ZULIP_ADMINISTRATOR in your Docker environment configuration",
ZULIP_ADMINISTRATOR=None,
)
@override_settings(DEVELOPMENT=False, PRODUCTION=True, EXTERNAL_URI_SCHEME="https://")
def test_checks_external_host_domain(self) -> None:
message_re = r"\(zulip\.E002\) EXTERNAL_HOST \(\S+\) does not contain a domain part"
try:
# We default to skipping this check in CI, because
# "testserver" is part of so many tests. We temporarily
# strip out the environment variable we use to detect
# that, so we can trigger the check.
del os.environ["ZULIP_TEST_SUITE"]
self.assert_check_with_error(None, EXTERNAL_HOST="server-1.local")
self.assert_check_with_error(
re.compile(rf"{message_re}\s*HINT: Add .local to the end"), EXTERNAL_HOST="server-1"
)
self.assert_check_with_error(
re.compile(rf"{message_re}\s*HINT: Add .localdomain to the end"),
EXTERNAL_HOST="localhost",
)
finally:
os.environ["ZULIP_TEST_SUITE"] = "true"
def test_checks_external_host_value(self) -> None:
self.assert_check_with_error(None, EXTERNAL_HOST="testserver.local")
self.assert_check_with_error(None, EXTERNAL_HOST="testserver.local:443")
self.assert_check_with_error(None, EXTERNAL_HOST="testserver.local:https")
self.assert_check_with_error(
re.compile(r"EXTERNAL_HOST \(\S+\) is too long"),
EXTERNAL_HOST=("a234567890." * 25 + "local"),
)
self.assert_check_with_error(
re.compile(
r"\(zulip\.E002\) EXTERNAL_HOST \(\S+\) contains non-ASCII characters\n.*xn--wgv71a119e\.example\.com"
),
EXTERNAL_HOST="日本語.example.com",
)
self.assert_check_with_error(
"EXTERNAL_HOST (-bogus-.example.com) does not validate as a hostname",
EXTERNAL_HOST="-bogus-.example.com:443",
)
def test_checks_external_scheme(self) -> None:
self.assert_check_with_error(
None, EXTERNAL_URI_SCHEME="https://", DEVELOPMENT=False, PRODUCTION=True
)
self.assert_check_with_error(
None, EXTERNAL_URI_SCHEME="http://", DEVELOPMENT=True, PRODUCTION=False
)
self.assert_check_with_error(
"Zulip does not support a non-HTTPS external scheme in production",
EXTERNAL_URI_SCHEME="http://",
DEVELOPMENT=False,
PRODUCTION=True,
)
self.assert_check_with_error(
"Zulip does not support a non-HTTPS external scheme in production",
EXTERNAL_URI_SCHEME="https",
DEVELOPMENT=False,
PRODUCTION=True,
)
def test_checks_auth(self) -> None:
self.assert_check_with_error(
(
'SOCIAL_AUTH_SAML_ENABLED_IDPS["idp_name"]["extra_attrs"]: '
"(zulip.E003) zulip_groups can't be listed in extra_attrs"
),
SOCIAL_AUTH_SAML_ENABLED_IDPS={
"idp_name": {
"entity_id": "https://idp.testshib.org/idp/shibboleth",
"url": "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO",
"attr_user_permanent_id": "email",
"attr_first_name": "first_name",
"attr_last_name": "last_name",
"attr_username": "email",
"attr_email": "email",
"extra_attrs": ["title", "mobilePhone", "zulip_role", "zulip_groups"],
}
},
)
self.assert_check_with_error(
(
'settings.SOCIAL_AUTH_SYNC_ATTRS_DICT["example_org"]["saml"]["custom__groups"]: '
"(zulip.E004) zulip_groups can't be listed as a SAML attribute"
),
SOCIAL_AUTH_SYNC_ATTRS_DICT={
"example_org": {
"saml": {
"role": "zulip_role",
"custom__groups": "zulip_groups",
"custom__title": "title",
"groups": ["group1", "group2", ("samlgroup3", "zulipgroup3"), "group4"],
}
}
},
)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_checks.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zerver/webhooks/fixtureless_integrations.py | from datetime import datetime, timezone
from typing import TypedDict
from zerver.lib.timestamp import datetime_to_global_time
# For integrations that don't have example webhook fixtures/payloads,
# we create a Zulip notification message's content and topic here in
# order to generate an example screenshot to include in the documentation
# page for those integrations.
# To keep these screenshots consistent and easy to review, there are
# shared string constants to use for common content in these integration
# notification messages/templates.
THREE_DIGIT_NUMBER = "492"
# Example user content
BO_NAME = "Bo Williams"
BO_EMAIL = "bwilliams@example.com"
BO_GIT_NAME = "bo-williams"
KEVIN_NAME = "Kevin Lin"
KEVIN_EMAIL = "klin@example.com"
# Example project content
PROJECT_NAME = "Example Project"
PROJECT_PATH_PERFORCE = "//depot/zerver/example-project/*"
PROJECT_STAGE = "production"
VERSION_NUMBER = "v9.2.3"
REVISION_NUMBER = THREE_DIGIT_NUMBER
# Example branch content
BRANCH_GIT = "main"
BRANCH_MERCURIAL = "default"
BRANCH_SVN = "trunk"
# Example commit content
COMMIT_MESSAGE_A = "Optimize image loading in catalog."
COMMIT_MESSAGE_B = 'Suppress "comment edited" events when body is unchanged.'
COMMIT_BODY_A = "Implement lazy loading for images in the catalog to improve load times."
COMMIT_HASH_A = "a2e84e86ddf7e7f8a9b0c1d2e3f4a5b6c7d8e9f0"
COMMIT_HASH_B = "9fceb02c0c4b8e4c1e7b43hd4e5f6a7b8c9d0e1f"
DEPLOYMENT_HASH = "e494a5be3393"
# Example task/issue/ticket content
TASK_TITLE = COMMIT_MESSAGE_A[:-1]
TASK_DESCRIPTION = COMMIT_BODY_A
TICKET_NUMBER = THREE_DIGIT_NUMBER
# Example datetime content
_DT = datetime(2025, 5, 30, 2, 0, 0, tzinfo=timezone.utc)
DATETIME_GLOBAL = datetime_to_global_time(_DT)
DATE_ISO_8601 = _DT.date().isoformat()
DATE_LONG = _DT.strftime("%A, %B %d, %Y")
class ScreenshotContent(TypedDict):
topic: str
content: str
ASANA = ScreenshotContent(
topic=PROJECT_NAME,
content=f"{BO_NAME} created a new task **[{TASK_TITLE}]()**.\n> {TASK_DESCRIPTION}",
)
CAPISTRANO = ScreenshotContent(
topic=PROJECT_NAME,
content=f"The [deployment]() to **{PROJECT_STAGE}** (version {VERSION_NUMBER}) has been completed successfully! :rocket:",
)
CODEBASE = ScreenshotContent(
topic=f"Push to {BRANCH_GIT} on {PROJECT_NAME}",
content=f"""{BO_NAME} pushed 2 commit(s) to `{BRANCH_GIT}` in project {PROJECT_NAME}:
* [{COMMIT_HASH_A[:10]}](): {COMMIT_MESSAGE_A}
* [{COMMIT_HASH_B[:10]}](): {COMMIT_MESSAGE_B}
""",
)
DISCOURSE = ScreenshotContent(
topic="chat",
content=f"""**@{BO_NAME}** posted in [Example channel]()
> {COMMIT_BODY_A}""",
)
GIT = ScreenshotContent(
topic=BRANCH_GIT,
content=f"""`{DEPLOYMENT_HASH[:12]}` was deployed to `{BRANCH_GIT}` with:
* {KEVIN_EMAIL} - {COMMIT_HASH_A[:7]}: {COMMIT_MESSAGE_A}
* {BO_EMAIL} - {COMMIT_HASH_B[:7]}: {COMMIT_MESSAGE_B}
""",
)
GITHUB_ACTIONS = ScreenshotContent(
topic="scheduled backups",
content=f"""Backup [failed]() at {DATETIME_GLOBAL}.
> Unable to connect.""",
)
GOOGLE_CALENDAR = ScreenshotContent(
topic="Team reminders",
content=f"""The [Development Sync]() event is scheduled from 2 PM - 3 PM on {DATE_LONG} at Conference Room B.
> Let's align on our current sprint progress, address any blockers, and share updates. Your input is crucial!
[Join call]().""",
)
JENKINS = ScreenshotContent(
topic=PROJECT_NAME,
content=f"**Build:** [#{REVISION_NUMBER}](): FAILURE :cross_mark:",
)
MASTODON = ScreenshotContent(
topic="MIT Technology Review",
content=f"""**[Don’t let hype about AI agents get ahead of reality](https://www.technologyreview.com/2025/07/03/1119545/dont-let-hype-about-ai-agents-get-ahead-of-reality/)**
Google’s recent unveiling of what it calls a “new class of agentic experiences” feels like a turning point. At its I/O event last month, for example, the company showed off a digital assistant that didn’t just answer questions; it helped work on a bicycle repair by finding a matching user manual, locating a YouTube…
https://www.technologyreview.com/{DATE_ISO_8601.replace("-", "/")}/1119545/dont-let-hype-about-ai-agents-get-ahead-of-reality/""",
)
MERCURIAL = ScreenshotContent(
topic=BRANCH_MERCURIAL,
content=f"""**{BO_NAME}** pushed [2 commits]() to **{BRANCH_MERCURIAL}** (`{REVISION_NUMBER}:{DEPLOYMENT_HASH[:12]}`):
* [{COMMIT_MESSAGE_A}]()
* [{COMMIT_MESSAGE_B}]()
""",
)
NAGIOS = ScreenshotContent(
topic="service Remote Load on myserver.example.com",
content="""**PROBLEM**: service is CRITICAL
~~~~
CRITICAL - load average: 7.49, 8.20, 4.72
~~~~
""",
)
NOTION_VIA_ZAPIER = ScreenshotContent(
topic=f"{PROJECT_NAME} Release {VERSION_NUMBER}",
content=f"""**{BO_NAME}** [commented]() on:
> project demo scheduled
Can we reschedule this to next week?""",
)
OPENSHIFT = ScreenshotContent(
topic=PROJECT_NAME,
content=f"""Deployment [{REVISION_NUMBER}]() triggered by a push to **{BRANCH_GIT}** by commit [{COMMIT_HASH_A[:7]}]() at {DATETIME_GLOBAL} has **failed**.""",
)
PERFORCE = ScreenshotContent(
topic=PROJECT_PATH_PERFORCE,
content=f"""
**{BO_NAME}** committed revision @[{REVISION_NUMBER}]() to `{PROJECT_PATH_PERFORCE}`.
```quote
{COMMIT_MESSAGE_A}
```
""",
)
PUPPET = ScreenshotContent(
topic="Reports",
content=f"""Puppet production run for web-server-01 completed at {DATETIME_GLOBAL}. [GitHub Gist]() | [Report URL]()""",
)
RSS = MASTODON
SVN = ScreenshotContent(
topic=PROJECT_NAME,
content=f"""**{BO_GIT_NAME}** committed revision r{REVISION_NUMBER} to `{BRANCH_SVN}`.
> {COMMIT_MESSAGE_A}
""",
)
TRAC = ScreenshotContent(
topic=f"#{TICKET_NUMBER} {TASK_TITLE}",
content=f"""**{BO_GIT_NAME}** updated [ticket #{TICKET_NUMBER}]() with comment:
> Fixed in {COMMIT_HASH_A}
status: **new** => **closed**, resolution: => **fixed**""",
)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/webhooks/fixtureless_integrations.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/views/development/help.py | import os
import werkzeug
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
def help_dev_mode_view(request: HttpRequest, subpath: str = "") -> HttpResponse:
"""
Dev-only view that displays help information for setting up the
help center dev server in the default `run-dev` mode where the
help center server is not running. Also serves raw MDX content when
the `raw` query param is passed.
"""
def read_mdx_file(filename: str) -> HttpResponse:
file_path = os.path.join(
settings.DEPLOY_ROOT, "starlight_help", "src", "content", "docs", f"{filename}.mdx"
)
try:
with open(file_path, encoding="utf-8") as f:
content = f.read()
return HttpResponse(content, content_type="text/plain")
except OSError:
return HttpResponse("Error reading MDX file", status=500)
mdx_file_exists = False
is_requesting_raw_file = request.GET.get("raw") == ""
if subpath:
subpath = werkzeug.utils.secure_filename(subpath)
raw_url = f"/help/{subpath}?raw"
mdx_path = os.path.join(
settings.DEPLOY_ROOT, "starlight_help", "src", "content", "docs", f"{subpath}.mdx"
)
mdx_file_exists = os.path.exists(mdx_path) and "/include/" not in mdx_path
if mdx_file_exists and is_requesting_raw_file:
return read_mdx_file(subpath)
else:
if request.path.endswith("/"):
raw_url = "/help/?raw"
else:
raw_url = "/help?raw"
mdx_file_exists = True
if is_requesting_raw_file:
return read_mdx_file("index")
return render(
request,
"zerver/development/dev_help.html",
{
"subpath": subpath,
"mdx_file_exists": mdx_file_exists,
"raw_url": raw_url,
},
)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/views/development/help.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/management/commands/update_subscriber_counts.py | import argparse
import logging
from datetime import timedelta
from typing import Any
from django.conf import settings
from django.db import transaction
from django.db.models import F, QuerySet
from django.utils.timezone import now as timezone_now
from typing_extensions import override
from zerver.lib.logging_util import log_to_file
from zerver.lib.management import ZulipBaseCommand
from zerver.models import RealmAuditLog, Stream, Subscription
from zerver.models.realm_audit_logs import AuditLogEventType
## Logging setup ##
logger = logging.getLogger(__name__)
log_to_file(logger, settings.DIGEST_LOG_PATH)
class Command(ZulipBaseCommand):
help = """Update the `Stream.subscriber_count` field based on current subscribers.
Race conditions can keep the cached subscriber count from staying
accurate; this command is run as a daily cron job to correct any drift.
"""
@override
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--since",
type=int,
help="Only examine channels with changed subscribers in this many hours",
)
self.add_realm_args(parser, help="The optional name of the realm to limit to")
@override
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
streams = Stream.objects.all()
if options["since"]:
since_time = timezone_now() - timedelta(hours=options["since"])
# Two ways the count can change -- via a subscription
# being changed, or via a user being (de)activated.
changed_subs = RealmAuditLog.objects.filter(
event_type__in=(
AuditLogEventType.SUBSCRIPTION_CREATED,
AuditLogEventType.SUBSCRIPTION_ACTIVATED,
AuditLogEventType.SUBSCRIPTION_DEACTIVATED,
),
event_time__gte=since_time,
)
if realm:
changed_subs = changed_subs.filter(realm=realm)
# Find all users changed in the time period, join those to
# their subscriptions and distinct recipients, and thence
# to streams.
changed_users = RealmAuditLog.objects.filter(
event_type__in=(
AuditLogEventType.USER_CREATED,
AuditLogEventType.USER_DEACTIVATED,
AuditLogEventType.USER_ACTIVATED,
AuditLogEventType.USER_REACTIVATED,
),
event_time__gte=since_time,
)
if realm:
changed_users = changed_users.filter(realm=realm)
changed_user_ids = (
changed_users.values_list("modified_user_id", flat=True)
.distinct()
.order_by("modified_user_id")
)
changed_user_subs = (
Subscription.objects.filter(user_profile_id__in=changed_user_ids)
.values_list("recipient_id", flat=True)
.distinct()
.order_by("recipient_id")
)
streams_from_users = Stream.objects.filter(recipient_id__in=changed_user_subs)
if realm:
streams_from_users = streams_from_users.filter(realm=realm)
stream_ids: QuerySet[Any, int] = (
changed_subs.distinct("modified_stream_id")
.order_by("modified_stream_id")
.annotate(stream_id=F("modified_stream_id"))
.union(streams_from_users.annotate(stream_id=F("id")))
.values_list("stream_id", flat=True)
)
elif realm:
stream_ids = streams.filter(realm=realm).values_list("id", flat=True)
else:
stream_ids = streams.all().values_list("id", flat=True)
for stream_id in stream_ids.iterator():
with transaction.atomic(durable=True):
stream = Stream.objects.select_for_update(no_key=True).get(id=stream_id)
actual_subscriber_count = Subscription.objects.filter(
active=True,
recipient__type=2,
recipient__type_id=stream_id,
is_user_active=True,
).count()
db_count = stream.subscriber_count
if actual_subscriber_count == db_count:
continue
stream.subscriber_count = actual_subscriber_count
stream.save(update_fields=["subscriber_count"])
logger.info(
"Updated subscriber count of %s, #%s: from %d to %d",
stream.realm.string_id,
stream.name,
db_count,
actual_subscriber_count,
)
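# Sketch of the daily cron invocation described in the help text; the --since
# and --realm flags come from add_arguments/add_realm_args above (the exact
# crontab wiring is assumed, not shown here):
#
#     ./manage.py update_subscriber_counts --since 24 --realm zulip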
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/management/commands/update_subscriber_counts.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:tools/lib/html_elements.py | from collections.abc import Iterator
VALID_HTML_CONTEXTS: dict[tuple[str, str], str] = {
# https://html.spec.whatwg.org/multipage/indices.html#elements-3
("a", "phrasing"): "transparent",
("abbr", "phrasing"): "phrasing",
("address", "flow"): "flow",
("area", "phrasing"): "void",
("article", "flow"): "flow",
("aside", "flow"): "flow",
("audio", "phrasing"): "<audio>",
("b", "phrasing"): "phrasing",
("base", "<head>"): "void",
("bdi", "phrasing"): "phrasing",
("bdo", "phrasing"): "phrasing",
("blockquote", "flow"): "flow",
("body", "<html>"): "flow",
("br", "phrasing"): "void",
("button", "phrasing"): "phrasing",
("button", "<select>"): "phrasing",
("canvas", "phrasing"): "transparent",
("caption", "<table>"): "flow",
("center", "flow"): "flow", # FIXME: obsolete, remove this
("cite", "phrasing"): "phrasing",
("code", "phrasing"): "phrasing",
("col", "<colgroup>"): "void",
("colgroup", "<table>"): "<colgroup>",
("data", "phrasing"): "phrasing",
("datalist", "phrasing"): "<datalist>",
("dd", "<dl>"): "flow",
("del", "phrasing"): "transparent",
("details", "flow"): "<details>",
("dfn", "phrasing"): "phrasing",
("dialog", "flow"): "flow",
("div", "flow"): "flow",
("div", "<dl>"): "<dl>",
("div", "<select> content"): "<select> content",
("div", "<optgroup> content"): "<optgroup> content",
("div", "<option> content"): "<option> content",
("dl", "flow"): "<dl>",
("dt", "<dl>"): "phrasing",
("em", "phrasing"): "phrasing",
("embed", "phrasing"): "void",
("fieldset", "flow"): "<fieldset>",
("figcaption", "<figure>"): "flow",
("figure", "flow"): "<figure>",
("footer", "flow"): "flow",
("form", "flow"): "flow",
("h1", "plain heading"): "phrasing",
("h2", "plain heading"): "phrasing",
("h3", "plain heading"): "phrasing",
("h4", "plain heading"): "phrasing",
("h5", "plain heading"): "phrasing",
("h6", "plain heading"): "phrasing",
("head", "<html>"): "<head>",
("header", "flow"): "flow",
("hgroup", "heading"): "<hgroup>",
("hr", "flow"): "void",
("hr", "<select> content"): "void",
("html", "document"): "<html>",
("i", "phrasing"): "phrasing",
("iframe", "phrasing"): "empty",
("img", "phrasing"): "void",
("img", "<picture>"): "void",
("input", "phrasing"): "void",
("ins", "phrasing"): "transparent",
("kbd", "phrasing"): "phrasing",
("label", "phrasing"): "phrasing",
("legend", "<fieldset>"): "phrasing/heading",
("li", "list"): "flow",
("link", "<head>"): "void",
("link", "phrasing"): "void",
("main", "flow"): "flow",
("map", "phrasing"): "<map>",
("mark", "phrasing"): "phrasing",
("math", "phrasing"): "MathML",
("menu", "flow"): "list",
("meta", "<head>"): "void",
("meta", "phrasing"): "void",
("meter", "phrasing"): "phrasing",
("nav", "flow"): "flow",
("noscript", "<head>"): "transparent",
("noscript", "phrasing"): "transparent",
("noscript", "<select> content"): "transparent",
("noscript", "<optgroup> content"): "transparent",
("object", "phrasing"): "transparent",
("ol", "flow"): "list",
("optgroup", "<select>"): "<optgroup> content",
("option", "<select>"): "<option> content",
("option", "<datalist>"): "<option> content",
("option", "<optgroup>"): "<option> content",
("output", "phrasing"): "phrasing",
("p", "flow"): "phrasing",
("p", "<hgroup>"): "phrasing",
("picture", "phrasing"): "<picture>",
("pre", "flow"): "phrasing",
("progress", "phrasing"): "phrasing",
("q", "phrasing"): "phrasing",
("rp", "<ruby>"): "phrasing",
("rt", "<ruby>"): "phrasing",
("ruby", "phrasing"): "<ruby>",
("s", "phrasing"): "phrasing",
("samp", "phrasing"): "phrasing",
("script", "<head>"): "<script>",
("script", "phrasing"): "<script>",
("script", "script-supporting"): "<script>",
("search", "flow"): "flow",
("section", "flow"): "flow",
("select", "phrasing"): "<select>",
("selectedcontent", "<button>"): "empty",
("slot", "phrasing"): "transparent",
("small", "phrasing"): "phrasing",
("source", "<picture>"): "void",
("source", "<video>"): "void",
("source", "<audio>"): "void",
("span", "phrasing"): "phrasing",
("strong", "phrasing"): "phrasing",
("style", "<head>"): "<style>",
("sub", "phrasing"): "phrasing",
("summary", "<details>"): "phrasing/heading",
("sup", "phrasing"): "phrasing",
("svg", "phrasing"): "SVG",
("table", "flow"): "<table>",
("tbody", "<table>"): "<tbody>",
("td", "<tr>"): "flow",
("template", "<head>"): "unknown",
("template", "phrasing"): "unknown",
("template", "script-supporting"): "unknown",
("template", "<colgroup>"): "unknown",
("textarea", "phrasing"): "text",
("tfoot", "<table>"): "<tfoot>",
("th", "<tr>"): "flow",
("thead", "<table>"): "<thead>",
("time", "phrasing"): "phrasing",
("title", "<head>"): "text",
("tr", "<table>"): "<tr>",
("tr", "<thead>"): "<tr>",
("tr", "<tbody>"): "<tr>",
("tr", "<tfoot>"): "<tr>",
("track", "<audio>"): "void",
("track", "<video>"): "void",
("u", "phrasing"): "phrasing",
("ul", "flow"): "list",
("var", "phrasing"): "phrasing",
("video", "phrasing"): "<video>",
("wbr", "phrasing"): "void",
# https://html.spec.whatwg.org/multipage/embedded-content-other.html#mathml
("annotation-xml", "MathML"): "flow",
("mi", "MathML"): "phrasing",
("mo", "MathML"): "phrasing",
("mn", "MathML"): "phrasing",
("ms", "MathML"): "phrasing",
("mtext", "MathML"): "phrasing",
# https://html.spec.whatwg.org/multipage/embedded-content-other.html#svg-0
("foreignObject", "SVG"): "flow",
("title", "SVG"): "phrasing",
}
HTML_CONTEXT_FALLBACKS: dict[str, list[str]] = {
"<datalist>": ["phrasing", "script-supporting"],
"<details>": ["flow"],
"<dl>": ["script-supporting"],
"<fieldset>": ["flow"],
"<figure>": ["flow"],
"<hgroup>": ["plain heading", "script-supporting"],
"<optgroup> content": ["script-supporting"],
"<option> content": ["phrasing"],
"<option>": ["<option> content"],
"<picture>": ["script-supporting"],
"<ruby>": ["phrasing"],
"<select> content": ["script-supporting"],
"<select>": ["<select> content"],
"<table>": ["script-supporting"],
"<tbody>": ["script-supporting"],
"<tfoot>": ["script-supporting"],
"<thead>": ["script-supporting"],
"<tr>": ["script-supporting"],
"flow": ["phrasing", "heading"],
"heading": ["plain heading"],
"list": ["script-supporting"],
"phrasing/heading": ["phrasing", "heading"],
"unknown": ["document", "flow", "list", "<head>", "<select>", "<table>", "<tr>"],
}
FOREIGN_CONTEXTS = ["MathML", "SVG"]
def html_context_fallbacks(context: str) -> Iterator[str]:
yield context
for fallback_context in HTML_CONTEXT_FALLBACKS.get(context, []):
yield from html_context_fallbacks(fallback_context)
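# Example of the depth-first fallback expansion, assuming the tables above:
#
#     >>> list(html_context_fallbacks("phrasing/heading"))
#     ['phrasing/heading', 'phrasing', 'heading', 'plain heading']
#
# Each context is yielded first, then its fallbacks are expanded in order.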
| {
"repo_id": "zulip/zulip",
"file_path": "tools/lib/html_elements.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/lib/zstd_level9.py | from zstd import compress as original_compress
from zstd import decompress
__all__ = ["compress", "decompress"]
def compress(data: bytes, level: int | None = None) -> bytes:
"""Compress data with zstd, defaulting to compression level 9."""
if level is None:
level = 9
return original_compress(data, level)
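# Round-trip sketch of the re-exported API with the level-9 default:
#
#     >>> decompress(compress(b"zulip" * 100)) == b"zulip" * 100
#     True
#     >>> compress(b"zulip" * 100) == compress(b"zulip" * 100, 9)
#     True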
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/lib/zstd_level9.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/tests/test_welcome_bot_custom_message.py | from typing_extensions import override
from zerver.lib.test_classes import ZulipTestCase
class WelcomeBotCustomMessageTest(ZulipTestCase):
@override
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("iago")
def test_empty_welcome_bot_custom_message(self) -> None:
user = self.example_user("desdemona")
self.login_user(user)
result = self.client_post(
"/json/realm/test_welcome_bot_custom_message",
{"welcome_message_custom_text": ""},
)
self.assert_json_error(result, "Message must not be empty")
def test_welcome_bot_custom_message(self) -> None:
user = self.example_user("desdemona")
self.login_user(user)
welcome_message_custom_text = "Welcome Bot custom message for testing"
result = self.client_post(
"/json/realm/test_welcome_bot_custom_message",
{"welcome_message_custom_text": welcome_message_custom_text},
)
response_dict = self.assert_json_success(result)
welcome_bot_custom_message_id = response_dict["message_id"]
# Make sure that only the message with the custom text is sent.
previous_message = self.get_second_to_last_message()
self.assertNotEqual(previous_message.sender.email, "welcome-bot@zulip.com")
received_welcome_bot_custom_message = self.get_last_message()
self.assertEqual(received_welcome_bot_custom_message.sender.email, "welcome-bot@zulip.com")
self.assertIn(welcome_message_custom_text, received_welcome_bot_custom_message.content)
self.assertEqual(welcome_bot_custom_message_id, received_welcome_bot_custom_message.id)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_welcome_bot_custom_message.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zerver/views/welcome_bot_custom_message.py | from typing import Annotated
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from pydantic import StringConstraints
from zerver.actions.message_send import internal_send_private_message
from zerver.decorator import require_realm_admin
from zerver.lib.exceptions import JsonableError
from zerver.lib.onboarding import get_custom_welcome_message_string
from zerver.lib.response import json_success
from zerver.lib.typed_endpoint import typed_endpoint
from zerver.models.realms import Realm
from zerver.models.users import UserProfile, get_system_bot
@require_realm_admin
@typed_endpoint
def send_test_welcome_bot_custom_message(
request: HttpRequest,
user_profile: UserProfile,
*,
welcome_message_custom_text: Annotated[
str,
StringConstraints(
max_length=Realm.MAX_REALM_WELCOME_MESSAGE_CUSTOM_TEXT_LENGTH,
),
],
) -> HttpResponse:
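# StringConstraints enforces only the maximum length, so the empty case
# still needs an explicit check here.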
if len(welcome_message_custom_text) == 0:
raise JsonableError(_("Message must not be empty"))
welcome_bot_custom_message_string = get_custom_welcome_message_string(
user_profile.realm, welcome_message_custom_text
)
message_id = internal_send_private_message(
get_system_bot(settings.WELCOME_BOT, user_profile.realm_id),
user_profile,
welcome_bot_custom_message_string,
disable_external_notifications=True,
)
assert message_id is not None
return json_success(request, data={"message_id": message_id})
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/views/welcome_bot_custom_message.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/tests/test_e2ee_push_notifications.py | from datetime import datetime, timezone
from typing import Any
from unittest import mock
import responses
import time_machine
from django.test import override_settings
from django.utils.timezone import now
from firebase_admin.exceptions import InternalError
from firebase_admin.messaging import UnregisteredError
from typing_extensions import override
from analytics.models import RealmCount
from zerver.actions.user_groups import check_add_user_group
from zerver.lib.avatar import absolute_avatar_url
from zerver.lib.devices import b64encode_token_id_int
from zerver.lib.exceptions import MissingRemoteRealmError
from zerver.lib.push_notifications import (
PushNotificationsDisallowedByBouncerError,
handle_push_notification,
handle_remove_push_notification,
)
from zerver.lib.remote_server import (
PushNotificationBouncerError,
PushNotificationBouncerRetryLaterError,
PushNotificationBouncerServerError,
)
from zerver.lib.test_classes import E2EEPushNotificationTestCase
from zerver.lib.test_helpers import activate_push_notification_service
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import Device, UserMessage
from zerver.models.realms import get_realm
from zerver.models.scheduled_jobs import NotificationTriggers
from zerver.models.streams import get_stream
from zilencer.lib.push_notifications import SentPushNotificationResult
from zilencer.models import RemoteRealm, RemoteRealmCount
@activate_push_notification_service()
class SendPushNotificationTest(E2EEPushNotificationTestCase):
def test_success_cloud(self) -> None:
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
iago = self.example_user("iago")
registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification()
)
def test_end_to_end(missed_message: dict[str, Any], *, db_query_count: int) -> None:
self.assertEqual(RealmCount.objects.count(), 0)
with (
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="INFO") as zilencer_logger,
mock.patch("time.perf_counter", side_effect=[10.0, 15.0]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_success_response()
send_notification.return_value.is_successful = True
with self.assert_database_query_count(db_query_count):
handle_push_notification(hamlet.id, missed_message)
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Skipping legacy push notifications for user {hamlet.id} because there are no registered devices",
zerver_logger.output[1],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"APNs: Success sending to (realm={hamlet.realm.uuid}, device={registered_device_apple.token})",
zerver_logger.output[2],
)
self.assertEqual(
"INFO:zilencer.lib.push_notifications:"
f"FCM: Sent message with ID: 0 to (realm={hamlet.realm.uuid}, device={registered_device_android.token})",
zilencer_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 1 via APNs in 5.000s",
zerver_logger.output[3],
)
realm_count_dict = (
RealmCount.objects.filter(property="mobile_pushes_sent::day")
.values("subgroup", "value")
.last()
)
self.assertEqual(realm_count_dict, dict(subgroup=None, value=2))
# Reset
RealmCount.objects.all().delete()
# 1:1 DM
# query count : source
# * 1 : `get_user_profile_by_id`
# * 2 : `access_message_and_usermessage` (Fetch Message + UserMessage)
# * 1 : update fetched user_message flag
# * 2 : fetch PushDeviceToken + Device
# * 1 : `get_display_recipient` in `get_message_payload`
# * 2 : fetch RemotePushDevice + update RealmCount
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
test_end_to_end(missed_message, db_query_count=9)
# Group DM
message_id = self.send_group_direct_message(
iago, [hamlet, aaron, iago], skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
test_end_to_end(missed_message, db_query_count=9)
# Channel message
# 2 more queries than the 1:1 DM case
# 1 : fetch Stream in `access_message_and_usermessage` codepath
# 1 : query NamedUserGroup in `check_can_access_user` codepath
# 1 : fetch Stream in `get_message_payload` (TODO: we can avoid this)
# -1 : `get_display_recipient` not needed
message_id = self.send_stream_message(
aaron, "Verona", skip_capture_on_commit_callbacks=True
)
missed_message = {"message_id": message_id, "trigger": NotificationTriggers.STREAM_PUSH}
test_end_to_end(missed_message, db_query_count=11)
# Channel message: private channel + user-group mention
# 3 more queries than the previous case:
# 1 : query Subscription in `access_message_and_usermessage` codepath (needed for private channel)
# 2 : `get_mentioned_user_group` because the content includes a user-group mention.
channel = get_stream("core team", iago.realm)
user_group = check_add_user_group(
iago.realm, "test_group", [iago, aaron, hamlet], acting_user=iago
)
message_id = self.send_stream_message(
iago, channel.name, f"@*{user_group.name}*", skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.MENTION,
"mentioned_user_group_id": user_group.id,
}
test_end_to_end(missed_message, db_query_count=14)
def test_no_registered_device(self) -> None:
aaron = self.example_user("aaron")
hamlet = self.example_user("hamlet")
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
with self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger:
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Skipping legacy push notifications for user {hamlet.id} because there are no registered devices",
zerver_logger.output[1],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Skipping E2EE push notifications for user {hamlet.id} because there are no registered devices",
zerver_logger.output[2],
)
def test_invalid_or_expired_token(self) -> None:
aaron = self.example_user("aaron")
hamlet = self.example_user("hamlet")
registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification()
)
self.assertIsNone(registered_device_apple.expired_time)
self.assertIsNone(registered_device_android.expired_time)
self.assertEqual(Device.objects.filter(push_token_id__isnull=False).count(), 2)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
with (
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="INFO") as zilencer_logger,
mock.patch("time.perf_counter", side_effect=[10.5, 11.0]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_error_response(
UnregisteredError("Token expired")
)
send_notification.return_value.is_successful = False
send_notification.return_value.description = "BadDeviceToken"
handle_push_notification(hamlet.id, missed_message)
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"APNs: Removing invalid/expired token {registered_device_apple.token} (BadDeviceToken)",
zerver_logger.output[2],
)
self.assertEqual(
"INFO:zilencer.lib.push_notifications:"
f"FCM: Removing {registered_device_android.token} due to NOT_FOUND",
zilencer_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Clearing `push_token_id` for Device rows with the following token IDs based on response from bouncer: ['{b64encode_token_id_int(registered_device_apple.token_id)}', '{b64encode_token_id_int(registered_device_android.token_id)}']",
zerver_logger.output[3],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 0 via FCM, 0 via APNs in 0.500s",
zerver_logger.output[4],
)
# Verify that `expired_time` is set on the `RemotePushDevice` entries
# and that `push_token_id` is cleared on the corresponding `Device` rows on the server.
registered_device_apple.refresh_from_db()
registered_device_android.refresh_from_db()
self.assertIsNotNone(registered_device_apple.expired_time)
self.assertIsNotNone(registered_device_android.expired_time)
self.assertEqual(Device.objects.filter(push_token_id__isnull=False).count(), 0)
def test_fcm_apns_error(self) -> None:
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
_registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification()
)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
# `get_apns_context` returns `None` + FCM returns an error other than UnregisteredError.
with (
self.mock_fcm() as mock_fcm_messaging,
mock.patch("zilencer.lib.push_notifications.get_apns_context", return_value=None),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="WARNING") as zilencer_logger,
mock.patch("time.perf_counter", side_effect=[10.0, 12.0]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_error_response(
InternalError("fcm-error")
)
handle_push_notification(hamlet.id, missed_message)
mock_fcm_messaging.send_each.assert_called_once()
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"ERROR:zilencer.lib.push_notifications:"
"APNs: Dropping push notifications since neither APNS_TOKEN_KEY_FILE nor APNS_CERT_FILE is set.",
zilencer_logger.output[0],
)
self.assertIn(
"WARNING:zilencer.lib.push_notifications:"
f"FCM: Delivery failed to (realm={hamlet.realm.uuid}, device={registered_device_android.token})",
zilencer_logger.output[1],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 0 via FCM, 0 via APNs in 2.000s",
zerver_logger.output[2],
)
# `firebase_messaging.send_each` raises an error.
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
with (
self.mock_fcm() as mock_fcm_messaging,
mock.patch(
"zilencer.lib.push_notifications.send_e2ee_push_notification_apple",
return_value=SentPushNotificationResult(
successfully_sent_count=1,
delete_token_ids_base64=[],
),
),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="WARNING") as zilencer_logger,
mock.patch("time.perf_counter", side_effect=[10.0, 12.0]),
):
mock_fcm_messaging.send_each.side_effect = InternalError("server error")
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertIn(
"WARNING:zilencer.lib.push_notifications:Error while pushing to FCM",
zilencer_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 0 via FCM, 1 via APNs in 2.000s",
zerver_logger.output[2],
)
# `ANDROID_FCM_CREDENTIALS_PATH` is unset / fcm_app=None.
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
with (
mock.patch("zilencer.lib.push_notifications.fcm_app", new=None),
mock.patch(
"zilencer.lib.push_notifications.send_e2ee_push_notification_apple",
return_value=SentPushNotificationResult(
successfully_sent_count=1,
delete_token_ids_base64=[],
),
),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="ERROR") as zilencer_logger,
mock.patch("time.perf_counter", side_effect=[10.0, 12.0]),
):
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"ERROR:zilencer.lib.push_notifications:"
"FCM: Dropping push notifications since ANDROID_FCM_CREDENTIALS_PATH is unset",
zilencer_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 0 via FCM, 1 via APNs in 2.000s",
zerver_logger.output[2],
)
def test_early_return_if_expired_time_set(self) -> None:
aaron = self.example_user("aaron")
hamlet = self.example_user("hamlet")
registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification()
)
registered_device_apple.expired_time = datetime(2099, 4, 24, tzinfo=timezone.utc)
registered_device_android.expired_time = datetime(2099, 4, 24, tzinfo=timezone.utc)
registered_device_apple.save(update_fields=["expired_time"])
registered_device_android.save(update_fields=["expired_time"])
self.assertEqual(Device.objects.filter(push_token_id__isnull=False).count(), 2)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
# Since 'expired_time' is set for the relevant 'RemotePushDevice' rows,
# the bouncer will not attempt to send notifications and instead returns
# a list of token IDs that the server should erase on its own end.
with (
mock.patch(
"zilencer.lib.push_notifications.send_e2ee_push_notification_apple"
) as send_apple,
mock.patch(
"zilencer.lib.push_notifications.send_e2ee_push_notification_android"
) as send_android,
):
handle_push_notification(hamlet.id, missed_message)
send_apple.assert_not_called()
send_android.assert_not_called()
self.assertEqual(Device.objects.filter(push_token_id__isnull=False).count(), 0)
@responses.activate
@override_settings(ZILENCER_ENABLED=False)
def test_success_self_hosted(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
realm = hamlet.realm
registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification(is_server_self_hosted=True)
)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
# Set up state to verify whether these fields get updated correctly.
realm.push_notifications_enabled = False
realm.push_notifications_enabled_end_timestamp = datetime(2099, 4, 24, tzinfo=timezone.utc)
realm.save(
update_fields=["push_notifications_enabled", "push_notifications_enabled_end_timestamp"]
)
self.assertEqual(RealmCount.objects.count(), 0)
self.assertEqual(RemoteRealmCount.objects.count(), 0)
with (
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
mock.patch(
"corporate.lib.stripe.RemoteRealmBillingSession.current_count_for_billed_licenses",
return_value=10,
),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="INFO") as zilencer_logger,
mock.patch("time.perf_counter", side_effect=[10.05, 12.10]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_success_response()
send_notification.return_value.is_successful = True
handle_push_notification(hamlet.id, missed_message)
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Skipping legacy push notifications for user {hamlet.id} because there are no registered devices",
zerver_logger.output[1],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"APNs: Success sending to (remote_realm={hamlet.realm.uuid}, device={registered_device_apple.token})",
zerver_logger.output[2],
)
self.assertEqual(
"INFO:zilencer.lib.push_notifications:"
f"FCM: Sent message with ID: 0 to (remote_realm={hamlet.realm.uuid}, device={registered_device_android.token})",
zilencer_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 1 via APNs in 2.050s",
zerver_logger.output[3],
)
realm_count_dict = (
RealmCount.objects.filter(property="mobile_pushes_sent::day")
.values("subgroup", "value")
.last()
)
self.assertEqual(realm_count_dict, dict(subgroup=None, value=2))
remote_realm_count_dict = (
RemoteRealmCount.objects.filter(property="mobile_pushes_received::day")
.values("subgroup", "value")
.last()
)
self.assertEqual(remote_realm_count_dict, dict(subgroup=None, value=2))
remote_realm_count_dict = (
RemoteRealmCount.objects.filter(property="mobile_pushes_forwarded::day")
.values("subgroup", "value")
.last()
)
self.assertEqual(remote_realm_count_dict, dict(subgroup=None, value=2))
realm.refresh_from_db()
self.assertTrue(realm.push_notifications_enabled)
self.assertIsNone(realm.push_notifications_enabled_end_timestamp)
@responses.activate
@override_settings(ZILENCER_ENABLED=False)
def test_missing_remote_realm_error(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
realm = hamlet.realm
self.register_push_devices_for_notification(is_server_self_hosted=True)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
# Set up state to verify whether these fields get updated correctly.
realm.push_notifications_enabled = True
realm.push_notifications_enabled_end_timestamp = datetime(2099, 4, 24, tzinfo=timezone.utc)
realm.save(
update_fields=["push_notifications_enabled", "push_notifications_enabled_end_timestamp"]
)
# To replicate a missing remote realm.
RemoteRealm.objects.all().delete()
with (
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.views", level="INFO") as zilencer_logger,
):
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zilencer.views:"
f"/api/v1/remotes/push/e2ee/notify: Received request for unknown realm {realm.uuid}, server {self.server.id}",
zilencer_logger.output[0],
)
self.assertEqual(
"WARNING:zerver.lib.push_notifications:"
"Bouncer refused to send E2EE push notification: Organization not registered",
zerver_logger.output[2],
)
realm.refresh_from_db()
self.assertFalse(realm.push_notifications_enabled)
self.assertIsNone(realm.push_notifications_enabled_end_timestamp)
@responses.activate
@override_settings(ZILENCER_ENABLED=False)
def test_no_plan_error(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
realm = hamlet.realm
self.register_push_devices_for_notification(is_server_self_hosted=True)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
# Set up state to verify whether these fields get updated correctly.
realm.push_notifications_enabled = True
realm.push_notifications_enabled_end_timestamp = datetime(2099, 4, 24, tzinfo=timezone.utc)
realm.save(
update_fields=["push_notifications_enabled", "push_notifications_enabled_end_timestamp"]
)
with (
mock.patch(
"corporate.lib.stripe.RemoteRealmBillingSession.current_count_for_billed_licenses",
return_value=100,
),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
):
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"WARNING:zerver.lib.push_notifications:"
"Bouncer refused to send E2EE push notification: Your plan doesn't allow sending push notifications. "
"Reason provided by the server: Push notifications access with 10+ users requires signing up for a plan. https://zulip.com/plans/",
zerver_logger.output[2],
)
realm.refresh_from_db()
self.assertFalse(realm.push_notifications_enabled)
self.assertIsNone(realm.push_notifications_enabled_end_timestamp)
def test_both_old_and_new_client_coexists(self) -> None:
"""Test coexistence of old (which don't support E2EE)
and new client devices registered for push notifications.
"""
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification()
)
registered_device_apple_old, registered_device_android_old = (
self.register_old_push_devices_for_notification()
)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
self.assertEqual(RealmCount.objects.count(), 0)
with (
self.mock_fcm(for_legacy=True) as mock_fcm_messaging_legacy,
self.mock_apns(for_legacy=True) as send_notification_legacy,
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
mock.patch(
"zerver.lib.push_notifications.uses_notification_bouncer", return_value=False
),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="INFO") as zilencer_logger,
mock.patch("time.perf_counter", side_effect=[10.0, 12.0]),
):
mock_fcm_messaging_legacy.send_each.return_value = self.make_fcm_success_response(
for_legacy=True
)
send_notification_legacy.return_value.is_successful = True
mock_fcm_messaging.send_each.return_value = self.make_fcm_success_response()
send_notification.return_value.is_successful = True
handle_push_notification(hamlet.id, missed_message)
mock_fcm_messaging_legacy.send_each.assert_called_once()
send_notification_legacy.assert_called_once()
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending push notifications to mobile clients for user {hamlet.id}",
zerver_logger.output[0],
)
# Logs in legacy codepath
self.assertEqual(
zerver_logger.output[1:6],
[
f"INFO:zerver.lib.push_notifications:Sending mobile push notifications for local user {hamlet.id}: 1 via FCM devices, 1 via APNs devices",
f"INFO:zerver.lib.push_notifications:APNs: Sending notification for local user <id:{hamlet.id}> to 1 devices (skipped 0 duplicates)",
f"INFO:zerver.lib.push_notifications:APNs: Success sending for user <id:{hamlet.id}> to device {registered_device_apple_old.token}",
f"INFO:zerver.lib.push_notifications:FCM: Sending notification for local user <id:{hamlet.id}> to 1 devices",
f"INFO:zerver.lib.push_notifications:FCM: Sent message with ID: 0 to {registered_device_android_old.token}",
],
)
# Logs in E2EE codepath
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"APNs: Success sending to (realm={hamlet.realm.uuid}, device={registered_device_apple.token})",
zerver_logger.output[6],
)
self.assertEqual(
"INFO:zilencer.lib.push_notifications:"
f"FCM: Sent message with ID: 0 to (realm={hamlet.realm.uuid}, device={registered_device_android.token})",
zilencer_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 1 via APNs in 2.000s",
zerver_logger.output[7],
)
realm_count_dict = (
RealmCount.objects.filter(property="mobile_pushes_sent::day")
.values("subgroup", "value")
.last()
)
self.assertEqual(realm_count_dict, dict(subgroup=None, value=4))
def test_payload_data_to_encrypt_channel_message(self) -> None:
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
realm = get_realm("zulip")
user_group = check_add_user_group(realm, "test_user_group", [hamlet], acting_user=hamlet)
time_now = now()
self.subscribe(aaron, "Denmark")
self.register_push_devices_for_notification()
with time_machine.travel(time_now, tick=False):
message_id = self.send_stream_message(
sender=aaron,
stream_name="Denmark",
content=f"@*{user_group.name}*",
skip_capture_on_commit_callbacks=True,
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.MENTION,
"mentioned_user_group_id": user_group.id,
}
expected_payload_data_to_encrypt = {
"realm_url": realm.url,
"realm_name": realm.name,
"user_id": hamlet.id,
"sender_id": aaron.id,
"mentioned_user_group_id": user_group.id,
"mentioned_user_group_name": user_group.name,
"recipient_type": "channel",
"channel_name": "Denmark",
"channel_id": self.get_stream_id("Denmark"),
"topic": "test",
"type": "message",
"message_id": message_id,
"time": datetime_to_timestamp(time_now),
"content": f"@{user_group.name}",
"sender_full_name": aaron.full_name,
"sender_avatar_url": absolute_avatar_url(aaron),
}
with mock.patch("zerver.lib.push_notifications.send_push_notifications") as m:
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(m.call_args.args[1], expected_payload_data_to_encrypt)
def test_payload_data_to_encrypt_direct_message(self) -> None:
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
realm = get_realm("zulip")
time_now = now()
self.register_push_devices_for_notification()
with time_machine.travel(time_now, tick=False):
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
expected_payload_data_to_encrypt = {
"realm_url": realm.url,
"realm_name": realm.name,
"user_id": hamlet.id,
"sender_id": aaron.id,
"recipient_type": "direct",
"recipient_user_ids": sorted([aaron.id, hamlet.id]),
"type": "message",
"message_id": message_id,
"time": datetime_to_timestamp(time_now),
"content": "test content",
"sender_full_name": aaron.full_name,
"sender_avatar_url": absolute_avatar_url(aaron),
}
with mock.patch("zerver.lib.push_notifications.send_push_notifications") as m:
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(m.call_args.args[1], expected_payload_data_to_encrypt)
def test_payload_data_to_encrypt_group_direct_message(self) -> None:
aaron = self.example_user("aaron")
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
realm = get_realm("zulip")
time_now = now()
self.register_push_devices_for_notification()
with time_machine.travel(time_now, tick=False):
message_id = self.send_group_direct_message(
from_user=aaron, to_users=[hamlet, cordelia], skip_capture_on_commit_callbacks=True
)
missed_message = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
expected_payload_data_to_encrypt = {
"realm_url": realm.url,
"realm_name": realm.name,
"user_id": hamlet.id,
"sender_id": aaron.id,
"recipient_type": "direct",
"recipient_user_ids": sorted([aaron.id, cordelia.id, hamlet.id]),
"type": "message",
"message_id": message_id,
"time": datetime_to_timestamp(time_now),
"content": "test content",
"sender_full_name": aaron.full_name,
"sender_avatar_url": absolute_avatar_url(aaron),
}
with mock.patch("zerver.lib.push_notifications.send_push_notifications") as m:
handle_push_notification(hamlet.id, missed_message)
self.assertEqual(m.call_args.args[1], expected_payload_data_to_encrypt)
@activate_push_notification_service()
class RemovePushNotificationTest(E2EEPushNotificationTestCase):
def test_success_cloud(self) -> None:
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
self.register_push_devices_for_notification()
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
user_message = UserMessage.objects.get(user_profile=hamlet, message_id=message_id)
user_message.flags.active_mobile_push_notification = True
user_message.save(update_fields=["flags"])
with (
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="INFO"),
mock.patch("time.perf_counter", side_effect=[10.0, 12.0]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_success_response()
send_notification.return_value.is_successful = True
handle_remove_push_notification(hamlet.id, [message_id])
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
user_message.refresh_from_db()
self.assertFalse(user_message.flags.active_mobile_push_notification)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 1 via APNs in 2.000s",
zerver_logger.output[2],
)
@responses.activate
@override_settings(ZILENCER_ENABLED=False)
def test_success_self_hosted(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
self.register_push_devices_for_notification(is_server_self_hosted=True)
message_id = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
user_message = UserMessage.objects.get(user_profile=hamlet, message_id=message_id)
user_message.flags.active_mobile_push_notification = True
user_message.save(update_fields=["flags"])
with (
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
mock.patch(
"corporate.lib.stripe.RemoteRealmBillingSession.current_count_for_billed_licenses",
return_value=10,
),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
self.assertLogs("zilencer.lib.push_notifications", level="INFO"),
mock.patch("time.perf_counter", side_effect=[10.0, 12.0]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_success_response()
send_notification.return_value.is_successful = True
handle_remove_push_notification(hamlet.id, [message_id])
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
user_message.refresh_from_db()
self.assertFalse(user_message.flags.active_mobile_push_notification)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 1 via APNs in 2.000s",
zerver_logger.output[2],
)
def test_remove_payload_data_to_encrypt(self) -> None:
hamlet = self.example_user("hamlet")
aaron = self.example_user("aaron")
realm = get_realm("zulip")
self.register_push_devices_for_notification()
message_id_one = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
message_id_two = self.send_personal_message(
from_user=aaron, to_user=hamlet, skip_capture_on_commit_callbacks=True
)
expected_payload_data_to_encrypt = {
"realm_url": realm.url,
"realm_name": realm.name,
"user_id": hamlet.id,
"type": "remove",
"message_ids": [message_id_one, message_id_two],
}
with mock.patch("zerver.lib.push_notifications.send_push_notifications") as m:
handle_remove_push_notification(hamlet.id, [message_id_one, message_id_two])
self.assertEqual(m.call_args.args[1], expected_payload_data_to_encrypt)
class RequireE2EEPushNotificationsSettingTest(E2EEPushNotificationTestCase):
@override
def setUp(self) -> None:
super().setUp()
self.hamlet = self.example_user("hamlet")
self.aaron = self.example_user("aaron")
realm = self.hamlet.realm
realm.require_e2ee_push_notifications = True
realm.save(update_fields=["require_e2ee_push_notifications"])
def get_example_missed_message(self, content: str = "test content") -> dict[str, int | str]:
message_id = self.send_personal_message(
from_user=self.aaron,
to_user=self.hamlet,
content=content,
skip_capture_on_commit_callbacks=True,
)
missed_message: dict[str, int | str] = {
"message_id": message_id,
"trigger": NotificationTriggers.DIRECT_MESSAGE,
}
return missed_message
def test_content_redacted(self) -> None:
self.register_old_push_devices_for_notification()
self.register_push_devices_for_notification()
missed_message = self.get_example_missed_message(content="not-redacted")
# Verify that the content is redacted in payloads supplied to
# 'send_notifications_to_bouncer' - i.e. payloads sent to the bouncer (legacy codepath).
#
# Verify that the content is not redacted in payloads supplied to
# 'send_push_notifications' - i.e. payloads that get encrypted.
with (
activate_push_notification_service(),
mock.patch(
"zerver.lib.push_notifications.send_notifications_to_bouncer"
) as mock_legacy,
mock.patch("zerver.lib.push_notifications.send_push_notifications") as mock_e2ee,
):
handle_push_notification(self.hamlet.id, missed_message)
mock_legacy.assert_called_once()
self.assertEqual(mock_legacy.call_args.args[1]["alert"]["body"], "New message")
self.assertEqual(mock_legacy.call_args.args[2]["content"], "New message")
mock_e2ee.assert_called_once()
self.assertEqual(mock_e2ee.call_args.args[1]["content"], "not-redacted")
missed_message = self.get_example_missed_message()
# Verify that the content is redacted in payloads supplied
# to the functions that send it through APNs and FCM directly.
with (
mock.patch("zerver.lib.push_notifications.has_apns_credentials", return_value=True),
mock.patch("zerver.lib.push_notifications.has_fcm_credentials", return_value=True),
mock.patch(
"zerver.lib.push_notifications.send_notifications_to_bouncer"
) as send_bouncer,
mock.patch(
"zerver.lib.push_notifications.send_apple_push_notification", return_value=0
) as send_apple,
mock.patch(
"zerver.lib.push_notifications.send_android_push_notification", return_value=0
) as send_android,
# We have already asserted the payloads passed to the E2EE codepath above.
mock.patch("zerver.lib.push_notifications.send_push_notifications"),
):
handle_push_notification(self.hamlet.id, missed_message)
send_bouncer.assert_not_called()
send_apple.assert_called_once()
send_android.assert_called_once()
self.assertEqual(send_apple.call_args.args[2]["alert"]["body"], "New message")
self.assertEqual(send_android.call_args.args[2]["content"], "New message")
def test_content_redacted_only_android_registered(self) -> None:
registered_device_apple, _ = self.register_old_push_devices_for_notification()
registered_device_apple.delete()
missed_message = self.get_example_missed_message()
with (
activate_push_notification_service(),
mock.patch(
"zerver.lib.push_notifications.send_notifications_to_bouncer"
) as mock_legacy,
):
handle_push_notification(self.hamlet.id, missed_message)
mock_legacy.assert_called_once()
self.assertEqual(mock_legacy.call_args.args[1], {})
self.assertEqual(mock_legacy.call_args.args[2]["content"], "New message")
def test_content_redacted_only_apple_registered(self) -> None:
_, registered_device_android = self.register_old_push_devices_for_notification()
registered_device_android.delete()
missed_message = self.get_example_missed_message()
with (
activate_push_notification_service(),
mock.patch(
"zerver.lib.push_notifications.send_notifications_to_bouncer"
) as mock_legacy,
):
handle_push_notification(self.hamlet.id, missed_message)
mock_legacy.assert_called_once()
self.assertEqual(mock_legacy.call_args.args[1]["alert"]["body"], "New message")
self.assertEqual(mock_legacy.call_args.args[2], {})
class SendTestPushNotificationTest(E2EEPushNotificationTestCase):
def test_success_cloud(self) -> None:
hamlet = self.example_user("hamlet")
_registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification()
)
with (
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
self.assertLogs("zilencer.lib.push_notifications", level="INFO"),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
mock.patch("time.perf_counter", side_effect=[10.0, 12.0, 13.0, 16.0]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_success_response()
send_notification.return_value.is_successful = True
# Send test notification to all of the registered mobile devices.
result = self.api_post(
hamlet, "/api/v1/mobile_push/e2ee/test_notification", subdomain="zulip"
)
self.assert_json_success(result)
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending E2EE test push notification for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 1 via APNs in 2.000s",
zerver_logger.output[-1],
)
# Send test notification to a selected mobile device.
push_device = Device.objects.get(push_token_id=registered_device_android.token_id)
result = self.api_post(
hamlet,
"/api/v1/mobile_push/e2ee/test_notification",
{"device_id": push_device.id},
subdomain="zulip",
)
self.assert_json_success(result)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending E2EE test push notification for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 0 via APNs in 3.000s",
zerver_logger.output[-1],
)
@responses.activate
@override_settings(ZILENCER_ENABLED=False)
def test_success_self_hosted(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
self.register_push_devices_for_notification(is_server_self_hosted=True)
with (
self.mock_fcm() as mock_fcm_messaging,
self.mock_apns() as send_notification,
mock.patch(
"corporate.lib.stripe.RemoteRealmBillingSession.current_count_for_billed_licenses",
return_value=10,
),
self.assertLogs("zilencer.lib.push_notifications", level="INFO"),
self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger,
mock.patch("time.perf_counter", side_effect=[10.0, 12.0]),
):
mock_fcm_messaging.send_each.return_value = self.make_fcm_success_response()
send_notification.return_value.is_successful = True
# Send test notification to all of the registered mobile devices.
result = self.api_post(
hamlet, "/api/v1/mobile_push/e2ee/test_notification", subdomain="zulip"
)
self.assert_json_success(result)
mock_fcm_messaging.send_each.assert_called_once()
send_notification.assert_called_once()
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending E2EE test push notification for user {hamlet.id}",
zerver_logger.output[0],
)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sent E2EE mobile push notifications for user {hamlet.id}: 1 via FCM, 1 via APNs in 2.000s",
zerver_logger.output[-1],
)
@responses.activate
@override_settings(ZILENCER_ENABLED=False)
def test_error_responses(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
# No registered device to send to.
result = self.api_post(
hamlet, "/api/v1/mobile_push/e2ee/test_notification", subdomain="zulip"
)
self.assert_json_error(result, "No active registered push device", 400)
# Verify that errors are propagated to the client.
registered_device_apple, registered_device_android = (
self.register_push_devices_for_notification(is_server_self_hosted=True)
)
def assert_error_response(msg: str, http_status_code: int) -> None:
with self.assertLogs("zerver.lib.push_notifications", level="INFO") as zerver_logger:
result = self.api_post(
hamlet, "/api/v1/mobile_push/e2ee/test_notification", subdomain="zulip"
)
self.assert_json_error(result, msg, http_status_code)
self.assertEqual(
"INFO:zerver.lib.push_notifications:"
f"Sending E2EE test push notification for user {hamlet.id}",
zerver_logger.output[0],
)
with (
mock.patch(
"zerver.lib.remote_server.send_to_push_bouncer",
side_effect=PushNotificationBouncerRetryLaterError("network error"),
),
self.assertLogs(level="ERROR") as error_logs,
):
assert_error_response(
"Network error while connecting to Zulip push notification service.", 502
)
self.assertEqual(
"ERROR:django.request:Bad Gateway: /api/v1/mobile_push/e2ee/test_notification",
error_logs.output[0],
)
with (
mock.patch(
"zerver.lib.remote_server.send_to_push_bouncer",
side_effect=PushNotificationBouncerServerError("server error"),
),
self.assertLogs(level="ERROR") as error_logs,
):
assert_error_response(
"Internal server error on Zulip push notification service, retry later.", 502
)
self.assertEqual(
"ERROR:django.request:Bad Gateway: /api/v1/mobile_push/e2ee/test_notification",
error_logs.output[0],
)
with mock.patch(
"zerver.lib.remote_server.send_to_push_bouncer", side_effect=MissingRemoteRealmError
):
assert_error_response(
"Push notification configuration issue on server, contact the server administrator or retry later.",
403,
)
with mock.patch(
"zerver.lib.remote_server.send_to_push_bouncer",
side_effect=PushNotificationBouncerError,
):
assert_error_response(
"Push notification configuration issue on server, contact the server administrator or retry later.",
403,
)
with mock.patch(
"zerver.lib.remote_server.send_to_push_bouncer",
side_effect=PushNotificationsDisallowedByBouncerError("plan expired"),
):
assert_error_response(
"Push notification configuration issue on server, contact the server administrator or retry later.",
403,
)
# Device marked expired on bouncer (not on server).
registered_device_apple.delete()
registered_device_android.delete()
with mock.patch(
"corporate.lib.stripe.RemoteRealmBillingSession.current_count_for_billed_licenses",
return_value=10,
):
assert_error_response("No active registered push device", 400)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_e2ee_push_notifications.py",
"license": "Apache License 2.0",
"lines": 1102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zilencer/lib/push_notifications.py | import asyncio
import logging
from collections.abc import Iterable
from dataclasses import asdict, dataclass
from aioapns import NotificationRequest
from django.utils.timezone import now as timezone_now
from firebase_admin import exceptions as firebase_exceptions
from firebase_admin import messaging as firebase_messaging
from firebase_admin.messaging import UnregisteredError as FCMUnregisteredError
from zerver.lib.devices import b64decode_token_id_base64, b64encode_token_id_int
from zerver.lib.push_notifications import (
APNsPushRequest,
FCMPushRequest,
SendNotificationResponseData,
fcm_app,
get_apns_context,
get_info_from_apns_result,
)
from zerver.models.realms import Realm
from zilencer.models import RemotePushDevice, RemoteRealm
logger = logging.getLogger(__name__)
@dataclass
class SentPushNotificationResult:
successfully_sent_count: int
delete_token_ids_base64: list[str]
def send_e2ee_push_notification_apple(
apns_requests: list[NotificationRequest],
apns_remote_push_devices: list[RemotePushDevice],
log_context: str,
) -> SentPushNotificationResult:
import aioapns
successfully_sent_count = 0
delete_token_ids_base64: list[str] = []
apns_context = get_apns_context()
if apns_context is None:
logger.error(
"APNs: Dropping push notifications since "
"neither APNS_TOKEN_KEY_FILE nor APNS_CERT_FILE is set."
)
return SentPushNotificationResult(
successfully_sent_count=successfully_sent_count,
delete_token_ids_base64=delete_token_ids_base64,
)
async def send_all_notifications() -> Iterable[
tuple[RemotePushDevice, aioapns.common.NotificationResult | BaseException]
]:
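# gather() returns results in the same order as the input requests, so
# zipping with apns_remote_push_devices pairs each device with its result.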
results = await asyncio.gather(
*(apns_context.apns.send_notification(request) for request in apns_requests),
return_exceptions=True,
)
return zip(apns_remote_push_devices, results, strict=False)
results = apns_context.loop.run_until_complete(send_all_notifications())
for remote_push_device, result in results:
result_info = get_info_from_apns_result(
result,
remote_push_device,
log_context % remote_push_device.token,
)
if result_info.successfully_sent:
successfully_sent_count += 1
elif result_info.delete_token_id_base64 is not None:
remote_push_device.expired_time = timezone_now()
remote_push_device.save(update_fields=["expired_time"])
delete_token_ids_base64.append(result_info.delete_token_id_base64)
return SentPushNotificationResult(
successfully_sent_count=successfully_sent_count,
delete_token_ids_base64=delete_token_ids_base64,
)
def send_e2ee_push_notification_android(
fcm_requests: list[firebase_messaging.Message],
fcm_remote_push_devices: list[RemotePushDevice],
log_context: str,
) -> SentPushNotificationResult:
successfully_sent_count = 0
delete_token_ids_base64: list[str] = []
if fcm_app is None:
logger.error("FCM: Dropping push notifications since ANDROID_FCM_CREDENTIALS_PATH is unset")
return SentPushNotificationResult(
successfully_sent_count=successfully_sent_count,
delete_token_ids_base64=delete_token_ids_base64,
)
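# A FirebaseError raised here (as opposed to per-message failures reported
# in the batch response) is treated as the whole batch having failed.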
try:
batch_response = firebase_messaging.send_each(fcm_requests, app=fcm_app)
except firebase_exceptions.FirebaseError:
logger.warning("Error while pushing to FCM", exc_info=True)
return SentPushNotificationResult(
successfully_sent_count=successfully_sent_count,
delete_token_ids_base64=delete_token_ids_base64,
)
for idx, response in enumerate(batch_response.responses):
# We enumerate so that idx tracks which token each response
# corresponds to; send_each() preserves the order of the messages,
# so the indices line up.
remote_push_device = fcm_remote_push_devices[idx]
token = remote_push_device.token
if response.success:
successfully_sent_count += 1
logger.info(
"FCM: Sent message with ID: %s %s", response.message_id, log_context % token
)
else:
error = response.exception
if isinstance(error, FCMUnregisteredError):
remote_push_device.expired_time = timezone_now()
remote_push_device.save(update_fields=["expired_time"])
delete_token_ids_base64.append(b64encode_token_id_int(remote_push_device.token_id))
logger.info("FCM: Removing %s due to %s", token, error.code)
else:
logger.warning(
"FCM: Delivery failed %s: %s:%s",
log_context % token,
error.__class__,
error,
)
return SentPushNotificationResult(
successfully_sent_count=successfully_sent_count,
delete_token_ids_base64=delete_token_ids_base64,
)
def send_e2ee_push_notifications(
push_requests: list[APNsPushRequest | FCMPushRequest],
*,
realm: Realm | None = None,
remote_realm: RemoteRealm | None = None,
) -> SendNotificationResponseData:
assert (realm is None) ^ (remote_realm is None)
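# Exactly one of realm/remote_realm must be provided.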
import aioapns
token_ids_base64 = set()
token_ids_int = set()
for push_request in push_requests:
token_ids_base64.add(push_request.token_id)
token_ids_int.add(b64decode_token_id_base64(push_request.token_id))
remote_push_devices = RemotePushDevice.objects.filter(
token_id__in=token_ids_int,
realm=realm,
remote_realm=remote_realm,
expired_time__isnull=True,
)
token_id_base64_to_remote_push_device = {
b64encode_token_id_int(remote_push_device.token_id): remote_push_device
for remote_push_device in remote_push_devices
}
unexpired_token_ids_base64 = set(token_id_base64_to_remote_push_device.keys())
# Token IDs that should be deleted on the server:
# either the token ID is invalid, or the associated token
# has been marked invalid/expired by APNs/FCM.
delete_token_ids_base64 = list(
filter(
lambda token_id_base64: token_id_base64 not in unexpired_token_ids_base64,
token_ids_base64,
)
)
apns_requests = []
apns_remote_push_devices: list[RemotePushDevice] = []
fcm_requests = []
fcm_remote_push_devices: list[RemotePushDevice] = []
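# Route each request into the APNs or FCM batch based on the registered
# device's token kind; tokens already marked expired were filtered out above.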
for push_request in push_requests:
token_id_base64 = push_request.token_id
if token_id_base64 not in unexpired_token_ids_base64:
continue
remote_push_device = token_id_base64_to_remote_push_device[token_id_base64]
if remote_push_device.token_kind == RemotePushDevice.TokenKind.APNS:
assert isinstance(push_request, APNsPushRequest)
apns_requests.append(
aioapns.NotificationRequest(
apns_topic=remote_push_device.ios_app_id,
device_token=remote_push_device.token,
message=asdict(push_request.payload),
priority=push_request.http_headers.apns_priority,
push_type=push_request.http_headers.apns_push_type,
)
)
apns_remote_push_devices.append(remote_push_device)
else:
assert isinstance(push_request, FCMPushRequest)
fcm_payload = dict(
# FCM only allows string values, so we stringify push_key_id.
push_key_id=str(push_request.payload.push_key_id),
encrypted_data=push_request.payload.encrypted_data,
)
fcm_requests.append(
firebase_messaging.Message(
data=fcm_payload,
token=remote_push_device.token,
android=firebase_messaging.AndroidConfig(priority=push_request.fcm_priority),
)
)
fcm_remote_push_devices.append(remote_push_device)
if realm is not None:
log_context = f"to (realm={realm.uuid}, device=%s)"
else:
assert remote_realm is not None
log_context = f"to (remote_realm={remote_realm.uuid}, device=%s)"
apple_successfully_sent_count = 0
if len(apns_requests) > 0:
sent_push_notification_result = send_e2ee_push_notification_apple(
apns_requests,
apns_remote_push_devices,
log_context,
)
apple_successfully_sent_count = sent_push_notification_result.successfully_sent_count
delete_token_ids_base64.extend(sent_push_notification_result.delete_token_ids_base64)
android_successfully_sent_count = 0
if len(fcm_requests) > 0:
sent_push_notification_result = send_e2ee_push_notification_android(
fcm_requests,
fcm_remote_push_devices,
log_context,
)
android_successfully_sent_count = sent_push_notification_result.successfully_sent_count
delete_token_ids_base64.extend(sent_push_notification_result.delete_token_ids_base64)
return {
"apple_successfully_sent_count": apple_successfully_sent_count,
"android_successfully_sent_count": android_successfully_sent_count,
"delete_token_ids": delete_token_ids_base64,
}
| {
"repo_id": "zulip/zulip",
"file_path": "zilencer/lib/push_notifications.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:zerver/lib/push_registration.py | import base64
import binascii
import logging
from typing import TypedDict
from django.conf import settings
from django.utils.translation import gettext as _
from zerver.lib.devices import b64decode_token_id_base64
from zerver.lib.exceptions import (
InvalidBouncerPublicKeyError,
InvalidEncryptedPushRegistrationError,
JsonableError,
MissingRemoteRealmError,
RequestExpiredError,
)
from zerver.lib.remote_server import (
PushNotificationBouncerError,
PushNotificationBouncerRetryLaterError,
PushNotificationBouncerServerError,
send_to_push_bouncer,
)
from zerver.models.devices import Device
from zerver.models.users import UserProfile, get_user_profile_by_id
from zerver.tornado.django_api import send_event_on_commit
if settings.ZILENCER_ENABLED:
from zilencer.views import do_register_remote_push_device
logger = logging.getLogger(__name__)
class RegisterPushDeviceToBouncerQueueItem(TypedDict):
user_profile_id: int
device_id: int
bouncer_public_key: str
encrypted_push_registration: str
token_id_base64: str
def handle_registration_to_bouncer_failure(
user_profile: UserProfile, device_id: int, error_code: str
) -> None:
"""Handles a failed registration request to the bouncer by
notifying or preparing to notify clients.
* Sends a `device` event to notify online clients immediately.
* Stores the `error_code` in the `Device` table. This is later
used, along with other metadata, to notify offline clients the
next time they call `/register`. See the `devices` field in
the `/register` response.
"""
Device.objects.filter(id=device_id).update(push_registration_error_code=error_code)
event = dict(
type="device",
op="update",
device_id=device_id,
push_registration_error_code=error_code,
)
send_event_on_commit(user_profile.realm, event, [user_profile.id])
# Report the `REQUEST_EXPIRED` error to the server admins as it indicates
# a long-lasting outage somewhere between the server and the bouncer,
# most likely in either the server or its local network configuration.
if error_code == Device.PushRegistrationErrorCode.REQUEST_EXPIRED:
logging.error("Push registration request for device_id=%s expired.", device_id)
def handle_register_push_device_to_bouncer(
queue_item: RegisterPushDeviceToBouncerQueueItem,
) -> None:
user_profile_id = queue_item["user_profile_id"]
user_profile = get_user_profile_by_id(user_profile_id)
device_id = queue_item["device_id"]
bouncer_public_key = queue_item["bouncer_public_key"]
encrypted_push_registration = queue_item["encrypted_push_registration"]
token_id_base64 = queue_item["token_id_base64"]
try:
if settings.ZILENCER_ENABLED:
do_register_remote_push_device(
bouncer_public_key,
encrypted_push_registration,
token_id_base64,
realm=user_profile.realm,
)
else:
post_data: dict[str, str | int] = {
"realm_uuid": str(user_profile.realm.uuid),
"token_id": token_id_base64,
"encrypted_push_registration": encrypted_push_registration,
"bouncer_public_key": bouncer_public_key,
}
send_to_push_bouncer("POST", "push/e2ee/register", post_data)
except (
PushNotificationBouncerRetryLaterError,
PushNotificationBouncerServerError,
) as e: # nocoverage
# Network error or 5xx error response from bouncer server.
# Keep retrying to register until `RequestExpiredError` is raised.
raise PushNotificationBouncerRetryLaterError(e.msg)
except (
# Need to resubmit realm info via `manage.py register_server`
MissingRemoteRealmError,
# Invalid credentials or unexpected status code
PushNotificationBouncerError,
):
# Server admins need to fix these errors, so report them.
# The server should keep retrying to register until `RequestExpiredError` is raised.
error_msg = f"Push device registration request for device_id={device_id} failed."
logging.error(error_msg)
raise PushNotificationBouncerRetryLaterError(error_msg)
except (
InvalidBouncerPublicKeyError,
InvalidEncryptedPushRegistrationError,
RequestExpiredError,
# Catches any future or unexpected `JsonableError` subclasses we add.
JsonableError,
) as e:
handle_registration_to_bouncer_failure(
user_profile, device_id, error_code=e.__class__.code.name
)
return
# Registration successful.
Device.objects.filter(id=device_id).update(
push_token_id=b64decode_token_id_base64(token_id_base64), pending_push_token_id=None
)
event = dict(
type="device",
op="update",
device_id=device_id,
push_token_id=token_id_base64,
pending_push_token_id=None,
)
send_event_on_commit(user_profile.realm, event, [user_profile.id])
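# Illustrative sketch (not part of the original module): the shape of the
# queue payload that `handle_register_push_device_to_bouncer` consumes. All
# values below are made-up placeholders; the `token_id_base64` derivation
# matches what the test suite does (base64 of the first 8 bytes of
# SHA-256(token)).
#
#     queue_item: RegisterPushDeviceToBouncerQueueItem = {
#         "user_profile_id": 1,
#         "device_id": 42,
#         "bouncer_public_key": "<base64-encoded bouncer public key>",
#         "encrypted_push_registration": "<sealed-box ciphertext, base64>",
#         "token_id_base64": "<first 8 bytes of SHA-256(token), base64>",
#     }
#     handle_register_push_device_to_bouncer(queue_item)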
def check_push_key(push_key_str: str) -> bytes:
error_message = _("Invalid `push_key`")
try:
push_key_bytes = base64.b64decode(push_key_str, validate=True)
except binascii.Error:
raise JsonableError(error_message)
if len(push_key_bytes) != 33 or push_key_bytes[0] != 0x31:
raise JsonableError(error_message)
return push_key_bytes
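# Illustrative sketch (not part of the original module): constructing a value
# that `check_push_key` accepts -- base64 of exactly 33 bytes whose first byte
# is the 0x31 prefix. The key material here is a made-up placeholder.
#
#     import base64
#     push_key_bytes = bytes([0x31]) + bytes(32)  # 0x31 prefix + 32 key bytes
#     push_key_str = base64.b64encode(push_key_bytes).decode()
#     assert check_push_key(push_key_str) == push_key_bytes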
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/lib/push_registration.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:zerver/tests/test_push_registration.py | import base64
import hashlib
import uuid
from datetime import timedelta
import orjson
import responses
import time_machine
from django.conf import settings
from django.test import override_settings
from django.utils.timezone import now
from nacl.encoding import Base64Encoder
from nacl.public import PublicKey, SealedBox
from zerver.lib.devices import b64decode_token_id_base64
from zerver.lib.exceptions import (
InvalidBouncerPublicKeyError,
InvalidEncryptedPushRegistrationError,
RequestExpiredError,
)
from zerver.lib.push_registration import check_push_key
from zerver.lib.queue import queue_event_on_commit
from zerver.lib.test_classes import BouncerTestCase
from zerver.lib.test_helpers import activate_push_notification_service, mock_queue_publish
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import Device, UserProfile
from zilencer.models import RemotePushDevice, RemoteRealm
class RegisterPushDeviceToBouncer(BouncerTestCase):
DEFAULT_SUBDOMAIN = ""
def get_register_push_device_payload(
self,
token: str = "c0ffee",
token_kind: str = RemotePushDevice.TokenKind.APNS,
ios_app_id: str | None = "example.app",
timestamp: int | None = None,
) -> dict[str, str | int]:
hamlet = self.example_user("hamlet")
remote_realm = RemoteRealm.objects.get(uuid=hamlet.realm.uuid)
if timestamp is None:
timestamp = datetime_to_timestamp(now())
push_registration = {
"token": token,
"token_kind": token_kind,
"ios_app_id": ios_app_id,
"timestamp": timestamp,
}
hash_bytes = hashlib.sha256(token.encode()).digest()
token_id_base64 = base64.b64encode(hash_bytes[0:8]).decode()
assert settings.PUSH_REGISTRATION_ENCRYPTION_KEYS
public_key_str: str = next(iter(settings.PUSH_REGISTRATION_ENCRYPTION_KEYS.keys()))
public_key = PublicKey(public_key_str.encode("utf-8"), Base64Encoder)
sealed_box = SealedBox(public_key)
encrypted_push_registration_bytes = sealed_box.encrypt(
orjson.dumps(push_registration), Base64Encoder
)
encrypted_push_registration = encrypted_push_registration_bytes.decode("utf-8")
payload: dict[str, str | int] = {
"realm_uuid": str(remote_realm.uuid),
"token_id": token_id_base64,
"encrypted_push_registration": encrypted_push_registration,
"bouncer_public_key": public_key_str,
}
return payload
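# Illustrative sketch (not part of the original tests): the bouncer-side
# decryption that mirrors the SealedBox encryption above, assuming the
# matching private key is available as `private_key_str` (a base64 string).
#
#     from nacl.public import PrivateKey, SealedBox
#     private_key = PrivateKey(private_key_str.encode("utf-8"), Base64Encoder)
#     plaintext = SealedBox(private_key).decrypt(
#         encrypted_push_registration.encode("utf-8"), Base64Encoder
#     )
#     push_registration = orjson.loads(plaintext)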
def test_register_push_device_success(self) -> None:
remote_push_devices_count = RemotePushDevice.objects.count()
self.assertEqual(remote_push_devices_count, 0)
payload = self.get_register_push_device_payload()
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
payload,
)
self.assert_json_success(result)
remote_push_devices = RemotePushDevice.objects.all()
self.assert_length(remote_push_devices, 1)
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(
remote_push_devices[0].token_id, b64decode_token_id_base64(payload["token_id"])
)
# Idempotent
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
payload,
)
self.assert_json_success(result)
remote_push_devices_count = RemotePushDevice.objects.count()
self.assertEqual(remote_push_devices_count, 1)
# Android
payload = self.get_register_push_device_payload(
token="android-tokenaz", token_kind=RemotePushDevice.TokenKind.FCM, ios_app_id=None
)
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
payload,
)
self.assert_json_success(result)
remote_push_devices = RemotePushDevice.objects.order_by("pk")
self.assert_length(remote_push_devices, 2)
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(
remote_push_devices[1].token_id, b64decode_token_id_base64(payload["token_id"])
)
def test_register_push_device_error(self) -> None:
payload = self.get_register_push_device_payload()
invalid_realm_uuid_payload = {**payload, "realm_uuid": str(uuid.uuid4())}
with self.assertLogs("zilencer.views", level="INFO"):
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
invalid_realm_uuid_payload,
)
self.assert_json_error(result, "Organization not registered", status_code=403)
invalid_bouncer_public_key_payload = {**payload, "bouncer_public_key": "invalid public key"}
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
invalid_bouncer_public_key_payload,
)
self.assert_json_error(result, "Invalid bouncer_public_key")
liveness_timed_out_payload = self.get_register_push_device_payload(
timestamp=datetime_to_timestamp(now() - timedelta(days=2))
)
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
liveness_timed_out_payload,
)
self.assert_json_error(result, "Request expired")
# Test the various cases resulting in InvalidEncryptedPushRegistrationError
payload = self.get_register_push_device_payload()
payload["encrypted_push_registration"] = "random-string-no-encryption"
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
invalid_ios_app_id_format_payload = self.get_register_push_device_payload(
ios_app_id="* -- +"
)
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
invalid_ios_app_id_format_payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
invalid_token_kind_payload = self.get_register_push_device_payload(token_kind="xyz")
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
invalid_token_kind_payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
missing_ios_app_id_payload = self.get_register_push_device_payload(ios_app_id=None)
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
missing_ios_app_id_payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
set_ios_app_id_for_android_payload = self.get_register_push_device_payload(
token_kind=RemotePushDevice.TokenKind.FCM, ios_app_id="not-null"
)
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
set_ios_app_id_for_android_payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
invalid_token_payload = self.get_register_push_device_payload(token="")
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
invalid_token_payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
invalid_token_payload = self.get_register_push_device_payload(
token="xyz non-hex characters"
)
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
invalid_token_payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
# Invalid `token_id`
payload = self.get_register_push_device_payload()
payload["token_id"] = "192057"
result = self.uuid_post(
self.server_uuid,
"/api/v1/remotes/push/e2ee/register",
payload,
)
self.assert_json_error(result, "Invalid encrypted_push_registration")
class RegisterPushDeviceToServer(BouncerTestCase):
def get_register_push_device_payload(
self,
device_user: UserProfile | None = None,
token: str = "c0ffee",
token_kind: str = RemotePushDevice.TokenKind.APNS,
ios_app_id: str | None = "example.app",
timestamp: int | None = None,
) -> dict[str, str | int]:
if timestamp is None:
timestamp = datetime_to_timestamp(now())
push_registration = {
"token": token,
"token_kind": token_kind,
"ios_app_id": ios_app_id,
"timestamp": timestamp,
}
hash_bytes = hashlib.sha256(token.encode()).digest()
token_id_base64 = base64.b64encode(hash_bytes[0:8]).decode()
assert settings.PUSH_REGISTRATION_ENCRYPTION_KEYS
public_key_str: str = next(iter(settings.PUSH_REGISTRATION_ENCRYPTION_KEYS.keys()))
public_key = PublicKey(public_key_str.encode("utf-8"), Base64Encoder)
sealed_box = SealedBox(public_key)
encrypted_push_registration_bytes = sealed_box.encrypt(
orjson.dumps(push_registration), Base64Encoder
)
encrypted_push_registration = encrypted_push_registration_bytes.decode("utf-8")
if device_user is None:
device_user = self.example_user("hamlet")
device = Device.objects.create(user=device_user)
payload: dict[str, str | int] = {
"device_id": device.id,
"token_kind": token_kind,
"push_key": "MY+paNlyduYJRQFNZva8w7Gv3PkBua9kIj581F9Vr301",
"push_key_id": 2408,
"bouncer_public_key": public_key_str,
"encrypted_push_registration": encrypted_push_registration,
"token_id": token_id_base64,
}
return payload
def assert_push_fields_null(self, device: Device) -> None:
self.assertIsNone(device.push_key)
self.assertIsNone(device.push_key_id)
self.assertIsNone(device.push_token_id)
self.assertIsNone(device.pending_push_token_id)
self.assertIsNone(device.push_token_kind)
self.assertIsNone(device.push_token_last_updated_timestamp)
self.assertIsNone(device.push_registration_error_code)
def test_register_push_device_success(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
devices = Device.objects.all()
self.assert_length(devices, 1)
self.assert_push_fields_null(devices[0])
# Verify the updated `Device` row and the `device` event
# while the `register_push_device_to_bouncer` event has not yet been
# consumed by the `PushNotificationsWorker` worker.
time_now = now()
with (
time_machine.travel(time_now, tick=False),
self.capture_send_event_calls(expected_num_events=1) as events,
mock_queue_publish("zerver.views.push_notifications.queue_event_on_commit") as m,
):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
m.assert_called_once()
devices = Device.objects.all()
self.assert_length(devices, 1)
device = devices[0]
self.assertEqual(device.push_key_id, payload["push_key_id"])
self.assertIsNone(device.push_token_id)
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(
device.pending_push_token_id, b64decode_token_id_base64(payload["token_id"])
)
self.assertEqual(device.push_token_kind, payload["token_kind"])
self.assertEqual(device.push_token_last_updated_timestamp, time_now)
self.assertIsNone(device.push_registration_error_code)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
queue_name = m.call_args[0][0]
queue_message = m.call_args[0][1]
# Now, the `PushNotificationsWorker` worker consumes the event.
with self.capture_send_event_calls(expected_num_events=1) as events:
queue_event_on_commit(queue_name, queue_message)
device.refresh_from_db()
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(device.push_token_id, b64decode_token_id_base64(payload["token_id"]))
self.assertIsNone(device.pending_push_token_id)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_token_id=payload["token_id"],
pending_push_token_id=None,
),
)
# Idempotent
with self.capture_send_event_calls(expected_num_events=0):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
pending_push_devices_count = Device.objects.filter(
pending_push_token_id__isnull=False
).count()
self.assertEqual(pending_push_devices_count, 0)
@activate_push_notification_service()
@responses.activate
@override_settings(ZILENCER_ENABLED=False)
def test_register_push_device_self_hosted_server_success(self) -> None:
"""
Self-hosted servers make a network call to the bouncer instead of
a direct `do_register_remote_push_device` function call.
"""
self.add_mock_response()
self.login("hamlet")
payload = self.get_register_push_device_payload()
devices = Device.objects.all()
self.assert_length(devices, 1)
self.assert_push_fields_null(devices[0])
time_now = now()
with (
time_machine.travel(time_now, tick=False),
self.capture_send_event_calls(expected_num_events=2) as events,
):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
devices = Device.objects.all()
self.assert_length(devices, 1)
device = devices[0]
self.assertEqual(device.push_key_id, payload["push_key_id"])
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(device.push_token_id, b64decode_token_id_base64(payload["token_id"]))
self.assertIsNone(device.pending_push_token_id)
self.assertEqual(device.push_token_kind, payload["token_kind"])
self.assertEqual(device.push_token_last_updated_timestamp, time_now)
self.assertIsNone(device.push_registration_error_code)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
events[1]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_token_id=payload["token_id"],
pending_push_token_id=None,
),
)
@override_settings(ZILENCER_ENABLED=False)
def test_server_not_configured_for_push_notification_error(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_error(result, "Server is not configured to use push notification service.")
@activate_push_notification_service()
def test_invalid_device_error(self) -> None:
self.login("hamlet")
iago = self.example_user("iago")
payload = self.get_register_push_device_payload(device_user=iago)
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_error(result, "Invalid `device_id`")
@activate_push_notification_service()
def test_missing_parameters_error(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
# Payload is not valid for push key rotation, token rotation, or a fresh registration.
del payload["push_key_id"]
del payload["token_id"]
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_error(
result,
"Missing parameters: must provide either all push key fields, all token fields, or both.",
)
@activate_push_notification_service()
def test_invalid_push_key_error(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
# Invalid Base64 alphabet in `push_key`
invalid_push_key_payload = {**payload, "push_key": "@abcdefg"}
result = self.client_post("/json/mobile_push/register", invalid_push_key_payload)
self.assert_json_error(result, "Invalid `push_key`")
# The value (which is base64-encoded to get `push_key`) is not 33 bytes long
invalid_push_key_payload = {**payload, "push_key": "abcd"}
result = self.client_post("/json/mobile_push/register", invalid_push_key_payload)
self.assert_json_error(result, "Invalid `push_key`")
# Verify error when prefix (1st byte) is not 0x31
push_key = payload["push_key"]
assert type(push_key) is str # for mypy
valid_push_key_bytes = base64.b64decode(push_key)
self.assertEqual(valid_push_key_bytes[0], 0x31)
self.assert_length(valid_push_key_bytes, 33)
# Note: Prefix changed to 0x32
invalid_push_key_bytes = bytes([0x32]) + valid_push_key_bytes[1:]
invalid_push_key = base64.b64encode(invalid_push_key_bytes).decode("utf-8")
invalid_push_key_payload = {**payload, "push_key": invalid_push_key}
result = self.client_post("/json/mobile_push/register", invalid_push_key_payload)
self.assert_json_error(result, "Invalid `push_key`")
@activate_push_notification_service()
def test_invalid_push_key_id_error(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
# Test negative push_key_id
invalid_push_key_id_payload = {**payload, "push_key_id": -1}
result = self.client_post("/json/mobile_push/register", invalid_push_key_id_payload)
self.assert_json_error(
result, "Invalid push_key_id: Value error, Not a valid unsigned 32-bit integer"
)
# Test push_key_id > 2^32 - 1
invalid_push_key_id_payload = {**payload, "push_key_id": 4294967296}
result = self.client_post("/json/mobile_push/register", invalid_push_key_id_payload)
self.assert_json_error(
result, "Invalid push_key_id: Value error, Not a valid unsigned 32-bit integer"
)
@activate_push_notification_service()
def test_invalid_token_id_error(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
# Invalid Base64 alphabet in `token_id`
invalid_token_id_payload = {**payload, "token_id": "@abcdefg"}
result = self.client_post("/json/mobile_push/register", invalid_token_id_payload)
self.assert_json_error(result, "`token_id` is not Base64 encoded")
@activate_push_notification_service()
@override_settings(ZILENCER_ENABLED=False)
@responses.activate
def test_invalid_bouncer_public_key_error(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
payload = self.get_register_push_device_payload()
devices = Device.objects.all()
self.assert_length(devices, 1)
self.assert_push_fields_null(devices[0])
# Verify InvalidBouncerPublicKeyError
time_now = now()
invalid_bouncer_public_key_payload = {**payload, "bouncer_public_key": "invalid public key"}
with (
time_machine.travel(time_now, tick=False),
self.capture_send_event_calls(expected_num_events=2) as events,
):
result = self.client_post(
"/json/mobile_push/register", invalid_bouncer_public_key_payload
)
self.assert_json_success(result)
devices = Device.objects.all()
self.assert_length(devices, 1)
device = devices[0]
self.assertEqual(
device.push_registration_error_code, InvalidBouncerPublicKeyError.code.name
)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
events[1]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_registration_error_code="INVALID_BOUNCER_PUBLIC_KEY",
),
)
# Retrying with correct payload results in success.
# `push_registration_error_code` of the same Device row is updated to None.
with (
time_machine.travel(time_now, tick=False),
self.capture_send_event_calls(expected_num_events=2) as events,
):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
device.refresh_from_db()
self.assertIsNone(device.push_registration_error_code)
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(device.push_token_id, b64decode_token_id_base64(payload["token_id"]))
self.assertIsNone(device.pending_push_token_id)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
events[1]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_token_id=payload["token_id"],
pending_push_token_id=None,
),
)
@activate_push_notification_service()
@override_settings(ZILENCER_ENABLED=False)
@responses.activate
def test_invalid_encrypted_push_registration_error(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
invalid_token_payload = self.get_register_push_device_payload(token="")
devices = Device.objects.all()
self.assert_length(devices, 1)
self.assert_push_fields_null(devices[0])
# Verify InvalidEncryptedPushRegistrationError
time_now = now()
with (
time_machine.travel(time_now, tick=False),
self.capture_send_event_calls(expected_num_events=2) as events,
):
result = self.client_post("/json/mobile_push/register", invalid_token_payload)
self.assert_json_success(result)
devices = Device.objects.all()
self.assert_length(devices, 1)
device = devices[0]
self.assertEqual(
device.push_registration_error_code, InvalidEncryptedPushRegistrationError.code.name
)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=invalid_token_payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
events[1]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_registration_error_code="BAD_REQUEST",
),
)
@activate_push_notification_service()
@override_settings(ZILENCER_ENABLED=False)
@responses.activate
def test_request_expired_error(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
liveness_timed_out_payload = self.get_register_push_device_payload(
timestamp=datetime_to_timestamp(now() - timedelta(days=2))
)
devices = Device.objects.all()
self.assert_length(devices, 1)
self.assert_push_fields_null(devices[0])
# Verify RequestExpiredError
time_now = now()
with (
time_machine.travel(time_now, tick=False),
self.assertLogs(level="ERROR") as m,
self.capture_send_event_calls(expected_num_events=2) as events,
):
result = self.client_post("/json/mobile_push/register", liveness_timed_out_payload)
self.assert_json_success(result)
devices = Device.objects.all()
self.assert_length(devices, 1)
device = devices[0]
self.assertEqual(device.push_registration_error_code, RequestExpiredError.code.name)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=liveness_timed_out_payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
events[1]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_registration_error_code="REQUEST_EXPIRED",
),
)
self.assertEqual(
m.output,
[f"ERROR:root:Push registration request for device_id={device.id} expired."],
)
@activate_push_notification_service()
@override_settings(ZILENCER_ENABLED=False)
@responses.activate
def test_missing_remote_realm_error(self) -> None:
self.add_mock_response()
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
payload = self.get_register_push_device_payload()
devices = Device.objects.all()
self.assert_length(devices, 1)
self.assert_push_fields_null(devices[0])
# Verify MissingRemoteRealmError
# Update realm's UUID to a random UUID.
hamlet.realm.uuid = uuid.uuid4()
hamlet.realm.save()
time_now = now()
with (
time_machine.travel(time_now, tick=False),
self.assertLogs(level="ERROR") as m,
self.capture_send_event_calls(expected_num_events=1) as events,
):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
devices = Device.objects.all()
self.assert_length(devices, 1)
device = devices[0]
# We keep retrying until `RequestExpiredError` is raised.
self.assertIsNotNone(device.pending_push_token_id)
self.assertIsNone(device.push_registration_error_code)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
m.output[0],
f"ERROR:root:Push device registration request for device_id={device.id} failed.",
)
# TODO: Verify that we retry for a day, then raise `RequestExpiredError`.
# This implementation would be a follow-up. Currently `retry_event`
# leads to at most 3 retries.
@activate_push_notification_service()
def test_push_key_rotation(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
device = Device.objects.get(id=payload["device_id"])
self.assert_push_fields_null(device)
# Attempt to set only `push_key` and `push_key_id` fields.
rotate_push_key_payload = {
"device_id": device.id,
"push_key": "MTaUDJDMWypQ1WufZ1NRTHSSvgYtXh1qVNSjN3aBiEFt",
"push_key_id": 1144,
}
result = self.client_post("/json/mobile_push/register", rotate_push_key_payload)
self.assert_json_error(result, "No push registration exists to rotate key for.")
# Fresh push registration.
with self.capture_send_event_calls(expected_num_events=2):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
device.refresh_from_db()
assert device.push_key is not None
assert type(payload["push_key"]) is str # for mypy
self.assertEqual(bytes(device.push_key), check_push_key(payload["push_key"]))
self.assertEqual(device.push_key_id, payload["push_key_id"])
# Rotate push key for the registration.
with self.capture_send_event_calls(expected_num_events=1) as events:
result = self.client_post("/json/mobile_push/register", rotate_push_key_payload)
self.assert_json_success(result)
device.refresh_from_db()
assert device.push_key is not None
assert type(rotate_push_key_payload["push_key"]) is str # for mypy
self.assertEqual(
bytes(device.push_key), check_push_key(rotate_push_key_payload["push_key"])
)
self.assertEqual(device.push_key_id, rotate_push_key_payload["push_key_id"])
self.assertEqual(
events[0]["event"],
dict(type="device", op="update", device_id=device.id, push_key_id=device.push_key_id),
)
# Idempotent
with self.capture_send_event_calls(expected_num_events=0):
result = self.client_post("/json/mobile_push/register", rotate_push_key_payload)
self.assert_json_success(result)
@activate_push_notification_service()
def test_token_rotation(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
device = Device.objects.get(id=payload["device_id"])
self.assert_push_fields_null(device)
# Attempt to set only token fields.
push_registration: dict[str, str | int] = {
"token": "abcdef",
"token_kind": Device.PushTokenKind.APNS,
"ios_app_id": "example.app",
"timestamp": datetime_to_timestamp(now()),
}
assert settings.PUSH_REGISTRATION_ENCRYPTION_KEYS
public_key_str: str = next(iter(settings.PUSH_REGISTRATION_ENCRYPTION_KEYS.keys()))
public_key = PublicKey(public_key_str.encode("utf-8"), Base64Encoder)
sealed_box = SealedBox(public_key)
encrypted_push_registration_bytes = sealed_box.encrypt(
orjson.dumps(push_registration), Base64Encoder
)
encrypted_push_registration = encrypted_push_registration_bytes.decode("utf-8")
assert type(push_registration["token"]) is str # for mypy
hash_bytes = hashlib.sha256(push_registration["token"].encode()).digest()
token_id_base64 = base64.b64encode(hash_bytes[0:8]).decode()
rotate_token_payload = {
"device_id": device.id,
"token_kind": Device.PushTokenKind.APNS,
"bouncer_public_key": public_key_str,
"encrypted_push_registration": encrypted_push_registration,
"token_id": token_id_base64,
}
result = self.client_post("/json/mobile_push/register", rotate_token_payload)
self.assert_json_error(result, "No push registration exists to rotate token for.")
# Fresh push registration.
with self.capture_send_event_calls(expected_num_events=2):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
device.refresh_from_db()
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(device.push_token_id, b64decode_token_id_base64(payload["token_id"]))
# Rotate token for the registration.
time_now = now()
with (
time_machine.travel(time_now, tick=False),
self.capture_send_event_calls(expected_num_events=2) as events,
):
result = self.client_post("/json/mobile_push/register", rotate_token_payload)
self.assert_json_success(result)
device.refresh_from_db()
assert type(rotate_token_payload["token_id"]) is str # for mypy
self.assertEqual(
device.push_token_id, b64decode_token_id_base64(rotate_token_payload["token_id"])
)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
pending_push_token_id=rotate_token_payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
events[1]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_token_id=rotate_token_payload["token_id"],
pending_push_token_id=None,
),
)
# Idempotent
with self.capture_send_event_calls(expected_num_events=0):
result = self.client_post("/json/mobile_push/register", rotate_token_payload)
self.assert_json_success(result)
@activate_push_notification_service()
def test_avoid_parallel_registration_request_to_bouncer(self) -> None:
self.login("hamlet")
payload = self.get_register_push_device_payload()
# Fresh registration request; the `register_push_device_to_bouncer` event
# has not been consumed by the `PushNotificationsWorker` worker yet.
with (
self.capture_send_event_calls(expected_num_events=1),
mock_queue_publish("zerver.views.push_notifications.queue_event_on_commit") as m,
):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
m.assert_called_once()
# Another registration request is not processed.
with (
self.capture_send_event_calls(expected_num_events=0),
mock_queue_publish("zerver.views.push_notifications.queue_event_on_commit") as m,
):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_error(result, "A registration for the device already in progress.")
m.assert_not_called()
@activate_push_notification_service()
@override_settings(ZILENCER_ENABLED=False)
@responses.activate
def test_no_plan_purchased_registration_succeeds(self) -> None:
"""Registration succeeds even when no plan is purchased.
The plan check only happens when sending push notifications,
not during device registration.
"""
self.add_mock_response()
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
payload = self.get_register_push_device_payload()
devices = Device.objects.all()
self.assert_length(devices, 1)
self.assert_push_fields_null(devices[0])
# Assert remote_realm is not on any plan.
remote_realm = RemoteRealm.objects.get(uuid=hamlet.realm.uuid)
self.assertEqual(remote_realm.plan_type, RemoteRealm.PLAN_TYPE_SELF_MANAGED)
time_now = now()
with (
time_machine.travel(time_now, tick=False),
self.capture_send_event_calls(expected_num_events=2) as events,
):
result = self.client_post("/json/mobile_push/register", payload)
self.assert_json_success(result)
devices = Device.objects.all()
self.assert_length(devices, 1)
device = devices[0]
assert type(payload["token_id"]) is str # for mypy
self.assertEqual(device.push_token_id, b64decode_token_id_base64(payload["token_id"]))
self.assertIsNone(device.pending_push_token_id)
self.assertIsNone(device.push_registration_error_code)
self.assertEqual(
events[0]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
pending_push_token_id=payload["token_id"],
push_token_last_updated_timestamp=datetime_to_timestamp(time_now),
push_registration_error_code=None,
),
)
self.assertEqual(
events[1]["event"],
dict(
type="device",
op="update",
device_id=device.id,
push_token_id=payload["token_id"],
pending_push_token_id=None,
),
)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_push_registration.py",
"license": "Apache License 2.0",
"lines": 861,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zilencer/management/commands/manage_push_registration_encryption_keys.py | import configparser
import json
from argparse import ArgumentParser
from typing import Any
from django.conf import settings
from nacl.encoding import Base64Encoder
from nacl.public import PrivateKey
from typing_extensions import override
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """
Add or remove a key pair from the `push_registration_encryption_keys` map.
Usage:
./manage.py manage_push_registration_encryption_keys --add
./manage.py manage_push_registration_encryption_keys --remove-key <public-key>
"""
@override
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--add",
action="store_true",
help="Add a new key pair to the `push_registration_encryption_keys` map.",
)
parser.add_argument(
"--remove-key",
metavar="PUBLIC_KEY",
help="Remove the key pair associated with the given public key from the `push_registration_encryption_keys` map.",
)
@override
def handle(self, *args: Any, **options: Any) -> None:
if not options["add"] and options["remove_key"] is None:
print("Error: Please provide either --add or --remove-key <public-key>.")
return
if settings.DEVELOPMENT:
SECRETS_FILENAME = "zproject/dev-secrets.conf"
else:
SECRETS_FILENAME = "/etc/zulip/zulip-secrets.conf"
config = configparser.ConfigParser()
config.read(SECRETS_FILENAME)
push_registration_encryption_keys: dict[str, str] = json.loads(
config.get("secrets", "push_registration_encryption_keys", fallback="{}")
)
added_key_pair: tuple[str, str] | None = None
if options["add"]:
# Generate a new key-pair and store.
private_key = PrivateKey.generate()
private_key_str = Base64Encoder.encode(bytes(private_key)).decode("utf-8")
public_key_str = Base64Encoder.encode(bytes(private_key.public_key)).decode("utf-8")
push_registration_encryption_keys[public_key_str] = private_key_str
added_key_pair = (public_key_str, private_key_str)
if options["remove_key"] is not None:
# Remove the key-pair for the given public key.
remove_key = options["remove_key"]
if remove_key not in push_registration_encryption_keys:
print("Error: No key pair found for the given public key.")
return
del push_registration_encryption_keys[remove_key]
config.set(
"secrets",
"push_registration_encryption_keys",
json.dumps(push_registration_encryption_keys),
)
with open(SECRETS_FILENAME, "w") as secrets_file:
config.write(secrets_file)
if added_key_pair is not None:
public_key_str, private_key_str = added_key_pair
print("Added a new key pair:")
print(f"- Public key: {public_key_str}")
print(f"- Private key: {private_key_str}")
if options["remove_key"] is not None:
print(f"Removed the key pair for public key: {options['remove_key']}")
| {
"repo_id": "zulip/zulip",
"file_path": "zilencer/management/commands/manage_push_registration_encryption_keys.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
zulip/zulip:zerver/tests/test_url_encoding.py | import os
import orjson
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.url_encoding import (
encode_channel,
encode_hash_component,
encode_user_full_name_and_id,
encode_user_ids,
stream_message_url,
)
from zerver.models.messages import Message
from zerver.models.realms import get_realm
from zerver.models.streams import get_stream
class URLEncodeTest(ZulipTestCase):
def test_encode_hash_component(self) -> None:
fixture_path = os.path.join(
os.path.dirname(__file__), "fixtures/url_encoding_test_cases.json"
)
with open(fixture_path, "rb") as f:
data = orjson.loads(f.read())
for test in data:
with self.subTest(name=test["name"]):
self.assertEqual(encode_hash_component(test["input"]), test["expected_output"])
def test_encode_channel(self) -> None:
# We have more tests for this function in `test_topic_link_utils.py`
self.assertEqual(encode_channel(9, "Verona"), "9-Verona")
self.assertEqual(encode_channel(123, "General"), "123-General")
self.assertEqual(encode_channel(7, "random_channel"), "7-random_channel")
self.assertEqual(encode_channel(9, "Verona", with_operator=True), "channel/9-Verona")
def test_encode_user_ids(self) -> None:
# Group narrow URL has 3 or more user IDs
self.assertEqual(encode_user_ids([1, 2, 3]), "1,2,3-group")
self.assertEqual(encode_user_ids([3, 1, 2]), "1,2,3-group")
# One-on-one narrow URL has 2 user IDs
self.assertEqual(encode_user_ids([1, 2]), "1,2")
# Narrow URL to one's own direct message conversation
self.assertEqual(encode_user_ids([1]), "1")
self.assertEqual(encode_user_ids([1, 2, 3], with_operator=True), "dm/1,2,3-group")
with self.assertRaises(AssertionError):
encode_user_ids([])
def test_encode_user_full_name_and_id(self) -> None:
self.assertEqual(encode_user_full_name_and_id("King Hamlet", 9), "9-King-Hamlet")
self.assertEqual(
encode_user_full_name_and_id("King Hamlet", 9, with_operator=True), "dm/9-King-Hamlet"
)
self.assertEqual(encode_user_full_name_and_id("ZOE", 1), "1-ZOE")
self.assertEqual(encode_user_full_name_and_id(" User Name ", 100), "100-User-Name")
self.assertEqual(encode_user_full_name_and_id("User Name", 101), "101-User-Name")
self.assertEqual(encode_user_full_name_and_id("User/Name", 200), "200-User-Name")
self.assertEqual(encode_user_full_name_and_id("User%Name", 201), "201-User-Name")
self.assertEqual(encode_user_full_name_and_id("User<Name>", 202), "202-User-Name-")
self.assertEqual(encode_user_full_name_and_id('User"Name`', 203), "203-User-Name-")
self.assertEqual(encode_user_full_name_and_id('User/ % < > ` " Name', 204), "204-User-Name")
self.assertEqual(encode_user_full_name_and_id("User--Name", 205), "205-User--Name")
self.assertEqual(encode_user_full_name_and_id("User%%Name", 206), "206-User-Name")
self.assertEqual(encode_user_full_name_and_id("User_Name", 5), "5-User_Name")
def test_stream_message_url(self) -> None:
realm = get_realm("zulip")
topic = "test topic"
channel = get_stream("Verona", realm)
channel_message_id = self.send_stream_message(
sender=self.example_user("hamlet"), stream_name=channel.name, topic_name=topic
)
channel_message = Message.objects.get(id=channel_message_id, realm=realm)
message_dict = dict(
id=channel_message_id,
stream_id=channel.id,
display_recipient=channel_message.recipient.label(),
topic=topic,
)
channel_message_url = stream_message_url(
realm,
message_dict,
)
expected_channel_message_url = f"{realm.url}/#narrow/{encode_channel(channel.id, channel.name, True)}/topic/{encode_hash_component(topic)}/near/{channel_message_id}"
self.assertEqual(channel_message_url, expected_channel_message_url)
relative_channel_message_url = stream_message_url(
realm, message_dict, include_base_url=False
)
expected_relative_channel_message_url = f"#narrow/{encode_channel(channel.id, channel.name, True)}/topic/{encode_hash_component(topic)}/near/{channel_message_id}"
self.assertEqual(relative_channel_message_url, expected_relative_channel_message_url)
with self.assertRaises(ValueError) as e:
stream_message_url(realm=None, message=message_dict, include_base_url=True)
self.assertEqual(str(e.exception), "realm is required when include_base_url=True")
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_url_encoding.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zerver/openapi/merge_api_changelogs.py | import glob
import os
import re
from pathlib import Path
def get_changelog_files_list() -> list[str]:
dir_path = Path("api_docs/unmerged.d")
if os.path.exists(dir_path):
return [os.path.basename(path) for path in glob.glob(f"{dir_path}/ZF-??????.md")]
return []
def get_unmerged_changelogs(verbose: bool = True) -> str:
changelogs = ""
dir_path = Path("api_docs/unmerged.d")
changelog_files_list = get_changelog_files_list()
if verbose:
if changelog_files_list:
print(f"Unmerged changelog files: {changelog_files_list}")
else:
print("No unmerged changelog files found.")
for file_name in changelog_files_list:
file_path = Path(f"{dir_path}/{file_name}")
with open(file_path) as f:
changelogs += f.read().strip("\n") + "\n"
return changelogs
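# Illustrative sketch (not part of the original module): the directory layout
# this function reads, with made-up file names.
#
#     api_docs/unmerged.d/
#         ZF-123456.md
#         ZF-654321.md
#
# Each file's contents are stripped of surrounding newlines and concatenated,
# newline-separated, into the returned string.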
def get_feature_level(update_feature_level: bool = True) -> int:
version_file_path = Path("version.py")
with open(version_file_path) as file:
lines = file.readlines()
new_feature_level = None
with open(version_file_path, "w") as file:
for line in lines:
if line.startswith("API_FEATURE_LEVEL = "):
match = re.search(r"\d+", line)
if match:
new_feature_level = int(match.group()) + 1
if update_feature_level:
file.write(f"API_FEATURE_LEVEL = {new_feature_level}\n")
continue
file.write(line)
assert new_feature_level is not None
if update_feature_level:
print(f"Updated API feature level: {new_feature_level - 1} -> {new_feature_level}")
return new_feature_level
def get_current_major_version() -> str | None:
changelog_path = Path("api_docs/changelog.md")
with open(changelog_path) as file:
for line in file:
match = re.search(r"## Changes in Zulip (\d+\.\d+)", line)
if match:
return match.group(1)
return None
def merge_changelogs(changelogs: str, new_feature_level: int, update_changelog: bool = True) -> str:
changelog_path = Path("api_docs/changelog.md")
changelog_markdown_string = ""
with open(changelog_path) as file:
lines = file.readlines()
changelogs_merged = False
with open(changelog_path, "w") as file:
for line in lines:
file.write(line)
changelog_markdown_string += line
if changelogs_merged:
continue
if re.fullmatch(r"## Changes in Zulip \d+\.\d+\n", line):
changelogs_merged = True
updates = f"\n**Feature level {new_feature_level}**\n\n{changelogs}"
changelog_markdown_string += updates
if update_changelog:
file.write(updates)
if update_changelog:
print(f"Changelogs merged to {changelog_path}.")
return changelog_markdown_string
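# Illustrative sketch (not part of the original module): where the merged
# block lands in api_docs/changelog.md, with made-up version and level.
#
#     ## Changes in Zulip 11.0
#
#     **Feature level 252**
#
#     * Example unmerged changelog entry.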
def update_feature_level_in_api_docs(new_feature_level: int) -> None:
changelog_files_list = get_changelog_files_list()
current_version = get_current_major_version()
# Get all the markdown files in api_docs folder along with zulip.yaml.
api_docs_folder = Path("api_docs")
api_docs_paths = list(api_docs_folder.glob("*.md"))
api_docs_paths.append(Path("zerver/openapi/zulip.yaml"))
for api_docs_path in api_docs_paths:
with open(api_docs_path) as file:
lines = file.readlines()
num_replaces = 0
with open(api_docs_path, "w") as file:
for line in lines:
old_line = line
for file_name in changelog_files_list:
temporary_feature_level = file_name[: -len(".md")]
pattern = rf"Zulip \d+\.\d+ \(feature level {temporary_feature_level}\)"
replacement = f"Zulip {current_version} (feature level {new_feature_level})"
line = re.sub(pattern, replacement, line)
if old_line != line:
num_replaces += 1
file.write(line)
if num_replaces:
print(f"Updated {api_docs_path}; {num_replaces} replaces were made.")
def remove_unmerged_changelog_files() -> None:
changelog_files_list = get_changelog_files_list()
for file_name in changelog_files_list:
os.remove(Path(f"api_docs/unmerged.d/{file_name}"))
if changelog_files_list:
print("Removed all the unmerged changelog files.")
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/openapi/merge_api_changelogs.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zulip/zulip:zerver/tests/test_subscription_settings.py | import orjson
from zerver.lib.subscription_info import gather_subscriptions, gather_subscriptions_helper
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_subscription
from zerver.models import Recipient, Subscription
class SubscriptionPropertiesTest(ZulipTestCase):
def test_set_stream_color(self) -> None:
"""
A POST request to /api/v1/users/me/subscriptions/properties with stream_id and
color data sets the stream color, and for that stream only. Also, make sure that
any invalid hex color codes are bounced.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
old_subs, _ = gather_subscriptions(test_user)
sub = old_subs[0]
stream_id = sub["stream_id"]
new_color = "#ffffff" # TODO: ensure that this is different from old_color
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": "color", "stream_id": stream_id, "value": "#ffffff"}]
).decode()
},
)
self.assert_json_success(result)
new_subs = gather_subscriptions(test_user)[0]
found_sub = None
for sub in new_subs:
if sub["stream_id"] == stream_id:
found_sub = sub
break
assert found_sub is not None
self.assertEqual(found_sub["color"], new_color)
new_subs.remove(found_sub)
for sub in old_subs:
if sub["stream_id"] == stream_id:
found_sub = sub
break
old_subs.remove(found_sub)
self.assertEqual(old_subs, new_subs)
invalid_color = "3ffrff"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": "color", "stream_id": stream_id, "value": invalid_color}]
).decode()
},
)
self.assert_json_error(
result, "Invalid subscription_data[0]: Value error, color is not a valid hex color code"
)
def test_set_color_missing_stream_id(self) -> None:
"""
Updating the color property requires a `stream_id` key.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": "color", "value": "#ffffff"}]
).decode()
},
)
self.assert_json_error(
result, 'subscription_data[0]["stream_id"] field is missing: Field required'
)
def test_set_color_unsubscribed_stream_id(self) -> None:
"""
Updating the color property requires a subscribed stream.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
sub_info = gather_subscriptions_helper(test_user)
not_subbed = sub_info.never_subscribed
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[
{
"property": "color",
"stream_id": not_subbed[0]["stream_id"],
"value": "#ffffff",
}
]
).decode()
},
)
self.assert_json_error(
result, "Not subscribed to channel ID {}".format(not_subbed[0]["stream_id"])
)
def test_set_color_missing_color(self) -> None:
"""
Updating the color property requires a color.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": "color", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(
result, 'subscription_data[0]["value"] field is missing: Field required'
)
def test_set_stream_wildcard_mentions_notify(self) -> None:
"""
A POST request to /api/v1/users/me/subscriptions/properties with wildcard_mentions_notify
sets the property.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
sub = subs[0]
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[
{
"property": "wildcard_mentions_notify",
"stream_id": sub["stream_id"],
"value": True,
}
]
).decode()
},
)
self.assert_json_success(result)
updated_sub = get_subscription(sub["name"], test_user)
self.assertIsNotNone(updated_sub)
self.assertEqual(updated_sub.wildcard_mentions_notify, True)
def test_set_pin_to_top(self) -> None:
"""
A POST request to /api/v1/users/me/subscriptions/properties with stream_id and
pin_to_top data pins the stream.
"""
user = self.example_user("hamlet")
self.login_user(user)
old_subs, _ = gather_subscriptions(user)
sub = old_subs[0]
stream_id = sub["stream_id"]
new_pin_to_top = not sub["pin_to_top"]
result = self.api_post(
user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": "pin_to_top", "stream_id": stream_id, "value": new_pin_to_top}]
).decode()
},
)
self.assert_json_success(result)
updated_sub = get_subscription(sub["name"], user)
self.assertIsNotNone(updated_sub)
self.assertEqual(updated_sub.pin_to_top, new_pin_to_top)
def test_change_is_muted(self) -> None:
test_user = self.example_user("hamlet")
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
sub = Subscription.objects.get(
recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user,
)
self.assertEqual(sub.is_muted, False)
property_name = "is_muted"
with self.capture_send_event_calls(expected_num_events=2) as events:
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[
{
"property": property_name,
"value": True,
"stream_id": subs[0]["stream_id"],
}
]
).decode()
},
)
self.assert_json_success(result)
self.assertEqual(events[0]["event"]["property"], "in_home_view")
self.assertEqual(events[0]["event"]["value"], False)
self.assertEqual(events[1]["event"]["property"], "is_muted")
self.assertEqual(events[1]["event"]["value"], True)
sub = Subscription.objects.get(
recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user,
)
self.assertEqual(sub.is_muted, True)
legacy_property_name = "in_home_view"
with self.capture_send_event_calls(expected_num_events=2) as events:
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[
{
"property": legacy_property_name,
"value": True,
"stream_id": subs[0]["stream_id"],
}
]
).decode()
},
)
self.assert_json_success(result)
self.assertEqual(events[0]["event"]["property"], "in_home_view")
self.assertEqual(events[0]["event"]["value"], True)
self.assertEqual(events[1]["event"]["property"], "is_muted")
self.assertEqual(events[1]["event"]["value"], False)
self.assert_json_success(result)
sub = Subscription.objects.get(
recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user,
)
self.assertEqual(sub.is_muted, False)
with self.capture_send_event_calls(expected_num_events=2) as events:
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[
{
"property": legacy_property_name,
"value": False,
"stream_id": subs[0]["stream_id"],
}
]
).decode()
},
)
self.assert_json_success(result)
self.assertEqual(events[0]["event"]["property"], "in_home_view")
self.assertEqual(events[0]["event"]["value"], False)
self.assertEqual(events[1]["event"]["property"], "is_muted")
self.assertEqual(events[1]["event"]["value"], True)
sub = Subscription.objects.get(
recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user,
)
self.assertEqual(sub.is_muted, True)
def test_set_subscription_property_incorrect(self) -> None:
"""
Trying to set a property incorrectly returns a JSON error.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
property_name = "is_muted"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, f"{property_name} is not a boolean")
property_name = "in_home_view"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, f"{property_name} is not a boolean")
property_name = "desktop_notifications"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, f"{property_name} is not a boolean")
property_name = "audible_notifications"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, f"{property_name} is not a boolean")
property_name = "push_notifications"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, f"{property_name} is not a boolean")
property_name = "email_notifications"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, f"{property_name} is not a boolean")
property_name = "wildcard_mentions_notify"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, f"{property_name} is not a boolean")
property_name = "color"
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": False, "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(
result, "Invalid subscription_data[0]: Value error, color is not a valid hex color code"
)
def test_json_subscription_property_invalid_stream(self) -> None:
test_user = self.example_user("hamlet")
self.login_user(test_user)
stream_id = 1000
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": "is_muted", "stream_id": stream_id, "value": False}]
).decode()
},
)
self.assert_json_error(result, "Invalid channel ID")
def test_set_invalid_property(self) -> None:
"""
Trying to set an invalid property returns a JSON error.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": "bad", "value": "bad", "stream_id": subs[0]["stream_id"]}]
).decode()
},
)
self.assert_json_error(result, "Unknown subscription property: bad")
def test_ignored_parameters_in_subscriptions_properties_endpoint(self) -> None:
"""
Sending an invalid parameter with a valid parameter returns
an `ignored_parameters_unsupported` array.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
sub = subs[0]
result = self.api_post(
test_user,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[
{
"property": "wildcard_mentions_notify",
"stream_id": sub["stream_id"],
"value": True,
}
]
).decode(),
"invalid_parameter": orjson.dumps(
[{"property": "pin_to_top", "stream_id": sub["stream_id"], "value": False}]
).decode(),
},
)
self.assert_json_success(result, ignored_parameters=["invalid_parameter"])
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_subscription_settings.py",
"license": "Apache License 2.0",
"lines": 421,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
zulip/zulip:zerver/tests/test_channel_fetch.py | from datetime import timedelta
from typing import TYPE_CHECKING, Any
import orjson
from django.conf import settings
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from typing_extensions import override
from analytics.models import StreamCount
from zerver.actions.streams import (
do_change_stream_group_based_setting,
do_change_stream_permission,
do_deactivate_stream,
)
from zerver.lib.email_mirror_helpers import encode_email_address, get_channel_email_token
from zerver.lib.subscription_info import gather_subscriptions, gather_subscriptions_helper
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import most_recent_message
from zerver.lib.types import (
APIStreamDict,
APISubscriptionDict,
NeverSubscribedStreamDict,
SubscriptionInfo,
UserGroupMembersData,
UserGroupMembersDict,
)
from zerver.models import NamedUserGroup, Realm, Stream, Subscription, UserProfile
from zerver.models.groups import SystemGroups
from zerver.models.realms import get_realm
from zerver.models.streams import get_stream
from zerver.models.users import get_system_bot
if TYPE_CHECKING:
from django.test.client import _MonkeyPatchedWSGIResponse as TestHttpResponse
def fix_expected_fields_for_stream_group_settings(expected_fields: set[str]) -> set[str]:
for setting_name in Stream.stream_permission_group_settings:
expected_fields -= {setting_name + "_id"}
expected_fields |= {setting_name}
return expected_fields
class GetStreamsTest(ZulipTestCase):
def test_streams_api_for_bot_owners(self) -> None:
hamlet = self.example_user("hamlet")
test_bot = self.create_test_bot("foo", hamlet)
assert test_bot is not None
realm = get_realm("zulip")
self.login_user(hamlet)
# Check it correctly lists the bot owner's subs with
# include_owner_subscribed=true
filters = dict(
include_owner_subscribed="true",
include_public="false",
include_subscribed="false",
)
with self.assert_database_query_count(7):
result = self.api_get(test_bot, "/api/v1/streams", filters)
owner_subs = self.api_get(hamlet, "/api/v1/users/me/subscriptions")
json = self.assert_json_success(result)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
self.assert_json_success(owner_subs)
owner_subs_json = orjson.loads(owner_subs.content)
self.assertEqual(
sorted(s["name"] for s in json["streams"]),
sorted(s["name"] for s in owner_subs_json["subscriptions"]),
)
# Check it correctly lists the bot owner's subs and the
# bot's subs
self.subscribe(test_bot, "Scotland")
filters = dict(
include_owner_subscribed="true",
include_public="false",
include_subscribed="true",
)
with self.assert_database_query_count(8):
result = self.api_get(test_bot, "/api/v1/streams", filters)
json = self.assert_json_success(result)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
actual = sorted(s["name"] for s in json["streams"])
expected = [s["name"] for s in owner_subs_json["subscriptions"]]
expected.append("Scotland")
expected.sort()
self.assertEqual(actual, expected)
# Check it correctly lists the bot owner's subs + all public streams
self.make_stream("private_stream", realm=realm, invite_only=True)
self.subscribe(test_bot, "private_stream")
with self.assert_database_query_count(7):
result = self.api_get(
test_bot,
"/api/v1/streams",
{
"include_owner_subscribed": "true",
"include_public": "true",
"include_subscribed": "false",
},
)
json = self.assert_json_success(result)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
actual = sorted(s["name"] for s in json["streams"])
expected = [s["name"] for s in owner_subs_json["subscriptions"]]
expected.extend(["Rome", "Venice", "Scotland"])
expected.sort()
self.assertEqual(actual, expected)
# Check it correctly lists the bot owner's subs + all public streams +
# the bot's subs
with self.assert_database_query_count(8):
result = self.api_get(
test_bot,
"/api/v1/streams",
{
"include_owner_subscribed": "true",
"include_public": "true",
"include_subscribed": "true",
},
)
json = self.assert_json_success(result)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
actual = sorted(s["name"] for s in json["streams"])
expected = [s["name"] for s in owner_subs_json["subscriptions"]]
expected.extend(["Rome", "Venice", "Scotland", "private_stream"])
expected.sort()
self.assertEqual(actual, expected)
private_stream_2 = self.make_stream("private_stream_2", realm=realm, invite_only=True)
private_stream_3 = self.make_stream("private_stream_3", realm=realm, invite_only=True)
self.make_stream("private_stream_4", realm=realm, invite_only=True)
test_bot_group_member_dict = UserGroupMembersData(
direct_members=[test_bot.id], direct_subgroups=[]
)
do_change_stream_group_based_setting(
private_stream_2,
"can_add_subscribers_group",
test_bot_group_member_dict,
acting_user=hamlet,
)
do_change_stream_group_based_setting(
private_stream_3,
"can_administer_channel_group",
test_bot_group_member_dict,
acting_user=hamlet,
)
# Check it correctly lists the bot owner's subs + the channels
# bot has content access to.
with self.assert_database_query_count(10):
result = self.api_get(
test_bot,
"/api/v1/streams",
{
"include_owner_subscribed": "true",
"include_can_access_content": "true",
},
)
json = self.assert_json_success(result)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
actual = sorted(s["name"] for s in json["streams"])
expected = [s["name"] for s in owner_subs_json["subscriptions"]]
expected.extend(["Rome", "Venice", "Scotland", "private_stream", "private_stream_2"])
expected.sort()
self.assertEqual(actual, expected)
def test_all_streams_api(self) -> None:
url = "/api/v1/streams"
data = {"include_all": "true"}
backward_compatible_data = {"include_all_active": "true"}
# Normal user should be able to make this request and get all
# the streams they have metadata access to.
normal_user = self.example_user("cordelia")
realm = normal_user.realm
normal_user_group_members_dict = UserGroupMembersData(
direct_members=[normal_user.id], direct_subgroups=[]
)
private_stream_1 = self.make_stream("private_stream_1", realm=realm, invite_only=True)
private_stream_2 = self.make_stream("private_stream_2", realm=realm, invite_only=True)
private_stream_3 = self.make_stream("private_stream_3", realm=realm, invite_only=True)
self.make_stream("private_stream_4", realm=realm, invite_only=True)
deactivated_public_stream = self.make_stream(
"deactivated_public_stream", realm=realm, invite_only=False
)
do_deactivate_stream(deactivated_public_stream, acting_user=normal_user)
self.subscribe(normal_user, private_stream_1.name)
do_change_stream_group_based_setting(
private_stream_2,
"can_add_subscribers_group",
normal_user_group_members_dict,
acting_user=normal_user,
)
do_change_stream_group_based_setting(
private_stream_3,
"can_administer_channel_group",
normal_user_group_members_dict,
acting_user=normal_user,
)
result_stream_names: list[str] = [
stream.name
for stream in Stream.objects.filter(realm=realm, invite_only=False, deactivated=False)
]
result_stream_names.extend(
[private_stream_1.name, private_stream_2.name, private_stream_3.name]
)
with self.assert_database_query_count(8):
result = self.api_get(normal_user, url, data)
json = self.assert_json_success(result)
self.assertEqual(sorted(s["name"] for s in json["streams"]), sorted(result_stream_names))
        # A guest user should also be able to make this request and get
        # all the streams they have metadata access to.
guest_user = self.example_user("polonius")
guest_user_group_member_dict = UserGroupMembersData(
direct_members=[guest_user.id], direct_subgroups=[]
)
self.subscribe(guest_user, private_stream_1.name)
self.subscribe(guest_user, "design")
do_change_stream_group_based_setting(
private_stream_2,
"can_add_subscribers_group",
guest_user_group_member_dict,
acting_user=normal_user,
)
do_change_stream_group_based_setting(
get_stream("Rome", realm),
"can_add_subscribers_group",
guest_user_group_member_dict,
acting_user=normal_user,
)
do_change_stream_group_based_setting(
private_stream_3,
"can_administer_channel_group",
guest_user_group_member_dict,
acting_user=normal_user,
)
do_change_stream_group_based_setting(
get_stream("Denmark", realm),
"can_administer_channel_group",
guest_user_group_member_dict,
acting_user=normal_user,
)
        # A guest user should not gain metadata access to a channel via
        # `can_add_subscribers_group` or `can_administer_channel_group`,
        # since `allow_everyone_group` is false for both of those settings.
result_stream_names = ["Verona", "private_stream_1", "design", "Rome"]
with self.assert_database_query_count(7):
result = self.api_get(guest_user, url, data)
json = self.assert_json_success(result)
self.assertEqual(sorted(s["name"] for s in json["streams"]), sorted(result_stream_names))
# Realm admin users can see all active streams if
# `exclude_archived` is not set.
admin_user = self.example_user("iago")
self.assertTrue(admin_user.is_realm_admin)
with self.assert_database_query_count(7):
result = self.api_get(admin_user, url, data)
json = self.assert_json_success(result)
backward_compatible_result = self.api_get(admin_user, url, backward_compatible_data)
json_for_backward_compatible_request = self.assert_json_success(backward_compatible_result)
self.assertEqual(json, json_for_backward_compatible_request)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
stream_names = {s["name"] for s in json["streams"]}
result_stream_names = [
stream.name for stream in Stream.objects.filter(realm=realm, deactivated=False)
]
self.assertEqual(
sorted(stream_names),
sorted(result_stream_names),
)
# Realm admin users can see all streams if `exclude_archived`
# is set to false.
data = {"include_all": "true", "exclude_archived": "false"}
with self.assert_database_query_count(7):
result = self.api_get(admin_user, url, data)
json = self.assert_json_success(result)
stream_names = {s["name"] for s in json["streams"]}
result_stream_names = [stream.name for stream in Stream.objects.filter(realm=realm)]
self.assertEqual(
sorted(stream_names),
sorted(result_stream_names),
)
        # This case will not happen in practice; we add this test block
        # for coverage of the case where `get_metadata_access_streams`
        # returns an empty list, without issuing a query, when it is
        # passed an empty list of streams.
all_active_streams = Stream.objects.filter(realm=realm, deactivated=False)
for stream in all_active_streams:
do_deactivate_stream(stream, acting_user=None)
data = {"include_all": "true"}
with self.assert_database_query_count(3):
result = self.api_get(admin_user, url, data)
json = self.assert_json_success(result)
stream_names = {s["name"] for s in json["streams"]}
self.assertEqual(stream_names, set())
def test_public_streams_api(self) -> None:
"""
Ensure that the query we use to get public streams successfully returns
a list of streams
"""
user = self.example_user("hamlet")
realm = get_realm("zulip")
self.login_user(user)
# Check it correctly lists the user's subs with include_public=false
result = self.api_get(user, "/api/v1/streams", {"include_public": "false"})
result2 = self.api_get(user, "/api/v1/users/me/subscriptions")
json = self.assert_json_success(result)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
self.assert_json_success(result2)
json2 = orjson.loads(result2.content)
self.assertEqual(
sorted(s["name"] for s in json["streams"]),
sorted(s["name"] for s in json2["subscriptions"]),
)
# Check it correctly lists all public streams with include_subscribed=false
filters = dict(include_public="true", include_subscribed="false")
result = self.api_get(user, "/api/v1/streams", filters)
json = self.assert_json_success(result)
all_streams = [
stream.name for stream in Stream.objects.filter(realm=realm, invite_only=False)
]
self.assertEqual(sorted(s["name"] for s in json["streams"]), sorted(all_streams))
def test_include_can_access_content_streams_api(self) -> None:
"""
        Ensure that the query we use to get streams the user has content
        access to successfully returns a list of streams.
"""
# Cordelia is not subscribed to private stream `core team`.
user = self.example_user("cordelia")
realm = get_realm("zulip")
self.login_user(user)
user_group_members_dict = UserGroupMembersData(
direct_members=[user.id], direct_subgroups=[]
)
private_stream_1 = self.make_stream("private_stream_1", realm=realm, invite_only=True)
private_stream_2 = self.make_stream("private_stream_2", realm=realm, invite_only=True)
private_stream_3 = self.make_stream("private_stream_3", realm=realm, invite_only=True)
self.make_stream("private_stream_4", realm=realm, invite_only=True)
self.subscribe(user, private_stream_1.name)
do_change_stream_group_based_setting(
private_stream_2, "can_add_subscribers_group", user_group_members_dict, acting_user=user
)
do_change_stream_group_based_setting(
private_stream_3,
"can_administer_channel_group",
user_group_members_dict,
acting_user=user,
)
        # Check it correctly lists all content-access streams with
        # include_can_access_content=true.
filters = dict(include_can_access_content="true")
with self.assert_database_query_count(8):
result = self.api_get(user, "/api/v1/streams", filters)
json = self.assert_json_success(result)
result_streams = [
stream.name for stream in Stream.objects.filter(realm=realm, invite_only=False)
]
result_streams.extend([private_stream_1.name, private_stream_2.name])
self.assertEqual(sorted(s["name"] for s in json["streams"]), sorted(result_streams))
def test_get_single_stream_api(self) -> None:
self.login("hamlet")
realm = get_realm("zulip")
denmark_stream = get_stream("Denmark", realm)
result = self.client_get(f"/json/streams/{denmark_stream.id}")
json = self.assert_json_success(result)
self.assertEqual(json["stream"]["name"], "Denmark")
self.assertEqual(json["stream"]["stream_id"], denmark_stream.id)
result = self.client_get("/json/streams/9999")
self.assert_json_error(result, "Invalid channel ID")
private_stream = self.make_stream("private_stream", invite_only=True)
self.subscribe(self.example_user("cordelia"), "private_stream")
# Non-admins cannot access unsubscribed private streams.
result = self.client_get(f"/json/streams/{private_stream.id}")
self.assert_json_error(result, "Invalid channel ID")
self.login("iago")
result = self.client_get(f"/json/streams/{private_stream.id}")
json = self.assert_json_success(result)
self.assertEqual(json["stream"]["name"], "private_stream")
self.assertEqual(json["stream"]["stream_id"], private_stream.id)
self.login("cordelia")
result = self.client_get(f"/json/streams/{private_stream.id}")
json = self.assert_json_success(result)
self.assertEqual(json["stream"]["name"], "private_stream")
self.assertEqual(json["stream"]["stream_id"], private_stream.id)
def test_get_stream_email_address(self) -> None:
self.login("hamlet")
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
polonius = self.example_user("polonius")
realm = get_realm("zulip")
email_gateway_bot = get_system_bot(settings.EMAIL_GATEWAY_BOT, realm.id)
denmark_stream = get_stream("Denmark", realm)
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
json = self.assert_json_success(result)
email_token = get_channel_email_token(
denmark_stream, creator=hamlet, sender=email_gateway_bot
)
hamlet_denmark_email = encode_email_address(
denmark_stream.name, email_token, show_sender=True
)
self.assertEqual(json["email"], hamlet_denmark_email)
# Users without permission to post cannot access the channel email.
owners_group = NamedUserGroup.objects.get(
name=SystemGroups.OWNERS, realm_for_sharding=realm
)
do_change_stream_group_based_setting(
denmark_stream, "can_send_message_group", owners_group, acting_user=iago
)
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
self.assert_json_error(result, "You do not have permission to post in this channel.")
everyone_group = NamedUserGroup.objects.get(
name=SystemGroups.EVERYONE, realm_for_sharding=realm
)
do_change_stream_group_based_setting(
denmark_stream, "can_send_message_group", everyone_group, acting_user=iago
)
self.login("polonius")
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
self.assert_json_error(result, "Invalid channel ID")
self.subscribe(polonius, "Denmark")
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
json = self.assert_json_success(result)
email_token = get_channel_email_token(
denmark_stream, creator=polonius, sender=email_gateway_bot
)
polonius_denmark_email = encode_email_address(
denmark_stream.name, email_token, show_sender=True
)
self.assertEqual(json["email"], polonius_denmark_email)
do_change_stream_permission(
denmark_stream,
invite_only=True,
history_public_to_subscribers=True,
is_web_public=False,
acting_user=iago,
)
self.login("hamlet")
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
json = self.assert_json_success(result)
self.assertEqual(json["email"], hamlet_denmark_email)
self.unsubscribe(hamlet, "Denmark")
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
self.assert_json_error(result, "Invalid channel ID")
self.login("iago")
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
json = self.assert_json_success(result)
email_token = get_channel_email_token(
denmark_stream, creator=iago, sender=email_gateway_bot
)
iago_denmark_email = encode_email_address(
denmark_stream.name, email_token, show_sender=True
)
self.assertEqual(json["email"], iago_denmark_email)
self.unsubscribe(iago, "Denmark")
result = self.client_get(f"/json/streams/{denmark_stream.id}/email_address")
self.assert_json_error(result, "Invalid channel ID")
def test_guest_user_access_to_streams(self) -> None:
user_profile = self.example_user("polonius")
self.login_user(user_profile)
self.assertEqual(user_profile.role, UserProfile.ROLE_GUEST)
# Get all the streams that Polonius has access to (subscribed + web-public streams)
result = self.client_get("/json/streams", {"include_web_public": "true"})
streams = self.assert_json_success(result)["streams"]
sub_info = gather_subscriptions_helper(user_profile)
subscribed = sub_info.subscriptions
unsubscribed = sub_info.unsubscribed
never_subscribed = sub_info.never_subscribed
self.assert_length(streams, len(subscribed) + len(unsubscribed) + len(never_subscribed))
stream_names = [stream["name"] for stream in streams]
expected_stream_names = [stream["name"] for stream in subscribed + unsubscribed]
expected_stream_names += [stream["name"] for stream in never_subscribed]
self.assertEqual(set(stream_names), set(expected_stream_names))
class StreamIdTest(ZulipTestCase):
def test_get_stream_id(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
stream = gather_subscriptions(user)[0][0]
result = self.client_get("/json/get_stream_id", {"stream": stream["name"]})
response_dict = self.assert_json_success(result)
self.assertEqual(response_dict["stream_id"], stream["stream_id"])
def test_get_stream_id_wrong_name(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
result = self.client_get("/json/get_stream_id", {"stream": "wrongname"})
self.assert_json_error(result, "Invalid channel name 'wrongname'")
class GetSubscribersTest(ZulipTestCase):
@override
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("hamlet")
self.login_user(self.user_profile)
def test_api_fields(self) -> None:
"""Verify that all the fields from `Stream.API_FIELDS` and `Subscription.API_FIELDS` present
in `APIStreamDict` and `APISubscriptionDict`, respectively.
"""
expected_fields = set(Stream.API_FIELDS) | {"stream_id", "is_archived"}
expected_fields -= {"id", "deactivated"}
expected_fields = fix_expected_fields_for_stream_group_settings(expected_fields)
stream_dict_fields = set(APIStreamDict.__annotations__.keys())
computed_fields = {
"is_announcement_only",
"is_default",
"stream_post_policy",
"stream_weekly_traffic",
}
self.assertEqual(stream_dict_fields - computed_fields, expected_fields)
expected_fields = set(Subscription.API_FIELDS)
subscription_dict_fields = set(APISubscriptionDict.__annotations__.keys())
computed_fields = {"in_home_view", "email_address", "stream_weekly_traffic", "subscribers"}
        # `APISubscriptionDict` is a subclass of `APIStreamDict`, so it has
        # all of those fields in addition to `Subscription.API_FIELDS` and
        # the computed fields; both sets need to be excluded here.
self.assertEqual(
subscription_dict_fields - computed_fields - stream_dict_fields,
expected_fields,
)
def verify_sub_fields(self, sub_data: SubscriptionInfo) -> None:
other_fields = {
"is_archived",
"is_announcement_only",
"in_home_view",
"stream_id",
"stream_post_policy",
"stream_weekly_traffic",
"subscribers",
}
expected_fields = set(Stream.API_FIELDS) | set(Subscription.API_FIELDS) | other_fields
expected_fields -= {"id", "deactivated"}
expected_fields = fix_expected_fields_for_stream_group_settings(expected_fields)
for lst in [sub_data.subscriptions, sub_data.unsubscribed]:
for sub in lst:
self.assertEqual(set(sub), expected_fields)
other_fields = {
"is_archived",
"is_announcement_only",
"stream_id",
"stream_post_policy",
"stream_weekly_traffic",
"subscribers",
}
expected_fields = set(Stream.API_FIELDS) | other_fields
expected_fields -= {"id", "deactivated"}
expected_fields = fix_expected_fields_for_stream_group_settings(expected_fields)
for never_sub in sub_data.never_subscribed:
self.assertEqual(set(never_sub), expected_fields)
def assert_user_got_subscription_notification(
self, user: UserProfile, expected_msg: str
) -> None:
# verify that the user was sent a message informing them about the subscription
realm = user.realm
msg = most_recent_message(user)
self.assertEqual(msg.recipient.type, msg.recipient.PERSONAL)
self.assertEqual(msg.sender_id, self.notification_bot(realm).id)
def non_ws(s: str) -> str:
return s.replace("\n", "").replace(" ", "")
assert msg.rendered_content is not None
self.assertEqual(non_ws(msg.rendered_content), non_ws(expected_msg))
def check_well_formed_result(
self, result: dict[str, Any], stream_name: str, realm: Realm
) -> None:
"""
A successful call to get_subscribers returns the list of subscribers in
the form:
{"msg": "",
"result": "success",
"subscribers": [hamlet_user.id, prospero_user.id]}
"""
self.assertIn("subscribers", result)
self.assertIsInstance(result["subscribers"], list)
true_subscribers = [
user_profile.id for user_profile in self.users_subscribed_to_stream(stream_name, realm)
]
self.assertEqual(sorted(result["subscribers"]), sorted(true_subscribers))
def make_subscriber_request(
self, stream_id: int, user: UserProfile | None = None
) -> "TestHttpResponse":
if user is None:
user = self.user_profile
return self.api_get(user, f"/api/v1/streams/{stream_id}/members")
def make_successful_subscriber_request(self, stream_name: str) -> None:
stream_id = get_stream(stream_name, self.user_profile.realm).id
result = self.make_subscriber_request(stream_id)
response_dict = self.assert_json_success(result)
self.check_well_formed_result(response_dict, stream_name, self.user_profile.realm)
def test_subscriber(self) -> None:
"""
get_subscribers returns the list of subscribers.
"""
stream_name = gather_subscriptions(self.user_profile)[0][0]["name"]
self.make_successful_subscriber_request(stream_name)
@override_settings(MIN_PARTIAL_SUBSCRIBERS_CHANNEL_SIZE=5)
def test_gather_partial_subscriptions(self) -> None:
othello = self.example_user("othello")
user_names = ["iago", "cordelia", "polonius", "shiva", "prospero"]
idle_users = [self.example_user(name) for name in user_names]
for user in idle_users:
user.long_term_idle = True
user.save()
bot = self.create_test_bot("bot", othello, "Foo Bot")
stream_names = [
"never_subscribed_only_bots",
"never_subscribed_many_more_than_bots",
"unsubscribed_only_bots",
"subscribed_more_than_bots_including_idle",
"subscribed_many_more_than_bots",
]
for stream_name in stream_names:
self.make_stream(stream_name)
self.subscribe_via_post(
self.user_profile,
["never_subscribed_only_bots"],
dict(principals=orjson.dumps([bot.id]).decode()),
)
self.subscribe_via_post(
self.user_profile,
["never_subscribed_many_more_than_bots"],
dict(
principals=orjson.dumps(
[bot.id, othello.id] + [user.id for user in idle_users]
).decode()
),
)
self.subscribe_via_post(
self.user_profile,
["unsubscribed_only_bots"],
dict(principals=orjson.dumps([bot.id, self.user_profile.id]).decode()),
)
self.unsubscribe(
self.user_profile,
"unsubscribed_only_bots",
)
self.subscribe_via_post(
self.user_profile,
["subscribed_more_than_bots_including_idle"],
dict(
principals=orjson.dumps(
[bot.id, othello.id, self.user_profile.id, idle_users[0].id]
).decode()
),
)
self.subscribe_via_post(
self.user_profile,
["subscribed_many_more_than_bots"],
dict(
principals=orjson.dumps(
[bot.id, othello.id, self.user_profile.id] + [user.id for user in idle_users]
).decode()
),
)
with self.assert_database_query_count(9):
sub_data = gather_subscriptions_helper(self.user_profile, include_subscribers="partial")
never_subscribed_streams = sub_data.never_subscribed
unsubscribed_streams = sub_data.unsubscribed
subscribed_streams = sub_data.subscriptions
self.assertGreaterEqual(len(never_subscribed_streams), 2)
self.assertGreaterEqual(len(unsubscribed_streams), 1)
self.assertGreaterEqual(len(subscribed_streams), 1)
        # Streams whose only subscribers are bots are sent with their full
        # subscriber list, since we always send bots. We tell the client it
        # doesn't need to fetch more by filling "subscribers" instead of
        # "partial_subscribers". If there are non-bot subscribers, a partial
        # fetch returns only a subset of the subscribers.
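        # For illustration (hypothetical IDs), the two payload shapes are:
        #     {"name": "only_bots", "subscribers": [bot_id]}
        #     {"name": "big_stream", "partial_subscribers": [bot_id, active_user_id]}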
for sub in never_subscribed_streams:
if sub["name"] == "never_subscribed_only_bots":
self.assert_length(sub["subscribers"], 1)
self.assertIsNone(sub.get("partial_subscribers"))
continue
if sub["name"] == "never_subscribed_many_more_than_bots":
# the bot and Othello (who is not long_term_idle)
self.assert_length(sub["partial_subscribers"], 2)
self.assertIsNone(sub.get("subscribers"))
for sub in unsubscribed_streams:
if sub["name"] == "unsubscribed_only_bots":
self.assert_length(sub["subscribers"], 1)
self.assertIsNone(sub.get("partial_subscribers"))
break
for sub in subscribed_streams:
# fewer than MIN_PARTIAL_SUBSCRIBERS_CHANNEL_SIZE subscribers,
# so we get all of them
if sub["name"] == "subscribed_more_than_bots_including_idle":
self.assertNotIn("partial_subscribers", sub)
self.assert_length(sub["subscribers"], 4)
if sub["name"] == "subscribed_many_more_than_bots":
# the bot, Othello (who is not long_term_idle), and current user
self.assert_length(sub["partial_subscribers"], 3)
self.assertNotIn("subscribers", sub)
@override_settings(MIN_PARTIAL_SUBSCRIBERS_CHANNEL_SIZE=5)
def test_gather_partial_subscriptions_api(self) -> None:
othello = self.example_user("othello")
user_names = ["iago", "cordelia", "polonius", "shiva", "prospero"]
idle_users = [self.example_user(name) for name in user_names]
for user in idle_users:
user.long_term_idle = True
user.save()
bot = self.create_test_bot("bot", othello, "Foo Bot")
stream_names = [
"subscribed_more_than_bots_including_idle",
"subscribed_many_more_than_bots",
]
for stream_name in stream_names:
self.make_stream(stream_name)
for user in [bot, othello, self.user_profile, idle_users[0]]:
self.subscribe(user, stream_names[0])
for user in [bot, othello, self.user_profile, *idle_users]:
self.subscribe(user, stream_names[1])
with self.assert_database_query_count(11):
result = self.api_get(
self.user_profile,
"/api/v1/users/me/subscriptions",
{"include_subscribers": "partial"},
)
sub_data = self.assert_json_success(result)
subscribed_streams = sub_data["subscriptions"]
self.assertGreaterEqual(len(subscribed_streams), 2)
        # Streams whose only subscribers are bots are sent with their full
        # subscriber list, since we always send bots. We tell the client it
        # doesn't need to fetch more by filling "subscribers" instead of
        # "partial_subscribers". If there are non-bot subscribers, a partial
        # fetch returns only a subset of the subscribers.
for sub in subscribed_streams:
# fewer than MIN_PARTIAL_SUBSCRIBERS_CHANNEL_SIZE subscribers,
# so we get all of them
if sub["name"] == "subscribed_more_than_bots_including_idle":
self.assertIsNone(sub.get("partial_subscribers"))
self.assert_length(sub["subscribers"], 4)
continue
if sub["name"] == "subscribed_many_more_than_bots":
# the bot, Othello (who is not long_term_idle), and current user
self.assert_length(sub["partial_subscribers"], 3)
self.assertIsNone(sub.get("subscribers"))
def test_gather_subscriptions(self) -> None:
"""
        gather_subscriptions returns correct results with a bounded number
        of database queries (see the assert_database_query_count checks below).
        (We also use this test to verify subscription notifications to
        folks who get subscribed to streams.)
"""
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
polonius = self.example_user("polonius")
realm = hamlet.realm
stream_names = [f"stream_{i}" for i in range(10)]
streams: list[Stream] = [self.make_stream(stream_name) for stream_name in stream_names]
users_to_subscribe = [
self.user_profile.id,
othello.id,
cordelia.id,
polonius.id,
]
with self.assert_database_query_count(49):
self.subscribe_via_post(
self.user_profile,
stream_names,
dict(principals=orjson.dumps(users_to_subscribe).decode()),
)
rendered_stream_list = ""
for stream in streams:
rendered_stream_list = (
rendered_stream_list
+ f"""<li><a class="stream" data-stream-id="{stream.id}" href="/#narrow/channel/{stream.id}-{stream.name}">#{stream.name}</a></li>\n"""
)
msg = f"""
<p><span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span> subscribed you to the following channels:</p>
<ul>
{rendered_stream_list}
</ul>
"""
for user in [cordelia, othello, polonius]:
self.assert_user_got_subscription_notification(user, msg)
# Subscribe ourself first.
self.subscribe_via_post(
self.user_profile,
["stream_invite_only_1"],
dict(principals=orjson.dumps([self.user_profile.id]).decode()),
invite_only=True,
)
# Now add in other users, and this should trigger messages
# to notify the user.
self.subscribe_via_post(
self.user_profile,
["stream_invite_only_1"],
dict(principals=orjson.dumps(users_to_subscribe).decode()),
invite_only=True,
)
stream_invite_only_1 = get_stream("stream_invite_only_1", realm)
msg = f"""
<p><span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span> subscribed you to <a class="stream" data-stream-id="{stream_invite_only_1.id}" href="/#narrow/channel/{stream_invite_only_1.id}-{stream_invite_only_1.name}">#{stream_invite_only_1.name}</a>.</p>
"""
for user in [cordelia, othello, polonius]:
self.assert_user_got_subscription_notification(user, msg)
with self.assert_database_query_count(9):
subscribed_streams, _ = gather_subscriptions(
self.user_profile, include_subscribers=True
)
self.assertGreaterEqual(len(subscribed_streams), 11)
for sub in subscribed_streams:
if not sub["name"].startswith("stream_"):
continue
self.assert_length(sub["subscribers"], len(users_to_subscribe))
# Test query count when setting is set to anonymous group.
stream = get_stream("stream_1", realm)
admins_group = NamedUserGroup.objects.get(
name=SystemGroups.ADMINISTRATORS, realm_for_sharding=realm, is_system_group=True
)
setting_group_members_dict = UserGroupMembersData(
direct_members=[hamlet.id], direct_subgroups=[admins_group.id]
)
do_change_stream_group_based_setting(
stream,
"can_remove_subscribers_group",
setting_group_members_dict,
acting_user=hamlet,
)
stream = get_stream("stream_2", realm)
setting_group_members_dict = UserGroupMembersData(
direct_members=[cordelia.id], direct_subgroups=[admins_group.id]
)
do_change_stream_group_based_setting(
stream,
"can_remove_subscribers_group",
setting_group_members_dict,
acting_user=hamlet,
)
with self.assert_database_query_count(9):
subscribed_streams, _ = gather_subscriptions(
self.user_profile, include_subscribers=True
)
self.assertGreaterEqual(len(subscribed_streams), 11)
for sub in subscribed_streams:
if not sub["name"].startswith("stream_"):
continue
self.assert_length(sub["subscribers"], len(users_to_subscribe))
if sub["name"] == "stream_1":
self.assertEqual(
sub["can_remove_subscribers_group"],
UserGroupMembersDict(
direct_members=[hamlet.id],
direct_subgroups=[admins_group.id],
),
)
elif sub["name"] == "stream_2":
self.assertEqual(
sub["can_remove_subscribers_group"],
UserGroupMembersDict(
direct_members=[cordelia.id],
direct_subgroups=[admins_group.id],
),
)
else:
self.assertEqual(sub["can_remove_subscribers_group"], admins_group.id)
def test_stream_post_policy_values_in_subscription_objects(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
desdemona = self.example_user("desdemona")
streams = [f"stream_{i}" for i in range(6)]
for stream_name in streams:
self.make_stream(stream_name)
realm = hamlet.realm
self.subscribe_via_post(
hamlet,
streams,
dict(principals=orjson.dumps([hamlet.id, cordelia.id]).decode()),
)
admins_group = NamedUserGroup.objects.get(
name=SystemGroups.ADMINISTRATORS, realm_for_sharding=realm, is_system_group=True
)
members_group = NamedUserGroup.objects.get(
name=SystemGroups.MEMBERS, realm_for_sharding=realm, is_system_group=True
)
full_members_group = NamedUserGroup.objects.get(
name=SystemGroups.FULL_MEMBERS, realm_for_sharding=realm, is_system_group=True
)
stream = get_stream("stream_1", realm)
do_change_stream_group_based_setting(
stream, "can_send_message_group", admins_group, acting_user=desdemona
)
stream = get_stream("stream_2", realm)
do_change_stream_group_based_setting(
stream, "can_send_message_group", members_group, acting_user=desdemona
)
stream = get_stream("stream_3", realm)
do_change_stream_group_based_setting(
stream, "can_send_message_group", full_members_group, acting_user=desdemona
)
hamletcharacters_group = NamedUserGroup.objects.get(
name="hamletcharacters", realm_for_sharding=realm
)
stream = get_stream("stream_4", realm)
do_change_stream_group_based_setting(
stream, "can_send_message_group", hamletcharacters_group, acting_user=desdemona
)
setting_group_members_dict = UserGroupMembersData(
direct_members=[cordelia.id], direct_subgroups=[admins_group.id]
)
stream = get_stream("stream_5", realm)
do_change_stream_group_based_setting(
stream, "can_send_message_group", setting_group_members_dict, acting_user=desdemona
)
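        # The legacy stream_post_policy value is derived from
        # can_send_message_group: administrators maps to
        # STREAM_POST_POLICY_ADMINS, full members maps to
        # STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS, and the other groups
        # used here (members, named groups, anonymous groups) map to
        # STREAM_POST_POLICY_EVERYONE, as the assertions below verify.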
with self.assert_database_query_count(9):
subscribed_streams, _ = gather_subscriptions(hamlet, include_subscribers=True)
[stream_1_sub] = [sub for sub in subscribed_streams if sub["name"] == "stream_1"]
self.assertEqual(stream_1_sub["can_send_message_group"], admins_group.id)
self.assertEqual(stream_1_sub["stream_post_policy"], Stream.STREAM_POST_POLICY_ADMINS)
[stream_2_sub] = [sub for sub in subscribed_streams if sub["name"] == "stream_2"]
self.assertEqual(stream_2_sub["can_send_message_group"], members_group.id)
self.assertEqual(stream_2_sub["stream_post_policy"], Stream.STREAM_POST_POLICY_EVERYONE)
[stream_3_sub] = [sub for sub in subscribed_streams if sub["name"] == "stream_3"]
self.assertEqual(stream_3_sub["can_send_message_group"], full_members_group.id)
self.assertEqual(
stream_3_sub["stream_post_policy"], Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS
)
[stream_4_sub] = [sub for sub in subscribed_streams if sub["name"] == "stream_4"]
self.assertEqual(stream_4_sub["can_send_message_group"], hamletcharacters_group.id)
self.assertEqual(stream_4_sub["stream_post_policy"], Stream.STREAM_POST_POLICY_EVERYONE)
[stream_5_sub] = [sub for sub in subscribed_streams if sub["name"] == "stream_5"]
self.assertEqual(
stream_5_sub["can_send_message_group"],
UserGroupMembersDict(
direct_members=[cordelia.id],
direct_subgroups=[admins_group.id],
),
)
self.assertEqual(stream_5_sub["stream_post_policy"], Stream.STREAM_POST_POLICY_EVERYONE)
def test_never_subscribed_streams(self) -> None:
"""
        Check that never_subscribed streams are fetched correctly and do not
        include invite_only streams; for guest users, both invite_only and
        non-web-public streams are excluded.
"""
realm = get_realm("zulip")
users_to_subscribe = [
self.example_user("othello").id,
self.example_user("cordelia").id,
]
public_streams = [
"test_stream_public_1",
"test_stream_public_2",
"test_stream_public_3",
"test_stream_public_4",
"test_stream_public_5",
]
private_streams = [
"test_stream_invite_only_1",
"test_stream_invite_only_2",
]
web_public_streams = [
"test_stream_web_public_1",
"test_stream_web_public_2",
]
nobody_group = NamedUserGroup.objects.get(
name="role:nobody", is_system_group=True, realm_for_sharding=realm
)
def create_public_streams() -> None:
for stream_name in public_streams:
self.make_stream(stream_name, realm=realm)
self.subscribe_via_post(
self.user_profile,
public_streams,
dict(
principals=orjson.dumps(users_to_subscribe).decode(),
can_administer_channel_group=nobody_group.id,
),
)
create_public_streams()
def create_web_public_streams() -> None:
for stream_name in web_public_streams:
self.make_stream(stream_name, realm=realm, is_web_public=True)
ret = self.subscribe_via_post(
self.user_profile,
web_public_streams,
dict(
principals=orjson.dumps(users_to_subscribe).decode(),
can_administer_channel_group=nobody_group.id,
),
)
self.assert_json_success(ret)
create_web_public_streams()
def create_private_streams() -> None:
self.subscribe_via_post(
self.user_profile,
private_streams,
dict(
principals=orjson.dumps(users_to_subscribe).decode(),
can_administer_channel_group=nobody_group.id,
),
invite_only=True,
)
create_private_streams()
def get_never_subscribed(query_count: int = 9) -> list[NeverSubscribedStreamDict]:
with self.assert_database_query_count(query_count):
sub_data = gather_subscriptions_helper(self.user_profile)
self.verify_sub_fields(sub_data)
never_subscribed = sub_data.never_subscribed
# Ignore old streams.
never_subscribed = [dct for dct in never_subscribed if dct["name"].startswith("test_")]
return never_subscribed
never_subscribed = get_never_subscribed()
        # Invite-only streams should not appear among never_subscribed streams.
self.assert_length(never_subscribed, len(public_streams) + len(web_public_streams))
for stream_dict in never_subscribed:
name = stream_dict["name"]
self.assertFalse("invite_only" in name)
self.assert_length(stream_dict["subscribers"], len(users_to_subscribe))
# Send private stream subscribers to all realm admins.
def test_realm_admin_case() -> None:
self.user_profile.role = UserProfile.ROLE_REALM_ADMINISTRATOR
# Test realm admins can get never subscribed private stream's subscribers.
never_subscribed = get_never_subscribed(7)
self.assertEqual(
len(never_subscribed),
len(public_streams) + len(private_streams) + len(web_public_streams),
)
for stream_dict in never_subscribed:
self.assert_length(stream_dict["subscribers"], len(users_to_subscribe))
test_realm_admin_case()
        # Send private stream subscribers to channel administrators as well.
def test_channel_admin_case() -> None:
self.user_profile.role = UserProfile.ROLE_MEMBER
user_group_members_dict = UserGroupMembersData(
direct_members=[self.user_profile.id], direct_subgroups=[]
)
do_change_stream_group_based_setting(
get_stream("test_stream_invite_only_1", realm),
"can_administer_channel_group",
user_group_members_dict,
acting_user=self.user_profile,
)
# Test channel admins can get never subscribed private stream's subscribers.
never_subscribed = get_never_subscribed()
self.assertEqual(
len(never_subscribed),
len(public_streams) + 1 + len(web_public_streams),
)
for stream_dict in never_subscribed:
self.assert_length(stream_dict["subscribers"], len(users_to_subscribe))
test_channel_admin_case()
def test_can_add_subscribers_case() -> None:
self.user_profile.role = UserProfile.ROLE_MEMBER
user_group_members_dict = UserGroupMembersData(
direct_members=[self.user_profile.id], direct_subgroups=[]
)
do_change_stream_group_based_setting(
get_stream("test_stream_invite_only_1", realm),
"can_add_subscribers_group",
user_group_members_dict,
acting_user=self.user_profile,
)
            # Test users in can_add_subscribers_group can get never
            # subscribed private stream's subscribers.
never_subscribed = get_never_subscribed()
self.assertEqual(
len(never_subscribed),
len(public_streams) + 1 + len(web_public_streams),
)
for stream_dict in never_subscribed:
self.assert_length(stream_dict["subscribers"], len(users_to_subscribe))
test_can_add_subscribers_case()
def test_guest_user_case() -> None:
self.user_profile.role = UserProfile.ROLE_GUEST
helper_result = gather_subscriptions_helper(self.user_profile)
self.verify_sub_fields(helper_result)
sub = helper_result.subscriptions
unsub = helper_result.unsubscribed
never_sub = helper_result.never_subscribed
# It's +1 because of the stream Rome.
self.assert_length(never_sub, len(web_public_streams) + 1)
sub_ids = [stream["stream_id"] for stream in sub]
unsub_ids = [stream["stream_id"] for stream in unsub]
for stream_dict in never_sub:
self.assertTrue(stream_dict["is_web_public"])
self.assertTrue(stream_dict["stream_id"] not in sub_ids)
self.assertTrue(stream_dict["stream_id"] not in unsub_ids)
# The Rome stream has is_web_public=True, with default
# subscribers not set up by this test, so we do the
# following check only for the streams we created.
if stream_dict["name"] in web_public_streams:
self.assert_length(stream_dict["subscribers"], len(users_to_subscribe))
test_guest_user_case()
def test_gather_subscribed_streams_for_guest_user(self) -> None:
guest_user = self.example_user("polonius")
stream_name_sub = "public_stream_1"
self.make_stream(stream_name_sub, realm=get_realm("zulip"))
self.subscribe(guest_user, stream_name_sub)
stream_name_unsub = "public_stream_2"
self.make_stream(stream_name_unsub, realm=get_realm("zulip"))
self.subscribe(guest_user, stream_name_unsub)
self.unsubscribe(guest_user, stream_name_unsub)
stream_name_never_sub = "public_stream_3"
self.make_stream(stream_name_never_sub, realm=get_realm("zulip"))
normal_user = self.example_user("aaron")
self.subscribe(normal_user, stream_name_sub)
        self.subscribe(normal_user, stream_name_unsub)
        self.subscribe(normal_user, stream_name_never_sub)
helper_result = gather_subscriptions_helper(guest_user)
self.verify_sub_fields(helper_result)
subs = helper_result.subscriptions
neversubs = helper_result.never_subscribed
# Guest users get info about subscribed public stream's subscribers
expected_stream_exists = False
for sub in subs:
if sub["name"] == stream_name_sub:
expected_stream_exists = True
self.assert_length(sub["subscribers"], 2)
self.assertTrue(expected_stream_exists)
        # Guest users only get data about never subscribed streams if the
        # streams are web-public.
for stream in neversubs:
self.assertTrue(stream["is_web_public"])
        # A guest user only gets data about never subscribed web-public streams.
self.assert_length(neversubs, 1)
def test_stream_weekly_traffic(self) -> None:
realm = self.user_profile.realm
subscribed_stream = self.make_stream("subscribed", realm=realm)
unsubscribed_stream = self.make_stream("unsubscribed", realm=realm)
never_subscribed_stream = self.make_stream("never_subscribed", realm=realm)
self.subscribe(self.user_profile, subscribed_stream.name)
self.subscribe(self.user_profile, unsubscribed_stream.name)
self.unsubscribe(self.user_profile, unsubscribed_stream.name)
end_time = timezone_now()
for stream in [subscribed_stream, unsubscribed_stream, never_subscribed_stream]:
stream.date_created = end_time - timedelta(days=7, minutes=1)
stream.save(update_fields=["date_created"])
StreamCount.objects.create(
realm=realm,
stream=stream,
property="messages_in_stream:is_bot:day",
end_time=end_time,
value=999,
)
helper_result = gather_subscriptions_helper(self.user_profile)
self.verify_sub_fields(helper_result)
[subscribed_entry] = [
sub for sub in helper_result.subscriptions if sub["name"] == subscribed_stream.name
]
[unsubscribed_entry] = [
sub for sub in helper_result.unsubscribed if sub["name"] == unsubscribed_stream.name
]
[never_subscribed_entry] = [
sub
for sub in helper_result.never_subscribed
if sub["name"] == never_subscribed_stream.name
]
# Stream traffic values are rounded to two significant digits
# in get_average_weekly_stream_traffic, so 999 is converted to 1000.
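        # (Two significant digits means, e.g., a hypothetical value of
        # 12345 would be reported as 12000.)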
self.assertEqual(subscribed_entry["stream_weekly_traffic"], 1000)
self.assertEqual(unsubscribed_entry["stream_weekly_traffic"], 1000)
self.assertEqual(never_subscribed_entry["stream_weekly_traffic"], 1000)
def test_api_fields_present(self) -> None:
user = self.example_user("cordelia")
sub_data = gather_subscriptions_helper(user)
subscribed = sub_data.subscriptions
self.assertGreaterEqual(len(subscribed), 1)
self.verify_sub_fields(sub_data)
def test_previously_subscribed_private_streams(self) -> None:
admin_user = self.example_user("iago")
non_admin_user = self.example_user("cordelia")
guest_user = self.example_user("polonius")
stream_name = "private_stream"
stream = self.make_stream(stream_name, realm=get_realm("zulip"), invite_only=True)
self.subscribe(admin_user, stream_name)
self.subscribe(non_admin_user, stream_name)
self.subscribe(guest_user, stream_name)
self.subscribe(self.example_user("othello"), stream_name)
self.unsubscribe(admin_user, stream_name)
self.unsubscribe(non_admin_user, stream_name)
self.unsubscribe(guest_user, stream_name)
# Test admin user gets previously subscribed private stream's subscribers.
sub_data = gather_subscriptions_helper(admin_user)
self.verify_sub_fields(sub_data)
unsubscribed_streams = sub_data.unsubscribed
self.assert_length(unsubscribed_streams, 1)
self.assert_length(unsubscribed_streams[0]["subscribers"], 1)
# Test non-admin users cannot get previously subscribed private stream's subscribers.
sub_data = gather_subscriptions_helper(non_admin_user)
self.verify_sub_fields(sub_data)
unsubscribed_streams = sub_data.unsubscribed
self.assert_length(unsubscribed_streams, 0)
# Test channel admin gets previously subscribed private stream's subscribers.
non_admin_user_group_members_dict = UserGroupMembersData(
direct_members=[non_admin_user.id], direct_subgroups=[]
)
do_change_stream_group_based_setting(
stream,
"can_administer_channel_group",
non_admin_user_group_members_dict,
acting_user=admin_user,
)
sub_data = gather_subscriptions_helper(non_admin_user)
self.verify_sub_fields(sub_data)
unsubscribed_streams = sub_data.unsubscribed
self.assert_length(unsubscribed_streams, 1)
self.assert_length(unsubscribed_streams[0]["subscribers"], 1)
sub_data = gather_subscriptions_helper(guest_user)
self.verify_sub_fields(sub_data)
unsubscribed_streams = sub_data.unsubscribed
self.assert_length(unsubscribed_streams, 0)
def test_previously_subscribed_public_streams(self) -> None:
public_stream_name = "public_stream"
web_public_stream_name = "web_public_stream"
guest_user = self.example_user("polonius")
member_user = self.example_user("hamlet")
self.make_stream(public_stream_name, realm=get_realm("zulip"))
self.make_stream(web_public_stream_name, realm=get_realm("zulip"), is_web_public=True)
for stream_name in [public_stream_name, web_public_stream_name]:
self.subscribe(guest_user, stream_name)
self.subscribe(member_user, stream_name)
self.subscribe(self.example_user("othello"), stream_name)
for stream_name in [public_stream_name, web_public_stream_name]:
self.unsubscribe(guest_user, stream_name)
self.unsubscribe(member_user, stream_name)
# Test member user gets previously subscribed public stream and its subscribers.
sub_data = gather_subscriptions_helper(member_user)
self.verify_sub_fields(sub_data)
unsubscribed_streams = sub_data.unsubscribed
self.assert_length(unsubscribed_streams, 2)
self.assert_length(unsubscribed_streams[0]["subscribers"], 1)
self.assert_length(unsubscribed_streams[1]["subscribers"], 1)
        # Test that guest users cannot get a previously subscribed public
        # stream, but can get a web-public stream and its subscribers.
sub_data = gather_subscriptions_helper(guest_user)
self.verify_sub_fields(sub_data)
unsubscribed_streams = sub_data.unsubscribed
self.assert_length(unsubscribed_streams, 1)
self.assertEqual(unsubscribed_streams[0]["is_web_public"], True)
self.assert_length(unsubscribed_streams[0]["subscribers"], 1)
def test_nonsubscriber(self) -> None:
"""
Even a non-subscriber to a public stream can query a stream's membership
with get_subscribers.
"""
# Create a stream for which Hamlet is the only subscriber.
stream_name = "Saxony"
self.subscribe_via_post(self.user_profile, [stream_name])
other_user = self.example_user("othello")
# Fetch the subscriber list as a non-member.
self.login_user(other_user)
self.make_successful_subscriber_request(stream_name)
def test_subscriber_private_stream(self) -> None:
"""
A subscriber to a private stream can query that stream's membership.
"""
stream_name = "Saxony"
self.subscribe_via_post(self.user_profile, [stream_name], invite_only=True)
self.make_successful_subscriber_request(stream_name)
stream_id = get_stream(stream_name, self.user_profile.realm).id
# Verify another user can't get the data.
self.login("cordelia")
result = self.client_get(f"/json/streams/{stream_id}/members")
self.assert_json_error(result, "Invalid channel ID")
# But an organization administrator can
self.login("iago")
result = self.client_get(f"/json/streams/{stream_id}/members")
self.assert_json_success(result)
def test_json_get_subscribers_stream_not_exist(self) -> None:
"""
        json_get_subscribers returns an error for a stream that does not exist.
"""
stream_id = 99999999
result = self.client_get(f"/json/streams/{stream_id}/members")
self.assert_json_error(result, "Invalid channel ID")
def test_json_get_subscribers(self) -> None:
"""
json_get_subscribers in zerver/views/streams.py
also returns the list of subscribers for a stream, when requested.
"""
stream_name = gather_subscriptions(self.user_profile)[0][0]["name"]
stream_id = get_stream(stream_name, self.user_profile.realm).id
expected_subscribers = gather_subscriptions(self.user_profile, include_subscribers=True)[0][
0
]["subscribers"]
result = self.client_get(f"/json/streams/{stream_id}/members")
result_dict = self.assert_json_success(result)
self.assertIn("subscribers", result_dict)
self.assertIsInstance(result_dict["subscribers"], list)
subscribers: list[int] = []
for subscriber in result_dict["subscribers"]:
self.assertIsInstance(subscriber, int)
subscribers.append(subscriber)
self.assertEqual(set(subscribers), set(expected_subscribers))
def test_json_get_subscribers_for_guest_user(self) -> None:
"""
Guest users should have access to subscribers of web-public streams, even
if they aren't subscribed or have never subscribed to that stream.
"""
guest_user = self.example_user("polonius")
never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed
# A guest user can only see never subscribed streams that are web-public.
        # For Polonius, the only web-public stream that he is not subscribed
        # to at this point is Rome.
self.assert_length(never_subscribed, 1)
web_public_stream_id = never_subscribed[0]["stream_id"]
result = self.client_get(f"/json/streams/{web_public_stream_id}/members")
result_dict = self.assert_json_success(result)
self.assertIn("subscribers", result_dict)
self.assertIsInstance(result_dict["subscribers"], list)
self.assertGreater(len(result_dict["subscribers"]), 0)
def test_nonsubscriber_private_stream(self) -> None:
"""
        A non-subscriber who is not a realm admin can't query a private
        stream's membership, but an unsubscribed realm admin can.
"""
# Create a private stream for which Hamlet is the only subscriber.
stream_name = "NewStream"
self.subscribe_via_post(self.user_profile, [stream_name], invite_only=True)
user_profile = self.example_user("othello")
# Try to fetch the subscriber list as a non-member & non-realm-admin-user.
stream_id = get_stream(stream_name, user_profile.realm).id
result = self.make_subscriber_request(stream_id, user=user_profile)
self.assert_json_error(result, "Invalid channel ID")
# Try to fetch the subscriber list as a non-member & realm-admin-user.
self.login("iago")
self.make_successful_subscriber_request(stream_name)
| {
"repo_id": "zulip/zulip",
"file_path": "zerver/tests/test_channel_fetch.py",
"license": "Apache License 2.0",
"lines": 1303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |