Dataset schema (35 columns). For string columns, Min and Max are string lengths; ⌀ marks a nullable column.

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string | 7 | 58 |
| file_path | string | 9 | 302 |
| class_name | string | 5 | 256 |
| human_written_code | string | 16 | 2.16M |
| class_skeleton | string, ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
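Before the records, a quick way to work with this table. This is a minimal inspection sketch; it assumes the preview has been exported locally as Parquet, and the file name classes.parquet is hypothetical.

```python
# Minimal inspection sketch (assumes a hypothetical local Parquet export).
import pandas as pd

df = pd.read_parquet("classes.parquet")
row = df.loc[df["id"] == 328200].iloc[0]
print(row["repository_name"], "->", row["class_name"])
print(row["human_written_code"][:120])  # first characters of the class source
```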

id: 328,200
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/context.py
class_name: dingent.core.context.AppContext
human_written_code:
from .analytics_manager import AnalyticsManager
from .plugin_manager import PluginManager
from dingent.core.market_service import MarketService
from .resource_manager import ResourceManager
from .config_manager import ConfigManager
from .utils import find_project_root
from .log_manager import LogManager
from .assistant_manager import AssistantManager
from dingent.engine.graph_manager import GraphManager
from pathlib import Path
from .workflow_manager import WorkflowManager
from .llm_manager import LLMManager
class AppContext:
"""A container for all manager instances to handle dependency injection."""
def __init__(self, project_root: Path | None=None):
self.project_root = project_root or find_project_root()
if not self.project_root:
return
self.log_manager = LogManager()
self.config_manager = ConfigManager(self.project_root, self.log_manager)
self.resource_manager = ResourceManager(self.log_manager, self.project_root / '.dingent' / 'data' / 'resources.db')
self.llm_manager = LLMManager(self.log_manager)
self.analytics_manager = AnalyticsManager('test_project')
self.analytics_manager.register()
plugin_dir = self.project_root / 'plugins'
self.plugin_manager = PluginManager(plugin_dir, self.resource_manager, self.log_manager)
self.assistant_manager = AssistantManager(self.config_manager, self.plugin_manager, self.log_manager)
self.workflow_manager = WorkflowManager(self.config_manager, self.log_manager, self.assistant_manager)
self.market_service = MarketService(self.config_manager.project_root, self.log_manager)
self.graph_manager = GraphManager(self)
async def close(self):
"""Close all managers that require cleanup."""
self.resource_manager.close()
await self.assistant_manager.aclose()
class_skeleton:
class AppContext:
'''A container for all manager instances to handle dependency injection.'''
def __init__(self, project_root: Path | None=None):
pass
async def close(self):
'''Close all managers that require cleanup.'''
pass
metrics: total_program_units=3, total_doc_str=2, AvgCountLine=13, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.14, CountClassBase=0, CountClassCoupled=10, CountClassCoupledModified=9, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=10, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=30, CountLineBlank=5, CountLineCode=22, CountLineCodeDecl=14, CountLineCodeExe=19, CountLineComment=3, CountStmt=18, CountStmtDecl=14, CountStmtExe=15, MaxCyclomatic=2, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=3
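Each record pairs human_written_code with a class_skeleton in which method bodies are replaced by pass while signatures, decorators, and docstrings are kept. The following is a rough sketch of that derivation using only the standard-library ast module; it is an illustration, not the curation pipeline actually used for this dataset.

```python
# Hedged sketch: produce a pass-body skeleton from class source with ast.
import ast

def skeletonize(source: str) -> str:
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            doc = ast.get_docstring(node)
            body: list[ast.stmt] = []
            if doc is not None:
                body.append(ast.Expr(value=ast.Constant(value=doc)))  # keep docstring
            body.append(ast.Pass())  # drop the implementation
            node.body = body
    return ast.unparse(tree)  # requires Python 3.9+

print(skeletonize("class A:\n    def f(self):\n        'doc'\n        return 1\n"))
```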

id: 328,201
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/llm_manager.py
class_name: dingent.core.llm_manager.LLMManager
human_written_code:
from pydantic import SecretStr
from dingent.core.log_manager import LogManager
from langchain.chat_models.base import BaseChatModel
from langchain_litellm import ChatLiteLLM
from typing import Any
class LLMManager:
"""
A class to manage and maintain instances of large language models (LLMs).
This class is responsible for creating and caching LLM instances based on configuration,
ensuring efficient resource utilization, and providing a unified access point for the application.
"""
def __init__(self, log_manager: LogManager):
self._llms: dict[Any, BaseChatModel] = {}
self._log_manager = log_manager
def get_llm(self, **kwargs):
"""
Get an LLM instance by its name.
If the instance already exists in the cache, return it directly.
Otherwise, create a new instance based on the provided configuration.
"""
cache_key = tuple(sorted(kwargs.items()))
kwargs_hidden = {k: '***' if k == 'api_key' else v for k, v in kwargs.items()}
if cache_key in self._llms:
print(f'Returning cached LLM instance with params: {kwargs_hidden}')
self._log_manager.log_with_context('info', 'Returning cached LLM instance.', context={'params': kwargs_hidden})
return self._llms[cache_key]
if 'model_provider' not in kwargs and 'provider' in kwargs:
kwargs['model_provider'] = kwargs.pop('provider')
model = kwargs.get('model', 'gpt-3.5-turbo')
api_key: str | SecretStr = kwargs.get('api_key', 'sk-xxxxxx')
api_base = kwargs.get('api_base', None) or kwargs.get('base_url', None)
if isinstance(api_key, SecretStr):
api_key = api_key.get_secret_value()
model_instance = ChatLiteLLM(model=model, api_key=api_key, api_base=api_base)
self._llms[cache_key] = model_instance
self._log_manager.log_with_context('info', 'LLM instance created and cached.', context={'params': kwargs_hidden})
return model_instance
def list_available_llms(self) -> list[str]:
return list(self._llms.keys())
class_skeleton:
class LLMManager:
'''
A class to manage and maintain instances of large language models (LLMs).
This class is responsible for creating and caching LLM instances based on configuration,
ensuring efficient resource utilization, and providing a unified access point for the application.
'''
def __init__(self, log_manager: LogManager):
pass
def get_llm(self, **kwargs):
'''
Get an LLM instance by its name.
If the instance already exists in the cache, return it directly.
Otherwise, create a new instance based on the provided configuration.
'''
pass
def list_available_llms(self) -> list[str]:
pass
metrics: total_program_units=4, total_doc_str=2, AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=8, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.42, CountClassBase=0, CountClassCoupled=7, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=2, CountDeclMethod=3, CountDeclMethodAll=3, CountLine=41, CountLineBlank=7, CountLineCode=24, CountLineCodeDecl=12, CountLineCodeExe=20, CountLineComment=10, CountStmt=24, CountStmtDecl=12, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=7
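The cache key in get_llm above is worth a second look: tuple(sorted(kwargs.items())) makes the keyword arguments hashable and order-insensitive, so two calls with the same configuration hit the same cache entry. A small check (argument values are illustrative):

```python
a = {"model": "gpt-4.1", "api_base": "https://example.invalid/v1"}
b = {"api_base": "https://example.invalid/v1", "model": "gpt-4.1"}
assert tuple(sorted(a.items())) == tuple(sorted(b.items()))  # same cache key
```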

id: 328,202
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/log_manager.py
class_name: dingent.core.log_manager.LogEntry
human_written_code:
from typing import Any
from dataclasses import asdict, dataclass
from datetime import datetime
@dataclass
class LogEntry:
"""Structured log entry for dashboard display."""
timestamp: datetime
level: str
message: str
module: str
function: str
context: dict[str, Any] | None = None
correlation_id: str | None = None
def to_dict(self) -> dict[str, Any]:
"""Convert log entry to dictionary."""
data = asdict(self)
data['timestamp'] = self.timestamp.isoformat()
return data
class_skeleton:
@dataclass
class LogEntry:
'''Structured log entry for dashboard display.'''
def to_dict(self) -> dict[str, Any]:
'''Convert log entry to dictionary.'''
pass
metrics: total_program_units=3, total_doc_str=2, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=4, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.17, CountClassBase=0, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=16, CountLineBlank=2, CountLineCode=12, CountLineCodeDecl=5, CountLineCodeExe=10, CountLineComment=2, CountStmt=12, CountStmtDecl=5, CountStmtExe=10, MaxCyclomatic=1, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=1
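In the records shown here, CommentToCodeRatio agrees with CountLineComment / CountLineCode rounded to two decimals; a spot-check against the three records so far:

```python
# (CountLineComment, CountLineCode, CommentToCodeRatio) for ids 328,200-328,202
for comment, code, ratio in [(3, 22, 0.14), (10, 24, 0.42), (2, 12, 0.17)]:
    assert round(comment / code, 2) == ratio
```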

id: 328,203
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/log_manager.py
class_name: dingent.core.log_manager.LogManager
human_written_code:
from typing import Any
import threading
from loguru import logger
from collections import deque
class LogManager:
"""
Enhanced logging manager that captures structured logs for dashboard display.
Features:
- In-memory log storage with configurable retention
- Structured logging with context and correlation IDs
- Thread-safe operations
- Dashboard integration ready
"""
def __init__(self, max_logs: int=1000):
"""
Initialize the log manager.
Args:
max_logs: Maximum number of logs to keep in memory
"""
self.max_logs = max_logs
self._logs: deque[LogEntry] = deque(maxlen=max_logs)
self._lock = threading.RLock()
self._setup_loguru_handler()
def _setup_loguru_handler(self):
"""Set up loguru handler to capture logs for dashboard."""
def log_sink(message):
try:
record = message.record
log_entry = LogEntry(timestamp=record['time'], level=record['level'].name, message=record['message'], module=record['module'], function=record['function'], context=record.get('extra', {}), correlation_id=record.get('extra', {}).get('correlation_id'))
self._add_log_entry(log_entry)
except Exception as e:
print(f'Error in log sink: {e}')
logger.add(log_sink, level='DEBUG', format='{message}')
def _add_log_entry(self, entry: LogEntry):
"""Thread-safe addition of log entry."""
with self._lock:
self._logs.append(entry)
def get_logs(self, level: str | None=None, module: str | None=None, limit: int | None=None, search: str | None=None) -> list[LogEntry]:
"""
Retrieve logs with optional filtering.
Args:
level: Filter by log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
module: Filter by module name
limit: Maximum number of logs to return
search: Search term in message content
Returns:
List of matching log entries
"""
with self._lock:
logs = list(self._logs)
if level:
logs = [log for log in logs if log.level == level.upper()]
if module:
logs = [log for log in logs if module.lower() in log.module.lower()]
if search:
search_lower = search.lower()
logs = [log for log in logs if search_lower in log.message.lower()]
logs.sort(key=lambda x: x.timestamp, reverse=True)
if limit:
logs = logs[:limit]
return logs
def get_log_stats(self) -> dict[str, Any]:
"""Get logging statistics for dashboard display."""
with self._lock:
logs = list(self._logs)
if not logs:
return {'total_logs': 0, 'by_level': {}, 'by_module': {}, 'oldest_timestamp': None, 'newest_timestamp': None}
by_level = {}
by_module = {}
for log in logs:
by_level[log.level] = by_level.get(log.level, 0) + 1
by_module[log.module] = by_module.get(log.module, 0) + 1
return {'total_logs': len(logs), 'by_level': by_level, 'by_module': by_module, 'oldest_timestamp': min((log.timestamp for log in logs)).isoformat(), 'newest_timestamp': max((log.timestamp for log in logs)).isoformat()}
def clear_logs(self):
"""Clear all stored logs."""
with self._lock:
self._logs.clear()
def log_with_context(self, level: str, message: str, context: dict[str, Any] | None=None, correlation_id: str | None=None):
"""
Log a message with additional context for structured logging.
"""
extra = {}
if context:
extra.update(context)
if correlation_id:
extra['correlation_id'] = correlation_id
log = logger.opt(depth=1).bind(**extra)
logger_method = getattr(log, level.lower())
logger_method(message)
class_skeleton:
class LogManager:
'''
Enhanced logging manager that captures structured logs for dashboard display.
Features:
- In-memory log storage with configurable retention
- Structured logging with context and correlation IDs
- Thread-safe operations
- Dashboard integration ready
'''
def __init__(self, max_logs: int=1000):
'''
Initialize the log manager.
Args:
max_logs: Maximum number of logs to keep in memory
'''
pass
def _setup_loguru_handler(self):
'''Set up loguru handler to capture logs for dashboard.'''
pass
def log_sink(message):
pass
def _add_log_entry(self, entry: LogEntry):
'''Thread-safe addition of log entry.'''
pass
def get_logs(self, level: str | None=None, module: str | None=None, limit: int | None=None, search: str | None=None) -> list[LogEntry]:
'''
Retrieve logs with optional filtering.
Args:
level: Filter by log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
module: Filter by module name
limit: Maximum number of logs to return
search: Search term in message content
Returns:
List of matching log entries
'''
pass
def get_log_stats(self) -> dict[str, Any]:
'''Get logging statistics for dashboard display.'''
pass
def clear_logs(self):
'''Clear all stored logs.'''
pass
def log_with_context(self, level: str, message: str, context: dict[str, Any] | None=None, correlation_id: str | None=None):
'''
Log a message with additional context for structured logging.
'''
pass
metrics: total_program_units=9, total_doc_str=8, AvgCountLine=16, AvgCountLineBlank=2, AvgCountLineCode=10, AvgCountLineComment=4, AvgCyclomatic=2, CommentToCodeRatio=0.54, CountClassBase=0, CountClassCoupled=7, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=7, CountDeclInstanceVariable=3, CountDeclMethod=7, CountDeclMethodAll=7, CountLine=130, CountLineBlank=24, CountLineCode=69, CountLineCodeDecl=24, CountLineCodeExe=60, CountLineComment=37, CountStmt=55, CountStmtDecl=23, CountStmtExe=46, MaxCyclomatic=5, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=17
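A short usage sketch for the LogManager record above, assuming the class and its LogEntry dependency are importable and loguru is installed; names and values are illustrative.

```python
lm = LogManager(max_logs=100)
lm.log_with_context("info", "Plugin loaded",
                    context={"plugin": "demo"}, correlation_id="req-1")
recent = lm.get_logs(level="INFO", limit=5)  # newest entries first
print(lm.get_log_stats()["by_level"])
```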

id: 328,204
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/market_service.py
class_name: dingent.core.market_service.MarketItem
human_written_code:
from pydantic import BaseModel, model_validator
from typing import Any
class MarketItem(BaseModel):
id: str
name: str
description: str | None = None
version: str | None = None
author: str | None = None
category: MarketItemCategory
tags: list[str] = []
license: str | None = None
readme: str | None = None
downloads: int | None = None
rating: float | None = None
created_at: str | None = None
updated_at: str | None = None
is_installed: bool = False
installed_version: str | None = None
update_available: bool = False
@model_validator(mode='before')
@classmethod
def _normalize_name_field(cls, data: Any) -> Any:
if isinstance(data, dict):
source_for_display_name = data.get('display_name') or data.get('name')
if source_for_display_name:
data['name'] = source_for_display_name
return data
class_skeleton:
class MarketItem(BaseModel):
@model_validator(mode='before')
@classmethod
def _normalize_name_field(cls, data: Any) -> Any:
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=7, AvgCountLineBlank=1, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=83, CountLine=27, CountLineBlank=2, CountLineCode=25, CountLineCodeDecl=17, CountLineCodeExe=21, CountLineComment=0, CountStmt=23, CountStmtDecl=16, CountStmtExe=21, MaxCyclomatic=3, MaxInheritanceTree=5, MaxNesting=2, SumCyclomatic=3

id: 328,205
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/market_service.py
class_name: dingent.core.market_service.MarketItemCategory
human_written_code:
from enum import Enum
class MarketItemCategory(str, Enum):
"""Enumeration for different categories of items in the Dingent Hub."""
PLUGIN = 'plugin'
ASSISTANT = 'assistant'
WORKFLOW = 'workflow'
ALL = 'all'
def __str__(self) -> str:
"""Return the string value of the enum member."""
return self.value
class_skeleton:
class MarketItemCategory(str, Enum):
'''Enumeration for different categories of items in the Dingent Hub.'''
def __str__(self) -> str:
'''Return the string value of the enum member.'''
pass
metrics: total_program_units=2, total_doc_str=2, AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.29, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=116, CountLine=11, CountLineBlank=2, CountLineCode=7, CountLineCodeDecl=6, CountLineCodeExe=5, CountLineComment=2, CountStmt=7, CountStmtDecl=6, CountStmtExe=5, MaxCyclomatic=1, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=1
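Because MarketItemCategory mixes in str, members compare equal to their raw values, which is what lets MarketService build path segments such as f'{category.value}s/{item_id}'. A self-contained check (re-excerpting one member):

```python
from enum import Enum

class MarketItemCategory(str, Enum):  # re-excerpted from the record above
    PLUGIN = 'plugin'

    def __str__(self) -> str:
        return self.value

assert MarketItemCategory.PLUGIN == 'plugin'               # str-mixin equality
assert f"{MarketItemCategory.PLUGIN.value}s" == 'plugins'  # URL path segment
```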

id: 328,206
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/market_service.py
class_name: dingent.core.market_service.MarketMetadata
human_written_code:
from pydantic import BaseModel, model_validator
class MarketMetadata(BaseModel):
version: str
updated_at: str
categories: dict[str, int]
class_skeleton:
class MarketMetadata(BaseModel):
pass
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=82, CountLine=4, CountLineBlank=0, CountLineCode=4, CountLineCodeDecl=1, CountLineCodeExe=3, CountLineComment=0, CountStmt=4, CountStmtDecl=1, CountStmtExe=3, MaxCyclomatic=0, MaxInheritanceTree=5, MaxNesting=0, SumCyclomatic=0

id: 328,207
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/market_service.py
class_name: dingent.core.market_service.MarketService
human_written_code:
import toml
from typing import Any
from packaging.version import InvalidVersion, Version
import os
import asyncio
import httpx
from dingent.core.log_manager import LogManager
from pathlib import Path
from async_lru import alru_cache
import json
class MarketService:
"""Service for interacting with the dingent-market repository."""
CATEGORY_TARGETS = {MarketItemCategory.PLUGIN: 'plugins', MarketItemCategory.ASSISTANT: 'config/assistants', MarketItemCategory.WORKFLOW: 'config/workflows'}
def __init__(self, project_root: Path, log_manager: LogManager):
self.project_root = project_root
self._log_manager = log_manager
auth_token = os.getenv('GITHUB_TOKEN')
headers = {'User-Agent': 'Dingent-Market-Client/1.0'}
if auth_token:
headers['Authorization'] = f'token {auth_token}'
self.client = httpx.AsyncClient(headers=headers, timeout=30.0, follow_redirects=True)
async def close(self):
"""Close the HTTP session."""
await self.client.aclose()
async def _fetch_url_as_text(self, url: str) -> str | None:
"""A reusable helper to fetch content from a URL."""
try:
response = await self.client.get(url)
response.raise_for_status()
return response.text
except httpx.RequestError as e:
self._log_manager.log_with_context('warning', 'Failed to fetch URL: {url}', context={'url': str(e.request.url), 'error': str(e)})
return None
@alru_cache(maxsize=1)
async def get_market_metadata(self) -> MarketMetadata:
"""Fetch market metadata from the market.json file."""
url = f'{GITHUB_RAW_BASE}/{MARKET_REPO_OWNER}/{MARKET_REPO_NAME}/main/market.json'
content = await self._fetch_url_as_text(url)
if content:
try:
return MarketMetadata.model_validate_json(content)
except Exception as e:
self._log_manager.log_with_context('warning', 'Failed to parse market metadata', context={'error': str(e)})
return MarketMetadata(version='0.0.0', updated_at='', categories={})
async def get_market_items(self, category: MarketItemCategory, installed_items: dict[str, dict[str, str]] | None=None) -> list[MarketItem]:
"""Fetch a list of available market items concurrently."""
if installed_items is None:
installed_items = {}
hashable_installed_items = tuple(((category_name, tuple(sorted(versions.items()))) for category_name, versions in sorted(installed_items.items())))
categories_to_fetch = [c for c in MarketItemCategory if c != MarketItemCategory.ALL] if category == MarketItemCategory.ALL else [category]
tasks = [self._fetch_category_items(f'{cat.value}s', cat, hashable_installed_items) for cat in categories_to_fetch]
results = await asyncio.gather(*tasks)
all_items = [item for sublist in results for item in sublist]
return all_items
@alru_cache(maxsize=16)
async def _fetch_category_items(self, repo_directory: str, category_enum: MarketItemCategory, installed_items_tuple: tuple) -> list[MarketItem]:
"""Fetch and process all items for a specific category concurrently."""
try:
url = f'{GITHUB_API_BASE}/repos/{MARKET_REPO_OWNER}/{MARKET_REPO_NAME}/contents/{repo_directory}'
content = await self._fetch_url_as_text(url)
if not content:
return []
directories = [dir_info['name'] for dir_info in json.loads(content) if dir_info.get('type') == 'dir']
category_key = f'{category_enum.value}s'
relevant_versions_tuple = next((versions for cat, versions in installed_items_tuple if cat == category_key), ())
tasks = [self._fetch_item_details(repo_directory, category_enum, item_id, relevant_versions_tuple) for item_id in directories]
results = await asyncio.gather(*tasks)
return [item for item in results if item]
except Exception as e:
self._log_manager.log_with_context('warning', 'Failed to fetch category items', context={'category': category_enum.value, 'error': str(e)})
return []
async def _get_plugin_meta(self, repo_dir: str, item_id: str) -> dict:
"""Fetches and merges pyproject.toml and plugin.toml for a plugin."""
final_meta = {}
pyproject_url = f'{GITHUB_RAW_BASE}/{MARKET_REPO_OWNER}/{MARKET_REPO_NAME}/main/{repo_dir}/{item_id}/pyproject.toml'
pyproject_content = await self._fetch_url_as_text(pyproject_url)
if pyproject_content:
pyproject_data = toml.loads(pyproject_content)
final_meta.update(pyproject_data.get('project', {}))
plugin_url = f'{GITHUB_RAW_BASE}/{MARKET_REPO_OWNER}/{MARKET_REPO_NAME}/main/{repo_dir}/{item_id}/plugin.toml'
plugin_content = await self._fetch_url_as_text(plugin_url)
if not plugin_content:
raise FileNotFoundError('plugin.toml is required but was not found.')
plugin_data = toml.loads(plugin_content)
plugin_meta = plugin_data.get('plugin', plugin_data)
final_meta.update(plugin_meta)
return final_meta
@alru_cache(maxsize=128)
async def _fetch_item_details(self, repo_directory: str, category_enum: MarketItemCategory, item_id: str, installed_versions_tuple: tuple[tuple[str, str], ...]) -> MarketItem | None:
"""Fetch details for a specific item, merging configs if necessary."""
installed_items = dict(installed_versions_tuple)
try:
final_meta = {}
if category_enum == MarketItemCategory.PLUGIN:
final_meta = await self._get_plugin_meta(repo_directory, item_id)
else:
self._log_manager.log_with_context('warning', "Config fetching for '{cat}' is not yet implemented.", context={'cat': category_enum.value})
return None
is_item_installed = item_id in installed_items
installed_version = installed_items.get(item_id)
remote_version = str(final_meta.get('version', '0.0.0'))
update_available = False
if is_item_installed and installed_version:
try:
if Version(remote_version) > Version(installed_version):
update_available = True
except InvalidVersion:
pass
return MarketItem(id=item_id, name=final_meta.get('name', item_id), description=final_meta.get('description'), version=str(final_meta.get('version', '1.0.0')), author=final_meta.get('author'), category=category_enum, tags=final_meta.get('tags', []), license=final_meta.get('license', {}).get('text') or 'License not specified', is_installed=is_item_installed, installed_version=installed_version, update_available=update_available)
except Exception as e:
self._log_manager.log_with_context('warning', 'Failed to fetch item details, using fallback', context={'item_id': item_id, 'category': category_enum.value, 'error': str(e)})
return MarketItem(id=item_id, name=item_id.replace('-', ' ').title(), description=f'A {category_enum.value} from the market', category=category_enum, version='1.0.0', is_installed=item_id in installed_items, installed_version=installed_items.get(item_id), update_available=False)
async def download_item(self, item_id: str, category: MarketItemCategory) -> dict[str, Any]:
"""Download and install a market item."""
try:
target_path_suffix = self.CATEGORY_TARGETS.get(category)
if not target_path_suffix:
raise ValueError(f'Cannot download category: {category}')
target_dir = self.project_root / target_path_suffix
if category == MarketItemCategory.PLUGIN:
target_dir = target_dir / item_id
target_dir.mkdir(parents=True, exist_ok=True)
source_path = f'{category.value}s/{item_id}'
await self._download_directory(source_path, target_dir)
await self._install_item(item_id, category, target_dir)
return {'success': True, 'message': f'Successfully downloaded {category.value}: {item_id}', 'installed_path': str(target_dir.relative_to(self.project_root))}
except Exception as e:
self._log_manager.log_with_context('error', 'Download failed', context={'item_id': item_id, 'category': category.value, 'error': str(e)})
return {'success': False, 'message': f"Failed to download {category.value} '{item_id}': {str(e)}", 'installed_path': None}
async def _download_directory(self, source_path: str, target_dir: Path):
"""Download all files from a directory in the repository."""
try:
url = f'{GITHUB_API_BASE}/repos/{MARKET_REPO_OWNER}/{MARKET_REPO_NAME}/contents/{source_path}'
content_json = await self._fetch_url_as_text(url)
if not content_json:
return
contents = json.loads(content_json)
download_tasks = []
for item in contents:
if item['type'] == 'file':
task = self._download_file(item['download_url'], target_dir / item['name'])
download_tasks.append(task)
elif item['type'] == 'dir':
subdir_path = target_dir / item['name']
subdir_path.mkdir(exist_ok=True)
task = self._download_directory(f"{source_path}/{item['name']}", subdir_path)
download_tasks.append(task)
if download_tasks:
await asyncio.gather(*download_tasks)
except Exception as e:
self._log_manager.log_with_context('error', 'Failed to download directory', context={'source_path': source_path, 'error': str(e)})
raise
async def _download_file(self, url: str, path: Path):
"""Downloads a single file to the given path."""
try:
async with self.client.stream('GET', url) as response:
response.raise_for_status()
with open(path, 'wb') as f:
async for chunk in response.aiter_bytes():
f.write(chunk)
self._log_manager.log_with_context('info', 'Downloaded file: {file}', context={'file': str(path)})
except httpx.RequestError as e:
self._log_manager.log_with_context('error', 'Failed to download file', context={'url': url, 'error': str(e)})
async def _install_item(self, item_id: str, category: MarketItemCategory, target_dir: Path):
"""Perform category-specific installation steps."""
try:
if category == MarketItemCategory.PLUGIN:
await self._install_plugin(item_id, target_dir)
elif category == MarketItemCategory.ASSISTANT:
await self._install_assistant(item_id, target_dir)
elif category == MarketItemCategory.WORKFLOW:
await self._install_workflow(item_id, target_dir)
except Exception as e:
self._log_manager.log_with_context('error', 'Installation failed', context={'item_id': item_id, 'category': category.value, 'error': str(e)})
raise
async def _install_plugin(self, plugin_id: str, target_dir: Path):
"""Install a plugin."""
self._log_manager.log_with_context('info', 'Plugin installed', context={'plugin_id': plugin_id, 'path': str(target_dir)})
async def _install_assistant(self, assistant_id: str, target_dir: Path):
"""Install an assistant configuration."""
self._log_manager.log_with_context('info', 'Assistant installed', context={'assistant_id': assistant_id, 'path': str(target_dir)})
async def _install_workflow(self, workflow_id: str, target_dir: Path):
"""Install a workflow configuration."""
self._log_manager.log_with_context('info', 'Workflow installed', context={'workflow_id': workflow_id, 'path': str(target_dir)})
async def get_item_readme(self, item_id: str, category: MarketItemCategory) -> str | None:
"""Get README content for a specific item."""
try:
readme_path = f'{category.value}s/{item_id}/README.md'
url = f'{GITHUB_RAW_BASE}/{MARKET_REPO_OWNER}/{MARKET_REPO_NAME}/main/{readme_path}'
response = await self.client.get(url)
response.raise_for_status()
return response.text
except Exception as e:
self._log_manager.log_with_context('warning', 'Failed to fetch README', context={'item_id': item_id, 'category': category.value, 'error': str(e)})
return None
class_skeleton:
class MarketService:
'''Service for interacting with the dingent-market repository.'''
def __init__(self, project_root: Path, log_manager: LogManager):
pass
async def close(self):
'''Close the HTTP session.'''
pass
async def _fetch_url_as_text(self, url: str) -> str | None:
'''A reusable helper to fetch content from a URL.'''
pass
@alru_cache(maxsize=1)
async def get_market_metadata(self) -> MarketMetadata:
'''Fetch market metadata from the market.json file.'''
pass
async def get_market_items(self, category: MarketItemCategory, installed_items: dict[str, dict[str, str]] | None=None) -> list[MarketItem]:
'''Fetch a list of available market items concurrently.'''
pass
@alru_cache(maxsize=16)
async def _fetch_category_items(self, repo_directory: str, category_enum: MarketItemCategory, installed_items_tuple: tuple) -> list[MarketItem]:
'''Fetch and process all items for a specific category concurrently.'''
pass
async def _get_plugin_meta(self, repo_dir: str, item_id: str) -> dict:
'''Fetches and merges pyproject.toml and plugin.toml for a plugin.'''
pass
@alru_cache(maxsize=128)
async def _fetch_item_details(self, repo_directory: str, category_enum: MarketItemCategory, item_id: str, installed_versions_tuple: tuple[tuple[str, str], ...]) -> MarketItem | None:
'''Fetch details for a specific item, merging configs if necessary.'''
pass
async def download_item(self, item_id: str, category: MarketItemCategory) -> dict[str, Any]:
'''Download and install a market item.'''
pass
async def _download_directory(self, source_path: str, target_dir: Path):
'''Download all files from a directory in the repository.'''
pass
async def _download_file(self, url: str, path: Path):
'''Downloads a single file to the given path.'''
pass
async def _install_item(self, item_id: str, category: MarketItemCategory, target_dir: Path):
'''Perform category-specific installation steps.'''
pass
async def _install_plugin(self, plugin_id: str, target_dir: Path):
'''Install a plugin.'''
pass
async def _install_assistant(self, assistant_id: str, target_dir: Path):
'''Install an assistant configuration.'''
pass
async def _install_workflow(self, workflow_id: str, target_dir: Path):
'''Install a workflow configuration.'''
pass
async def get_item_readme(self, item_id: str, category: MarketItemCategory) -> str | None:
'''Get README content for a specific item.'''
pass
metrics: total_program_units=20, total_doc_str=16, AvgCountLine=16, AvgCountLineBlank=1, AvgCountLineCode=13, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.14, CountClassBase=0, CountClassCoupled=15, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=16, CountDeclInstanceVariable=3, CountDeclMethod=16, CountDeclMethodAll=16, CountLine=281, CountLineBlank=35, CountLineCode=218, CountLineCodeDecl=96, CountLineCodeExe=183, CountLineComment=31, CountStmt=162, CountStmtDecl=66, CountStmtExe=145, MaxCyclomatic=7, MaxInheritanceTree=0, MaxNesting=4, SumCyclomatic=47
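One detail in get_market_items above: the alru_cache-decorated helpers require hashable arguments, which is why the nested installed_items dict is flattened into sorted tuples before those helpers are called. A standalone illustration (contents are made up):

```python
installed = {"plugins": {"my-plugin": "1.0.0"}}
hashable = tuple(
    (category, tuple(sorted(versions.items())))
    for category, versions in sorted(installed.items())
)
hash(hashable)    # fine: nested tuples are hashable
# hash(installed) # would raise TypeError: unhashable type: 'dict'
```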

id: 328,208
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/plugin_manager.py
class_name: dingent.core.plugin_manager.PluginInstance
human_written_code:
from fastmcp.tools import Tool
from collections.abc import Callable
from pydantic import BaseModel, Field, PrivateAttr, SecretStr, ValidationError, create_model
from .types import ConfigItemDetail, ExecutionModel, PluginBase, PluginConfigSchema, PluginUserConfig, ToolResult
from typing import Any, Literal
from fastmcp.server.middleware import Middleware, MiddlewareContext
from pathlib import Path
from fastmcp import Client, FastMCP
from fastmcp.client import SSETransport, StreamableHttpTransport, UvStdioTransport
class PluginInstance:
mcp_client: Client
name: str
config: dict[str, Any] | None = None
manifest: 'PluginManifest'
_transport: StreamableHttpTransport | UvStdioTransport | None = None
_mcp: FastMCP
_status: Literal['active', 'inactive', 'error'] = 'inactive'
def __init__(self, name: str, mcp_client: Client, mcp: FastMCP, status: Literal['active', 'inactive', 'error'], manifest: 'PluginManifest', config: dict[str, Any] | None=None, transport=None):
self.name = name
self.mcp_client = mcp_client
self._mcp = mcp
self._status = status
self.config = config
self.manifest = manifest
self._transport = transport
@classmethod
async def from_config(cls, manifest: 'PluginManifest', user_config: PluginUserConfig, log_method: Callable, middleware: Middleware | None=None) -> 'PluginInstance':
if not user_config.enabled:
raise ValueError(f"Plugin '{manifest.name}' is not enabled. This should not happend")
env = {}
validated_config_dict = {}
if manifest.config_schema:
DynamicConfigModel = _create_dynamic_config_model(manifest.name, manifest.config_schema)
try:
validated_model = DynamicConfigModel.model_validate(user_config.config or {})
validated_config_dict = validated_model.model_dump(mode='json')
env = _prepare_environment(validated_model)
except ValidationError as e:
log_method('warning', "Configuration validation error for plugin '{plugin}': {error_msg}", context={'plugin': manifest.name, 'error_msg': f'{e}'})
validated_config_dict = user_config.config or {}
if manifest.execution.mode == 'remote':
assert manifest.execution.url is not None
if manifest.execution.url.endswith('sse'):
transport = SSETransport(url=manifest.execution.url, headers=env)
else:
transport = StreamableHttpTransport(url=manifest.execution.url, headers=env, auth='oauth')
remote_proxy = FastMCP.as_proxy(transport)
else:
assert manifest.execution.script_path
module_path = '.'.join(Path(manifest.execution.script_path).with_suffix('').parts)
transport = UvStdioTransport(module_path, module=True, project_directory=manifest.path.as_posix(), env_vars=env, python_version=manifest.python_version)
remote_proxy = FastMCP.as_proxy(transport)
_status = 'inactive'
try:
await remote_proxy.get_tools()
_status = 'active'
except Exception as e:
_status = 'error'
log_method('error', 'Failed to connect to MCP server: {error_msg}', context={'plugin': manifest.name, 'error_msg': f'{e}'})
mcp = FastMCP(name=manifest.name)
mcp.mount(remote_proxy)
if middleware:
mcp.add_middleware(middleware)
base_tools_dict = await mcp.get_tools()
if not user_config.tools_default_enabled:
for tool in base_tools_dict.values():
mirrored_tool = tool.copy()
mirrored_tool.disable()
mcp.add_tool(mirrored_tool)
for tool in user_config.tools or []:
base_tool = base_tools_dict.get(tool.name)
if not base_tool:
continue
log_method('info', "Translating tool '{tool}' to user config", context={'tool': tool.name})
trans_tool = Tool.from_tool(base_tool, name=tool.name, description=tool.description, enabled=tool.enabled)
mcp.add_tool(trans_tool)
if tool.name != base_tool.name:
mirrored_tool = base_tool.copy()
mirrored_tool.disable()
mcp.add_tool(mirrored_tool)
mcp_client = Client(mcp)
instance = cls(name=manifest.name, mcp_client=mcp_client, mcp=mcp, status=_status, config=validated_config_dict, manifest=manifest, transport=transport)
return instance
async def aclose(self):
if self._transport:
await self._transport.close()
await self.mcp_client.close()
@property
def status(self):
return self._status
async def list_tools(self):
return await self._mcp.get_tools()
def get_config_details(self) -> list[ConfigItemDetail]:
if not self.manifest or not self.manifest.config_schema:
return []
details = []
for schema_item in self.manifest.config_schema:
current_value = (self.config or {}).get(schema_item.name)
is_secret = getattr(schema_item, 'secret', False)
if is_secret and current_value is not None:
display_value = '********'
else:
display_value = current_value
item_detail = ConfigItemDetail(name=schema_item.name, type=schema_item.type, description=schema_item.description, required=schema_item.required, secret=is_secret, default=schema_item.default, value=display_value)
details.append(item_detail)
return details
class_skeleton:
class PluginInstance:
def __init__(self, name: str, mcp_client: Client, mcp: FastMCP, status: Literal['active', 'inactive', 'error'], manifest: 'PluginManifest', config: dict[str, Any] | None=None, transport=None):
pass
@classmethod
async def from_config(cls, manifest: 'PluginManifest', user_config: PluginUserConfig, log_method: Callable, middleware: Middleware | None=None) -> 'PluginInstance':
pass
async def aclose(self):
pass
@property
def status(self):
pass
async def list_tools(self):
pass
def get_config_details(self) -> list[ConfigItemDetail]:
pass
metrics: total_program_units=9, total_doc_str=0, AvgCountLine=23, AvgCountLineBlank=2, AvgCountLineCode=21, AvgCountLineComment=0, AvgCyclomatic=4, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=10, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=0, CountDeclMethod=6, CountDeclMethodAll=6, CountLine=152, CountLineBlank=15, CountLineCode=137, CountLineCodeDecl=50, CountLineCodeExe=113, CountLineComment=0, CountStmt=91, CountStmtDecl=32, CountStmtExe=84, MaxCyclomatic=13, MaxInheritanceTree=0, MaxNesting=2, SumCyclomatic=22

id: 328,209
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/plugin_manager.py
class_name: dingent.core.plugin_manager.PluginManager
human_written_code:
from pathlib import Path
from .types import ConfigItemDetail, ExecutionModel, PluginBase, PluginConfigSchema, PluginUserConfig, ToolResult
from .resource_manager import ResourceManager
import shutil
from dingent.core.log_manager import LogManager
class PluginManager:
plugins: dict[str, PluginManifest] = {}
def __init__(self, plugin_dir: Path, resource_manager: ResourceManager, log_manager: LogManager):
self.plugin_dir = plugin_dir
self.plugin_dir.mkdir(parents=True, exist_ok=True)
self.log_manager = log_manager
self.log_manager.log_with_context('info', 'PluginManager initialized with plugin directory: {dir}', context={'dir': str(self.plugin_dir)})
self._scan_and_register_plugins()
self.middleware = ResourceMiddleware(resource_manager, self.log_manager.log_with_context)
def _scan_and_register_plugins(self):
if not self.plugin_dir.is_dir():
self.log_manager.log_with_context('warning', "Plugin directory '{dir}' not found.", context={'dir': str(self.plugin_dir)})
return
for plugin_path in self.plugin_dir.iterdir():
if not plugin_path.is_dir():
self.log_manager.log_with_context('warning', "Skipping '{path}' as it is not a directory.", context={'path': str(plugin_path)})
continue
toml_path = plugin_path / 'plugin.toml'
if not toml_path.is_file():
self.log_manager.log_with_context('warning', "Skipping '{path}' as 'plugin.toml' is missing.", context={'path': str(plugin_path)})
continue
try:
plugin_manifest = PluginManifest.from_toml(toml_path)
self.plugins[plugin_manifest.id] = plugin_manifest
except Exception as e:
self.log_manager.log_with_context('error', "Failed to load plugin from '{path}': {error_msg}", context={'path': str(toml_path), 'error_msg': f'{e}'})
def list_plugins(self) -> dict[str, PluginManifest]:
return self.plugins
async def create_instance(self, instance_settings: PluginUserConfig):
plugin_id = instance_settings.plugin_id
if plugin_id not in self.plugins:
raise ValueError(f"Plugin '{plugin_id}' is not registered or failed to load.")
plugin_definition = self.plugins[plugin_id]
return await plugin_definition.create_instance(instance_settings, self.log_manager.log_with_context, self.middleware)
def get_plugin_manifest(self, plugin_id: str) -> PluginManifest | None:
return self.plugins.get(plugin_id)
def remove_plugin(self, plugin_id: str):
if plugin_id in self.plugins:
plugin = self.plugins[plugin_id]
plugin_path = plugin.path
shutil.rmtree(plugin_path)
self.log_manager.log_with_context('info', "Plugin '{plugin}' ({id}) removed.", context={'plugin': plugin.name, 'id': plugin_id})
del self.plugins[plugin_id]
else:
self.log_manager.log_with_context('warning', "Plugin with ID '{id}' not found in PluginManager.", context={'id': plugin_id})
def reload_plugins(self):
self.plugins.clear()
self._scan_and_register_plugins()
def get_installed_versions(self) -> dict[str, str]:
"""
Scans registered plugins and returns a dictionary of their IDs and versions.
Returns:
A dictionary mapping plugin_id to its version string.
Example: {"my-cool-plugin": "1.2.0", "another-plugin": "0.9.1"}
"""
if not self.plugins:
return {}
return {plugin_id: str(manifest.version) for plugin_id, manifest in self.plugins.items() if manifest.version is not None}
class_skeleton:
class PluginManager:
def __init__(self, plugin_dir: Path, resource_manager: ResourceManager, log_manager: LogManager):
pass
def _scan_and_register_plugins(self):
pass
def list_plugins(self) -> dict[str, PluginManifest]:
pass
async def create_instance(self, instance_settings: PluginUserConfig):
pass
def get_plugin_manifest(self, plugin_id: str) -> PluginManifest | None:
pass
def remove_plugin(self, plugin_id: str):
pass
def reload_plugins(self):
pass
def get_installed_versions(self) -> dict[str, str]:
'''
Scans registered plugins and returns a dictionary of their IDs and versions.
Returns:
A dictionary mapping plugin_id to its version string.
Example: {"my-cool-plugin": "1.2.0", "another-plugin": "0.9.1"}
'''
pass
metrics: total_program_units=9, total_doc_str=1, AvgCountLine=8, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.13, CountClassBase=0, CountClassCoupled=10, CountClassCoupledModified=5, CountClassDerived=0, CountDeclInstanceMethod=8, CountDeclInstanceVariable=3, CountDeclMethod=8, CountDeclMethodAll=8, CountLine=74, CountLineBlank=11, CountLineCode=56, CountLineCodeDecl=21, CountLineCodeExe=47, CountLineComment=7, CountStmt=51, CountStmtDecl=20, CountStmtExe=42, MaxCyclomatic=6, MaxInheritanceTree=0, MaxNesting=2, SumCyclomatic=16

id: 328,210
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/plugin_manager.py
class_name: dingent.core.plugin_manager.PluginManifest
human_written_code:
import toml
from fastmcp.server.middleware import Middleware, MiddlewareContext
from .types import ConfigItemDetail, ExecutionModel, PluginBase, PluginConfigSchema, PluginUserConfig, ToolResult
from pydantic import BaseModel, Field, PrivateAttr, SecretStr, ValidationError, create_model
from pathlib import Path
from collections.abc import Callable
class PluginManifest(PluginBase):
id: str = Field(default='no_name_plugin', description='Unique identifier for the plugin')
spec_version: str | float = Field('2.0', description='Plugin spec version (follows semantic versioning)')
execution: ExecutionModel
dependencies: list[str] | None = None
python_version: str | None = None
config_schema: list[PluginConfigSchema] | None = None
_plugin_path: Path | None = PrivateAttr(default=None)
@classmethod
def from_toml(cls, toml_path: Path) -> 'PluginManifest':
if not toml_path.is_file():
raise FileNotFoundError(f"'plugin.toml' not found at '{toml_path}'")
plugin_dir = toml_path.parent
pyproject_toml_path = plugin_dir / 'pyproject.toml'
base_meta = {}
if pyproject_toml_path.is_file():
pyproject_data = toml.load(pyproject_toml_path)
project_section = pyproject_data.get('project', {})
valid_keys = cls.model_fields.keys()
base_meta = {k: v for k, v in project_section.items() if k in valid_keys}
plugin_info = toml.load(toml_path)
plugin_meta = plugin_info.get('plugin', {})
final_meta = base_meta | plugin_meta
manifest = cls(**final_meta)
manifest._plugin_path = plugin_dir
return manifest
@property
def path(self) -> Path:
if self._plugin_path is None:
raise AttributeError('Plugin path has not been set.')
return self._plugin_path
async def create_instance(self, user_config: PluginUserConfig, log_method: Callable, middleware: Middleware | None=None) -> 'PluginInstance':
if self.path is None:
raise ValueError('Plugin path is not set. Please set the path before creating an instance.')
return await PluginInstance.from_config(manifest=self, user_config=user_config, middleware=middleware, log_method=log_method)
class_skeleton:
class PluginManifest(PluginBase):
@classmethod
def from_toml(cls, toml_path: Path) -> 'PluginManifest':
pass
@property
def path(self) -> Path:
pass
async def create_instance(self, user_config: PluginUserConfig, log_method: Callable, middleware: Middleware | None=None) -> 'PluginInstance':
pass
metrics: total_program_units=6, total_doc_str=0, AvgCountLine=13, AvgCountLineBlank=1, AvgCountLineCode=12, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=3, CountDeclMethodAll=86, CountLine=52, CountLineBlank=7, CountLineCode=45, CountLineCodeDecl=26, CountLineCodeExe=34, CountLineComment=0, CountStmt=33, CountStmtDecl=19, CountStmtExe=29, MaxCyclomatic=3, MaxInheritanceTree=6, MaxNesting=1, SumCyclomatic=7
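PluginManifest.from_toml merges pyproject.toml [project] metadata with the [plugin] table from plugin.toml using the dict-union operator, so plugin.toml wins on conflicting keys:

```python
base_meta = {"name": "demo", "version": "0.1.0"}  # illustrative [project] data
plugin_meta = {"version": "0.2.0"}                # illustrative [plugin] data
print(base_meta | plugin_meta)  # {'name': 'demo', 'version': '0.2.0'}
```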

id: 328,211
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/plugin_manager.py
class_name: dingent.core.plugin_manager.ResourceMiddleware
human_written_code:
import json
from fastmcp.server.middleware import Middleware, MiddlewareContext
from typing import Any, Literal
from mcp.types import TextContent
from .types import ConfigItemDetail, ExecutionModel, PluginBase, PluginConfigSchema, PluginUserConfig, ToolResult
from collections.abc import Callable
from fastmcp.tools.tool import ToolResult as FastMCPToolResult
from .resource_manager import ResourceManager
class ResourceMiddleware(Middleware):
"""
Intercepts tool call results, normalizes them into ToolResult, stores them, and exposes only the minimal necessary text to the model.
"""
def __init__(self, resource_manager: ResourceManager, log_method: Callable):
super().__init__()
self.resource_manager = resource_manager
self.log_with_context = log_method
async def on_call_tool(self, context: MiddlewareContext, call_next):
result = await call_next(context)
assert context.fastmcp_context
raw_text = ''
if result.content and result.content[0].text:
raw_text = result.content[0].text
structured = result.structured_content
parsed_obj: Any = None
if structured and isinstance(structured, dict):
parsed_obj = structured
elif raw_text:
try:
parsed_obj = json.loads(raw_text)
except Exception:
parsed_obj = raw_text
else:
parsed_obj = raw_text
try:
tool_result = ToolResult.from_any(parsed_obj)
except Exception as e:
self.log_with_context('warning', 'Failed to parse tool result: {error_msg}', context={'error_msg': f'{e}'})
tool_result = ToolResult.from_any(raw_text)
artifact_id = self.resource_manager.register(tool_result)
minimal_struct = {'artifact_id': artifact_id, 'model_text': tool_result.model_text, 'version': tool_result.version}
result.structured_content = minimal_struct
if result.content:
result.content[0].text = tool_result.model_text
else:
result.content = [FastMCPToolResult(content=TextContent(type='text', text=tool_result.model_text))]
return result
class_skeleton:
class ResourceMiddleware(Middleware):
'''
Intercepts tool call results, normalizes them into ToolResult, stores them, and exposes only the minimal necessary text to the model.
'''
def __init__(self, resource_manager: ResourceManager, log_method: Callable):
pass
async def on_call_tool(self, context: MiddlewareContext, call_next):
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=30, AvgCountLineBlank=5, AvgCountLineCode=21, AvgCountLineComment=4, AvgCyclomatic=4, CommentToCodeRatio=0.26, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=65, CountLineBlank=12, CountLineCode=42, CountLineCodeDecl=13, CountLineCodeExe=39, CountLineComment=11, CountStmt=33, CountStmtDecl=12, CountStmtExe=30, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=8

id: 328,212
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/resource_manager.py
class_name: dingent.core.resource_manager.ResourceManager
human_written_code:
import pickle
from pathlib import Path
from .types import ToolResult
import uuid
from dingent.core.log_manager import LogManager
import sqlite3
class ResourceManager:
"""
A persistent resource manager that stores ToolResult (the full tool output) in SQLite.
When maximum capacity is reached, the oldest resources are evicted by timestamp (FIFO).
"""
def __init__(self, log_manager: LogManager, store_path: str | Path, max_size: int=100):
self._log_manager = log_manager
if not isinstance(max_size, int) or max_size <= 0:
raise ValueError('max_size must be a positive integer.')
self.db_path = Path(store_path)
self.max_size = max_size
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self._conn = sqlite3.connect(self.db_path, check_same_thread=False)
self._initialize_db()
self._log_manager.log_with_context('info', "SqliteResourceManager initialized with DB at '{db_path}' and a maximum capacity of {max_size} resources.", context={'db_path': self.db_path, 'max_size': self.max_size})
def _initialize_db(self):
"""Create the resources table if it doesn't exist."""
with self._conn:
self._conn.execute('\n CREATE TABLE IF NOT EXISTS resources (\n id TEXT PRIMARY KEY,\n data BLOB NOT NULL,\n created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n ')
def register(self, resource: ToolResult) -> str:
"""
Serialize and store a new resource; if capacity is exceeded, delete the oldest resource.
"""
if len(self) >= self.max_size:
with self._conn:
cursor = self._conn.cursor()
cursor.execute('SELECT id FROM resources ORDER BY created_at ASC LIMIT 1')
oldest_id_tuple = cursor.fetchone()
if oldest_id_tuple:
oldest_id = oldest_id_tuple[0]
cursor.execute('DELETE FROM resources WHERE id = ?', (oldest_id,))
self._log_manager.log_with_context('warning', 'Capacity reached. Removed oldest resource.', context={'removed_resource_id': oldest_id})
new_id = str(uuid.uuid4())
serialized_resource = pickle.dumps(resource)
with self._conn:
self._conn.execute('INSERT INTO resources (id, data) VALUES (?, ?)', (new_id, serialized_resource))
current_size = len(self)
self._log_manager.log_with_context('info', 'ToolResult registered to SQLite', context={'resource_id': new_id, 'payload_count': len(resource.display), 'has_data': resource.data is not None, 'total_resources': current_size, 'capacity_used_percent': round(current_size / self.max_size * 100, 2)}, correlation_id=f'toolres_{new_id[:8]}')
return new_id
def get(self, resource_id: str) -> ToolResult | None:
"""通过 ID 从数据库中检索并反序列化资源。"""
cursor = self._conn.cursor()
cursor.execute('SELECT data FROM resources WHERE id = ?', (resource_id,))
row = cursor.fetchone()
if not row:
self._log_manager.log_with_context('warning', 'Resource not found in SQLite database.', context={'resource_id': resource_id})
return None
return pickle.loads(row[0])
def get_model_text(self, resource_id: str) -> str | None:
"""获取资源的 model_text 字段。"""
resource = self.get(resource_id)
return resource.model_text if resource else None
def clear(self) -> None:
"""从数据库中删除所有资源。"""
with self._conn:
self._conn.execute('DELETE FROM resources')
self._log_manager.log_with_context('info', 'All resources cleared from SQLite database.')
def close(self) -> None:
"""关闭数据库连接。"""
self._conn.close()
self._log_manager.log_with_context('info', 'SQLiteResourceManager database connection closed.')
def __len__(self) -> int:
"""返回数据库中当前存储的资源数量。"""
cursor = self._conn.cursor()
cursor.execute('SELECT COUNT(id) FROM resources')
return cursor.fetchone()[0]
def __repr__(self) -> str:
return f"<SqliteResourceManager(db='{self.db_path}', current_size={len(self)}, max_size={self.max_size})>"
class_skeleton:
class ResourceManager:
'''
A persistent resource manager that stores ToolResult (the full tool output) in SQLite.
When maximum capacity is reached, the oldest resources are evicted by timestamp (FIFO).
'''
def __init__(self, log_manager: LogManager, store_path: str | Path, max_size: int=100):
pass
def _initialize_db(self):
'''Create the resources table if it doesn't exist.'''
pass
def register(self, resource: ToolResult) -> str:
'''
Serialize and store a new resource; if capacity is exceeded, delete the oldest resource.
'''
pass
def get(self, resource_id: str) -> ToolResult | None:
'''Retrieve and deserialize a resource from the database by its ID.'''
pass
def get_model_text(self, resource_id: str) -> str | None:
'''Get the model_text field of the resource.'''
pass
def clear(self) -> None:
'''Delete all resources from the database.'''
pass
def close(self) -> None:
'''Close the database connection.'''
pass
def __len__(self) -> int:
'''Return the number of resources currently stored in the database.'''
pass
def __repr__(self) -> str:
pass
metrics: total_program_units=10, total_doc_str=8, AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=8, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.29, CountClassBase=0, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=9, CountDeclInstanceVariable=4, CountDeclMethod=9, CountDeclMethodAll=9, CountLine=114, CountLineBlank=17, CountLineCode=76, CountLineCodeDecl=24, CountLineCodeExe=66, CountLineComment=22, CountStmt=55, CountStmtDecl=24, CountStmtExe=45, MaxCyclomatic=3, MaxInheritanceTree=0, MaxNesting=3, SumCyclomatic=14
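The eviction policy in ResourceManager.register, reduced to a standalone sqlite3 sketch. Note that CURRENT_TIMESTAMP has one-second resolution, so rows inserted within the same second can tie on created_at; the real class shares this subtlety.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE resources (id TEXT PRIMARY KEY, data BLOB NOT NULL, "
    "created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)"
)
MAX_SIZE = 2
for rid in ("a", "b", "c"):
    (count,) = conn.execute("SELECT COUNT(id) FROM resources").fetchone()
    if count >= MAX_SIZE:  # at capacity: evict the oldest row first (FIFO)
        oldest = conn.execute(
            "SELECT id FROM resources ORDER BY created_at ASC LIMIT 1"
        ).fetchone()[0]
        conn.execute("DELETE FROM resources WHERE id = ?", (oldest,))
    conn.execute("INSERT INTO resources (id, data) VALUES (?, ?)", (rid, b"x"))
print([r[0] for r in conn.execute("SELECT id FROM resources")])
```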

id: 328,213
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/secret_manager.py
class_name: dingent.core.secret_manager.SecretManager
human_written_code:
import keyring.errors
import keyring
from cryptography.fernet import Fernet
from pathlib import Path
import json
class SecretManager:
"""
A robust secret manager that handles keyring availability automatically.
If keyring is available, it is used.
If keyring is unavailable (e.g., in WSL/Docker), it falls back to an encrypted local file.
"""
def __init__(self, project_root: Path):
self._keyring_available = self._check_keyring_availability()
self._fallback_key = None
self._fallback_secrets_path = None
if not self._keyring_available:
logger.warning('Keyring backend not available. Falling back to encrypted local file for secrets.')
fallback_dir = project_root / '.secrets'
self._initialize_fallback(fallback_dir)
def _check_keyring_availability(self) -> bool:
"""在启动时检查 keyring 后端是否可用。"""
try:
keyring.get_password(f'{KEYRING_SERVICE_NAME}-test', 'availability-check')
logger.info('Keyring backend is available.')
return True
except keyring.errors.NoKeyringError:
return False
def _initialize_fallback(self, fallback_dir: Path):
"""初始化基于加密文件的回退方案。"""
try:
fallback_dir.mkdir(parents=True, exist_ok=True)
fallback_dir.chmod(448)
master_key_path = fallback_dir / FALLBACK_MASTER_KEY_FILE
self._fallback_secrets_path = fallback_dir / FALLBACK_SECRETS_FILE
if not master_key_path.exists():
logger.info('Generating new master key for fallback secret storage.')
key = Fernet.generate_key()
master_key_path.write_bytes(key)
master_key_path.chmod(384)
self._fallback_key = master_key_path.read_bytes()
if not self._fallback_secrets_path.exists():
self._write_fallback_secrets({})
except Exception as e:
logger.error(f'Failed to initialize fallback secret storage: {e}', exc_info=True)
raise RuntimeError('Could not initialize fallback secret storage.') from e
def _read_fallback_secrets(self) -> dict:
"""读取并解密回退密钥文件。"""
fernet = Fernet(self._fallback_key)
encrypted_data = self._fallback_secrets_path.read_bytes()
decrypted_data = fernet.decrypt(encrypted_data)
return json.loads(decrypted_data.decode())
def _write_fallback_secrets(self, secrets: dict):
"""加密并写入回退密钥文件。"""
fernet = Fernet(self._fallback_key)
encrypted_data = fernet.encrypt(json.dumps(secrets).encode())
self._fallback_secrets_path.write_bytes(encrypted_data)
self._fallback_secrets_path.chmod(384)
def set_secret(self, key_path: str, value: str):
"""根据可用性,将密钥保存到 keyring 或回退文件。"""
if self._keyring_available:
keyring.set_password(KEYRING_SERVICE_NAME, key_path, value)
else:
secrets = self._read_fallback_secrets()
secrets[key_path] = value
self._write_fallback_secrets(secrets)
def get_secret(self, key_path: str) -> str | None:
"""根据可用性,从 keyring 或回退文件读取密钥。"""
if self._keyring_available:
return keyring.get_password(KEYRING_SERVICE_NAME, key_path)
else:
secrets = self._read_fallback_secrets()
return secrets.get(key_path)
class_skeleton:
class SecretManager:
'''
A robust secret manager that handles keyring availability automatically.
If keyring is available, it is used.
If keyring is unavailable (e.g., in WSL/Docker), it falls back to an encrypted local file.
'''
def __init__(self, project_root: Path):
pass
def _check_keyring_availability(self) -> bool:
'''Check at startup whether a keyring backend is available.'''
pass
def _initialize_fallback(self, fallback_dir: Path):
'''Initialize the encrypted-file-based fallback.'''
pass
def _read_fallback_secrets(self) -> dict:
'''Read and decrypt the fallback secrets file.'''
pass
def _write_fallback_secrets(self, secrets: dict):
'''Encrypt and write the fallback secrets file.'''
pass
def set_secret(self, key_path: str, value: str):
'''Save the secret to keyring or to the fallback file, depending on availability.'''
pass
def get_secret(self, key_path: str) -> str | None:
'''Read the secret from keyring or from the fallback file, depending on availability.'''
pass
metrics: total_program_units=8, total_doc_str=7, AvgCountLine=10, AvgCountLineBlank=1, AvgCountLineCode=8, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.29, CountClassBase=0, CountClassCoupled=7, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=7, CountDeclInstanceVariable=3, CountDeclMethod=7, CountDeclMethodAll=7, CountLine=84, CountLineBlank=13, CountLineCode=56, CountLineCodeDecl=22, CountLineCodeExe=48, CountLineComment=16, CountStmt=54, CountStmtDecl=21, CountStmtExe=46, MaxCyclomatic=4, MaxInheritanceTree=0, MaxNesting=2, SumCyclomatic=14
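The decimal chmod arguments in SecretManager read more naturally in octal:

```python
assert 448 == 0o700  # fallback dir: owner rwx, no group/other access
assert 384 == 0o600  # master key and secrets files: owner read/write only
```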

id: 328,214
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/settings.py
class_name: dingent.core.settings.AppSettings
human_written_code:
import tomlkit
from loguru import logger
from .types import AssistantBase, PluginUserConfig, Workflow
from pydantic import BaseModel, ConfigDict, Field, SecretStr
class AppSettings(BaseModel):
model_config = ConfigDict(env_prefix='DINGENT_', populate_by_name=True, extra='ignore')
assistants: list[AssistantSettings] = []
llm: LLMSettings = LLMSettings()
backend_port: int = 8000
frontend_port: int = 3000
workflows: list[Workflow] = Field(default_factory=list, description='All workflows cached in settings')
current_workflow: str | None = Field(None, description='ID of the current workflow')
def save(self):
source = TomlConfigSettingsSource(self.__class__)
if not source.toml_path:
logger.error('Cannot save: dingent.toml path not found.')
return
data = self.model_dump(mode='json', exclude_none=True)
data = {k: v for k, v in data.items() if k != 'assistants'}
if source.toml_path.is_file():
doc = tomlkit.parse(source.toml_path.read_text())
else:
doc = tomlkit.document()
doc.update(data)
source.toml_path.write_text(tomlkit.dumps(doc, sort_keys=True), 'utf-8')
logger.success(f'Global configuration saved to {source.toml_path}')
class_skeleton:
class AppSettings(BaseModel):
def save(self):
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=14, AvgCountLineBlank=0, AvgCountLineCode=14, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=83, CountLine=23, CountLineBlank=1, CountLineCode=22, CountLineCodeDecl=12, CountLineCodeExe=20, CountLineComment=0, CountStmt=21, CountStmtDecl=12, CountStmtExe=19, MaxCyclomatic=3, MaxInheritanceTree=5, MaxNesting=1, SumCyclomatic=3

id: 328,215
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/settings.py
class_name: dingent.core.settings.AssistantSettings
human_written_code:
from .types import AssistantBase, PluginUserConfig, Workflow
import uuid
from pydantic import BaseModel, ConfigDict, Field, SecretStr
class AssistantSettings(AssistantBase):
id: str = Field(default_factory=lambda: str(uuid.uuid4()), description='Unique identifier for the assistant.')
plugins: list[PluginUserConfig] = []
class_skeleton:
class AssistantSettings(AssistantBase):
pass
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=83, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=2, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=2, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=6, MaxNesting=0, SumCyclomatic=0

id: 328,216
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/settings.py
class_name: dingent.core.settings.LLMSettings
human_written_code:
from pydantic import BaseModel, ConfigDict, Field, SecretStr
class LLMSettings(BaseModel):
model: str = Field('gpt-4.1', description='LLM model name.')
provider: str | None = Field(None, description='Provider name.')
base_url: str | None = Field(None, description='Base URL.')
api_key: SecretStr | None = Field(None, description='API key for authentication.')
class_skeleton:
class LLMSettings(BaseModel):
pass
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=82, CountLine=5, CountLineBlank=0, CountLineCode=5, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=0, CountStmt=5, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=0, MaxInheritanceTree=5, MaxNesting=0, SumCyclomatic=0

id: 328,217
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/settings.py
class_name: dingent.core.settings.TomlConfigSettingsSource
human_written_code:
from typing import Any
from pydantic.fields import FieldInfo
from loguru import logger
import tomlkit
from pathlib import Path
from pydantic_settings import BaseSettings, PydanticBaseSettingsSource
from .utils import find_project_root
class TomlConfigSettingsSource(PydanticBaseSettingsSource):
def __init__(self, settings_cls: type[BaseSettings]):
super().__init__(settings_cls)
self.project_root: Path | None = find_project_root()
if self.project_root:
self.toml_path = self.project_root / 'dingent.toml'
else:
self.toml_path = None
def get_field_value(self, field: FieldInfo, field_name: str) -> tuple[Any, str, bool]:
if not self.toml_path or not self.toml_path.is_file():
return (None, '', False)
file_content = tomlkit.parse(self.toml_path.read_text('utf-8'))
field_value = file_content.get(field_name)
return (field_value, field_name, True)
def __call__(self) -> dict[str, Any]:
if not self.toml_path or not self.toml_path.is_file():
logger.warning('dingent.toml not found. Skipping TomlConfigSettingsSource.')
return {}
logger.info(f'Loading global settings from: {self.toml_path}')
return tomlkit.loads(self.toml_path.read_text()).unwrap()
class_skeleton:
class TomlConfigSettingsSource(PydanticBaseSettingsSource):
def __init__(self, settings_cls: type[BaseSettings]):
pass
def get_field_value(self, field: FieldInfo, field_name: str) -> tuple[Any, str, bool]:
pass
def __call__(self) -> dict[str, Any]:
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=2, CountDeclMethod=3, CountDeclMethodAll=3, CountLine=22, CountLineBlank=2, CountLineCode=20, CountLineCodeDecl=8, CountLineCodeExe=16, CountLineComment=0, CountStmt=19, CountStmtDecl=8, CountStmtExe=15, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
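For context, a custom source like the TomlConfigSettingsSource above is typically wired into a pydantic-settings class through settings_customise_sources. This is a sketch of the general pattern; the actual wiring in Dingent is not shown in this record.

```python
# Hedged sketch of the standard pydantic-settings v2 hook; MySettings is
# illustrative and assumes TomlConfigSettingsSource is importable.
from pydantic_settings import BaseSettings

class MySettings(BaseSettings):
    backend_port: int = 8000

    @classmethod
    def settings_customise_sources(cls, settings_cls, init_settings,
                                   env_settings, dotenv_settings,
                                   file_secret_settings):
        # earlier sources win; the TOML file sits between init and env here
        return (init_settings, TomlConfigSettingsSource(settings_cls), env_settings)
```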

id: 328,218
repository_name: saya-ashen/Dingent
file_path: /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
class_name: dingent.core.types.AssistantBase
human_written_code:
from typing import Any, Literal, TypeVar
from pydantic import BaseModel, Field, FilePath, model_validator
class AssistantBase(BaseModel):
id: str = Field(..., description='The unique and permanent ID for the assistant.')
name: str = Field(..., description='The display name of the assistant.')
description: str
version: str | float = Field('0.2.0', description='Assistant version.')
spec_version: str | float = Field('2.0', description='Specification version.')
enabled: bool = Field(True, description='Enable or disable the assistant.')
@model_validator(mode='before')
@classmethod
def _normalize_and_generate_id(cls, data: Any) -> Any:
if isinstance(data, dict):
source_for_display_name = data.get('display_name') or data.get('name')
if source_for_display_name:
data['name'] = source_for_display_name
if 'id' not in data and source_for_display_name:
data['id'] = generate_id_from_name(source_for_display_name)
return data
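To illustrate the mode='before' validator: a loose payload carrying only a display name is normalized before field validation (generate_id_from_name is defined elsewhere in this module, so the derived id value is not asserted literally):
# Illustrative: 'display_name' is promoted to 'name' and an id is derived from it.
a = AssistantBase(display_name='My Helper', description='demo')
assert a.name == 'My Helper'
assert a.id  # produced by generate_id_from_name('My Helper')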
|
class AssistantBase(BaseModel):
@model_validator(mode='before')
@classmethod
def _normalize_and_generate_id(cls, data: Any) -> Any:
pass
| 4
| 0
| 15
| 3
| 8
| 4
| 4
| 0.24
| 1
| 2
| 0
| 3
| 0
| 0
| 1
| 83
| 25
| 4
| 17
| 8
| 13
| 4
| 15
| 7
| 13
| 4
| 5
| 2
| 4
|
328,219
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.AssistantCreate
|
class AssistantCreate(AssistantBase):
pass
|
class AssistantCreate(AssistantBase):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 83
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 6
| 0
| 0
|
328,220
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.AssistantUpdate
|
from pydantic import BaseModel, Field, FilePath, model_validator
class AssistantUpdate(BaseModel):
name: str | None = None
description: str | None = None
plugins: list[PluginUserConfig] | None = None
version: str | float | None = None
spec_version: str | float | None = None
enabled: bool | None = None
|
class AssistantUpdate(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 82
| 7
| 0
| 7
| 7
| 6
| 0
| 7
| 7
| 6
| 0
| 5
| 0
| 0
|
328,221
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.ConfigItemDetail
|
from typing import Any, Literal, TypeVar
from pydantic import BaseModel, Field, FilePath, model_validator
class ConfigItemDetail(PluginConfigSchema):
"""Represents a single configuration item with its schema and value."""
value: Any | None = Field(None, description='Current value set by the user')
|
class ConfigItemDetail(PluginConfigSchema):
'''Represents a single configuration item with its schema and value.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 4
| 1
| 2
| 2
| 1
| 1
| 2
| 2
| 1
| 0
| 6
| 0
| 0
|
328,222
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.ExecutionModel
|
from pydantic import BaseModel, Field, FilePath, model_validator
from typing import Any, Literal, TypeVar
class ExecutionModel(BaseModel):
mode: Literal['local', 'remote'] = Field(..., description="Execution mode: 'local' or 'remote'")
url: str | None = None
script_path: str | None = Field(None, description='Path to the Python entry-point file the plugin manager should run')
mcp_json_path: str | None = None
@model_validator(mode='after')
def check_exclusive_execution_mode(self) -> 'ExecutionModel':
return self
|
class ExecutionModel(BaseModel):
@model_validator(mode='after')
def check_exclusive_execution_mode(self) -> 'ExecutionModel':
pass
| 3
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 83
| 9
| 1
| 8
| 7
| 5
| 0
| 7
| 6
| 5
| 1
| 5
| 0
| 1
|
328,223
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.MarkdownPayload
|
from typing import Any, Literal, TypeVar
class MarkdownPayload(ToolDisplayPayloadBase):
type: Literal['markdown'] = 'markdown'
content: str
|
class MarkdownPayload(ToolDisplayPayloadBase):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 3
| 0
| 3
| 2
| 2
| 0
| 3
| 2
| 2
| 0
| 6
| 0
| 0
|
328,224
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.PluginBase
|
from typing import Any, Literal, TypeVar
from pydantic import BaseModel, Field, FilePath, model_validator
class PluginBase(BaseModel):
id: str = Field(..., description='The unique and permanent ID for the plugin.')
name: str = Field(..., description='The display name of the plugin.')
description: str = Field(..., description='Description of the plugin.')
version: str | float = Field('0.1.0', description='Plugin version.')
@model_validator(mode='before')
@classmethod
def _normalize_and_generate_id(cls, data: Any) -> Any:
if isinstance(data, dict):
source_for_display_name = data.get('display_name') or data.get('name')
if source_for_display_name:
data['name'] = source_for_display_name
if 'id' not in data and source_for_display_name:
data['id'] = generate_id_from_name(source_for_display_name)
return data
|
class PluginBase(BaseModel):
@model_validator(mode='before')
@classmethod
def _normalize_and_generate_id(cls, data: Any) -> Any:
pass
| 4
| 0
| 15
| 3
| 8
| 4
| 4
| 0.33
| 1
| 2
| 0
| 1
| 0
| 0
| 1
| 83
| 26
| 6
| 15
| 7
| 11
| 5
| 13
| 6
| 11
| 4
| 5
| 2
| 4
|
328,225
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.PluginConfigSchema
|
from pydantic import BaseModel, Field, FilePath, model_validator
from typing import Any, Literal, TypeVar
class PluginConfigSchema(BaseModel):
name: str = Field(..., description='Name of the config item (environment variable name)')
type: Literal['string', 'float', 'integer', 'bool'] = Field(..., description="Expected type of the config item (e.g., 'string', 'number')")
required: bool = Field(..., description='Whether this item is required')
secret: bool = Field(False, description='Whether the value is sensitive (e.g., an API key)')
description: str | None = Field(None, description='Description of this config item')
default: Any | None = Field(None, description='Default value (if any)')
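For reference, a schema entry for a required secret string option might look like this (the values are illustrative):
# Illustrative instance of a single plugin config schema entry.
item = PluginConfigSchema(
    name='API_KEY',
    type='string',
    required=True,
    secret=True,
    description='API key for the upstream service',
    default=None,
)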
|
class PluginConfigSchema(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 82
| 7
| 0
| 7
| 7
| 6
| 0
| 7
| 7
| 6
| 0
| 5
| 0
| 0
|
328,226
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.PluginUserConfig
|
from pydantic import BaseModel, Field, FilePath, model_validator
class PluginUserConfig(BaseModel):
plugin_id: str
tools_default_enabled: bool = True
enabled: bool = True
tools: list[ToolOverrideConfig] | None = None
config: dict | None = None
|
class PluginUserConfig(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 82
| 6
| 0
| 6
| 5
| 5
| 0
| 6
| 5
| 5
| 0
| 5
| 0
| 0
|
328,227
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.TablePayload
|
from typing import Any, Literal, TypeVar
class TablePayload(ToolDisplayPayloadBase):
type: Literal['table'] = 'table'
columns: list[str]
rows: list[dict]
title: str = ''
|
class TablePayload(ToolDisplayPayloadBase):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 3
| 4
| 0
| 5
| 3
| 4
| 0
| 6
| 0
| 0
|
328,228
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.ToolConfigModel
|
from pydantic import BaseModel, Field, FilePath, model_validator
class ToolConfigModel(BaseModel):
schema_path: FilePath = Field(..., description='Path to a Python file containing the user-config Pydantic class')
|
class ToolConfigModel(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 5
| 0
| 0
|
328,229
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.ToolDisplayPayloadBase
|
from pydantic import BaseModel, Field, FilePath, model_validator
class ToolDisplayPayloadBase(BaseModel):
"""前端展示用的 Payload 基类"""
type: str
|
class ToolDisplayPayloadBase(BaseModel):
'''Base class for payloads rendered by the frontend.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 2
| 0
| 0
| 0
| 82
| 4
| 1
| 2
| 1
| 1
| 1
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
328,230
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.ToolOverrideConfig
|
from pydantic import BaseModel, Field, FilePath, model_validator
class ToolOverrideConfig(BaseModel):
name: str
enabled: bool = True
description: str | None = None
|
class ToolOverrideConfig(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 4
| 0
| 4
| 3
| 3
| 0
| 4
| 3
| 3
| 0
| 5
| 0
| 0
|
328,231
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.ToolResult
|
from pydantic import BaseModel, Field, FilePath, model_validator
from typing import Any, Literal, TypeVar
class ToolResult(BaseModel):
"""
Standardized tool output.
model_text: (required) concise text for the model (summary / distillation / structure notes)
display: (optional) payload list for rich frontend display
data: (optional) structured raw data (for downstream use by frontend/workflow/automation)
metadata: (optional) extra metadata (execution time, source, version, etc.)
version: protocol/structure version, to ease future upgrades
"""
version: str = '1.0'
model_text: str = Field(..., description='Concise context text supplied to the LLM')
display: list[ToolDisplayPayload] = Field(default_factory=list, description='Payload list for frontend display')
data: Any | None = Field(None, description='Raw/structured data')
metadata: dict = Field(default_factory=dict, description='Additional metadata')
@classmethod
def from_any(cls, obj: str | dict) -> 'ToolResult':
"""
Convert loose input into a standard ToolResult:
- str -> model_text + markdown display
- dict -> parse its keys
- already a ToolResult -> returned as-is
"""
if isinstance(obj, cls):
return obj
if isinstance(obj, str):
return cls(model_text=obj, display=[MarkdownPayload(content=obj)])
if isinstance(obj, dict):
model_text = obj.get('model_text') or obj.get('model') or obj.get('text')
if not isinstance(model_text, str):
return cls(model_text=str(obj), display=[MarkdownPayload(content=str(obj))])
raw_display = obj.get('display') or []
display_payloads: list[ToolDisplayPayload] = []
for p in raw_display:
if not isinstance(p, dict):
continue
p_type = p.get('type')
try:
if p_type == 'markdown':
display_payloads.append(MarkdownPayload(**p))
elif p_type == 'table':
display_payloads.append(TablePayload(**p))
else:
display_payloads.append(MarkdownPayload(content=f'[Unsupported payload type {p_type}] {p}'))
except Exception:
display_payloads.append(MarkdownPayload(content=f'[Invalid payload] {p}'))
data = obj.get('data')
metadata = obj.get('metadata') or obj.get('meta') or {}
if not model_text:
if display_payloads:
first = display_payloads[0]
if isinstance(first, MarkdownPayload):
model_text = first.content[:1000]
elif isinstance(first, TablePayload):
model_text = f"表格:{first.title or '未命名'} 行数:{len(first.rows)} 列:{','.join(first.columns)}"
elif data is not None:
model_text = f'Returned data keys: {list(data.keys())[:10]}' if isinstance(data, dict) else 'Tool returned data'
else:
model_text = ''
return cls(model_text=model_text, display=display_payloads, data=data, metadata=metadata)
return cls(model_text=str(obj), display=[MarkdownPayload(content=str(obj))])
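The normalization rules of from_any can be exercised directly; the two cases below follow from the code above and use only illustrative values:
# str input becomes model_text plus one markdown payload.
r1 = ToolResult.from_any('hello')
assert r1.model_text == 'hello'
assert r1.display[0].type == 'markdown'

# dict input: known keys are parsed; unsupported payload types degrade to markdown.
r2 = ToolResult.from_any({
    'model_text': '2 rows',
    'display': [{'type': 'table', 'columns': ['a'], 'rows': [{'a': 1}, {'a': 2}]}],
    'metadata': {'source': 'demo'},
})
assert r2.display[0].type == 'table'
assert r2.metadata == {'source': 'demo'}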
|
class ToolResult(BaseModel):
'''
Standardized tool output.
model_text: (required) concise text for the model (summary / distillation / structure notes)
display: (optional) payload list for rich frontend display
data: (optional) structured raw data (for downstream use by frontend/workflow/automation)
metadata: (optional) extra metadata (execution time, source, version, etc.)
version: protocol/structure version, to ease future upgrades
'''
@classmethod
def from_any(cls, obj: str | dict) -> 'ToolResult':
'''
Convert loose input into a standard ToolResult:
- str -> model_text + markdown display
- dict -> parse its keys
- already a ToolResult -> returned as-is
'''
pass
| 3
| 2
| 66
| 6
| 50
| 10
| 16
| 0.32
| 1
| 6
| 2
| 0
| 0
| 0
| 1
| 83
| 84
| 9
| 57
| 17
| 54
| 18
| 40
| 15
| 38
| 16
| 5
| 4
| 16
|
328,232
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.Workflow
|
from pydantic import BaseModel, Field, FilePath, model_validator
class Workflow(WorkflowBase):
nodes: list[WorkflowNode] = Field(default_factory=list, description='Workflow nodes')
edges: list[WorkflowEdge] = Field(default_factory=list, description='Workflow edges')
created_at: str | None = Field(None, description='Creation timestamp')
updated_at: str | None = Field(None, description='Last update timestamp')
|
class Workflow(WorkflowBase):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 5
| 0
| 5
| 5
| 4
| 0
| 5
| 5
| 4
| 0
| 6
| 0
| 0
|
328,233
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.WorkflowBase
|
from pydantic import BaseModel, Field, FilePath, model_validator
from typing import Any, Literal, TypeVar
class WorkflowBase(BaseModel):
id: str = Field(..., description='The unique and permanent ID for the workflow')
name: str = Field(..., description='The display name for the workflow')
description: str | None = Field(None, description='A description of what the workflow does')
@model_validator(mode='before')
@classmethod
def _normalize_and_generate_id(cls, data: Any) -> Any:
if isinstance(data, dict):
source_for_display_name = data.get('display_name') or data.get('name')
if source_for_display_name:
data['name'] = source_for_display_name
if 'id' not in data and source_for_display_name:
data['id'] = generate_id_from_name(source_for_display_name)
return data
|
class WorkflowBase(BaseModel):
@model_validator(mode='before')
@classmethod
def _normalize_and_generate_id(cls, data: Any) -> Any:
pass
| 4
| 0
| 17
| 3
| 8
| 6
| 4
| 0.43
| 1
| 2
| 0
| 2
| 0
| 0
| 1
| 83
| 24
| 4
| 14
| 6
| 10
| 6
| 12
| 5
| 10
| 4
| 5
| 2
| 4
|
328,234
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.WorkflowCreate
|
class WorkflowCreate(WorkflowBase):
pass
|
class WorkflowCreate(WorkflowBase):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 6
| 0
| 0
|
328,235
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.WorkflowEdge
|
from pydantic import BaseModel, Field, FilePath, model_validator
class WorkflowEdge(BaseModel):
id: str = Field(..., description='Unique edge identifier')
source: str = Field(..., description='Source node ID')
target: str = Field(..., description='Target node ID')
sourceHandle: str | None = Field(None, description='Source handle ID')
targetHandle: str | None = Field(None, description='Target handle ID')
type: str | None = Field('default', description='Edge type')
data: WorkflowEdgeData | None = Field(None, description='Edge data')
|
class WorkflowEdge(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 8
| 0
| 8
| 7
| 7
| 0
| 8
| 7
| 7
| 0
| 5
| 0
| 0
|
328,236
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.WorkflowEdgeData
|
from pydantic import BaseModel, Field, FilePath, model_validator
from typing import Any, Literal, TypeVar
class WorkflowEdgeData(BaseModel):
mode: Literal['single', 'bidirectional'] = Field('single', description='Edge mode')
|
class WorkflowEdgeData(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 5
| 0
| 0
|
328,237
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.WorkflowNode
|
from pydantic import BaseModel, Field, FilePath, model_validator
from typing import Any, Literal, TypeVar
class WorkflowNode(BaseModel):
id: str = Field(..., description='Unique node identifier')
type: Literal['assistant'] = Field('assistant', description='Node type')
position: dict[str, float] = Field(..., description='Node position {x, y}')
data: WorkflowNodeData = Field(..., description='Node data')
|
class WorkflowNode(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 4
| 4
| 0
| 5
| 4
| 4
| 0
| 5
| 0
| 0
|
328,238
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.WorkflowNodeData
|
from pydantic import BaseModel, Field, FilePath, model_validator
class WorkflowNodeData(BaseModel):
assistantId: str = Field(..., description='Assistant ID referenced by this node')
assistantName: str = Field(..., description='Assistant name for display')
description: str | None = Field(None, description='Node description')
isStart: bool = Field(False, description='Is this the start node')
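Putting the node and edge models together, a minimal two-node workflow could be constructed like this (all ids are illustrative):
# Illustrative: a two-node workflow with one directed edge from start to executor.
wf = Workflow(
    id='wf-demo',
    name='Demo',
    nodes=[
        WorkflowNode(
            id='n1',
            position={'x': 0.0, 'y': 0.0},
            data=WorkflowNodeData(assistantId='a1', assistantName='Planner', isStart=True),
        ),
        WorkflowNode(
            id='n2',
            position={'x': 200.0, 'y': 0.0},
            data=WorkflowNodeData(assistantId='a2', assistantName='Executor'),
        ),
    ],
    edges=[WorkflowEdge(id='e1', source='n1', target='n2')],
)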
|
class WorkflowNodeData(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 5
| 4
| 0
| 5
| 5
| 4
| 0
| 5
| 0
| 0
|
328,239
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/types.py
|
dingent.core.types.WorkflowUpdate
|
from pydantic import BaseModel, Field, FilePath, model_validator
class WorkflowUpdate(BaseModel):
name: str | None = None
description: str | None = None
nodes: list[WorkflowNode] | None = None
edges: list[WorkflowEdge] | None = None
|
class WorkflowUpdate(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 5
| 4
| 0
| 5
| 5
| 4
| 0
| 5
| 0
| 0
|
328,240
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/workflow_manager.py
|
dingent.core.workflow_manager.WorkflowManager
|
from pydantic import ValidationError
from datetime import UTC, datetime
from dingent.core.config_manager import ConfigManager
from dingent.core.settings import AppSettings
from dingent.core.log_manager import LogManager
from pathlib import Path
import json
from threading import RLock
from dingent.core.types import Workflow, WorkflowCreate, WorkflowUpdate
class WorkflowManager:
"""
Manages workflow persistence (CRUD) + active workflow selection + (optional) runtime assistant instantiation.
Responsibilities:
- Load all workflow JSON definitions from config/workflows
- CRUD operations on workflows
- Track / persist current active workflow id via ConfigManager
- Provide runtime assistant instantiation helper (optional)
- Emit change events to registered callbacks for observability
"""
def __init__(self, config_manager: ConfigManager, log_manager: LogManager, assistant_manager: AssistantManager | None=None, workflows_dir: Path | None=None, auto_set_active_if_missing: bool=True):
self.config_manager = config_manager
self.assistant_manager = assistant_manager
self.log_manager = log_manager
self._dir = workflows_dir or config_manager.project_root / 'config' / 'workflows'
self._lock = RLock()
self._workflows: dict[str, Workflow] = {}
self._active_workflow_id: str | None = None
self._callbacks: list[WorkflowChangeCallback] = []
self._load_all_from_disk()
settings = self.config_manager.get_settings()
if settings.current_workflow and settings.current_workflow in self._workflows:
self._active_workflow_id = settings.current_workflow
elif auto_set_active_if_missing and self._workflows:
first_id = next(iter(self._workflows.keys()))
self._active_workflow_id = first_id
self._persist_active_workflow_id(first_id)
self.config_manager.register_on_change(self._on_config_change)
self.log_manager.log_with_context('info', 'WorkflowManager initialized.', context={'workflow_count': len(self._workflows), 'active_workflow_id': self._active_workflow_id, 'workflows_dir': str(self._dir)})
def list_workflows(self) -> list[Workflow]:
with self._lock:
return [wf.model_copy(deep=True) for wf in self._workflows.values()]
def get_workflow(self, workflow_id: str) -> Workflow | None:
with self._lock:
wf = self._workflows.get(workflow_id)
return wf.model_copy(deep=True) if wf else None
def get_workflow_id_by_name(self, name: str) -> str | None:
with self._lock:
for wf in self._workflows.values():
if wf.name == name:
return wf.id
return None
@property
def active_workflow_id(self) -> str | None:
with self._lock:
return self._active_workflow_id
def is_active(self, workflow_id: str) -> bool:
with self._lock:
return self._active_workflow_id == workflow_id
def create_workflow(self, wf_create: WorkflowCreate, *, make_active: bool=False, forbid_duplicate_name: bool=True) -> Workflow:
with self._lock:
if forbid_duplicate_name and any((wf.name == wf_create.name for wf in self._workflows.values())):
raise ValueError(f"Workflow name '{wf_create.name}' already exists.")
workflow_id = wf_create.id
now = datetime.now(UTC).isoformat()
wf = Workflow(id=workflow_id, name=wf_create.name, description=wf_create.description, nodes=[], edges=[], created_at=now, updated_at=now)
self._workflows[workflow_id] = wf
self._write_workflow_file(wf)
if make_active:
self._set_active_locked(workflow_id)
self._emit_change('created', workflow_id, wf)
return wf.model_copy(deep=True)
def update_workflow(self, workflow_id: str, wf_update: WorkflowUpdate) -> Workflow:
with self._lock:
existing = self._workflows.get(workflow_id)
if not existing:
raise ValueError(f"Workflow '{workflow_id}' not found.")
patch = wf_update.model_dump(exclude_unset=True)
if not patch:
return existing.model_copy(deep=True)
if 'name' in patch:
for wid, wf in self._workflows.items():
if wid != workflow_id and wf.name == patch['name']:
raise ValueError(f"Another workflow already uses name '{patch['name']}'.")
updated = existing.model_copy(update=patch)
updated.updated_at = datetime.now(UTC).isoformat()
try:
updated = Workflow.model_validate(updated.model_dump())
except ValidationError as e:
raise ValueError(f'Invalid workflow update: {e}') from e
self._workflows[workflow_id] = updated
self._write_workflow_file(updated)
self._emit_change('updated', workflow_id, updated)
return updated.model_copy(deep=True)
def save_workflow(self, workflow: Workflow) -> Workflow:
"""
Full replace save. Use update_workflow for partial patch.
"""
with self._lock:
if workflow.id not in self._workflows:
raise ValueError(f"Cannot save unknown workflow '{workflow.id}'.")
workflow.updated_at = datetime.now(UTC).isoformat()
wf_valid = Workflow.model_validate(workflow.model_dump())
self._workflows[wf_valid.id] = wf_valid
self._write_workflow_file(wf_valid)
self._emit_change('updated', wf_valid.id, wf_valid)
return wf_valid.model_copy(deep=True)
def delete_workflow(self, workflow_id: str) -> bool:
with self._lock:
if workflow_id not in self._workflows:
return False
wf = self._workflows.pop(workflow_id)
self._delete_workflow_file(workflow_id)
if self._active_workflow_id == workflow_id:
self._active_workflow_id = None
self._persist_active_workflow_id(None)
self._emit_change('deactivated', workflow_id, None)
self._emit_change('deleted', workflow_id, wf)
return True
def rename_workflow(self, workflow_id: str, new_name: str) -> Workflow:
return self.update_workflow(workflow_id, WorkflowUpdate(name=new_name))
def cleanup_workflows_for_deleted_assistant(self, assistant_id: str) -> list[str]:
"""
Clean up all workflows by removing nodes that reference the deleted assistant
and their connected edges. Returns list of workflow IDs that were modified.
"""
modified_workflow_ids = []
with self._lock:
for workflow_id, workflow in self._workflows.items():
nodes_to_remove = [node for node in workflow.nodes if node.data.assistantId == assistant_id]
if not nodes_to_remove:
continue
node_ids_to_remove = {node.id for node in nodes_to_remove}
updated_nodes = [node for node in workflow.nodes if node.data.assistantId != assistant_id]
updated_edges = [edge for edge in workflow.edges if edge.source not in node_ids_to_remove and edge.target not in node_ids_to_remove]
workflow.nodes = updated_nodes
workflow.edges = updated_edges
workflow.updated_at = datetime.now(UTC).isoformat()
self._write_workflow_file(workflow)
self._emit_change('updated', workflow_id, workflow)
modified_workflow_ids.append(workflow_id)
return modified_workflow_ids
def _on_config_change(self, old_settings: AppSettings, new_settings: AppSettings) -> None:
"""Handle configuration changes, specifically assistant deletions."""
try:
old_assistant_ids = {assistant.id for assistant in old_settings.assistants}
new_assistant_ids = {assistant.id for assistant in new_settings.assistants}
deleted_assistant_ids = old_assistant_ids - new_assistant_ids
for assistant_id in deleted_assistant_ids:
modified_workflows = self.cleanup_workflows_for_deleted_assistant(assistant_id)
if modified_workflows:
self.log_manager.log_with_context('info', 'Cleaned up workflows after assistant deletion', context={'deleted_assistant_id': assistant_id, 'modified_workflow_ids': modified_workflows})
except Exception as e:
self.log_manager.log_with_context('error', 'Error handling config change for workflow cleanup', context={'error': str(e)})
def set_active(self, workflow_id: str) -> None:
with self._lock:
self._set_active_locked(workflow_id)
def clear_active(self) -> None:
with self._lock:
if self._active_workflow_id is not None:
old_id = self._active_workflow_id
self._active_workflow_id = None
self._persist_active_workflow_id(None)
self._emit_change('deactivated', old_id, None)
def _set_active_locked(self, workflow_id: str) -> None:
if workflow_id not in self._workflows:
raise ValueError(f"Workflow '{workflow_id}' not found.")
if self._active_workflow_id == workflow_id:
return
self._active_workflow_id = workflow_id
self._persist_active_workflow_id(workflow_id)
self._emit_change('activated', workflow_id, self._workflows[workflow_id])
def _persist_active_workflow_id(self, workflow_id: str | None) -> None:
try:
self.config_manager.update_global({'current_workflow': workflow_id})
except Exception as e:
self.log_manager.log_with_context('error', 'Failed to persist current_workflow in ConfigManager', context={'error': str(e)})
def export_snapshot(self) -> dict:
with self._lock:
return {'active_workflow_id': self._active_workflow_id, 'workflows': [wf.model_dump() for wf in self._workflows.values()]}
def import_snapshot(self, snapshot: dict, *, overwrite: bool=True, make_active_if_present: bool=True) -> None:
"""
snapshot format:
{
"active_workflow_id": "...",
"workflows": [ {workflow_dict} ... ]
}
"""
with self._lock:
wfs_data = snapshot.get('workflows', [])
loaded: dict[str, Workflow] = {}
for entry in wfs_data:
try:
wf = Workflow.model_validate(entry)
loaded[wf.id] = wf
except ValidationError as e:
self.log_manager.log_with_context('error', 'Skip invalid workflow in snapshot', context={'error': str(e), 'entry': entry})
if overwrite:
self._workflows.clear()
self._dir.mkdir(parents=True, exist_ok=True)
for f in self._dir.glob('*.json'):
try:
f.unlink()
except Exception:
pass
self._workflows.update(loaded)
for wf in loaded.values():
self._write_workflow_file(wf)
if make_active_if_present:
active_id = snapshot.get('active_workflow_id')
if active_id and active_id in self._workflows:
self._set_active_locked(active_id)
elif not self._active_workflow_id and self._workflows:
self._set_active_locked(next(iter(self._workflows.keys())))
self._emit_change('reloaded', '*', None)
def reload_from_disk(self) -> None:
with self._lock:
self._load_all_from_disk()
if self._active_workflow_id and self._active_workflow_id not in self._workflows:
old_id = self._active_workflow_id
self._active_workflow_id = None
self._persist_active_workflow_id(None)
self._emit_change('deactivated', old_id, None)
self._emit_change('reloaded', '*', None)
def build_adjacency(self, workflow_id: str, *, include_self_loops: bool=False, honor_bidirectional: bool=True) -> dict[str, list[str]]:
"""
Returns: assistantName -> sorted list of destination assistantNames
"""
with self._lock:
wf = self._workflows.get(workflow_id)
if not wf:
raise ValueError(f"Workflow '{workflow_id}' not found.")
node_id_to_assistant: dict[str, str] = {}
for node in wf.nodes:
node_id_to_assistant[node.id] = node.data.assistantName
adjacency: dict[str, set] = {aname: set() for aname in node_id_to_assistant.values()}
for edge in wf.edges:
src_a = node_id_to_assistant.get(edge.source)
tgt_a = node_id_to_assistant.get(edge.target)
if not src_a or not tgt_a:
continue
if include_self_loops or src_a != tgt_a:
adjacency.setdefault(src_a, set()).add(tgt_a)
if honor_bidirectional and getattr(edge, 'data', None):
mode = getattr(edge.data, 'mode', None)
if mode == 'bidirectional':
if include_self_loops or tgt_a != src_a:
adjacency.setdefault(tgt_a, set()).add(src_a)
return {k: sorted(v) for k, v in adjacency.items()}
async def instantiate_workflow_assistants(self, workflow_id: str, *, set_active: bool=True, reset_assistants: bool=True, include_self_loops: bool=False, honor_bidirectional: bool=True, mutate_assistant_destinations: bool=True) -> dict[str, Assistant]:
"""
Construct runtime assistant instances according to the workflow graph.
Args:
workflow_id: target workflow
set_active: mark as active workflow
reset_assistants: if True, calls assistant_manager.aclose() before building
include_self_loops: keep A->A edges
honor_bidirectional: expand bidirectional edges
mutate_assistant_destinations: if True, assigns computed destinations to assistant.destinations
"""
if not self.assistant_manager:
raise RuntimeError('assistant_manager is not attached to WorkflowManager.')
wf = self.get_workflow(workflow_id)
if not wf:
raise ValueError(f"Workflow '{workflow_id}' not found.")
adj = self.build_adjacency(workflow_id, include_self_loops=include_self_loops, honor_bidirectional=honor_bidirectional)
if reset_assistants:
await self.assistant_manager.aclose()
assistant_name_to_id: dict[str, str] = {}
for node in wf.nodes:
assistant_name_to_id[node.data.assistantName] = node.data.assistantId
result: dict[str, Assistant] = {}
for aname, aid in assistant_name_to_id.items():
try:
assistant = await self.assistant_manager.get_assistant(aid)
except ValueError as e:
self.log_manager.log_with_context('error', message='Failed to instantiate assistant for workflow: {assistant_id}', context={'assistant_id': aid, 'assistant_name': aname, 'error': str(e)})
continue
if mutate_assistant_destinations:
assistant.destinations = adj.get(aname, [])
result[aname] = assistant
if set_active:
self.set_active(workflow_id)
return result
def register_callback(self, cb: WorkflowChangeCallback) -> None:
with self._lock:
if cb not in self._callbacks:
self._callbacks.append(cb)
def unregister_callback(self, cb: WorkflowChangeCallback) -> None:
with self._lock:
if cb in self._callbacks:
self._callbacks.remove(cb)
def _load_all_from_disk(self) -> None:
self._workflows.clear()
if not self._dir.exists():
return
for file in self._dir.glob('*.json'):
try:
wf = self._load_single_file(file)
if wf:
self._workflows[wf.id] = wf
except Exception as e:
self.log_manager.log_with_context('error', 'Failed loading workflow file', context={'file': str(file), 'error': str(e)})
def _load_single_file(self, path: Path) -> Workflow | None:
with path.open('r', encoding='utf-8') as f:
raw = json.load(f)
try:
wf = Workflow.model_validate(raw)
return wf
except ValidationError as e:
self.log_manager.log_with_context('error', 'Invalid workflow file', context={'file': str(path), 'error': str(e)})
return None
def _write_workflow_file(self, wf: Workflow) -> None:
self._dir.mkdir(parents=True, exist_ok=True)
path = self._dir / f'{wf.id}.json'
tmp_path = path.with_suffix('.json.tmp')
data = wf.model_dump()
try:
tmp_path.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding='utf-8')
tmp_path.replace(path)
except Exception as e:
self.log_manager.log_with_context('error', 'Failed to write workflow file', context={'file': str(path), 'error': str(e)})
if tmp_path.exists():
try:
tmp_path.unlink()
except Exception:
pass
raise
def _delete_workflow_file(self, workflow_id: str) -> None:
path = self._dir / f'{workflow_id}.json'
if path.exists():
try:
path.unlink()
except Exception as e:
self.log_manager.log_with_context('error', 'Failed to delete workflow file', context={'file': str(path), 'error': str(e)})
def _emit_change(self, event: str, workflow_id: str, wf: Workflow | None) -> None:
for cb in list(self._callbacks):
try:
cb(event, workflow_id, wf)
except Exception as e:
self.log_manager.log_with_context('error', 'Workflow change callback error', context={'error': str(e), 'event': event, 'workflow_id': workflow_id})
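A hedged sketch of what build_adjacency yields for a two-node workflow like the demo above, assuming it has been registered under a manager instance wm (constructing one requires real ConfigManager and LogManager instances):
# Sketch: adjacency follows the directed edges, keyed by assistant name.
adj = wm.build_adjacency('wf-demo')  # wm: assumed WorkflowManager instance
# -> {'Planner': ['Executor'], 'Executor': []}

# An edge carrying WorkflowEdgeData(mode='bidirectional') also adds the reverse
# direction, yielding {'Planner': ['Executor'], 'Executor': ['Planner']}.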
|
class WorkflowManager:
'''
Manages workflow persistence (CRUD) + active workflow selection + (optional) runtime assistant instantiation.
Responsibilities:
- Load all workflow JSON definitions from config/workflows
- CRUD operations on workflows
- Track / persist current active workflow id via ConfigManager
- Provide runtime assistant instantiation helper (optional)
- Emit change events to registered callbacks for observability
'''
def __init__(self, config_manager: ConfigManager, log_manager: LogManager, assistant_manager: AssistantManager | None=None, workflows_dir: Path | None=None, auto_set_active_if_missing: bool=True):
pass
def list_workflows(self) -> list[Workflow]:
pass
def get_workflow(self, workflow_id: str) -> Workflow | None:
pass
def get_workflow_id_by_name(self, name: str) -> str | None:
pass
@property
def active_workflow_id(self) -> str | None:
pass
def is_active(self, workflow_id: str) -> bool:
pass
def create_workflow(self, wf_create: WorkflowCreate, *, make_active: bool=False, forbid_duplicate_name: bool=True) -> Workflow:
pass
def update_workflow(self, workflow_id: str, wf_update: WorkflowUpdate) -> Workflow:
pass
def save_workflow(self, workflow: Workflow) -> Workflow:
'''
Full replace save. Use update_workflow for partial patch.
'''
pass
def delete_workflow(self, workflow_id: str) -> bool:
pass
def rename_workflow(self, workflow_id: str, new_name: str) -> Workflow:
pass
def cleanup_workflows_for_deleted_assistant(self, assistant_id: str) -> list[str]:
'''
Clean up all workflows by removing nodes that reference the deleted assistant
and their connected edges. Returns list of workflow IDs that were modified.
'''
pass
def _on_config_change(self, old_settings: AppSettings, new_settings: AppSettings) -> None:
'''Handle configuration changes, specifically assistant deletions.'''
pass
def set_active(self, workflow_id: str) -> None:
pass
def clear_active(self) -> None:
pass
def _set_active_locked(self, workflow_id: str) -> None:
pass
def _persist_active_workflow_id(self, workflow_id: str | None) -> None:
pass
def export_snapshot(self) -> dict:
pass
def import_snapshot(self, snapshot: dict, *, overwrite: bool=True, make_active_if_present: bool=True) -> None:
'''
snapshot format:
{
"active_workflow_id": "...",
"workflows": [ {workflow_dict} ... ]
}
'''
pass
def reload_from_disk(self) -> None:
pass
def build_adjacency(self, workflow_id: str, *, include_self_loops: bool=False, honor_bidirectional: bool=True) -> dict[str, list[str]]:
'''
Returns: assistantName -> sorted list of destination assistantNames
'''
pass
async def instantiate_workflow_assistants(self, workflow_id: str, *, set_active: bool=True, reset_assistants: bool=True, include_self_loops: bool=False, honor_bidirectional: bool=True, mutate_assistant_destinations: bool=True) -> dict[str, Assistant]:
'''
Construct runtime assistant instances according to the workflow graph.
Args:
workflow_id: target workflow
set_active: mark as active workflow
reset_assistants: if True, calls assistant_manager.aclose() before building
include_self_loops: keep A->A edges
honor_bidirectional: expand bidirectional edges
mutate_assistant_destinations: if True, assigns computed destinations to assistant.destinations
'''
pass
def register_callback(self, cb: WorkflowChangeCallback) -> None:
pass
def unregister_callback(self, cb: WorkflowChangeCallback) -> None:
pass
def _load_all_from_disk(self) -> None:
pass
def _load_single_file(self, path: Path) -> Workflow | None:
pass
def _write_workflow_file(self, wf: Workflow) -> None:
pass
def _delete_workflow_file(self, workflow_id: str) -> None:
pass
def _emit_change(self, event: str, workflow_id: str, wf: Workflow | None) -> None:
pass
| 31
| 7
| 15
| 1
| 12
| 2
| 3
| 0.26
| 0
| 18
| 8
| 0
| 29
| 8
| 29
| 29
| 500
| 71
| 341
| 127
| 288
| 90
| 291
| 93
| 261
| 10
| 0
| 5
| 94
|
328,241
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/engine/graph.py
|
dingent.engine.graph.ConfigSchema
|
from typing import Annotated, Any, TypedDict
class ConfigSchema(TypedDict):
model_provider: str
model_name: str
default_route: str
|
class ConfigSchema(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
328,242
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/engine/graph.py
|
dingent.engine.graph.MainState
|
from langgraph_swarm import SwarmState
from copilotkit import CopilotKitState
class MainState(CopilotKitState, SwarmState):
artifact_ids: list[str]
|
class MainState(CopilotKitState, SwarmState):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 1
| 0
| 0
|
328,243
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/engine/graph_manager.py
|
dingent.engine.graph_manager.GraphCacheEntry
|
from langgraph.graph.state import CompiledStateGraph
from contextlib import AsyncExitStack
from dataclasses import dataclass
@dataclass
class GraphCacheEntry:
workflow_id: str
graph: CompiledStateGraph
stack: AsyncExitStack
checkpointer: object
default_active_agent: str | None
dirty: bool = False
building: bool = False
epoch: int = 0
|
@dataclass
class GraphCacheEntry:
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0
| 9
| 4
| 8
| 0
| 9
| 4
| 8
| 0
| 0
| 0
| 0
|
328,244
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/engine/graph_manager.py
|
dingent.engine.graph_manager.GraphManager
|
from langgraph.graph import END, START, StateGraph
from collections.abc import Awaitable, Callable
from pathlib import Path
import asyncio
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.graph.state import CompiledStateGraph
from contextlib import AsyncExitStack
from langgraph_swarm import create_swarm
from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
from .graph import MainState, _normalize_name, create_assistant_graphs, get_safe_swarm
class GraphManager:
"""
Multi-workflow graph cache management:
- Caches the compiled LangGraph per workflow_id
- Lazy loading and on-demand rebuilding
- Config/workflow events mark cache entries dirty
"""
def __init__(self, app_context: AppContext):
"""
Initializes the GraphManager with an explicit AppContext.
"""
self._app_ctx = app_context
self._cache: dict[str, GraphCacheEntry] = {}
self._lock = asyncio.Lock()
self._pending_tasks: dict[str, asyncio.Task] = {}
self._global_epoch = 0
self._app_ctx.config_manager.register_on_change(self._on_config_change)
self._app_ctx.workflow_manager.register_callback(self._on_workflow_change)
self._rebuild_callbacks: list[Callable[[str, CompiledStateGraph], Awaitable[None]]] = []
self.log_manager = self._app_ctx.log_manager
def _checkpoint_path(self, workflow_id: str) -> Path:
project_root = self._app_ctx.project_root
assert project_root is not None, 'Project root must be set in AppContext.'
return project_root / CHECKPOINT_ROOT / f'{_slug(workflow_id)}.sqlite'
async def get_graph(self, workflow_id: str | None=None) -> CompiledStateGraph:
wid = workflow_id or self._resolve_active_workflow_id() or '__basic__'
entry = await self._ensure_entry(wid)
return entry.graph
async def rebuild_workflow(self, workflow_id: str) -> CompiledStateGraph:
entry = await self._rebuild_internal(workflow_id, reason='explicit_rebuild')
return entry.graph
async def invalidate_workflow(self, workflow_id: str):
async with self._lock:
if workflow_id in self._cache:
self._cache[workflow_id].dirty = True
async def invalidate_all(self):
async with self._lock:
for e in self._cache.values():
e.dirty = True
def request_rebuild(self, workflow_id: str, debounce: float=0.4):
if workflow_id in self._pending_tasks:
return
async def _job():
try:
await asyncio.sleep(debounce)
await self.rebuild_workflow(workflow_id)
finally:
self._pending_tasks.pop(workflow_id, None)
self._pending_tasks[workflow_id] = asyncio.create_task(_job())
async def close_workflow(self, workflow_id: str):
async with self._lock:
entry = self._cache.pop(workflow_id, None)
if entry:
try:
await entry.stack.aclose()
except Exception as e:
self.log_manager.log_with_context('warning', 'Close workflow graph failed: {err}', context={'err': str(e), 'wf': workflow_id})
async def close_all(self):
async with self._lock:
cache = self._cache
self._cache = {}
for entry in cache.values():
try:
await entry.stack.aclose()
except Exception as e:
self.log_manager.log_with_context('warning', 'Close workflow graph failed: {err}', context={'err': str(e), 'wf': entry.workflow_id})
def register_rebuild_callback(self, callback: Callable[[str, CompiledStateGraph], Awaitable[None]]):
"""
Registers an asynchronous callback to be executed after a graph is successfully rebuilt.
The callback will receive the workflow_id and the new graph instance.
"""
self._rebuild_callbacks.append(callback)
def _resolve_active_workflow_id(self) -> str | None:
return self._app_ctx.workflow_manager.active_workflow_id
async def _ensure_entry(self, workflow_id: str) -> GraphCacheEntry:
async with self._lock:
entry = self._cache.get(workflow_id)
if entry and (not entry.dirty):
return entry
return await self._rebuild_internal(workflow_id, reason='ensure')
async def _rebuild_internal(self, workflow_id: str, reason: str) -> GraphCacheEntry:
async with self._lock:
entry = self._cache.get(workflow_id)
if entry and entry.building:
while entry.building:
await asyncio.sleep(0.05)
if not entry.dirty:
return entry
if not entry:
entry = GraphCacheEntry(workflow_id=workflow_id, graph=None, stack=AsyncExitStack(), checkpointer=None, default_active_agent=None)
self._cache[workflow_id] = entry
entry.building = True
try:
if entry.graph is not None:
try:
await entry.stack.aclose()
except Exception as e:
self.log_manager.log_with_context('warning', 'Close old stack failed: {err}', context={'err': str(e), 'wf': workflow_id})
stack = AsyncExitStack()
if workflow_id == '__basic__':
llm = self._app_ctx.llm_manager.get_llm(**self._app_ctx.config_manager.get_settings().llm.model_dump())
graph = StateGraph(MainState)
def basic_chatbot(state: MainState):
return {'messages': [llm.invoke(state['messages'])]}
graph.add_node('basic_chatbot', basic_chatbot)
graph.add_edge(START, 'basic_chatbot')
graph.add_edge('basic_chatbot', END)
saver = InMemorySaver()
compiled = graph.compile(saver)
compiled.name = 'agent'
new_entry = GraphCacheEntry(workflow_id=workflow_id, graph=compiled, stack=stack, checkpointer=saver, default_active_agent=None, dirty=False, epoch=self._global_epoch)
async with self._lock:
self._cache[workflow_id] = new_entry
self.log_manager.log_with_context('info', 'Built basic fallback graph.', context={'workflow_id': workflow_id, 'reason': reason})
return new_entry
workflow = self._app_ctx.workflow_manager.get_workflow(workflow_id)
if not workflow:
return await self._rebuild_internal('__basic__', reason='missing_workflow')
start_node = next((n for n in workflow.nodes if n.data.isStart), None)
if not start_node:
self.log_manager.log_with_context('warning', 'Workflow missing start node; fallback basic.', context={'workflow_id': workflow_id})
return await self._rebuild_internal('__basic__', reason='no_start')
default_active_agent = _normalize_name(start_node.data.assistantName)
llm = self._app_ctx.llm_manager.get_llm(**self._app_ctx.config_manager.get_settings().llm.model_dump())
assistants_ctx = create_assistant_graphs(self._app_ctx.workflow_manager, workflow_id, llm, self.log_manager.log_with_context)
assistants = await stack.enter_async_context(assistants_ctx)
swarm = create_swarm(agents=list(assistants.values()), state_schema=MainState, default_active_agent=default_active_agent, context_schema=dict)
compiled_swarm = swarm.compile()
safe_swarm = get_safe_swarm(compiled_swarm, self.log_manager.log_with_context)
outer = StateGraph(MainState)
outer.add_node('swarm', safe_swarm)
outer.add_edge(START, 'swarm')
outer.add_edge('swarm', END)
cp_path = self._checkpoint_path(workflow_id)
cp_path.parent.mkdir(parents=True, exist_ok=True)
checkpointer = await stack.enter_async_context(AsyncSqliteSaver.from_conn_string(cp_path.as_posix()))
compiled_graph = outer.compile(checkpointer)
compiled_graph.name = 'agent'
new_entry = GraphCacheEntry(workflow_id=workflow_id, graph=compiled_graph, stack=stack, checkpointer=checkpointer, default_active_agent=default_active_agent, dirty=False, epoch=self._global_epoch)
async with self._lock:
self._cache[workflow_id] = new_entry
self.log_manager.log_with_context('info', 'Workflow graph built', context={'workflow_id': workflow_id, 'default_agent': default_active_agent, 'reason': reason})
if self._rebuild_callbacks:
self.log_manager.log_with_context('info', 'Triggering rebuild callbacks for workflow: {wf_id}', context={'wf_id': workflow_id})
for callback in self._rebuild_callbacks:
asyncio.create_task(callback(workflow_id, new_entry.graph))
return new_entry
finally:
async with self._lock:
self._cache[workflow_id].building = False
def _on_config_change(self, old_settings, new_settings):
try:
for e in self._cache.values():
e.dirty = True
self._global_epoch += 1
self.log_manager.log_with_context('info', 'Config changed -> all graphs dirty.', context={'epoch': self._global_epoch})
active_wid = self._resolve_active_workflow_id()
if active_wid and active_wid in self._cache:
self.log_manager.log_with_context('info', 'Proactively rebuilding active workflow due to global config change.', context={'workflow_id': active_wid})
self.request_rebuild(active_wid, debounce=0.2)
except Exception as e:
self.log_manager.log_with_context('error', 'Config change hook error: {err}', context={'err': str(e)})
def _on_workflow_change(self, event: str, workflow_id: str, _wf):
try:
self.log_manager.log_with_context('info', 'Workflow event: {event}', context={'event': event, 'wf': workflow_id})
if event == 'deleted':
asyncio.create_task(self.close_workflow(workflow_id))
elif event in ('updated',):
asyncio.create_task(self.invalidate_workflow(workflow_id))
if workflow_id == self._resolve_active_workflow_id():
self.log_manager.log_with_context('info', 'Proactively rebuilding active workflow because it was updated.', context={'workflow_id': workflow_id})
self.request_rebuild(workflow_id, debounce=0.2)
elif event == 'activated':
self.request_rebuild(workflow_id, debounce=0.05)
except Exception as e:
self.log_manager.log_with_context('error', 'Workflow change hook error: {err}', context={'err': str(e), 'event': event, 'wf': workflow_id})
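A hedged usage sketch of the cache lifecycle, assuming app_ctx is a fully constructed AppContext:
# Sketch: lazy build, dirty-marking, rebuild, and teardown of cached graphs.
import asyncio


async def main() -> None:
    gm = GraphManager(app_ctx)     # app_ctx: assumed AppContext instance
    graph = await gm.get_graph()   # lazily builds the active workflow's graph
    _ = graph                      # e.g. await graph.ainvoke({...}, config=...)
    await gm.invalidate_all()      # e.g. after config drift: mark entries dirty
    graph = await gm.get_graph()   # the dirty entry triggers a rebuild
    await gm.close_all()           # release checkpointers and exit stacks


asyncio.run(main())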
|
class GraphManager:
'''
Multi-workflow graph cache management:
- Caches the compiled LangGraph per workflow_id
- Lazy loading and on-demand rebuilding
- Config/workflow events mark cache entries dirty
'''
def __init__(self, app_context: AppContext):
'''
Initializes the GraphManager with an explicit AppContext.
'''
pass
def _checkpoint_path(self, workflow_id: str) -> Path:
pass
async def get_graph(self, workflow_id: str | None=None) -> CompiledStateGraph:
pass
async def rebuild_workflow(self, workflow_id: str) -> CompiledStateGraph:
pass
async def invalidate_workflow(self, workflow_id: str):
pass
async def invalidate_all(self):
pass
def request_rebuild(self, workflow_id: str, debounce: float=0.4):
pass
async def _job():
pass
async def close_workflow(self, workflow_id: str):
pass
async def close_all(self):
pass
def register_rebuild_callback(self, callback: Callable[[str, CompiledStateGraph], Awaitable[None]]):
'''
Registers an asynchronous callback to be executed after a graph is successfully rebuilt.
The callback will receive the workflow_id and the new graph instance.
'''
pass
def _resolve_active_workflow_id(self) -> str | None:
pass
async def _ensure_entry(self, workflow_id: str) -> GraphCacheEntry:
pass
async def _rebuild_internal(self, workflow_id: str, reason: str) -> GraphCacheEntry:
pass
def basic_chatbot(state: MainState):
pass
def _on_config_change(self, old_settings, new_settings):
pass
def _on_workflow_change(self, event: str, workflow_id: str, _wf):
pass
| 18
| 3
| 13
| 1
| 12
| 1
| 3
| 0.12
| 0
| 11
| 2
| 0
| 15
| 7
| 15
| 15
| 248
| 33
| 194
| 60
| 176
| 24
| 159
| 56
| 141
| 12
| 0
| 3
| 44
|
328,245
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/engine/simple_react_agent.py
|
dingent.engine.simple_react_agent.SimpleAgentState
|
from langchain_core.messages import AIMessage, BaseMessage, SystemMessage, ToolMessage
import operator
from typing import Annotated, Any, TypedDict
class SimpleAgentState(TypedDict, total=False):
messages: Annotated[list[BaseMessage], operator.add]
iteration: int
artifact_ids: list[str]  # TypedDict fields cannot carry defaults; total=False already makes this key optional
|
class SimpleAgentState(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 2
| 3
| 1
| 4
| 2
| 3
| 0
| 1
| 0
| 0
|
328,246
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.AddPluginRequest
|
from typing import Any
from pydantic import BaseModel, Field
class AddPluginRequest(BaseModel):
plugin_id: str
config: dict[str, Any] | None = None
enabled: bool = True
tools_default_enabled: bool = True
|
class AddPluginRequest(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 4
| 4
| 0
| 5
| 4
| 4
| 0
| 5
| 0
| 0
|
328,247
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.AppAdminDetail
|
from typing import Any
from pydantic import BaseModel, Field
class AppAdminDetail(BaseModel):
current_workflow: str | None = None
workflows: list[dict[str, str]] = Field(default_factory=list)
llm: dict[str, Any]
|
class AppAdminDetail(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 4
| 0
| 4
| 3
| 3
| 0
| 4
| 3
| 3
| 0
| 5
| 0
| 0
|
328,248
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.AssistantAdminDetail
|
from dingent.core.types import AssistantBase, AssistantCreate, AssistantUpdate, ConfigItemDetail, PluginUserConfig
from pydantic import BaseModel, Field
class AssistantAdminDetail(AssistantBase):
id: str
status: str = Field(..., description='Runtime status (active/inactive/error)')
plugins: list[PluginAdminDetail]
|
class AssistantAdminDetail(AssistantBase):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 4
| 0
| 4
| 2
| 3
| 0
| 4
| 2
| 3
| 0
| 6
| 0
| 0
|
328,249
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.AssistantCreateRequest
|
from dingent.core.types import AssistantBase, AssistantCreate, AssistantUpdate, ConfigItemDetail, PluginUserConfig
class AssistantCreateRequest(AssistantCreate):
pass
|
class AssistantCreateRequest(AssistantCreate):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 7
| 0
| 0
|
328,250
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.AssistantUpdateRequest
|
from dingent.core.types import AssistantBase, AssistantCreate, AssistantUpdate, ConfigItemDetail, PluginUserConfig
class AssistantUpdateRequest(AssistantUpdate):
pass
|
class AssistantUpdateRequest(AssistantUpdate):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 6
| 0
| 0
|
328,251
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.AssistantsBulkReplaceRequest
|
from dingent.core.types import AssistantBase, AssistantCreate, AssistantUpdate, ConfigItemDetail, PluginUserConfig
from pydantic import BaseModel, Field
class AssistantsBulkReplaceRequest(BaseModel):
assistants: list[AssistantCreate | AssistantUpdate | dict]
|
class AssistantsBulkReplaceRequest(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
328,252
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.MarketDownloadRequest
|
from pydantic import BaseModel, Field
class MarketDownloadRequest(BaseModel):
item_id: str
category: str
|
class MarketDownloadRequest(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.33
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 3
| 0
| 3
| 1
| 2
| 1
| 3
| 1
| 2
| 0
| 5
| 0
| 0
|
328,253
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.MarketDownloadResponse
|
from pydantic import BaseModel, Field
class MarketDownloadResponse(BaseModel):
success: bool
message: str
installed_path: str | None = None
|
class MarketDownloadResponse(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 4
| 0
| 4
| 2
| 3
| 0
| 4
| 2
| 3
| 0
| 5
| 0
| 0
|
328,254
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.PluginAdminDetail
|
from dingent.core.types import AssistantBase, AssistantCreate, AssistantUpdate, ConfigItemDetail, PluginUserConfig
from pydantic import BaseModel, Field
class PluginAdminDetail(PluginUserConfig):
name: str = Field(..., description='Plugin name')
tools: list[ToolAdminDetail] = Field(default_factory=list, description='List of tools provided by this plugin')
status: str = Field(..., description='Runtime status (active/inactive/error)')
config: list[ConfigItemDetail] = Field(default_factory=list)
|
class PluginAdminDetail(PluginUserConfig):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 5
| 4
| 0
| 5
| 5
| 4
| 0
| 6
| 0
| 0
|
328,255
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.ReplacePluginsRequest
|
from pydantic import BaseModel, Field
from dingent.core.types import AssistantBase, AssistantCreate, AssistantUpdate, ConfigItemDetail, PluginUserConfig
class ReplacePluginsRequest(BaseModel):
plugins: list[PluginUserConfig]
|
class ReplacePluginsRequest(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
328,256
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.SetActiveWorkflowRequest
|
from pydantic import BaseModel, Field
class SetActiveWorkflowRequest(BaseModel):
workflow_id: str
|
class SetActiveWorkflowRequest(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
328,257
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.ToolAdminDetail
|
from pydantic import BaseModel, Field
class ToolAdminDetail(BaseModel):
name: str
description: str
enabled: bool
|
class ToolAdminDetail(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 5
| 0
| 0
|
328,258
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/server/api/schemas.py
|
dingent.server.api.schemas.UpdatePluginConfigRequest
|
from pydantic import BaseModel, Field
from typing import Any
class UpdatePluginConfigRequest(BaseModel):
config: dict[str, Any] | None = None
enabled: bool | None = None
tools_default_enabled: bool | None = None
tools: list[str] | None = None
|
class UpdatePluginConfigRequest(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 5
| 4
| 0
| 5
| 5
| 4
| 0
| 5
| 0
| 0
|
328,259
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/database.py
|
text2sql.database.DBManager
|
from loguru import logger
from pathlib import Path
from .settings import DatabaseSettings
class DBManager:
"""
A class to manage and maintain all database instances.
It creates and caches database connection instances on demand based on the configuration file.
"""
def __init__(self, db_configs: list[DatabaseSettings]):
"""
Initializes the database manager with a list of database configurations.
"""
self._configs: dict[str, DatabaseSettings] = {config.name: config for config in db_configs}
self._connections: dict[str, Database] = {}
logger.info(f'DBManager initialized with {len(self._configs)} databases.')
async def get_connection(self, name: str) -> Database | None:
"""
Get a database instance by its name.
If there is an instance cached, return it directly.
"""
if name in self._connections:
logger.debug(f'Retrieving cached database connection: {name}')
return self._connections[name]
if name not in self._configs:
logger.error(f"Database '{name}' not found in configuration.")
raise ValueError(f"Database '{name}' not found in configuration.")
logger.debug(f"Creating a new connection for database '{name}'...")
config = self._configs[name]
schemas_relative_path = config.schemas_file
try:
if schemas_relative_path:
                schemas_path = Path(schemas_relative_path).resolve()
                instance = Database(name=config.name, uri=config.uri, schemas_path=str(schemas_path))
            else:
                instance = Database(name=config.name, uri=config.uri)
self._connections[name] = instance
logger.info(f"Database connection '{name}' created and cached.")
return instance
except Exception as e:
logger.error(f"Failed to create database connection '{name}': {e}")
return None
def list_available_databases(self) -> list[str]:
return list(self._configs.keys())
|
class DBManager:
'''
A class to manage and maintain all database instances.
It creates and caches database connection instances on demand based on the configuration file.
'''
def __init__(self, db_configs: list[DatabaseSettings]):
'''
Initializes the database manager with a list of database configurations.
'''
pass
async def get_connection(self, name: str) -> Database | None:
'''
Get a database instance by its name.
If there is an instance cached, return it directly.
'''
pass
def list_available_databases(self) -> list[str]:
pass
| 4
| 3
| 13
| 1
| 9
| 2
| 2
| 0.38
| 0
| 8
| 2
| 0
| 3
| 2
| 3
| 3
| 46
| 6
| 29
| 11
| 25
| 11
| 28
| 10
| 24
| 5
| 0
| 2
| 7
|
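A minimal usage sketch of the lazy caching behaviour documented in DBManager above. The `text2sql` package path follows this plugin's layout, while the SQLite URI and the `asyncio.run` driver are illustrative assumptions, not part of the original module:

import asyncio

from text2sql.database import DBManager
from text2sql.settings import DatabaseSettings

async def main() -> None:
    # One configured database; get_connection() builds the instance lazily.
    manager = DBManager([DatabaseSettings(name="demo", uri="sqlite:///demo.db")])
    first = await manager.get_connection("demo")   # created and cached here
    second = await manager.get_connection("demo")  # served straight from the cache
    assert first is second
    print(manager.list_available_databases())      # ['demo']

asyncio.run(main())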
328,260
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/database.py
|
text2sql.database.Database
|
import pandas as pd
from sqlmodel import Session, SQLModel, create_engine, text
from loguru import logger
from sqlalchemy import inspect as table_inspect
from sqlalchemy.engine.url import make_url
class Database:
def __init__(self, uri: str, name: str, schemas_path: str | None=None, dialect: str | None=None, **kwargs):
self.uri = uri
self.db_name = name
self.summarizer = self._get_summarizer(schemas_path)
if schemas_path:
self._tables = self._get_tables(schemas_path)
else:
self._tables = []
self.db = create_engine(uri)
url_object = make_url(uri)
self.dialect = dialect or url_object.get_dialect().name
@property
def tables(self) -> list[type[SQLModel]]:
return getattr(self, '_tables', [])
def run(self, query: str):
with Session(self.db) as session:
statement = text(query)
results = session.exec(statement).all()
df = pd.DataFrame(results, dtype=object)
return {'data': df, 'metadata': {}}
def _get_tables(self, schemas_path) -> list[type[SQLModel]]:
all_tables: list[type[SQLModel]] = find_definitions_from_file(schemas_path, base_class=SQLModel)
for table in all_tables:
try:
table_inspect(table)
except Exception as e:
raise e
return all_tables
def _get_summarizer(self, schemas_path):
def default_summarizer(data: dict[str, list[dict]]) -> str:
summary = ''
for table_name, instances in data.items():
if not instances:
continue
instance_10 = instances[:10]
summary += f'Table: {table_name}\n'
summary += f"The first 10 records retrieved: {', '.join((str(instance) for instance in instance_10))}\n"
return summary
if not schemas_path:
return default_summarizer
try:
summarizer = find_definitions_from_file(schemas_path, target_name='summarize_data')[0]
except IndexError:
logger.warning("function 'summarize_data' not found。Use default summarizer instead.")
return default_summarizer
assert callable(summarizer), f'Summarizer in {self.db_name} module is not callable'
return summarizer
def _describe(self, model: type[SQLModel]):
info = model.model_json_schema()
description = {}
description['description'] = model.__table__.info.get('description', '')
description['columns'] = {}
for key, value in info['properties'].items():
column_desc = value.get('description')
if not column_desc:
continue
description['columns'][key] = {}
is_enum_field, possible_values = is_enum_field_flexible(model, key)
if column_desc:
description['columns'][key]['description'] = column_desc
if is_enum_field:
description['columns'][key]['type'] = 'enum'
description['columns'][key]['possible_values'] = possible_values
return description
def get_tables_info(self):
if not self.tables:
return
tables_info = {}
for table in self.tables:
tables_info[table.__tablename__] = self._describe(table)
return tables_info
|
class Database:
def __init__(self, uri: str, name: str, schemas_path: str | None=None, dialect: str | None=None, **kwargs):
pass
@property
def tables(self) -> list[type[SQLModel]]:
pass
def run(self, query: str):
pass
def _get_tables(self, schemas_path) -> list[type[SQLModel]]:
pass
def _get_summarizer(self, schemas_path):
pass
def default_summarizer(data: dict[str, list[dict]]) -> str:
pass
def _describe(self, model: type[SQLModel]):
pass
def get_tables_info(self):
pass
| 10
| 0
| 11
| 1
| 10
| 0
| 3
| 0.03
| 0
| 7
| 0
| 0
| 7
| 6
| 7
| 7
| 83
| 10
| 72
| 35
| 62
| 2
| 70
| 32
| 61
| 5
| 0
| 2
| 21
|
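A short sketch of the Database.run and dialect-inference behaviour shown above, assuming an in-memory SQLite database; the query is illustrative:

from text2sql.database import Database  # package path as laid out in this template

db = Database(uri="sqlite:///:memory:", name="demo")  # no schemas_path: no table models loaded
print(db.dialect)  # 'sqlite', inferred from the URI via make_url

out = db.run("SELECT 1")
print(out["data"])      # a one-row pandas DataFrame wrapping the query result
print(out["metadata"])  # {}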
328,261
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/graph.py
|
text2sql.graph.Text2SqlAgent
|
from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import END, StateGraph
from langchain_community.utilities import SQLDatabase
from typing import Any, Literal, cast
from langchain_core.runnables import RunnableConfig
from langchain.chat_models.base import BaseChatModel
from .handlers.base import DBRequest, Handler
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_core.messages import AIMessage, HumanMessage
from loguru import logger
from collections import defaultdict
from langdetect import detect
from .types_ import SQLQueryResultPresenter, SQLState
from .database import Database
class Text2SqlAgent:
"""An agent that converts natural language to SQL queries and executes them."""
def __init__(self, llm: BaseChatModel, db: Database, sql_result_handler: Handler, vectorstore=None):
self.llm = llm
self.db = db
if vectorstore:
self.retriever = vectorstore.as_retriever(search_kwargs={'k': 50})
else:
self.retriever = None
self.sql_result_handler = sql_result_handler
self.graph = self._build_graph()
if self.retriever:
prompt_template = COMMON_SQL_GEN_PROMPT + '\n\n### Contextual Examples (for reference only):\n{similar_rows}'
else:
prompt_template = COMMON_SQL_GEN_PROMPT
prompt = ChatPromptTemplate.from_messages([('system', prompt_template), ('placeholder', '{messages}')])
self.query_gen_chain = prompt | self.llm.with_structured_output(SQLQueryResultPresenter)
def _build_graph(self):
"""Builds the LangGraph agent."""
workflow = StateGraph(SQLState)
workflow.add_node('generate_sql', self._generate_sql)
workflow.add_node('execute_sql', self._execute_sql)
workflow.set_entry_point('generate_sql')
workflow.add_edge('generate_sql', 'execute_sql')
workflow.add_conditional_edges('execute_sql', self._should_retry)
return workflow.compile()
def _should_retry(self, state: SQLState) -> Literal['generate_sql', END]:
"""Determines whether to retry SQL generation after an execution error."""
last_message = state['messages'][-1]
if isinstance(last_message, HumanMessage) and last_message.content.startswith('Error:'):
return 'generate_sql'
return END
async def _similarity_search(self, query: str) -> str:
"""Performs similarity search to find relevant schema/data examples."""
if detect(query) != 'en':
translation_prompt = TRANSLATOR_PROMPT.format(question=query)
translated_message = await self.llm.ainvoke(translation_prompt)
query = cast(str, translated_message.content)
assert self.retriever is not None
docs = self.retriever.invoke(query)
columns_data = defaultdict(list)
for doc in docs:
columns_data[doc.metadata['column']].append(doc.page_content)
text = ''
for column, values in columns_data.items():
text += f"{column}: {','.join(values[:5])}\n"
return text
async def _generate_sql(self, state: SQLState, config: RunnableConfig) -> dict[str, Any]:
"""Generates the SQL query from the user question."""
dialect = config.get('configurable', {}).get('dialect', 'mysql')
user_query = cast(str, state['messages'][-1].content)
tables_info = self.db.get_tables_info()
if not tables_info:
toolkit = SQLDatabaseToolkit(db=SQLDatabase(self.db.db), llm=self.llm)
tools = toolkit.get_tools()
sql_db_list_tables = None
sql_db_schema = None
for tool in tools:
if tool.name == 'sql_db_schema':
sql_db_schema = tool
elif tool.name == 'sql_db_list_tables':
sql_db_list_tables = tool
assert sql_db_list_tables is not None and sql_db_schema is not None, 'SQL Database tools are not available.'
tables = sql_db_list_tables.run('')
tables_info = sql_db_schema.run(tables)
tables_info = str(tables_info)
logger.debug(f'Generating SQL for user query: {user_query}; Tables info: {tables_info}')
if self.retriever:
similar_rows = await self._similarity_search(user_query)
else:
similar_rows = ''
response = await self.query_gen_chain.ainvoke({'messages': state['messages'], 'tables_info': tables_info, 'similar_rows': similar_rows, 'dialect': dialect})
response = cast(SQLQueryResultPresenter, response)
sql_query = response.sql_query
if not sql_query:
raise ValueError('LLM failed to generate a SQL query.')
return {'messages': [AIMessage(content=sql_query, role='ai')], 'result_grouping': response.result_grouping}
async def _execute_sql(self, state: SQLState, config: RunnableConfig) -> dict[str, Any]:
"""Executes the SQL query against the database."""
sql_query = str(state['messages'][-1].content)
result_grouping = state.get('result_grouping')
request: DBRequest = DBRequest(data={'query': sql_query, 'result_grouping': result_grouping})
try:
response = await self.sql_result_handler.ahandle(request)
tool_message = HumanMessage(content=response.data['str_result'], name='db_query_tool')
return {'messages': [tool_message], 'sql_result': response.data.get('result', {})}
except Exception as e:
logger.warning(f'Error executing SQL query: {e}. Rewriting')
error_message = HumanMessage(content=f'Error: Query failed. Please rewrite your query and try again. Error information: {e}')
return {'messages': [error_message]}
async def arun(self, user_query: str, recursion_limit: int=15) -> tuple[str | None, str, dict]:
"""Runs the complete text-to-SQL process."""
dialect = self.db.dialect
config = {'recursion_limit': recursion_limit, 'configurable': {'dialect': dialect}}
initial_state = {'messages': [HumanMessage(content=user_query)]}
final_state = {}
query = ''
sql_result = {}
text_result = ''
async for chunk in self.graph.astream(initial_state, config=config, stream_mode='updates'):
if 'generate_sql' in chunk:
query = chunk['generate_sql']['messages'][-1].content
if 'execute_sql' in chunk:
final_state = chunk['execute_sql']
sql_result = final_state.get('sql_result', {})
text_result = final_state['messages'][-1].content
context = f'=== Generated SQL Query ===\n{query}\n\n=== SQL Result ===\n{text_result}\n\n'
return (None, context, sql_result)
|
class Text2SqlAgent:
'''An agent that converts natural language to SQL queries and executes them.'''
def __init__(self, llm: BaseChatModel, db: Database, sql_result_handler: Handler, vectorstore=None):
pass
def _build_graph(self):
'''Builds the LangGraph agent.'''
pass
def _should_retry(self, state: SQLState) -> Literal['generate_sql', END]:
'''Determines whether to retry SQL generation after an execution error.'''
pass
async def _similarity_search(self, query: str) -> str:
'''Performs similarity search to find relevant schema/data examples.'''
pass
async def _generate_sql(self, state: SQLState, config: RunnableConfig) -> dict[str, Any]:
'''Generates the SQL query from the user question.'''
pass
async def _execute_sql(self, state: SQLState, config: RunnableConfig) -> dict[str, Any]:
'''Executes the SQL query against the database.'''
pass
async def arun(self, user_query: str, recursion_limit: int=15) -> tuple[str | None, str, dict]:
'''Runs the complete text-to-SQL process.'''
pass
| 8
| 7
| 22
| 3
| 18
| 2
| 3
| 0.1
| 0
| 13
| 5
| 0
| 7
| 6
| 7
| 7
| 164
| 27
| 126
| 63
| 108
| 12
| 97
| 52
| 89
| 7
| 0
| 3
| 23
|
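The retry loop in Text2SqlAgent above hinges on _should_retry routing back to 'generate_sql' whenever execution feeds an 'Error:'-prefixed HumanMessage into the state. A standalone sketch of that predicate's contract; the message lists below are hand-built stand-ins, not real graph state:

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph import END

def should_retry(messages: list) -> str:
    # Mirrors Text2SqlAgent._should_retry: execution errors come back as
    # HumanMessages starting with 'Error:', which loops the graph back to
    # 'generate_sql'; anything else ends the run.
    last = messages[-1]
    if isinstance(last, HumanMessage) and str(last.content).startswith("Error:"):
        return "generate_sql"
    return END

print(should_retry([HumanMessage(content="Error: no such table")]))  # generate_sql
print(should_retry([AIMessage(content="SELECT 1")]) == END)          # True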
328,262
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/handlers/base.py
|
text2sql.handlers.base.DBRequest
|
from pydantic import BaseModel, Field
from typing import Any
class DBRequest(BaseModel):
"""A structured, validated request object for the handler chain."""
data: dict[str, Any] = Field(default_factory=dict)
metadata: dict[str, Any] = Field(default_factory=dict)
|
class DBRequest(BaseModel):
'''A structured, validated request object for the handler chain.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.33
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 1
| 3
| 3
| 2
| 1
| 3
| 3
| 2
| 0
| 5
| 0
| 0
|
328,263
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/handlers/base.py
|
text2sql.handlers.base.Handler
|
from __future__ import annotations
from abc import ABC, abstractmethod
class Handler(ABC):
"""
Abstract base handler with a helper for chain construction.
"""
def __init__(self, **kwargs):
self._next_handler: Handler | None = None
def set_next(self, handler: Handler) -> Handler:
self._next_handler = handler
return handler
@abstractmethod
async def ahandle(self, request: DBRequest) -> DBRequest:
pass
async def _apass_to_next(self, request: DBRequest) -> DBRequest:
if self._next_handler:
return await self._next_handler.ahandle(request)
return request
@staticmethod
def build_chain(handlers: list[Handler]) -> Handler:
"""A helper method to link a list of handlers into a chain."""
if not handlers:
raise ValueError('Handler list cannot be empty')
head = handlers[0]
current = head
for next_handler in handlers[1:]:
current.set_next(next_handler)
current = next_handler
return head
| null | 8
| 2
| 5
| 0
| 4
| 0
| 2
| 0.22
| 1
| 3
| 1
| 3
| 4
| 1
| 5
| 25
| 34
| 6
| 23
| 12
| 15
| 5
| 21
| 10
| 15
| 3
| 4
| 1
| 8
|
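A minimal sketch of the chain-of-responsibility helper defined by Handler above; TraceHandler and the 'trace' metadata key are invented for illustration:

import asyncio

from text2sql.handlers.base import DBRequest, Handler

class TraceHandler(Handler):
    def __init__(self, tag: str):
        super().__init__()
        self.tag = tag

    async def ahandle(self, request: DBRequest) -> DBRequest:
        # Record that this link saw the request, then delegate down the chain.
        request.metadata.setdefault("trace", []).append(self.tag)
        return await self._apass_to_next(request)

async def main() -> None:
    chain = Handler.build_chain([TraceHandler("first"), TraceHandler("second")])
    result = await chain.ahandle(DBRequest())
    print(result.metadata["trace"])  # ['first', 'second']

asyncio.run(main())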
328,264
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/handlers/handler_builder.py
|
text2sql.handlers.handler_builder.ChainFactory
|
from ..database import Database
from . import result_handler
from .base import Handler
class ChainFactory:
def __init__(self):
pass
def build_result_chain(self, db: Database) -> Handler:
"""Builds the result processing chain for a given database."""
sql_run_handler = result_handler.ResultGetHandler(db)
context_builder = result_handler.ContextBuilder(db)
pydantic_handler = result_handler.ResultStructureHandler()
handlers = [sql_run_handler, pydantic_handler, context_builder]
return Handler.build_chain(handlers)
|
class ChainFactory:
def __init__(self):
pass
def build_result_chain(self, db: Database) -> Handler:
'''Builds the result processing chain for a given database.'''
pass
| 3
| 1
| 10
| 2
| 7
| 1
| 1
| 0.07
| 0
| 5
| 5
| 0
| 2
| 0
| 2
| 2
| 21
| 5
| 15
| 9
| 10
| 1
| 9
| 7
| 6
| 1
| 0
| 0
| 2
|
328,265
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/handlers/result_handler.py
|
text2sql.handlers.result_handler.ContextBuilder
|
from .base import DBRequest, Handler
import pandas as pd
class ContextBuilder(Handler):
def __init__(self, db, max_length: int=5):
super().__init__()
self.max_length = max_length
self.summarizer = db.summarizer
async def ahandle(self, request: DBRequest):
result = request.data['result']
if isinstance(result, dict) and self.summarizer is not None:
summary = self.summarizer(result)
request.data['str_result'] += summary
elif isinstance(result, pd.DataFrame):
length = len(result)
if length > self.max_length:
result = result.iloc[:self.max_length]
str_result = f'The content of the first {self.max_length} data entries is as follows:'
str_result += str(result.to_dict(orient='records'))
elif length == 0:
str_result = 'SQL query result is empty.'
else:
str_result = 'Content is as follows:'
str_result += str(result.to_dict(orient='records'))
request.data['str_result'] += str_result
else:
raise ValueError('Unsupported result type for summarization.')
return await self._apass_to_next(request)
|
class ContextBuilder(Handler):
def __init__(self, db, max_length: int=5):
pass
async def ahandle(self, request: DBRequest):
pass
| 3
| 0
| 12
| 0
| 12
| 0
| 3
| 0
| 1
| 6
| 1
| 0
| 2
| 2
| 2
| 27
| 26
| 1
| 25
| 9
| 22
| 0
| 21
| 9
| 18
| 5
| 5
| 2
| 6
|
328,266
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/handlers/result_handler.py
|
text2sql.handlers.result_handler.ResultGetHandler
|
from .base import DBRequest, Handler
from ..database import Database
class ResultGetHandler(Handler):
def __init__(self, db):
super().__init__()
self.db: Database = db
async def ahandle(self, request):
query = request.data['query']
raw_result = self.db.run(query)
request.data['result'] = raw_result['data']
request.data['str_result'] = ''
request.data['total_items'] = raw_result.get('metadata', {}).get('total_items')
return await self._apass_to_next(request)
|
class ResultGetHandler(Handler):
def __init__(self, db):
pass
async def ahandle(self, request):
pass
| 3
| 0
| 6
| 1
| 5
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 1
| 2
| 27
| 14
| 3
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 5
| 0
| 2
|
328,267
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/handlers/result_handler.py
|
text2sql.handlers.result_handler.ResultStructureHandler
|
from .base import DBRequest, Handler
from ..types_ import Group
import pandas as pd
class ResultStructureHandler(Handler):
def __init__(self):
pass
async def ahandle(self, request: DBRequest):
source_df: pd.DataFrame = request.data['result']
grouping_schema: list[Group] | None = request.data.get('result_grouping')
if not grouping_schema:
print("信息: 未找到分组规则,启用默认分组 'main_result'。")
deduplicated_df = source_df.drop_duplicates().reset_index(drop=True)
request.data['result'] = {'main_result': deduplicated_df.to_dict(orient='records')}
return await self._apass_to_next(request)
final_result_data = {}
for group in grouping_schema:
group_name, columns_in_group = (group.primary_entity_name, group.columns)
existing_columns = [col for col in columns_in_group if col in source_df.columns]
if not existing_columns:
print(f"警告: 分组 '{group_name}' 中定义的所有列都不在DataFrame中,已跳过。")
continue
group_df = source_df[existing_columns]
deduplicated_df = group_df.drop_duplicates().reset_index(drop=True)
final_result_data[group_name] = deduplicated_df.to_dict(orient='records')
request.data['result'] = final_result_data
return await self._apass_to_next(request)
|
class ResultStructureHandler(Handler):
def __init__(self):
pass
async def ahandle(self, request: DBRequest):
pass
| 3
| 0
| 19
| 4
| 11
| 4
| 3
| 0.35
| 1
| 3
| 2
| 0
| 2
| 0
| 2
| 27
| 40
| 9
| 23
| 11
| 20
| 8
| 23
| 11
| 20
| 4
| 5
| 2
| 5
|
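The grouping step in ResultStructureHandler above boils down to a per-group column selection plus deduplication. A standalone sketch of that transformation on invented sample data:

import pandas as pd

# Invented SQL result with redundant rows.
source_df = pd.DataFrame({
    "city": ["Paris", "Paris", "Lyon"],
    "year": [2020, 2021, 2020],
})

# Equivalent of one Group(primary_entity_name='Cities', columns=['city']):
group_df = source_df[["city"]].drop_duplicates().reset_index(drop=True)
print({"Cities": group_df.to_dict(orient="records")})
# {'Cities': [{'city': 'Paris'}, {'city': 'Lyon'}]}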
328,268
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/settings.py
|
text2sql.settings.DatabaseSettings
|
import os
from pydantic import BaseModel, model_validator
class DatabaseSettings(BaseModel):
name: str
uri: str = ''
uri_env: str = ''
schemas_file: str | None = None
type: str | None = None
@model_validator(mode='after')
def determine_type_from_uri(self) -> 'DatabaseSettings':
"""
Runs after all fields are populated to ensure `uri` is available.
"""
db_uri = self.uri
if not db_uri:
if self.uri_env and self.uri_env in os.environ:
db_uri = os.environ[self.uri_env]
self.uri = db_uri
if not db_uri:
raise ValueError("A database URI must be provided either via 'uri' field or 'uri_env' environment variable.")
if db_uri.startswith('postgresql'):
self.type = 'postgresql'
elif db_uri.startswith('mysql'):
self.type = 'mysql'
elif db_uri.startswith('sqlite'):
self.type = 'sqlite'
else:
raise ValueError(f"Could not determine database type from URI: '{db_uri[:30]}...'")
return self
|
class DatabaseSettings(BaseModel):
@model_validator(mode='after')
def determine_type_from_uri(self) -> 'DatabaseSettings':
'''
Runs after all fields are populated to ensure `uri` is available.
'''
pass
| 3
| 1
| 23
| 3
| 17
| 3
| 7
| 0.17
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 83
| 32
| 4
| 24
| 8
| 21
| 4
| 20
| 7
| 18
| 7
| 5
| 2
| 7
|
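A sketch of the two URI resolution paths handled by the DatabaseSettings validator above; the DEMO_DB_URI variable name is invented for illustration:

import os

from text2sql.settings import DatabaseSettings

# Direct URI: the dialect prefix drives the inferred `type`.
direct = DatabaseSettings(name="direct", uri="postgresql://localhost/db")
print(direct.type)  # 'postgresql'

# Environment fallback: with `uri` empty, `uri_env` names the variable to read.
os.environ["DEMO_DB_URI"] = "sqlite:///demo.db"
indirect = DatabaseSettings(name="indirect", uri_env="DEMO_DB_URI")
print(indirect.type, indirect.uri)  # sqlite sqlite:///demo.db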
328,269
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/settings.py
|
text2sql.settings.Settings
|
from pydantic_settings import BaseSettings, SettingsConfigDict
from typing import Any
class Settings(BaseSettings):
model_config = SettingsConfigDict(env_nested_delimiter='__')
database: DatabaseSettings
llm: dict[str, str | Any]
|
class Settings(BaseSettings):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 1
| 6
| 2
| 5
| 0
| 4
| 2
| 3
| 0
| 1
| 0
| 0
|
328,270
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/tool.py
|
text2sql.tool.Text2SqlTool
|
from .graph import Text2SqlAgent
from typing import Annotated
from .database import Database
from .settings import Settings
from pydantic import Field
from langchain.chat_models import init_chat_model
from .handlers.handler_builder import ChainFactory
class Text2SqlTool:
"""A tool that uses the Text2SqlAgent to answer questions from a database."""
agent: Text2SqlAgent
def __init__(self, settings: Settings, **kwargs):
super().__init__(**kwargs)
db = Database(**settings.database.model_dump())
factory = ChainFactory()
result_handler = factory.build_result_chain(db)
llm = init_chat_model(**settings.llm)
self.agent = Text2SqlAgent(llm=llm, db=db, sql_result_handler=result_handler)
async def tool_run(self, question: Annotated[str, Field(description="sub question of user's original question")]) -> dict:
"""Use the tool."""
_, context, tool_outputs = await self.agent.arun(user_query=question, recursion_limit=15)
tool_outputs = format_sql_tool_output(tool_outputs)
return {'context': context, 'tool_outputs': tool_outputs}
|
class Text2SqlTool:
'''A tool that uses the Text2SqlAgent to answer questions from a database.'''
def __init__(self, settings: Settings, **kwargs):
pass
async def tool_run(self, question: Annotated[str, Field(description="sub question of user's original question")]) -> dict:
'''Use the tool.'''
pass
| 3
| 2
| 12
| 1
| 11
| 1
| 1
| 0.08
| 0
| 8
| 4
| 0
| 2
| 0
| 2
| 2
| 30
| 4
| 24
| 15
| 14
| 2
| 13
| 8
| 10
| 1
| 0
| 0
| 2
|
328,271
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/types_.py
|
text2sql.types_.Group
|
from pydantic import BaseModel, Field
class Group(BaseModel):
primary_entity_name: str
columns: list[str]
|
class Group(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 5
| 0
| 0
|
328,272
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/types_.py
|
text2sql.types_.SQLQueryResultPresenter
|
from pydantic import BaseModel, Field
class SQLQueryResultPresenter(BaseModel):
"""
Generates an SQL query and structures its results into a few, intuitive groups for presentation.
"""
sql_query: str = Field(description="The SQL query that answers the user's question. All calculated or aggregated columns MUST have a clear alias using 'AS'.")
result_grouping: list[Group] = Field(description="Organize result columns into a minimal number of logical groups. A group's title should be an intuitive, human-readable description of its content, not just a raw database table name.")
|
class SQLQueryResultPresenter(BaseModel):
'''
Generates an SQL query and structures its results into a few, intuitive groups for presentation.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.38
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 13
| 2
| 8
| 3
| 7
| 3
| 3
| 3
| 2
| 0
| 5
| 0
| 0
|
328,273
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/basic/{{ cookiecutter.project_slug }}/plugins/text2sql/types_.py
|
text2sql.types_.SQLState
|
from langgraph.graph.message import MessagesState
class SQLState(MessagesState):
"""
Represents the state of the SQL generation graph.
Attributes:
sql_result: A list to store the results of the SQL query.
"""
sql_result: list[dict]
result_grouping: list[Group] | None
|
class SQLState(MessagesState):
'''
Represents the state of the SQL generation graph.
Attributes:
sql_result: A list to store the results of the SQL query.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1.67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 2
| 3
| 1
| 2
| 5
| 3
| 1
| 2
| 0
| 1
| 0
| 0
|
328,274
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/plugins/{{ cookiecutter.project_slug }}/settings.py
|
{{ cookiecutter.project_slug }}.settings.Settings
|
from dingent.engine.plugins import ToolBaseSettings
class Settings(ToolBaseSettings):
greeterName: str
|
class Settings(ToolBaseSettings):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 1
| 0
| 0
|
328,275
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/templates/plugins/{{ cookiecutter.project_slug }}/tool.py
|
{{ cookiecutter.project_slug }}.tool.Greeter
|
from dingent.engine.plugins import BaseTool
from .settings import Settings
from typing import Annotated
from pydantic import Field
from dingent.engine.plugins.types import TablePayload, ToolOutput
class Greeter(BaseTool):
"""A tool that uses the Text2SqlAgent to answer questions from a database."""
def __init__(self, config: Settings, **kwargs):
super().__init__(config, **kwargs)
async def tool_run(self, target: Annotated[str, Field(description='Say hello to this person.')]) -> dict:
"""Use the tool."""
artifact_ids = []
artifact_ids.append(self.resource_manager.register(ToolOutput(type='greeter', payload=TablePayload(columns=['greeter', 'target'], rows=[{'greeter': self.name, 'target': target}]))))
return {'context': f'{self.name} is saying hello to {target}', 'artifact_ids': artifact_ids, 'source': 'greeter'}
|
class Greeter(BaseTool):
    '''A simple example tool that says hello to a given target.'''
def __init__(self, config: Settings, **kwargs):
pass
async def tool_run(self, target: Annotated[str, Field(description='Say hello to this person.')]) -> dict:
'''Use the tool.'''
pass
| 3
| 2
| 8
| 0
| 8
| 1
| 1
| 0.13
| 1
| 5
| 1
| 0
| 2
| 0
| 2
| 2
| 20
| 2
| 16
| 11
| 6
| 2
| 7
| 4
| 4
| 1
| 1
| 0
| 2
|
328,276
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/benchmarks/benchmarks.py
|
benchmarks.benchmarks.DielectricPlanarBenchmark
|
from tests.data import DIPOLE_GRO, DIPOLE_ITP
import MDAnalysis as mda
from maicos import DielectricPlanar
class DielectricPlanarBenchmark:
"""Benchmark the DielectricPlanar class."""
def setup(self):
"""Setup the analysis objects."""
self.dipole1 = mda.Universe(DIPOLE_ITP, DIPOLE_GRO, topology_format='itp').atoms
self.dielectric = DielectricPlanar(self.dipole1)
self.dielectric._prepare()
def time_single_dielectric_planar(self):
"""Benchmark of a complete run over a single frame."""
self.dielectric.run()
|
class DielectricPlanarBenchmark:
'''Benchmark the DielectricPlanar class.'''
def setup(self):
'''Setup the analysis objects.'''
pass
def time_single_dielectric_planar(self):
'''Benchmark of a complete run over a single frame.'''
pass
| 3
| 3
| 4
| 0
| 3
| 1
| 1
| 0.43
| 0
| 0
| 0
| 0
| 2
| 2
| 2
| 2
| 12
| 2
| 7
| 5
| 4
| 3
| 7
| 5
| 4
| 1
| 0
| 0
| 2
|
328,277
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/developer/check_changelog.py
|
check_changelog.ChangelogError
|
class ChangelogError(Exception):
"""Changelog error."""
pass
|
class ChangelogError(Exception):
'''Changelog error.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 4
| 1
| 2
| 1
| 1
| 1
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
328,278
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/_version.py
|
maicos._version.VersioneerConfig
|
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
VCS: str
style: str
tag_prefix: str
parentdir_prefix: str
versionfile_source: str
verbose: bool
|
class VersioneerConfig:
'''Container for Versioneer configuration parameters.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.14
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 1
| 7
| 1
| 6
| 1
| 7
| 1
| 6
| 0
| 0
| 0
| 0
|
328,279
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/base.py
|
maicos.core.base.AnalysisBase
|
from datetime import datetime
import numpy as np
from MDAnalysis.analysis.base import Results
import numbers
from ..lib.math import center_cluster, new_mean, new_variance
from tqdm.contrib.logging import logging_redirect_tqdm
import MDAnalysis as mda
import MDAnalysis.analysis.base
from ..lib.util import atomgroup_header, correlation_analysis, get_center, get_cli_input, get_module_input_str, maicos_banner, render_docs
from typing_extensions import Self
import logging
@render_docs
class AnalysisBase(_Runner, MDAnalysis.analysis.base.AnalysisBase):
"""Base class derived from MDAnalysis for defining multi-frame analysis.
The class is designed as a template for creating multi-frame analyses. This class
will automatically take care of setting up the trajectory reader for iterating, and
it offers to show a progress meter. Computed results are stored inside the
:attr:`results` attribute. To define a new analysis, `AnalysisBase` needs to be
subclassed and :meth:`_single_frame` must be defined. It is also possible to define
:meth:`_prepare` and :meth:`_conclude` for pre- and post-processing. All results
should be stored as attributes of the :class:`MDAnalysis.analysis.base.Results`
container.
During the analysis, the correlation time of an observable can be estimated to
ensure that calculated errors are reasonable. For this, the :meth:`_single_frame`
method has to return a single :obj:`float`. For details on the computation of the
correlation and its further analysis refer to
:func:`maicos.lib.util.correlation_analysis`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${BASE_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${ATOMGROUP_PARAMETER}
_universe : MDAnalysis.core.universe.Universe
        The Universe the AtomGroup belongs to
    _trajectory : MDAnalysis.coordinates.base.ReaderBase
        The trajectory the AtomGroup belongs to
times : numpy.ndarray
array of Timestep times. Only exists after calling
:meth:`AnalysisBase.run`
frames : numpy.ndarray
array of Timestep frame indices. Only exists after calling
:meth:`AnalysisBase.run`
_frame_index : int
index of the frame currently analysed
_index : int
Number of frames already analysed (same as _frame_index + 1)
results : MDAnalysis.analysis.base.Results
results of calculation are stored after call to :meth:`AnalysisBase.run`
_obs : MDAnalysis.analysis.base.Results
Observables of the current frame
_obs.box_center : numpy.ndarray
Center of the simulation cell of the current frame
sums : MDAnalysis.analysis.base.Results
Sum of the observables across frames. Keys are the same as :attr:`_obs`.
means : MDAnalysis.analysis.base.Results
Means of the observables. Keys are the same as :attr:`_obs`.
sems : MDAnalysis.analysis.base.Results
Standard errors of the mean of the observables. Keys are the same as
:attr:`_obs`
corrtime : float
The correlation time of the analysed data. For details on how this is
calculated see :func:`maicos.lib.util.correlation_analysis`.
Raises
------
ValueError
If any of the provided AtomGroups (`atomgroup` or `refgroup`) does
not contain any atoms.
Example
-------
To write your own analysis module you can use the example given below. As with all
MAICoS modules, this inherits from the :class:`AnalysisBase
<maicos.core.base.AnalysisBase>` class.
    The example will calculate the average box volume and store the result within the
    ``results`` object of the class.
>>> import logging
>>> from typing import Optional
>>> import MDAnalysis as mda
>>> import numpy as np
>>> from maicos.core import AnalysisBase
>>> from maicos.lib.util import render_docs
Adding logging messages to your code makes debugging easier.
Due to the similar structure of all MAICoS modules you can render the parameters
using the :func:`maicos.lib.util.render_docs` decorator. The decorator will replace
special keywords with a leading ``$`` with the actual docstring as defined in
:attr:`maicos.lib.util.DOC_DICT`.
>>> @render_docs
... class NewAnalysis(AnalysisBase):
    ... '''Analysis class calculating the average box volume.'''
...
... def __init__(
... self,
... atomgroup: mda.AtomGroup,
... concfreq: int = 0,
... temperature: float = 300,
... output: str = "outfile.dat",
... ):
... super().__init__(
... atomgroup=atomgroup,
... refgroup=None,
... unwrap=False,
... pack=True,
... jitter=0.0,
... wrap_compound="atoms",
... concfreq=concfreq,
... )
...
... self.temperature = temperature
... self.output = output
...
... def _prepare(self):
... '''Set things up before the analysis loop begins.'''
... # self.atomgroup refers to the provided `atomgroup`
... # self._universe refers to full universe of given `atomgroup`
... self.volume = 0
...
... def _single_frame(self):
... '''Calculate data from a single frame of trajectory.
...
... Don't worry about normalising, just deal with a single frame.
... '''
... # Current frame index: self._frame_index
... # Current timestep object: self._ts
...
... volume = self._ts.volume
... self.volume += volume
...
    ... # Each module should return a characteristic scalar which is used
... # by MAICoS to estimate correlations of an Analysis.
... return volume
...
... def _conclude(self):
... '''Finalise the results you've gathered.
...
... Called at the end of the run() method to finish everything up.
... '''
... self.results.volume = self.volume / self.n_frames
... logging.info(
... f"Average volume of the simulation box {self.results.volume:.2f} ų"
... )
...
... def save(self) -> None:
... '''Save results of analysis to file specified by ``output``.
...
... Called at the end of the run() method after _conclude.
... '''
... self.savetxt(
... self.output, np.array([self.results.volume]), columns="volume / ų"
... )
Afterwards the new analysis can be run like this
>>> import MDAnalysis as mda
>>> from MDAnalysisTests.datafiles import TPR, XTC
>>> u = mda.Universe(TPR, XTC)
>>> na = NewAnalysis(u.atoms)
>>> _ = na.run(start=0, stop=10)
>>> print(round(na.results.volume, 2))
362631.65
Results can also be accessed by key
>>> print(round(na.results["volume"], 2))
362631.65
"""
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: None | mda.AtomGroup, jitter: float, concfreq: int, wrap_compound: str) -> None:
self.atomgroup = atomgroup
if self.atomgroup.n_atoms == 0:
raise ValueError('The provided `atomgroup` does not contain any atoms.')
self._universe = atomgroup.universe
self._trajectory = self._universe.trajectory
self.refgroup = refgroup
self.unwrap = unwrap
self.pack = pack
self.jitter = jitter
self.concfreq = concfreq
if wrap_compound not in ['atoms', 'group', 'residues', 'segments', 'molecules', 'fragments']:
raise ValueError(f"Unrecognized `wrap_compound` definition {wrap_compound}: \nPlease use one of 'atoms', 'group', 'residues', 'segments', 'molecules', or 'fragments'.")
self.wrap_compound = wrap_compound
if self.unwrap and self._universe.dimensions is None:
raise ValueError("Universe does not have `dimensions` and can't be unwrapped!")
if self.pack and self._universe.dimensions is None:
raise ValueError("Universe does not have `dimensions` and can't be packed!")
if self.unwrap and self.wrap_compound == 'atoms':
logging.warning("Unwrapping in combination with the `wrap_compound='atoms` is superfluous. `unwrap` will be set to `False`.")
self.unwrap = False
if self.refgroup is not None:
if self.refgroup.n_atoms == 0:
raise ValueError('The provided `refgroup` does not contain any atoms.')
if not self.pack:
                raise ValueError('Disabling `pack` with a `refgroup` is not allowed. Shifting atoms, possibly outside of the primary cell, without packing them back may lead to severe problems during the analysis!')
self.module_has_save = callable(getattr(self.__class__, 'save', None))
super().__init__(trajectory=self._trajectory)
@property
def box_lengths(self) -> np.ndarray:
"""Lengths of the simulation cell vectors."""
return self._universe.dimensions[:3].astype(np.float64)
@property
def box_center(self) -> np.ndarray:
"""Center of the simulation cell."""
return self.box_lengths / 2
def _prepare(self) -> None:
"""Set things up before the analysis loop begins."""
pass
def _call_prepare(self) -> None:
"""Base method wrapping all _prepare logic into a single call."""
if self.refgroup is not None:
if not hasattr(self.refgroup, 'masses') or np.sum(self.refgroup.masses) == 0:
logging.warning('No masses available in refgroup, falling back to center of geometry')
self.ref_weights = np.ones_like(self.refgroup.atoms)
else:
self.ref_weights = self.refgroup.masses
if hasattr(self, '_bin_width'):
if not isinstance(self._bin_width, numbers.Real):
raise TypeError(f"Binwidth must be a real number but is of type '{type(self._bin_width).__name__}'.")
if self._bin_width <= 0:
raise ValueError(f'Binwidth must be a positive number but is {self._bin_width}.')
self._prepare()
if self.refgroup is not None:
logging.info(f'Coordinates are relative to the center of mass of reference atomgroup {atomgroup_header(self.refgroup)}.')
else:
logging.info('Coordinates are relative to the center of the simulation box.')
logging.info(f'Considered atomgroup {atomgroup_header(self.atomgroup)}.')
if hasattr(self, 'n_bins'):
logging.info(f'Using {self.n_bins} bins.')
self.timeseries = np.zeros(self.n_frames)
logging.info(f'Analysing {self.n_frames} trajectory frames.')
logging.debug(f'Module input: {get_module_input_str(self)}')
def _single_frame(self) -> None | float:
"""Calculate data from a single frame of trajectory.
Don't worry about normalising, just deal with a single frame.
"""
raise NotImplementedError('Only implemented in child classes')
def _call_single_frame(self, ts, current_frame_index) -> None:
"""Base method wrapping all single_frame logic into a single call."""
compatible_types = [np.ndarray, float, int, list, np.float32, np.float64, np.int32, np.int64]
self._frame_index = current_frame_index
self._index = self._frame_index + 1
self._ts = ts
self.frames[current_frame_index] = ts.frame
self.times[current_frame_index] = ts.time
if self.unwrap:
self._universe.atoms.unwrap(compound=self.wrap_compound)
if self.refgroup is not None:
com_refgroup = center_cluster(self.refgroup, self.ref_weights)
t = self.box_center - com_refgroup
self._universe.atoms.translate(t)
if self.pack and self._universe.dimensions is not None:
self._universe.atoms.wrap(compound=self.wrap_compound)
if self.jitter != 0.0:
ts.positions += np.random.random(size=(len(ts.positions), 3)) * self.jitter
self._obs = Results()
self.timeseries[current_frame_index] = self._single_frame()
try:
for key in self._obs:
if type(self._obs[key]) is list:
self._obs[key] = np.array(self._obs[key])
old_mean = self.means[key]
old_var = self.sems[key] ** 2 * (self._index - 1)
self.means[key] = new_mean(self.means[key], self._obs[key], self._index)
self.sems[key] = np.sqrt(new_variance(old_var, old_mean, self.means[key], self._obs[key], self._index) / self._index)
self.sums[key] += self._obs[key]
except AttributeError as err:
with logging_redirect_tqdm():
logging.debug('Initializing error estimation.')
self.sums = self._obs.copy()
self.means = self._obs.copy()
self.sems = Results()
for key in self._obs:
if type(self._obs[key]) not in compatible_types:
                        raise TypeError(f'Observable {key} has an incompatible type.') from err
self.sems[key] = np.zeros(np.shape(self._obs[key]))
if self.concfreq and self._index % self.concfreq == 0 and (self._frame_index > 0):
self._conclude()
if self.module_has_save:
self.save()
def _conclude(self) -> None:
"""Finalize the results you've gathered.
Called at the end of the :meth:`run` method to finish everything up.
"""
pass
def _call_conclude(self) -> None:
"""Base method wrapping all _conclude logic into a single call."""
self.corrtime = correlation_analysis(self.timeseries)
self._conclude()
if self.concfreq and self.module_has_save:
self.save()
@render_docs
def run(self, start: int | None=None, stop: int | None=None, step: int | None=None, frames: int | None=None, verbose: bool | None=None, progressbar_kwargs: dict | None=None) -> Self:
"""${RUN_METHOD_DESCRIPTION}"""
return _Runner._run(self, analysis_instances=(self,), start=start, stop=stop, step=step, frames=frames, verbose=verbose, progressbar_kwargs=progressbar_kwargs)
def savetxt(self, fname: str, X: np.ndarray, columns: list[str] | None=None) -> None:
"""Save to text.
An extension of the numpy savetxt function. Adds the command line input to the
header and checks for a doubled defined filesuffix.
Return a header for the text file to save the data to. This method builds a
generic header that can be used by any MAICoS module. It is called by the save
method of each module.
The information it collects is:
- timestamp of the analysis
- name of the module
- version of MAICoS that was used
- command line arguments that were used to run the module
- module call including the default arguments
- number of frames that were analyzed
- atomgroup that was analyzed
- output messages from modules and base classes (if they exist)
"""
fname = str(fname)
current_time = datetime.now().strftime('%a, %b %d %Y at %H:%M:%S ')
module_name = self.__class__.__name__
messages_list = []
for cls in self.__class__.mro()[-3::-1]:
if hasattr(cls, 'OUTPUT') and cls.OUTPUT not in messages_list:
messages_list.append(cls.OUTPUT)
messages = '\n'.join(messages_list)
atomgroups = f' (grp) {atomgroup_header(self.atomgroup)}\n'
if hasattr(self, 'refgroup') and self.refgroup is not None:
atomgroups += f' (ref) {atomgroup_header(self.refgroup)}\n'
module_input = get_module_input_str(self)
header = f'This file was generated by {module_name} on {current_time}\n\n{module_name} is part of MAICoS v{__version__}\n\nCommand line: {get_cli_input()}\nModule input: {module_input}\n\nStatistics over {self._index} frames\n\nConsidered atomgroups:\n{atomgroups}\n{messages}\n\n'
if columns is not None:
header += '|'.join([f'{i:^23}' for i in columns])[3:]
fname = '{}{}'.format(fname, (not fname.endswith('.dat')) * '.dat')
np.savetxt(fname, X, header=header, fmt='% .14e ', encoding='utf8')
|
@render_docs
class AnalysisBase(_Runner, MDAnalysis.analysis.base.AnalysisBase):
'''Base class derived from MDAnalysis for defining multi-frame analysis.
The class is designed as a template for creating multi-frame analyses. This class
will automatically take care of setting up the trajectory reader for iterating, and
it offers to show a progress meter. Computed results are stored inside the
:attr:`results` attribute. To define a new analysis, `AnalysisBase` needs to be
subclassed and :meth:`_single_frame` must be defined. It is also possible to define
:meth:`_prepare` and :meth:`_conclude` for pre- and post-processing. All results
should be stored as attributes of the :class:`MDAnalysis.analysis.base.Results`
container.
During the analysis, the correlation time of an observable can be estimated to
ensure that calculated errors are reasonable. For this, the :meth:`_single_frame`
method has to return a single :obj:`float`. For details on the computation of the
correlation and its further analysis refer to
:func:`maicos.lib.util.correlation_analysis`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${BASE_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${ATOMGROUP_PARAMETER}
_universe : MDAnalysis.core.universe.Universe
        The Universe the AtomGroup belongs to
    _trajectory : MDAnalysis.coordinates.base.ReaderBase
        The trajectory the AtomGroup belongs to
times : numpy.ndarray
array of Timestep times. Only exists after calling
:meth:`AnalysisBase.run`
frames : numpy.ndarray
array of Timestep frame indices. Only exists after calling
:meth:`AnalysisBase.run`
_frame_index : int
index of the frame currently analysed
_index : int
Number of frames already analysed (same as _frame_index + 1)
results : MDAnalysis.analysis.base.Results
results of calculation are stored after call to :meth:`AnalysisBase.run`
_obs : MDAnalysis.analysis.base.Results
Observables of the current frame
_obs.box_center : numpy.ndarray
Center of the simulation cell of the current frame
sums : MDAnalysis.analysis.base.Results
Sum of the observables across frames. Keys are the same as :attr:`_obs`.
means : MDAnalysis.analysis.base.Results
Means of the observables. Keys are the same as :attr:`_obs`.
sems : MDAnalysis.analysis.base.Results
Standard errors of the mean of the observables. Keys are the same as
:attr:`_obs`
corrtime : float
The correlation time of the analysed data. For details on how this is
calculated see :func:`maicos.lib.util.correlation_analysis`.
Raises
------
ValueError
If any of the provided AtomGroups (`atomgroup` or `refgroup`) does
not contain any atoms.
Example
-------
To write your own analysis module you can use the example given below. As with all
MAICoS modules, this inherits from the :class:`AnalysisBase
<maicos.core.base.AnalysisBase>` class.
    The example will calculate the average box volume and store the result within the
    ``results`` object of the class.
>>> import logging
>>> from typing import Optional
>>> import MDAnalysis as mda
>>> import numpy as np
>>> from maicos.core import AnalysisBase
>>> from maicos.lib.util import render_docs
Adding logging messages to your code makes debugging easier.
Due to the similar structure of all MAICoS modules you can render the parameters
using the :func:`maicos.lib.util.render_docs` decorator. The decorator will replace
special keywords with a leading ``$`` with the actual docstring as defined in
:attr:`maicos.lib.util.DOC_DICT`.
>>> @render_docs
... class NewAnalysis(AnalysisBase):
    ... '''Analysis class calculating the average box volume.'''
...
... def __init__(
... self,
... atomgroup: mda.AtomGroup,
... concfreq: int = 0,
... temperature: float = 300,
... output: str = "outfile.dat",
... ):
... super().__init__(
... atomgroup=atomgroup,
... refgroup=None,
... unwrap=False,
... pack=True,
... jitter=0.0,
... wrap_compound="atoms",
... concfreq=concfreq,
... )
...
... self.temperature = temperature
... self.output = output
...
... def _prepare(self):
... '''Set things up before the analysis loop begins.'''
... # self.atomgroup refers to the provided `atomgroup`
... # self._universe refers to full universe of given `atomgroup`
... self.volume = 0
...
... def _single_frame(self):
... '''Calculate data from a single frame of trajectory.
...
... Don't worry about normalising, just deal with a single frame.
... '''
... # Current frame index: self._frame_index
... # Current timestep object: self._ts
...
... volume = self._ts.volume
... self.volume += volume
...
    ... # Each module should return a characteristic scalar which is used
... # by MAICoS to estimate correlations of an Analysis.
... return volume
...
... def _conclude(self):
... '''Finalise the results you've gathered.
...
... Called at the end of the run() method to finish everything up.
... '''
... self.results.volume = self.volume / self.n_frames
... logging.info(
... f"Average volume of the simulation box {self.results.volume:.2f} ų"
... )
...
... def save(self) -> None:
... '''Save results of analysis to file specified by ``output``.
...
... Called at the end of the run() method after _conclude.
... '''
... self.savetxt(
... self.output, np.array([self.results.volume]), columns="volume / ų"
... )
Afterwards the new analysis can be run like this
>>> import MDAnalysis as mda
>>> from MDAnalysisTests.datafiles import TPR, XTC
>>> u = mda.Universe(TPR, XTC)
>>> na = NewAnalysis(u.atoms)
>>> _ = na.run(start=0, stop=10)
>>> print(round(na.results.volume, 2))
362631.65
Results can also be accessed by key
>>> print(round(na.results["volume"], 2))
362631.65
'''
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: None | mda.AtomGroup, jitter: float, concfreq: int, wrap_compound: str) -> None:
pass
@property
def box_lengths(self) -> np.ndarray:
'''Lengths of the simulation cell vectors.'''
pass
@property
def box_center(self) -> np.ndarray:
'''Center of the simulation cell.'''
pass
def _prepare(self) -> None:
'''Set things up before the analysis loop begins.'''
pass
def _call_prepare(self) -> None:
'''Base method wrapping all _prepare logic into a single call.'''
pass
def _single_frame(self) -> None | float:
'''Calculate data from a single frame of trajectory.
Don't worry about normalising, just deal with a single frame.
'''
pass
def _call_single_frame(self, ts, current_frame_index) -> None:
'''Base method wrapping all single_frame logic into a single call.'''
pass
def _conclude(self) -> None:
'''Finalize the results you've gathered.
Called at the end of the :meth:`run` method to finish everything up.
'''
pass
def _call_conclude(self) -> None:
'''Base method wrapping all _conclude logic into a single call.'''
pass
@render_docs
def run(self, start: int | None=None, stop: int | None=None, step: int | None=None, frames: int | None=None, verbose: bool | None=None, progressbar_kwargs: dict | None=None) -> Self:
'''${RUN_METHOD_DESCRIPTION}'''
pass
def savetxt(self, fname: str, X: np.ndarray, columns: list[str] | None=None) -> None:
'''Save to text.
An extension of the numpy savetxt function. Adds the command line input to the
header and checks for a doubled defined filesuffix.
Return a header for the text file to save the data to. This method builds a
generic header that can be used by any MAICoS module. It is called by the save
method of each module.
The information it collects is:
- timestamp of the analysis
- name of the module
- version of MAICoS that was used
- command line arguments that were used to run the module
- module call including the default arguments
- number of frames that were analyzed
- atomgroup that was analyzed
- output messages from modules and base classes (if they exist)
'''
pass
| 16
| 11
| 28
| 3
| 21
| 5
| 4
| 0.9
| 2
| 14
| 0
| 24
| 11
| 20
| 11
| 12
| 498
| 71
| 231
| 69
| 197
| 207
| 127
| 46
| 115
| 12
| 1
| 3
| 42
|
328,280
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/base.py
|
maicos.core.base.AnalysisCollection
|
from ..lib.util import atomgroup_header, correlation_analysis, get_center, get_cli_input, get_module_input_str, maicos_banner, render_docs
import warnings
from typing_extensions import Self
class AnalysisCollection(_Runner):
"""Running a collection of analysis classes on the same single trajectory.
.. warning::
``AnalysisCollection`` is still experimental. You should not use it for anything
important.
    An analysis with ``AnalysisCollection`` can lead to a speedup compared to running
the individual analyses, since the trajectory loop is performed only once. The class
requires that each analysis is a child of :class:`AnalysisBase`. Additionally, the
trajectory of all ``analysis_instances`` must be the same. It is ensured that all
analysis instances use the *same original* timestep and not an altered one from a
previous analysis instance.
Parameters
----------
*analysis_instances : AnalysisBase
Arbitrary number of analysis instances to be run on the same trajectory.
Raises
------
ValueError
If the provided ``analysis_instances`` do not work on the same trajectory.
TypeError
If one of the ``analysis_instances`` is not a child of :class:`AnalysisBase`.
Example
-------
>>> import MDAnalysis as mda
>>> from maicos import DensityPlanar
>>> from maicos.core import AnalysisCollection
>>> from MDAnalysisTests.datafiles import TPR, XTC
>>> u = mda.Universe(TPR, XTC)
Select atoms
>>> ag_O = u.select_atoms("name O")
>>> ag_H = u.select_atoms("name H")
Create the individual analysis instances
>>> dplan_O = DensityPlanar(ag_O)
>>> dplan_H = DensityPlanar(ag_H)
Create a collection for common trajectory
>>> collection = AnalysisCollection(dplan_O, dplan_H)
Run the collected analysis
>>> _ = collection.run(start=0, stop=100, step=10)
Results are stored in the individual instances; see :class:`AnalysisBase` on how to
access them. You can also save all results of the analysis within one call:
>>> collection.save()
"""
def __init__(self, *analysis_instances: AnalysisBase) -> None:
warnings.warn('`AnalysisCollection` is still experimental. You should not use it for anything important.', stacklevel=2)
for analysis_object in analysis_instances:
if analysis_instances[0]._trajectory != analysis_object._trajectory:
raise ValueError('`analysis_instances` do not have the same trajectory.')
if not isinstance(analysis_object, AnalysisBase):
raise TypeError(f'Analysis object {analysis_object} is not a child of `AnalysisBase`.')
self._analysis_instances = analysis_instances
@render_docs
def run(self, start: int | None=None, stop: int | None=None, step: int | None=None, frames: int | None=None, verbose: bool | None=None, progressbar_kwargs: dict | None=None) -> Self:
"""${RUN_METHOD_DESCRIPTION}"""
return _Runner._run(self, analysis_instances=self._analysis_instances, start=start, stop=stop, step=step, frames=frames, verbose=verbose, progressbar_kwargs=progressbar_kwargs)
def save(self) -> None:
"""Save results of all ``analysis_instances`` to disk.
This method calls the :meth:`save` method of all ``analysis_instances`` if
available. If an instance has no :meth:`save` method, a warning is issued for
that instance.
"""
for analysis_object in self._analysis_instances:
if analysis_object.module_has_save:
analysis_object.save()
else:
warnings.warn(f'`{analysis_object}` has no save() method. Analysis results of this instance can not be written to disk.', stacklevel=2)
|
class AnalysisCollection(_Runner):
'''Running a collection of analysis classes on the same single trajectory.
.. warning::
``AnalysisCollection`` is still experimental. You should not use it for anything
important.
An analysis with ``AnalysisCollection`` can lead to a speedup compared to running
the individual analyses, since the trajectory loop is performed only once. The class
requires that each analysis is a child of :class:`AnalysisBase`. Additionally, the
trajectory of all ``analysis_instances`` must be the same. It is ensured that all
analysis instances use the *same original* timestep and not an altered one from a
previous analysis instance.
Parameters
----------
*analysis_instances : AnalysisBase
Arbitrary number of analysis instances to be run on the same trajectory.
Raises
------
ValueError
If the provided ``analysis_instances`` do not work on the same trajectory.
TypeError
If one of the ``analysis_instances`` is not a child of :class:`AnalysisBase`.
Example
-------
>>> import MDAnalysis as mda
>>> from maicos import DensityPlanar
>>> from maicos.core import AnalysisCollection
>>> from MDAnalysisTests.datafiles import TPR, XTC
>>> u = mda.Universe(TPR, XTC)
Select atoms
>>> ag_O = u.select_atoms("name O")
>>> ag_H = u.select_atoms("name H")
Create the individual analysis instances
>>> dplan_O = DensityPlanar(ag_O)
>>> dplan_H = DensityPlanar(ag_H)
Create a collection for common trajectory
>>> collection = AnalysisCollection(dplan_O, dplan_H)
Run the collected analysis
>>> _ = collection.run(start=0, stop=100, step=10)
Results are stored in the individual instances; see :class:`AnalysisBase` on how to
access them. You can also save all results of the analysis within one call:
>>> collection.save()
'''
def __init__(self, *analysis_instances: AnalysisBase) -> None:
pass
@render_docs
def run(self, start: int | None=None, stop: int | None=None, step: int | None=None, frames: int | None=None, verbose: bool | None=None, progressbar_kwargs: dict | None=None) -> Self:
'''${RUN_METHOD_DESCRIPTION}'''
pass
def save(self) -> None:
'''Save results of all ``analysis_instances`` to disk.
This method calls the :meth:`save` method of all ``analysis_instances`` if
available. If an instance has no :meth:`save` method, a warning is issued for
that instance.
'''
pass
| total_program_units: 5
| total_doc_str: 3
| AvgCountLine: 18
| AvgCountLineBlank: 1
| AvgCountLineCode: 15
| AvgCountLineComment: 2
| AvgCyclomatic: 3
| CommentToCodeRatio: 0.98
| CountClassBase: 1
| CountClassCoupled: 6
| CountClassCoupledModified: 1
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 1
| CountDeclMethod: 3
| CountDeclMethodAll: 4
| CountLine: 117
| CountLineBlank: 22
| CountLineCode: 48
| CountLineCodeDecl: 16
| CountLineCodeExe: 35
| CountLineComment: 47
| CountStmt: 16
| CountStmtDecl: 7
| CountStmtExe: 12
| MaxCyclomatic: 4
| MaxInheritanceTree: 1
| MaxNesting: 2
| SumCyclomatic: 8
|
328,281
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/base.py
|
maicos.core.base.ProfileBase
|
import MDAnalysis as mda
from ..lib.util import atomgroup_header, correlation_analysis, get_center, get_cli_input, get_module_input_str, maicos_banner, render_docs
import numpy as np
from collections.abc import Callable
from MDAnalysis.analysis.base import Results
import logging
@render_docs
class ProfileBase:
"""Base class for computing profiles.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PROFILE_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: None | dict, normalization: str) -> None:
self.atomgroup = atomgroup
self.grouping = grouping.lower()
self.bin_method = bin_method.lower()
self.output = output
self.normalization = normalization.lower()
if weighting_function_kwargs is None:
weighting_function_kwargs = {}
self.weighting_function = lambda ag: weighting_function(ag, grouping, **weighting_function_kwargs)
self.results = Results()
self._obs = Results()
def _prepare(self):
normalizations = ['none', 'volume', 'number']
if self.normalization not in normalizations:
raise ValueError(f"Normalization '{self.normalization}' not supported. Use {', '.join(normalizations)}.")
groupings = ['atoms', 'segments', 'residues', 'molecules', 'fragments']
if self.grouping not in groupings:
raise ValueError(f"'{self.grouping}' is not a valid option for grouping. Use {', '.join(groupings)}.")
logging.info(f'Atoms grouped by {self.grouping}.')
if not hasattr(self, 'unwrap'):
self.unwrap = True
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
"""Calculate histogram based on positions.
Parameters
----------
positions : numpy.ndarray
positions
weights : numpy.ndarray
weights for the histogram.
Returns
-------
hist : numpy.ndarray
histogram
"""
raise NotImplementedError('Only implemented in child classes.')
def _single_frame(self) -> None | float:
self._obs.profile = np.zeros(self.n_bins)
self._obs.bincount = np.zeros(self.n_bins)
if self.grouping == 'atoms':
positions = self.atomgroup.positions
else:
positions = get_center(self.atomgroup, bin_method=self.bin_method, compound=self.grouping)
weights = self.weighting_function(self.atomgroup)
profile = self._compute_histogram(positions, weights)
self._obs.bincount = self._compute_histogram(positions, weights=None)
if self.normalization == 'volume':
profile /= self._obs.bin_volume
self._obs.profile = profile
return None
def _conclude(self) -> None:
if self.normalization == 'number':
with np.errstate(divide='ignore', invalid='ignore'):
self.results.profile = self.sums.profile / self.sums.bincount
else:
self.results.profile = self.means.profile
self.results.dprofile = self.sems.profile
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
columns = ['positions [Å]']
columns.append('profile')
columns.append('error')
AnalysisBase.savetxt(self, self.output, np.vstack((self.results.bin_pos, self.results.profile, self.results.dprofile)).T, columns=columns)
|
@render_docs
class ProfileBase:
'''Base class for computing profiles.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PROFILE_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: None | dict, normalization: str) -> None:
pass
def _prepare(self):
pass
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
'''Calculate histogram based on positions.
Parameters
----------
positions : numpy.ndarray
positions
weights : numpy.ndarray
weights for the histogram.
Returns
-------
hist : numpy.ndarray
histogram
'''
pass
def _single_frame(self) -> None | float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| total_program_units: 9
| total_doc_str: 3
| AvgCountLine: 19
| AvgCountLineBlank: 3
| AvgCountLineCode: 14
| AvgCountLineComment: 4
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.4
| CountClassBase: 0
| CountClassCoupled: 8
| CountClassCoupledModified: 1
| CountClassDerived: 3
| CountDeclInstanceMethod: 6
| CountDeclInstanceVariable: 9
| CountDeclMethod: 6
| CountDeclMethodAll: 6
| CountLine: 136
| CountLineBlank: 24
| CountLineCode: 85
| CountLineCodeDecl: 34
| CountLineCodeExe: 66
| CountLineComment: 34
| CountStmt: 48
| CountStmtDecl: 22
| CountStmtExe: 41
| MaxCyclomatic: 4
| MaxInheritanceTree: 0
| MaxNesting: 2
| SumCyclomatic: 13
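The normalization logic in the `ProfileBase` record above is split between `_single_frame` (divide by `_obs.bin_volume` for "volume") and `_conclude` (divide accumulated sums for "number"). A minimal standalone numpy sketch of what the three modes amount to, using hypothetical bin data that is not part of this record:

import numpy as np

# Hypothetical per-frame data for a 4-bin profile.
weighted_hist = np.array([4.0, 8.0, 6.0, 2.0])   # summed weights per bin
bincount = np.array([2.0, 4.0, 3.0, 1.0])        # compounds per bin
bin_volume = np.full(4, 10.0)                    # bin volumes in Å^3

profile_none = weighted_hist                     # normalization="none"
profile_volume = weighted_hist / bin_volume      # normalization="volume", per frame
with np.errstate(divide="ignore", invalid="ignore"):
    profile_number = weighted_hist / bincount    # normalization="number", in _conclude
print(profile_none, profile_volume, profile_number)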
|
328,282
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/base.py
|
maicos.core.base._Runner
|
from mdacli.logger import setup_logging
from MDAnalysis.lib.log import ProgressBar
from ..lib.util import atomgroup_header, correlation_analysis, get_center, get_cli_input, get_module_input_str, maicos_banner, render_docs
from typing_extensions import Self
from tempfile import NamedTemporaryFile
import logging
class _Runner:
"""Private Runner class that provides a common ``run`` method.
Class is used inside ``AnalysisBase`` as well as in ``AnalysisCollection``
"""
def _run(self, analysis_instances: tuple['AnalysisBase', ...], start: int | None=None, stop: int | None=None, step: int | None=None, frames: int | None=None, verbose: bool | None=None, progressbar_kwargs: dict | None=None) -> Self:
self._run_locals = locals()
tempfile = NamedTemporaryFile()
level = logging.INFO if verbose else logging.WARNING
with setup_logging(logobj=logging.getLogger(__name__), logfile=tempfile.name + '.log', level=level):
logging.debug('Choosing frames to analyze')
if frames is not None and (not all((opt is None for opt in [start, stop, step]))):
raise ValueError('start/stop/step cannot be combined with frames')
logging.info(maicos_banner(frame_char='#', version=f'v{__version__}'))
for analysis_object in analysis_instances:
analysis_object._setup_frames(analysis_object._trajectory, start=start, stop=stop, step=step, frames=frames)
for analysis_object in analysis_instances:
analysis_object._call_prepare()
if progressbar_kwargs is None:
progressbar_kwargs = {}
for i, ts in enumerate(ProgressBar(analysis_instances[0]._sliced_trajectory, verbose=verbose, **progressbar_kwargs)):
ts_original = ts.copy()
for analysis_object in analysis_instances:
analysis_object._call_single_frame(ts=ts, current_frame_index=i)
ts = ts_original
logging.debug('Concluding analysis.')
for analysis_object in analysis_instances:
analysis_object._call_conclude()
tempfile.close()
return self
|
class _Runner:
'''Private Runner class that provides a common ``run`` method.
Class is used inside ``AnalysisBase`` as well as in ``AnalysisCollection``
'''
def _run(self, analysis_instances: tuple['AnalysisBase', ...], start: int | None=None, stop: int | None=None, step: int | None=None, frames: int | None=None, verbose: bool | None=None, progressbar_kwargs: dict | None=None) -> Self:
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 63
| AvgCountLineBlank: 12
| AvgCountLineCode: 50
| AvgCountLineComment: 3
| AvgCyclomatic: 9
| CommentToCodeRatio: 0.12
| CountClassBase: 0
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 2
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 1
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 69
| CountLineBlank: 14
| CountLineCode: 51
| CountLineCodeDecl: 17
| CountLineCodeExe: 40
| CountLineComment: 6
| CountStmt: 26
| CountStmtDecl: 8
| CountStmtExe: 24
| MaxCyclomatic: 9
| MaxInheritanceTree: 0
| MaxNesting: 2
| SumCyclomatic: 9
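The key idea in `_Runner._run` above is a single trajectory loop that feeds every analysis, snapshotting the timestep before the inner loop so one analysis cannot hand a modified frame to the next. A toy sketch of that pattern, with plain dicts standing in for MDAnalysis timesteps:

def run_collection(trajectory, analyses):
    # One pass over the trajectory; every analysis sees each frame once.
    for i, ts in enumerate(trajectory):
        ts_original = ts.copy()      # snapshot, as in _Runner._run
        for analysis in analyses:
            analysis(ts, i)          # may mutate ts (e.g. unwrapping)
        ts = ts_original             # rebind to the snapshot, mirroring the original

results_a, results_b = [], []
run_collection(
    [{"frame": 0}, {"frame": 1}],
    [lambda ts, i: results_a.append(ts["frame"]),
     lambda ts, i: results_b.append(ts["frame"] * 2)],
)
print(results_a, results_b)  # [0, 1] [0, 2]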
|
328,283
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/cylinder.py
|
maicos.core.cylinder.CylinderBase
|
import numpy as np
from .planar import PlanarBase
import logging
from ..lib.util import render_docs
import MDAnalysis as mda
from ..lib.math import transform_cylinder
@render_docs
class CylinderBase(PlanarBase):
"""Analysis class providing options and attributes for a cylinder system.
Provide the results attribute `r`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${CYLINDER_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
pos_cyl : numpy.ndarray
Positions in cylindrical coordinates (r, phi, z)
_obs.R : float
Average length (in Å) along the radial dimension in the current frame.
_obs.bin_pos : numpy.ndarray, (n_bins)
Central bin position of each bin (in Å) in the current frame.
_obs.bin_width : float
Bin width (in Å) in the current frame
_obs.bin_edges : numpy.ndarray, (n_bins + 1)
Edges of the bins (in Å) in the current frame.
_obs.bin_area : numpy.ndarray, (n_bins)
Area of the annulus of each bin in the current frame. Calculated via
:math:`\\pi \\left( r_{i+1}^2 - r_i^2 \\right)` where `i` is the index of the bin.
_obs.bin_volume : numpy.ndarray, (n_bins)
Volume of a hollow cylinder for each bin (in Å^3) in the current frame.
Calculated via :math:`\\pi L \\left( r_{i+1}^2 - r_i^2 \\right)` where `i` is the
index of the bin.
"""
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, rmin: float, rmax: None | float, wrap_compound: str):
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, wrap_compound=wrap_compound, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width)
self.rmin = rmin
self._rmax = rmax
def _compute_lab_frame_cylinder(self):
"""Compute lab limit `rmax`."""
box_half = self.box_lengths[self.odims].min() / 2
if self._rmax is None:
self.rmax = box_half
elif self._rmax <= box_half:
self.rmax = self._rmax
else:
logging.warning(f'`rmax` is bigger than half the smallest box vector ({box_half:.2f}) in the radial direction. This will lead to artifacts at the edges.')
self.rmax = self._rmax
self.rmin = np.float64(self.rmin)
self.rmax = np.float64(self.rmax)
self.pos_cyl = transform_cylinder(self._universe.atoms.positions, origin=self.box_center, dim=self.dim)
def _prepare(self):
"""Prepare the cylinder analysis."""
super()._prepare()
self._compute_lab_frame_cylinder()
if self.rmin < 0:
raise ValueError('Only values for `rmin` larger or equal 0 are allowed.')
if self._rmax is not None and self._rmax <= self.rmin:
raise ValueError('`rmax` can not be smaller than or equal to `rmin`!')
R = self.rmax - self.rmin
self.n_bins = int(np.ceil(R / self._bin_width))
def _single_frame(self):
"""Single frame for the cylinder analysis."""
super()._single_frame()
self._compute_lab_frame_cylinder()
self._obs.R = self.rmax - self.rmin
self._obs.bin_edges = np.linspace(self.rmin, self.rmax, self.n_bins + 1, endpoint=True)
self._obs.bin_width = self._obs.R / self.n_bins
self._obs.bin_pos = self._obs.bin_edges[1:] - self._obs.bin_width / 2
self._obs.bin_area = np.pi * np.diff(self._obs.bin_edges ** 2)
self._obs.bin_volume = self._obs.bin_area * self._obs.L
def _conclude(self):
"""Results calculations for the cylinder analysis."""
super()._conclude()
self.results.bin_pos = self.means.bin_pos
|
@render_docs
class CylinderBase(PlanarBase):
'''Analysis class providing options and attributes for a cylinder system.
Provide the results attribute `r`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${CYLINDER_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
pos_cyl : numpy.ndarray
Positions in cylindrical coordinates (r, phi, z)
_obs.R : float
Average length (in Å) along the radial dimension in the current frame.
_obs.bin_pos : numpy.ndarray, (n_bins)
Central bin position of each bin (in Å) in the current frame.
_obs.bin_width : float
Bin width (in Å) in the current frame
_obs.bin_edges : numpy.ndarray, (n_bins + 1)
Edges of the bins (in Å) in the current frame.
_obs.bin_area : numpy.ndarray, (n_bins)
Area of the annulus of each bin in the current frame. Calculated via
:math:`\pi \left( r_{i+1}^2 - r_i^2 \right)` where `i` is the index of the bin.
_obs.bin_volume : numpy.ndarray, (n_bins)
Volume of a hollow cylinder for each bin (in Å^3) in the current frame.
Calculated via :math:`\pi L \left( r_{i+1}^2 - r_i^2 \right)` where `i` is the
index of the bin.
'''
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, rmin: float, rmax: None | float, wrap_compound: str):
pass
def _compute_lab_frame_cylinder(self):
'''Compute lab limit `rmax`.'''
pass
def _prepare(self):
'''Prepare the cylinder analysis.'''
pass
def _single_frame(self):
'''Single frame for the cylinder analysis.'''
pass
def _conclude(self):
'''Results calculations for the cylinder analysis.'''
pass
| total_program_units: 7
| total_doc_str: 5
| AvgCountLine: 17
| AvgCountLineBlank: 1
| AvgCountLineCode: 14
| AvgCountLineComment: 1
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.47
| CountClassBase: 1
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 6
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 6
| CountDeclMethod: 5
| CountDeclMethodAll: 23
| CountLine: 122
| CountLineBlank: 16
| CountLineCode: 72
| CountLineCodeDecl: 29
| CountLineCodeExe: 51
| CountLineComment: 34
| CountStmt: 36
| CountStmtDecl: 13
| CountStmtExe: 30
| MaxCyclomatic: 3
| MaxInheritanceTree: 3
| MaxNesting: 1
| SumCyclomatic: 9
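The bin geometry set up in `CylinderBase._single_frame` reduces to two numpy one-liners; a self-contained check with hypothetical numbers that the `np.diff`-based expressions match the explicit annulus formula:

import numpy as np

rmin, rmax, n_bins, L = 0.0, 10.0, 5, 20.0          # hypothetical cylinder (Å)
bin_edges = np.linspace(rmin, rmax, n_bins + 1, endpoint=True)
bin_area = np.pi * np.diff(bin_edges**2)            # π (r_{i+1}^2 - r_i^2)
bin_volume = bin_area * L                           # hollow-cylinder volume per bin

r0, r1 = bin_edges[:2]
assert np.isclose(bin_area[0], np.pi * (r1**2 - r0**2))
print(bin_area, bin_volume)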
|
328,284
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/cylinder.py
|
maicos.core.cylinder.ProfileCylinderBase
|
import numpy as np
from .base import ProfileBase
import logging
import MDAnalysis as mda
from ..lib.util import render_docs
from ..lib.math import transform_cylinder
from collections.abc import Callable
@render_docs
class ProfileCylinderBase(CylinderBase, ProfileBase):
"""Base class for computing radial profiles in a cylindrical geometry.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, rmin: float, rmax: None | float, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: None | dict, normalization: str):
CylinderBase.__init__(self, atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, wrap_compound=grouping)
ProfileBase.__init__(self, atomgroup=self.atomgroup, grouping=grouping, bin_method=bin_method, output=output, weighting_function=weighting_function, weighting_function_kwargs=weighting_function_kwargs, normalization=normalization)
def _prepare(self):
CylinderBase._prepare(self)
ProfileBase._prepare(self)
logging.info(f"Profile along the radial axis in a cylindrical coordinate system, with the {'xyz'[self.dim]}-axis as cylindrical axis.")
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
positions = transform_cylinder(positions, self.box_center, self.dim)
hist, _, _ = np.histogram2d(positions[:, 0], positions[:, 2], bins=(self.n_bins, 1), range=((self.rmin, self.rmax), (self.zmin, self.zmax)), weights=weights)
return hist[:, 0]
def _single_frame(self) -> float:
CylinderBase._single_frame(self)
ProfileBase._single_frame(self)
return self._obs.profile[0]
def _conclude(self):
CylinderBase._conclude(self)
ProfileBase._conclude(self)
|
@render_docs
class ProfileCylinderBase(CylinderBase, ProfileBase):
'''Base class for computing radial profiles in a cylindrical geometry.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, rmin: float, rmax: None | float, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: None | dict, normalization: str):
pass
def _prepare(self):
pass
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
pass
def _single_frame(self) -> float:
pass
def _conclude(self):
pass
| total_program_units: 7
| total_doc_str: 1
| AvgCountLine: 16
| AvgCountLineBlank: 1
| AvgCountLineCode: 15
| AvgCountLineComment: 1
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.22
| CountClassBase: 2
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 6
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 6
| CountDeclMethod: 5
| CountDeclMethodAll: 34
| CountLine: 101
| CountLineBlank: 12
| CountLineCode: 74
| CountLineCodeDecl: 32
| CountLineCodeExe: 46
| CountLineComment: 16
| CountStmt: 19
| CountStmtDecl: 7
| CountStmtExe: 13
| MaxCyclomatic: 1
| MaxInheritanceTree: 4
| MaxNesting: 0
| SumCyclomatic: 5
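`_compute_histogram` above bins radial distances via `transform_cylinder` and `np.histogram2d`; a simplified stand-in (the real coordinate transform lives in `maicos.lib.math` and may differ in conventions) showing the radial binning with random, hypothetical coordinates:

import numpy as np

def radial_distance(positions, origin, dim=2):
    # Simplified stand-in for transform_cylinder: distance from the
    # cylinder axis, which runs along `dim` through `origin`.
    odims = np.roll(np.arange(3), -dim)[1:]
    return np.linalg.norm((positions - origin)[:, odims], axis=1)

rng = np.random.default_rng(0)
pos = rng.uniform(0.0, 20.0, size=(1000, 3))            # hypothetical coordinates (Å)
r = radial_distance(pos, origin=np.array([10.0, 10.0, 10.0]))
hist, _ = np.histogram(r, bins=10, range=(0.0, 10.0))   # single z-bin, as above
print(hist)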
|
328,285
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/planar.py
|
maicos.core.planar.PlanarBase
|
from ..lib.util import render_docs
import MDAnalysis as mda
from .base import AnalysisBase, ProfileBase
import numpy as np
@render_docs
class PlanarBase(AnalysisBase):
"""Analysis class providing options and attributes for a planar system.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PLANAR_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
zmin : float
Minimal coordinate for evaluation (Å) within the lab frame, where 0
corresponds to the origin of the cell.
zmax : float
Maximal coordinate for evaluation (Å) within the lab frame, where 0
corresponds to the origin of the cell.
_obs.L : float
Length (in Å) along the chosen dimension in the current frame.
_obs.bin_pos : numpy.ndarray, (n_bins)
Central bin position of each bin (in Å) in the current frame.
_obs.bin_width : float
Bin width (in Å) in the current frame
_obs.bin_edges : numpy.ndarray, (n_bins + 1)
Edges of the bins (in Å) in the current frame.
_obs.bin_area : numpy.ndarray, (n_bins)
Area of the rectangle of each bin in the current frame. Calculated via
:math:`L_x \\cdot L_y / N_\\mathrm{bins}` where :math:`L_x` and :math:`L_y` are
the box lengths perpendicular to the dimension of evaluation given by `dim`.
:math:`N_\\mathrm{bins}` is the number of bins.
_obs.bin_volume : numpy.ndarray, (n_bins)
Volume of a cuboid for each bin (in Å^3) in the current frame.
"""
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, wrap_compound: str):
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, wrap_compound=wrap_compound)
if dim not in [0, 1, 2]:
raise ValueError('Dimension can only be x=0, y=1 or z=2.')
self.dim = dim
self._zmax = zmax
self._zmin = zmin
self._bin_width = bin_width
@property
def odims(self) -> np.ndarray:
"""Other dimensions perpendicular to dim i.e. (0,2) if dim = 1."""
return np.roll(np.arange(3), -self.dim)[1:]
def _compute_lab_frame_planar(self):
"""Compute lab limits `zmin` and `zmax`."""
if self._zmin is None:
self.zmin = 0
else:
self.zmin = self.box_center[self.dim] + self._zmin
if self._zmax is None:
self.zmax = self.box_lengths[self.dim]
else:
self.zmax = self.box_center[self.dim] + self._zmax
self.zmin = np.float64(self.zmin)
self.zmax = np.float64(self.zmax)
def _prepare(self):
"""Prepare the planar analysis."""
self._compute_lab_frame_planar()
if self._zmax is not None and self._zmin is not None and (self._zmax <= self._zmin):
raise ValueError('`zmax` can not be smaller or equal than `zmin`!')
L = self.zmax - self.zmin
self.n_bins = int(np.ceil(L / self._bin_width))
def _single_frame(self):
"""Single frame for the planar analysis."""
self._compute_lab_frame_planar()
self._obs.L = self.zmax - self.zmin
self._obs.box_center = self.box_center
self._obs.bin_edges = np.linspace(self.zmin, self.zmax, self.n_bins + 1)
self._obs.bin_width = self._obs.L / self.n_bins
self._obs.bin_pos = self._obs.bin_edges[1:] - self._obs.bin_width / 2
self._obs.bin_area = np.ones(self.n_bins) * np.prod(self.box_lengths[self.odims])
self._obs.bin_volume = self._obs.bin_area * self._obs.bin_width
def _conclude(self):
"""Results calculations for the planar analysis."""
self.results.bin_pos = self.means.bin_pos - self.means.box_center[self.dim]
|
@render_docs
class PlanarBase(AnalysisBase):
'''Analysis class providing options and attributes for a planar system.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PLANAR_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
zmin : float
Minimal coordinate for evaluation (Å) within the lab frame, where 0
corresponds to the origin of the cell.
zmax : float
Maximal coordinate for evaluation (Å) within the lab frame, where 0
corresponds to the origin of the cell.
_obs.L : float
Length (in Å) along the chosen dimension in the current frame.
_obs.bin_pos : numpy.ndarray, (n_bins)
Central bin position of each bin (in Å) in the current frame.
_obs.bin_width : float
Bin width (in Å) in the current frame
_obs.bin_edges : numpy.ndarray, (n_bins + 1)
Edges of the bins (in Å) in the current frame.
_obs.bin_area : numpy.ndarray, (n_bins)
Area of the rectangle of each bin in the current frame. Calculated via
:math:`L_x \cdot L_y / N_\mathrm{bins}` where :math:`L_x` and :math:`L_y` are
the box lengths perpendicular to the dimension of evaluation given by `dim`.
:math:`N_\mathrm{bins}` is the number of bins.
_obs.bin_volume : numpy.ndarray, (n_bins)
Volume of a cuboid for each bin (in Å^3) in the current frame.
'''
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, wrap_compound: str):
pass
@property
def odims(self) -> np.ndarray:
'''Other dimensions perpendicular to dim i.e. (0,2) if dim = 1.'''
pass
def _compute_lab_frame_planar(self):
'''Compute lab limits `zmin` and `zmax`.'''
pass
def _prepare(self):
'''Prepare the planar analysis.'''
pass
def _single_frame(self):
'''Single frame for the planar analysis.'''
pass
def _conclude(self):
'''Results calculations for the planar analysis.'''
pass
| total_program_units: 9
| total_doc_str: 6
| AvgCountLine: 14
| AvgCountLineBlank: 1
| AvgCountLineCode: 11
| AvgCountLineComment: 2
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.67
| CountClassBase: 1
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 7
| CountDeclInstanceMethod: 6
| CountDeclInstanceVariable: 7
| CountDeclMethod: 6
| CountDeclMethodAll: 18
| CountLine: 127
| CountLineBlank: 15
| CountLineCode: 67
| CountLineCodeDecl: 29
| CountLineCodeExe: 46
| CountLineComment: 45
| CountStmt: 37
| CountStmtDecl: 15
| CountStmtExe: 30
| MaxCyclomatic: 3
| MaxInheritanceTree: 2
| MaxNesting: 1
| SumCyclomatic: 10
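A short numpy walk-through of the planar binning above, including the `np.roll` trick behind `odims`; all values are hypothetical:

import numpy as np

dim = 2                                      # profile along z
odims = np.roll(np.arange(3), -dim)[1:]      # perpendicular axes: [0 1]

zmin, zmax, bin_width = 0.0, 30.0, 1.5       # hypothetical slab (Å)
n_bins = int(np.ceil((zmax - zmin) / bin_width))
bin_edges = np.linspace(zmin, zmax, n_bins + 1)
bin_pos = bin_edges[1:] - (zmax - zmin) / n_bins / 2   # bin centers

box_lengths = np.array([20.0, 25.0, 30.0])
bin_area = np.prod(box_lengths[odims])       # L_x * L_y for dim = 2
bin_volume = bin_area * (zmax - zmin) / n_bins
print(n_bins, bin_pos[:3], bin_area, bin_volume)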
|
328,286
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/planar.py
|
maicos.core.planar.ProfilePlanarBase
|
import logging
from ..lib.math import symmetrize
from collections.abc import Callable
import MDAnalysis as mda
from ..lib.util import render_docs
from .base import AnalysisBase, ProfileBase
import numpy as np
@render_docs
class ProfilePlanarBase(PlanarBase, ProfileBase):
"""Base class for computing profiles in a cartesian geometry.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${PROFILE_PLANAR_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, sym: bool, sym_odd: bool, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: None | dict, normalization: str):
PlanarBase.__init__(self, atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, wrap_compound=grouping)
ProfileBase.__init__(self, atomgroup=self.atomgroup, grouping=grouping, bin_method=bin_method, output=output, weighting_function=weighting_function, weighting_function_kwargs=weighting_function_kwargs, normalization=normalization)
self.sym = sym
if self.sym and self.refgroup is None:
raise ValueError('For symmetrization the `refgroup` argument is required.')
self.sym_odd = sym_odd
def _prepare(self):
PlanarBase._prepare(self)
ProfileBase._prepare(self)
logging.info(f"Profile along {'xyz'[self.dim]}-axis normal to the plane.")
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
positions = positions[:, self.dim]
hist, _ = np.histogram(positions, bins=self.n_bins, range=(self.zmin, self.zmax), weights=weights)
return hist
def _single_frame(self) -> float:
PlanarBase._single_frame(self)
ProfileBase._single_frame(self)
return self._obs.profile[self.n_bins // 2]
def _conclude(self):
PlanarBase._conclude(self)
if self.sym:
symmetrize(self.sums.profile, inplace=True, is_odd=self.sym_odd)
symmetrize(self.means.profile, inplace=True, is_odd=self.sym_odd)
symmetrize(self.sems.profile, inplace=True, is_odd=False)
if self.normalization == 'number':
symmetrize(self.sums.bincount, inplace=True, is_odd=self.sym_odd)
ProfileBase._conclude(self)
|
@render_docs
class ProfilePlanarBase(PlanarBase, ProfileBase):
'''Base class for computing profiles in a Cartesian geometry.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${PROFILE_PLANAR_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, dim: int, zmin: None | float, zmax: None | float, bin_width: float, sym: bool, sym_odd: bool, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: None | dict, normalization: str):
pass
def _prepare(self):
pass
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
pass
def _single_frame(self) -> float:
pass
def _conclude(self):
pass
| total_program_units: 7
| total_doc_str: 1
| AvgCountLine: 18
| AvgCountLineBlank: 2
| AvgCountLineCode: 15
| AvgCountLineComment: 1
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.21
| CountClassBase: 2
| CountClassCoupled: 7
| CountClassCoupledModified: 0
| CountClassDerived: 8
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 6
| CountDeclMethod: 5
| CountDeclMethodAll: 29
| CountLine: 108
| CountLineBlank: 18
| CountLineCode: 75
| CountLineCodeDecl: 33
| CountLineCodeExe: 47
| CountLineComment: 16
| CountStmt: 29
| CountStmtDecl: 9
| CountStmtExe: 23
| MaxCyclomatic: 3
| MaxInheritanceTree: 3
| MaxNesting: 2
| SumCyclomatic: 8
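`symmetrize` from `maicos.lib.math` is called above with `inplace` and `is_odd` flags; a plausible minimal reimplementation, an assumption about its behavior rather than the library source, that averages a profile with its mirror image:

import numpy as np

def symmetrize_sketch(profile, is_odd=False):
    # Hypothetical stand-in for maicos.lib.math.symmetrize: average the
    # profile with its mirror; odd quantities flip sign under mirroring.
    mirror = profile[::-1]
    if is_odd:
        mirror = -mirror
    return 0.5 * (profile + mirror)

density = np.array([1.0, 2.0, 3.0, 2.5, 1.5])   # even quantity across the center
flux = np.array([-1.0, -0.5, 0.0, 0.4, 1.1])    # odd quantity across the center
print(symmetrize_sketch(density))
print(symmetrize_sketch(flux, is_odd=True))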
|
328,287
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/sphere.py
|
maicos.core.sphere.ProfileSphereBase
|
import MDAnalysis as mda
import numpy as np
import logging
from collections.abc import Callable
from .base import ProfileBase
from ..lib.math import transform_sphere
from ..lib.util import render_docs
@render_docs
class ProfileSphereBase(SphereBase, ProfileBase):
"""Base class for computing radial profiles in a spherical geometry.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${PROFILE_SPHERE_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, rmin: float, rmax: None | float, bin_width: float, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: dict | None, normalization: str):
SphereBase.__init__(self, atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, rmin=rmin, rmax=rmax, bin_width=bin_width, wrap_compound=grouping)
ProfileBase.__init__(self, atomgroup=self.atomgroup, grouping=grouping, bin_method=bin_method, output=output, weighting_function=weighting_function, weighting_function_kwargs=weighting_function_kwargs, normalization=normalization)
def _prepare(self):
SphereBase._prepare(self)
ProfileBase._prepare(self)
logging.info('Profile along the radial coordinate in a spherical coordinate system.')
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
positions = transform_sphere(positions, origin=self.box_center)[:, 0]
hist, _ = np.histogram(positions, bins=self.n_bins, range=(self.rmin, self.rmax), weights=weights)
return hist
def _single_frame(self) -> float:
SphereBase._single_frame(self)
ProfileBase._single_frame(self)
return self._obs.profile[0]
def _conclude(self):
SphereBase._conclude(self)
ProfileBase._conclude(self)
|
@render_docs
class ProfileSphereBase(SphereBase, ProfileBase):
'''Base class for computing radial profiles in a spherical geometry.
${CORRELATION_INFO_RADIAL}
Parameters
----------
${PROFILE_SPHERE_CLASS_PARAMETERS}
${PROFILE_CLASS_PARAMETERS_PRIVATE}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, rmin: float, rmax: None | float, bin_width: float, grouping: str, bin_method: str, output: str, weighting_function: Callable, weighting_function_kwargs: dict | None, normalization: str):
pass
def _prepare(self):
pass
def _compute_histogram(self, positions: np.ndarray, weights: np.ndarray | None=None) -> np.ndarray:
pass
def _single_frame(self) -> float:
pass
def _conclude(self):
pass
| total_program_units: 7
| total_doc_str: 1
| AvgCountLine: 14
| AvgCountLineBlank: 1
| AvgCountLineCode: 13
| AvgCountLineComment: 1
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.22
| CountClassBase: 2
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 4
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 4
| CountDeclMethod: 5
| CountDeclMethodAll: 28
| CountLine: 89
| CountLineBlank: 12
| CountLineCode: 64
| CountLineCodeDecl: 28
| CountLineCodeExe: 39
| CountLineComment: 14
| CountStmt: 19
| CountStmtDecl: 7
| CountStmtExe: 13
| MaxCyclomatic: 1
| MaxInheritanceTree: 3
| MaxNesting: 0
| SumCyclomatic: 5
|
328,288
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/core/sphere.py
|
maicos.core.sphere.SphereBase
|
from ..lib.util import render_docs
import logging
import numpy as np
from .planar import AnalysisBase
import MDAnalysis as mda
from ..lib.math import transform_sphere
@render_docs
class SphereBase(AnalysisBase):
"""Analysis class providing options and attributes for a spherical system.
Provide the results attribute `r`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${SPHERE_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${SPHERE_CLASS_ATTRIBUTES}
pos_sph : numpy.ndarray
Positions in spherical coordinates (r, phi, theta)
_obs.R : float
Average length (in Å) along the radial dimension in the current frame.
_obs.bin_pos : numpy.ndarray, (n_bins)
Central bin position of each bin (in Å) in the current frame.
_obs.bin_width : float
Bin width (in Å) in the current frame
_obs.bin_edges : numpy.ndarray, (n_bins + 1)
Edges of the bins (in Å) in the current frame.
_obs.bin_area : numpy.ndarray, (n_bins)
Surface area (in Å^2) of the sphere of each bin with radius `bin_pos` in the
current frame. Calculated via :math:`4 \\pi r_i^2` where :math:`i` is the index
of the bin.
results.bin_volume : numpy.ndarray, (n_bins)
Volume of a spherical shell for each bin (in Å^3) in the current frame.
Calculated via :math:`4\\pi/3 \\left(r_{i+1}^3 - r_i^3 \\right)` where `i` is the
index of the bin.
"""
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, rmin: float, rmax: None | float, bin_width: float, wrap_compound: str):
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, wrap_compound=wrap_compound)
self.rmin = rmin
self._rmax = rmax
self._bin_width = bin_width
def _compute_lab_frame_sphere(self):
"""Compute lab limit `rmax`."""
box_half = self.box_lengths.min() / 2
if self._rmax is None:
self.rmax = box_half
elif self._rmax <= box_half:
self.rmax = self._rmax
else:
logging.warning(f'`rmax` is bigger than half the smallest box vector ({box_half:.2f}) in the radial direction. This will lead to artifacts at the edges.')
self.rmax = self._rmax
self.rmin = np.float64(self.rmin)
self.rmax = np.float64(self.rmax)
self.pos_sph = transform_sphere(self._universe.atoms.positions, origin=self.box_center)
def _prepare(self):
"""Prepare the spherical analysis."""
self._compute_lab_frame_sphere()
if self.rmin < 0:
raise ValueError('Only values for `rmin` larger or equal 0 are allowed.')
if self._rmax is not None and self._rmax <= self.rmin:
raise ValueError('`rmax` can not be smaller than or equal to `rmin`!')
R = self.rmax - self.rmin
self.n_bins = int(np.ceil(R / self._bin_width))
def _single_frame(self):
"""Single frame for the sphercial analysis."""
self._compute_lab_frame_sphere()
self._obs.R = self.rmax - self.rmin
self._obs.bin_edges = np.linspace(self.rmin, self.rmax, self.n_bins + 1, endpoint=True)
self._obs.bin_width = self._obs.R / self.n_bins
self._obs.bin_pos = self._obs.bin_edges[1:] - self._obs.bin_width / 2
self._obs.bin_area = 4 * np.pi * self._obs.bin_pos ** 2
self._obs.bin_volume = 4 * np.pi * np.diff(self._obs.bin_edges ** 3) / 3
def _conclude(self):
"""Results calculations for the sphercial analysis."""
super()._conclude()
self.results.bin_pos = self.means.bin_pos
|
@render_docs
class SphereBase(AnalysisBase):
'''Analysis class providing options and attributes for a spherical system.
Provide the results attribute `r`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${SPHERE_CLASS_PARAMETERS}
${WRAP_COMPOUND_PARAMETER}
Attributes
----------
${SPHERE_CLASS_ATTRIBUTES}
pos_sph : numpy.ndarray
Positions in spherical coordinates (r, phi, theta)
_obs.R : float
Average length (in Å) along the radial dimension in the current frame.
_obs.bin_pos : numpy.ndarray, (n_bins)
Central bin position of each bin (in Å) in the current frame.
_obs.bin_width : float
Bin width (in Å) in the current frame
_obs.bin_edges : numpy.ndarray, (n_bins + 1)
Edges of the bins (in Å) in the current frame.
_obs.bin_area : numpy.ndarray, (n_bins)
Surface area (in Å^2) of the sphere of each bin with radius `bin_pos` in the
current frame. Calculated via :math:`4 \pi r_i^2` where :math:`i` is the index
of the bin.
results.bin_volume : numpy.ndarray, (n_bins)
Volume of a spherical shell for each bin (in Å^3) in the current frame.
Calculated via :math:`4\pi/3 \left(r_{i+1}^3 - r_i^3 \right)` where `i` is the
index of the bin.
'''
def __init__(self, atomgroup: mda.AtomGroup, unwrap: bool, pack: bool, refgroup: mda.AtomGroup | None, jitter: float, concfreq: int, rmin: float, rmax: None | float, bin_width: float, wrap_compound: str):
pass
def _compute_lab_frame_sphere(self):
'''Compute lab limit `rmax`.'''
pass
def _prepare(self):
'''Prepare the spherical analysis.'''
pass
def _single_frame(self):
'''Single frame for the spherical analysis.'''
pass
def _conclude(self):
'''Results calculations for the spherical analysis.'''
pass
| total_program_units: 7
| total_doc_str: 5
| AvgCountLine: 15
| AvgCountLineBlank: 1
| AvgCountLineCode: 13
| AvgCountLineComment: 1
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.55
| CountClassBase: 1
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 4
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 6
| CountDeclMethod: 5
| CountDeclMethodAll: 17
| CountLine: 114
| CountLineBlank: 15
| CountLineCode: 64
| CountLineCodeDecl: 26
| CountLineCodeExe: 46
| CountLineComment: 35
| CountStmt: 35
| CountStmtDecl: 14
| CountStmtExe: 29
| MaxCyclomatic: 3
| MaxInheritanceTree: 2
| MaxNesting: 1
| SumCyclomatic: 9
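The spherical shell volumes above telescope to the full sphere, which makes a convenient sanity check (hypothetical numbers):

import numpy as np

rmin, rmax, n_bins = 0.0, 12.0, 4            # hypothetical sphere (Å)
bin_edges = np.linspace(rmin, rmax, n_bins + 1, endpoint=True)
bin_volume = 4 * np.pi * np.diff(bin_edges**3) / 3     # shell volumes
assert np.isclose(bin_volume.sum(), 4 / 3 * np.pi * rmax**3)

bin_pos = bin_edges[1:] - np.diff(bin_edges) / 2
bin_area = 4 * np.pi * bin_pos**2                      # sphere surface at bin centers
print(bin_volume, bin_area)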
|
328,289
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/lib/tables.py
|
maicos.lib.tables.CMParameter
|
import numpy as np
from dataclasses import dataclass
@dataclass
class CMParameter:
"""Cromer-Mann X-ray scattering factor parameters."""
a: np.ndarray
b: np.ndarray
c: float
|
@dataclass
class CMParameter:
'''Cromer-Mann X-ray scattering factor parameters.'''
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 0
| AvgCountLineBlank: 0
| AvgCountLineCode: 0
| AvgCountLineComment: 0
| AvgCyclomatic: 0
| CommentToCodeRatio: 0.25
| CountClassBase: 0
| CountClassCoupled: 0
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 0
| CountDeclInstanceVariable: 0
| CountDeclMethod: 0
| CountDeclMethodAll: 0
| CountLine: 6
| CountLineBlank: 1
| CountLineCode: 4
| CountLineCodeDecl: 1
| CountLineCodeExe: 3
| CountLineComment: 1
| CountStmt: 4
| CountStmtDecl: 1
| CountStmtExe: 3
| MaxCyclomatic: 0
| MaxInheritanceTree: 0
| MaxNesting: 0
| SumCyclomatic: 0
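`CMParameter` above holds the coefficients of the standard Cromer-Mann parametrization f(q) = sum_i a_i exp(-b_i (q/4π)^2) + c. A sketch of evaluating it; the coefficient values are made up for illustration, and maicos' own scattering code may use a different argument convention (e.g. sin(theta)/lambda instead of q):

import numpy as np
from dataclasses import dataclass

@dataclass
class CMParameter:
    a: np.ndarray
    b: np.ndarray
    c: float

def cromer_mann(param, q):
    # f(q) = sum_i a_i exp(-b_i (q / 4π)^2) + c
    s2 = (np.asarray(q) / (4 * np.pi)) ** 2
    return np.sum(param.a[:, None] * np.exp(-param.b[:, None] * s2), axis=0) + param.c

# Made-up four-Gaussian coefficients, not tabulated values:
p = CMParameter(a=np.array([2.0, 1.5, 1.0, 0.5]),
                b=np.array([10.0, 5.0, 1.0, 0.2]), c=0.3)
print(cromer_mann(p, np.array([0.0, 1.0, 5.0])))   # f(0) = a.sum() + c = 5.3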
|
328,290
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/lib/util.py
|
maicos.lib.util.Unit_vector
|
from typing import Protocol
import MDAnalysis as mda
import numpy as np
class Unit_vector(Protocol):
"""Protocol class for unit vector methods type hints."""
def __call__(self, atomgroup: mda.AtomGroup, grouping: str) -> np.ndarray:
"""Call for type hints."""
...
|
class Unit_vector(Protocol):
'''Protocol class providing type hints for unit vector methods.'''
def __call__(self, atomgroup: mda.AtomGroup, grouping: str) -> np.ndarray:
'''Call for type hints.'''
pass
| total_program_units: 2
| total_doc_str: 2
| AvgCountLine: 3
| AvgCountLineBlank: 0
| AvgCountLineCode: 2
| AvgCountLineComment: 1
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.67
| CountClassBase: 1
| CountClassCoupled: 1
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 0
| CountDeclMethod: 1
| CountDeclMethodAll: 25
| CountLine: 6
| CountLineBlank: 1
| CountLineCode: 3
| CountLineCodeDecl: 2
| CountLineCodeExe: 1
| CountLineComment: 2
| CountStmt: 3
| CountStmtDecl: 2
| CountStmtExe: 1
| MaxCyclomatic: 1
| MaxInheritanceTree: 5
| MaxNesting: 0
| SumCyclomatic: 1
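A `Protocol` like `Unit_vector` above is purely structural: any callable with a matching signature type-checks against it, no inheritance required. A minimal sketch with a trivial, hypothetical implementation (the MDAnalysis AtomGroup argument is stubbed out):

import numpy as np
from typing import Protocol

class Unit_vector(Protocol):
    def __call__(self, atomgroup, grouping: str) -> np.ndarray: ...

def z_axis(atomgroup, grouping: str) -> np.ndarray:
    # Trivial conforming implementation: a constant unit vector.
    return np.array([0.0, 0.0, 1.0])

def apply(vector_fn: Unit_vector) -> np.ndarray:
    return vector_fn(None, "atoms")   # no real AtomGroup needed for this sketch

print(apply(z_axis))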
|
328,291
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DensityCylinder.py
|
maicos.modules.DensityCylinder.DensityCylinder
|
from ..lib.weights import density_weights
import MDAnalysis as mda
import logging
from ..core import ProfileCylinderBase
from ..lib.util import render_docs
@render_docs
class DensityCylinder(ProfileCylinderBase):
"""Cylindrical partial density profiles.
${DENSITY_CYLINDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, concfreq: int=0, jitter: float=0.0, output: str='density.dat') -> None:
self._locals = locals()
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, refgroup=refgroup, grouping=grouping, bin_method=bin_method, output=output, weighting_function=density_weights, weighting_function_kwargs={'dens': dens}, normalization='volume')
def _prepare(self):
logging.info(f"Analysis of the {self._locals['dens']} density profile.")
super()._prepare()
|
@render_docs
class DensityCylinder(ProfileCylinderBase):
'''Cylindrical partial density profiles.
${DENSITY_CYLINDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, concfreq: int=0, jitter: float=0.0, output: str='density.dat') -> None:
pass
def _prepare(self):
pass
| total_program_units: 4
| total_doc_str: 1
| AvgCountLine: 22
| AvgCountLineBlank: 0
| AvgCountLineCode: 22
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.3
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 1
| CountDeclMethod: 2
| CountDeclMethodAll: 36
| CountLine: 64
| CountLineBlank: 7
| CountLineCode: 44
| CountLineCodeDecl: 22
| CountLineCodeExe: 23
| CountLineComment: 13
| CountStmt: 7
| CountStmtDecl: 4
| CountStmtExe: 4
| MaxCyclomatic: 1
| MaxInheritanceTree: 5
| MaxNesting: 0
| SumCyclomatic: 2
|
328,292
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DensityPlanar.py
|
maicos.modules.DensityPlanar.DensityPlanar
|
from ..lib.util import render_docs
import MDAnalysis as mda
import logging
from ..lib.weights import density_weights
from ..core import ProfilePlanarBase
@render_docs
class DensityPlanar(ProfilePlanarBase):
"""Cartesian partial density profiles.
${DENSITY_PLANAR_DESCRIPTION}
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
Notes
-----
Partial mass density profiles can be used to calculate the ideal component of the
chemical potential. For details, take a look at the corresponding :ref:`How-to
guide<howto-chemical-potential>`.
"""
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
self._locals = locals()
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, refgroup=refgroup, sym=sym, sym_odd=False, grouping=grouping, bin_method=bin_method, output=output, weighting_function=density_weights, weighting_function_kwargs={'dens': dens}, normalization='volume')
def _prepare(self):
logging.info(f"Analysis of the {self._locals['dens']} density profile.")
super()._prepare()
|
@render_docs
class DensityPlanar(ProfilePlanarBase):
'''Cartesian partial density profiles.
${DENSITY_PLANAR_DESCRIPTION}
${CORRELATION_INFO_PLANAR}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_PLANAR_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_PLANAR_CLASS_ATTRIBUTES}
Notes
-----
Partial mass density profiles can be used to calculate the ideal component of the
chemical potential. For details, take a look at the corresponding :ref:`How-to
guide<howto-chemical-potential>`.
'''
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
pass
def _prepare(self):
pass
| total_program_units: 4
| total_doc_str: 1
| AvgCountLine: 21
| AvgCountLineBlank: 0
| AvgCountLineCode: 21
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.42
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 1
| CountDeclMethod: 2
| CountDeclMethodAll: 31
| CountLine: 69
| CountLineBlank: 8
| CountLineCode: 43
| CountLineCodeDecl: 21
| CountLineCodeExe: 23
| CountLineComment: 18
| CountStmt: 7
| CountStmtDecl: 4
| CountStmtExe: 4
| MaxCyclomatic: 1
| MaxInheritanceTree: 4
| MaxNesting: 0
| SumCyclomatic: 2
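A minimal usage sketch for `DensityPlanar`, modeled on the doctest in the `AnalysisCollection` record above; the selection string is a guess for the MDAnalysisTests system:

import MDAnalysis as mda
from MDAnalysisTests.datafiles import TPR, XTC
from maicos import DensityPlanar

u = mda.Universe(TPR, XTC)
water = u.select_atoms("name O H")            # hypothetical selection

dplan = DensityPlanar(water, dens="mass", dim=2, bin_width=1)
dplan.run()                                   # defaults: all frames
print(dplan.results.bin_pos[:5], dplan.results.profile[:5])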
|
328,293
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DensitySphere.py
|
maicos.modules.DensitySphere.DensitySphere
|
import MDAnalysis as mda
from ..core import ProfileSphereBase
from ..lib.util import render_docs
import logging
from ..lib.weights import density_weights
@render_docs
class DensitySphere(ProfileSphereBase):
"""Spherical partial density profiles.
${DENSITY_SPHERE_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_SPHERE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_SPHERE_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
self._locals = locals()
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, rmin=rmin, rmax=rmax, bin_width=bin_width, grouping=grouping, bin_method=bin_method, output=output, weighting_function=density_weights, weighting_function_kwargs={'dens': dens}, normalization='volume')
def _prepare(self):
logging.info(f"Analysis of the {self._locals['dens']} density profile.")
super()._prepare()
|
@render_docs
class DensitySphere(ProfileSphereBase):
'''Spherical partial density profiles.
${DENSITY_SPHERE_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${DENS_PARAMETER}
${PROFILE_SPHERE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_SPHERE_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, dens: str='mass', rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='atoms', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='density.dat') -> None:
pass
def _prepare(self):
pass
| total_program_units: 4
| total_doc_str: 1
| AvgCountLine: 19
| AvgCountLineBlank: 0
| AvgCountLineCode: 19
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.34
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 1
| CountDeclMethod: 2
| CountDeclMethodAll: 30
| CountLine: 58
| CountLineBlank: 7
| CountLineCode: 38
| CountLineCodeDecl: 19
| CountLineCodeExe: 20
| CountLineComment: 13
| CountStmt: 7
| CountStmtDecl: 4
| CountStmtExe: 4
| MaxCyclomatic: 1
| MaxInheritanceTree: 4
| MaxNesting: 0
| SumCyclomatic: 2
|
328,294
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DielectricCylinder.py
|
maicos.modules.DielectricCylinder.DielectricCylinder
|
import numpy as np
import scipy.constants
import logging
from ..core import CylinderBase
import MDAnalysis as mda
from ..lib.util import charge_neutral, citation_reminder, get_compound, render_docs
@render_docs
@charge_neutral(filter='error')
class DielectricCylinder(CylinderBase):
"""Cylindrical dielectric profiles.
Computes the axial :math:`\\varepsilon_z(r)` and inverse radial
:math:`\\varepsilon_r^{-1}(r)` components of the cylindrical dielectric tensor
:math:`\\varepsilon`. The components are binned along the radial direction of the
cylinder. The :math:`z`-axis of the cylinder is pointing in the direction given by
the ``dim`` parameter. The center of the cylinder is either located at the center of
the simulation box (default) or at the center of mass of the ``refgroup``, if
provided.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the component along the :math:`z`-axis is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`locheGiantaxialDielectric2019`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
single : bool
For a single chain of molecules the average of :math:`M` is zero. This flag sets
:math:`\\langle M \\rangle = 0`.
${CYLINDER_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
results.eps_z : numpy.ndarray
Reduced axial dielectric profile :math:`(\\varepsilon_z(r) - 1)` of the
selected atomgroup
results.deps_z : numpy.ndarray
Estimated uncertainty of axial dielectric profile
results.eps_r : numpy.ndarray
Reduced inverse radial dielectric profile
:math:`(\\varepsilon^{-1}_r(r) - 1)`
results.deps_r : numpy.ndarray
Estimated uncertainty of inverse radial dielectric profile
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, single: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_cyl') -> None:
self._locals = locals()
self.comp = get_compound(atomgroup)
ix = atomgroup._get_compound_indices(self.comp)
_, self.inverse_ix = np.unique(ix, return_inverse=True)
if zmin is not None or zmax is not None or rmin != 0 or (rmax is not None):
logging.warning('Setting `rmin` and `rmax` (as well as `zmin` and `zmax`) might cut off molecules. This will lead to severe artifacts in the dielectric profiles.')
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, wrap_compound=self.comp)
self.output_prefix = output_prefix
self.temperature = temperature
self.single = single
self.vcutwidth = vcutwidth
def _prepare(self) -> None:
logging.info('Analysis of the axial and inverse radial components of the cylindrical dielectric tensor.')
logging.info(citation_reminder('10.1021/acs.jpcb.9b09269'))
super()._prepare()
def _single_frame(self) -> float:
super()._single_frame()
rbins = np.digitize(self.pos_cyl[:, 0], self._obs.bin_edges[1:-1])
curQ_r = np.bincount(rbins[self.atomgroup.ix], weights=self.atomgroup.charges, minlength=self.n_bins)
self._obs.m_r = -np.cumsum(curQ_r) / 2 / np.pi / self._obs.L / self._obs.bin_pos
curQ_r_tot = np.bincount(rbins, weights=self._universe.atoms.charges, minlength=self.n_bins)
self._obs.m_r_tot = -np.cumsum(curQ_r_tot) / 2 / np.pi / self._obs.L / self._obs.bin_pos
self._obs.M_r = np.sum(self._obs.m_r_tot * self._obs.bin_width)
self._obs.mM_r = self._obs.m_r * self._obs.M_r
nbinsz = np.ceil(self._obs.L / self.vcutwidth).astype(int)
chargepos = self.pos_cyl[self.atomgroup.ix, 0] * np.abs(self.atomgroup.charges)
center = self.atomgroup.accumulate(chargepos, compound=self.comp) / self.atomgroup.accumulate(np.abs(self.atomgroup.charges), compound=self.comp)
testpos = center[self.inverse_ix]
rbins = np.digitize(testpos, self._obs.bin_edges[1:-1])
z = np.arange(nbinsz) * (self._obs.L / nbinsz)
zbins = np.digitize(self.pos_cyl[self.atomgroup.ix, 2], z[1:])
curQz = np.bincount(rbins + self.n_bins * zbins, weights=self.atomgroup.charges, minlength=self.n_bins * nbinsz).reshape(nbinsz, self.n_bins)
curqz = np.cumsum(curQz, axis=0) / self._obs.bin_area[np.newaxis, :]
self._obs.m_z = -curqz.mean(axis=0)
self._obs.M_z = np.dot(self._universe.atoms.charges, self.pos_cyl[:, 2])
self._obs.mM_z = self._obs.m_z * self._obs.M_z
return self._obs.M_z
def _conclude(self) -> None:
super()._conclude()
self._pref = 1 / scipy.constants.epsilon_0
self._pref /= scipy.constants.Boltzmann * self.temperature
self._pref /= scipy.constants.angstrom / scipy.constants.elementary_charge ** 2
if not self.single:
cov_z = self.means.mM_z - self.means.m_z * self.means.M_z
cov_r = self.means.mM_r - self.means.m_r * self.means.M_r
dcov_z = np.sqrt(self.sems.mM_z ** 2 + self.sems.m_z ** 2 * self.means.M_z ** 2 + self.means.m_z ** 2 * self.sems.M_z ** 2)
dcov_r = np.sqrt(self.sems.mM_r ** 2 + self.sems.m_r ** 2 * self.means.M_r ** 2 + self.means.m_r ** 2 * self.sems.M_r ** 2)
else:
cov_z = self.means.mM_z
cov_r = self.means.mM_r
dcov_z = self.sems.mM_z
dcov_r = self.sems.mM_r
self.results.eps_z = self._pref * cov_z
self.results.deps_z = self._pref * dcov_z
self.results.eps_r = -(2 * np.pi * self._obs.L * self._pref * self.results.bin_pos * cov_r)
self.results.deps_r = 2 * np.pi * self._obs.L * self._pref * self.results.bin_pos * dcov_r
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
outdata_z = np.array([self.results.bin_pos, self.results.eps_z, self.results.deps_z]).T
outdata_r = np.array([self.results.bin_pos, self.results.eps_r, self.results.deps_r]).T
columns = ['positions [Å]']
columns += ['ε_z - 1', 'Δε_z']
self.savetxt('{}{}'.format(self.output_prefix, '_z.dat'), outdata_z, columns=columns)
columns = ['positions [Å]']
columns += ['ε^-1_r - 1', 'Δε^-1_r']
self.savetxt('{}{}'.format(self.output_prefix, '_r.dat'), outdata_r, columns=columns)
|
@render_docs
@charge_neutral(filter='error')
class DielectricCylinder(CylinderBase):
'''Cylindrical dielectric profiles.
Computes the axial :math:`\varepsilon_z(r)` and inverse radial
:math:`\varepsilon_r^{-1}(r)` components of the cylindrical dielectric tensor
:math:`\varepsilon`. The components are binned along the radial direction of the
cylinder. The :math:`z`-axis of the cylinder is pointing in the direction given by
the ``dim`` parameter. The center of the cylinder is either located at the center of
the simulation box (default) or at the center of mass of the ``refgroup``, if
provided.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the component along the :math:`z`-axis is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`locheGiantaxialDielectric2019`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
single : bool
For a single chain of molecules the average of :math:`M` is zero. This flag sets
:math:`\langle M \rangle = 0`.
${CYLINDER_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${CYLINDER_CLASS_ATTRIBUTES}
results.eps_z : numpy.ndarray
Reduced axial dielectric profile :math:`(\varepsilon_z(r) - 1)` of the
selected atomgroup
results.deps_z : numpy.ndarray
Estimated uncertainty of axial dielectric profile
results.eps_r : numpy.ndarray
Reduced inverse radial dielectric profile
:math:`(\varepsilon^{-1}_r(r) - 1)`
results.deps_r : numpy.ndarray
Estimated uncertainty of inverse radial dielectric profile
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, single: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_cyl') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| total_program_units: 9
| total_doc_str: 2
| AvgCountLine: 39
| AvgCountLineBlank: 5
| AvgCountLineCode: 29
| AvgCountLineComment: 5
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.45
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 8
| CountDeclMethod: 5
| CountDeclMethodAll: 28
| CountLine: 248
| CountLineBlank: 38
| CountLineCode: 145
| CountLineCodeDecl: 52
| CountLineCodeExe: 120
| CountLineComment: 65
| CountStmt: 66
| CountStmtDecl: 33
| CountStmtExe: 60
| MaxCyclomatic: 2
| MaxInheritanceTree: 4
| MaxNesting: 1
| SumCyclomatic: 7
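The radial polarization in `DielectricCylinder._single_frame` is a cumulative charge histogram scaled by the cylinder surface 2πrL; a compact numpy sketch of that construction with made-up charges:

import numpy as np

n_bins, L = 6, 20.0                                   # hypothetical cylinder (Å)
bin_edges = np.linspace(0.0, 6.0, n_bins + 1)
bin_pos = bin_edges[1:] - np.diff(bin_edges) / 2

r = np.array([0.4, 1.2, 1.3, 2.8, 4.1, 5.5])          # radial positions (Å)
charges = np.array([1.0, -0.5, -0.5, 1.0, -1.0, 0.0]) # made-up partial charges

rbins = np.digitize(r, bin_edges[1:-1])
curQ_r = np.bincount(rbins, weights=charges, minlength=n_bins)
# Minus the cumulative enclosed charge per surface area 2π r L,
# matching the m_r expression in _single_frame above.
m_r = -np.cumsum(curQ_r) / (2 * np.pi * L * bin_pos)
print(m_r)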
|
328,295
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DielectricPlanar.py
|
maicos.modules.DielectricPlanar.DielectricPlanar
|
from ..core import PlanarBase
from ..lib.math import symmetrize
import logging
import numpy as np
from ..lib.util import charge_neutral, citation_reminder, get_compound, render_docs
import scipy.constants
import MDAnalysis as mda
@render_docs
@charge_neutral(filter='error')
class DielectricPlanar(PlanarBase):
"""Planar dielectric profiles.
Computes the parallel :math:`\\varepsilon_\\parallel(z)` and inverse perpendicular
(:math:`\\varepsilon_\\perp^{-1}(z)`) components of the planar dielectric tensor
:math:`\\varepsilon`. The components are binned along the cartesian :math:`z`
direction yielding the component normal to the surface and defined by the ``dim``
parameter.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the norm of the parallel total dipole moment is used.
${CORRELATION_INFO}
Also, please read and cite
:footcite:t:`schlaichWaterDielectricEffects2016` and Refs.
:footcite:p:`locheUniversalNonuniversalAspects2020`,
:footcite:p:`bonthuisProfileStaticPermittivity2012`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
is_3d : bool
Use 3d-periodic boundary conditions, i.e., include the dipole correction for
the interaction between periodic images
:footcite:p:`sternCalculationDielectricPermittivity2003`.
${PLANAR_CLASS_PARAMETERS}
${SYM_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
results.eps_par : numpy.ndarray
Reduced parallel dielectric profile
:math:`(\\varepsilon_\\parallel(z) - 1)` of the selected AtomGroup
results.deps_par : numpy.ndarray
Uncertainty of parallel dielectric profile
results.eps_par_self : numpy.ndarray
Reduced self contribution of parallel dielectric profile
:math:`(\\varepsilon_{\\parallel,\\mathrm{self}}(z) - 1)`
results.eps_par_coll : numpy.ndarray
Reduced collective contribution of parallel dielectric profile
:math:`(\\varepsilon_{\\parallel,\\mathrm{coll}}(z) - 1)`
results.eps_perp : numpy.ndarray
Reduced inverse perpendicular dielectric profile
:math:`(\\varepsilon^{-1}_\\perp(z) - 1)`
results.deps_perp : numpy.ndarray
Uncertainty of inverse perpendicular dielectric profile
results.eps_perp_self : numpy.ndarray
Reduced self contribution of the inverse perpendicular dielectric
profile :math:`(\\varepsilon^{-1}_{\\perp,\\mathrm{self}}(z) - 1)`
results.eps_perp_coll : numpy.ndarray
Reduced collective contribution of the inverse perpendicular dielectric profile
:math:`(\\varepsilon^{-1}_{\\perp,\\mathrm{coll}}(z) - 1)`
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, is_3d: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=0.5, sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps') -> None:
self._locals = locals()
wrap_compound = get_compound(atomgroup)
if zmin is not None or zmax is not None:
logging.warning('Setting `zmin` and `zmax` might cut off molecules. This will lead to severe artifacts in the dielectric profiles.')
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, wrap_compound=wrap_compound, concfreq=concfreq)
self.is_3d = is_3d
self.sym = sym
self.temperature = temperature
self.output_prefix = output_prefix
self.concfreq = concfreq
self.vcutwidth = vcutwidth
def _prepare(self) -> None:
logging.info('Analysis of the parallel and inverse perpendicular components of the planar dielectric tensor.')
logging.info(citation_reminder('10.1103/PhysRevLett.117.048001'))
super()._prepare()
self.comp = get_compound(self.atomgroup)
ix = self.atomgroup._get_compound_indices(self.comp)
_, inverse_ix = np.unique(ix, return_inverse=True)
self.inverse_ix = inverse_ix
def _single_frame(self) -> float:
super()._single_frame()
self._obs.M = np.dot(self._universe.atoms.charges, self._universe.atoms.positions)
self._obs.M_perp = self._obs.M[self.dim]
self._obs.M_perp_2 = self._obs.M[self.dim] ** 2
self._obs.M_par = self._obs.M[self.odims]
self._obs.m_par = np.zeros((self.n_bins, 2))
self._obs.mM_par = np.zeros(self.n_bins)
self._obs.mm_par = np.zeros(self.n_bins)
self._obs.cmM_par = np.zeros(self.n_bins)
self._obs.cM_par = np.zeros((self.n_bins, 2))
self._obs.m_perp = np.zeros(self.n_bins)
self._obs.mM_perp = np.zeros(self.n_bins)
self._obs.mm_perp = np.zeros(self.n_bins)
self._obs.cmM_perp = np.zeros(self.n_bins)
self._obs.cM_perp = np.zeros(self.n_bins)
zbins = np.digitize(self.atomgroup.atoms.positions[:, self.dim], self._obs.bin_edges[1:-1])
curQ = np.bincount(zbins, weights=self.atomgroup.atoms.charges, minlength=self.n_bins)
self._obs.m_perp = -np.cumsum(curQ / self._obs.bin_area)
self._obs.mM_perp = self._obs.m_perp * self._obs.M_perp
self._obs.mm_perp = self._obs.m_perp ** 2 * self._obs.bin_volume
self._obs.cmM_perp = self._obs.m_perp * (self._obs.M_perp - self._obs.m_perp * self._obs.bin_volume)
self._obs.cM_perp = self._obs.M_perp - self._obs.m_perp * self._obs.bin_volume
testpos = self.atomgroup.center(weights=np.abs(self.atomgroup.charges), compound=self.comp)[self.inverse_ix, self.dim]
for j, direction in enumerate(self.odims):
Lx = self._ts.dimensions[direction]
Ax = self._ts.dimensions[self.odims[1 - j]] * self._obs.bin_width
vbinsx = np.ceil(Lx / self.vcutwidth).astype(int)
x_bin_edges = np.arange(vbinsx) * (Lx / vbinsx)
zpos = np.digitize(testpos, self._obs.bin_edges[1:-1])
xbins = np.digitize(self.atomgroup.atoms.positions[:, direction], x_bin_edges[1:])
curQx = np.bincount(zpos + self.n_bins * xbins, weights=self.atomgroup.charges, minlength=vbinsx * self.n_bins).reshape(vbinsx, self.n_bins)
self._obs.m_par[:, j] = -np.cumsum(curQx / Ax, axis=0).mean(axis=0)
bin_volume = self._obs.bin_volume[0]
self._obs.mM_par = np.dot(self._obs.m_par, self._obs.M_par)
self._obs.mm_par = (self._obs.m_par * self._obs.m_par).sum(axis=1) * bin_volume
self._obs.cmM_par = (self._obs.m_par * (self._obs.M_par - self._obs.m_par * bin_volume)).sum(axis=1)
self._obs.cM_par = self._obs.M_par - self._obs.m_par * bin_volume
return np.linalg.norm(self._obs.M_par)
def _conclude(self) -> None:
super()._conclude()
self._pref = 1 / scipy.constants.epsilon_0
self._pref /= scipy.constants.Boltzmann * self.temperature
self._pref /= scipy.constants.angstrom / scipy.constants.elementary_charge ** 2
self.results.V = self.means.bin_volume.sum()
cov_perp = self.means.mM_perp - self.means.m_perp * self.means.M_perp
dcov_perp = np.sqrt(self.sems.mM_perp ** 2 + (self.means.M_perp * self.sems.m_perp) ** 2 + (self.means.m_perp * self.sems.M_perp) ** 2)
var_perp = self.means.M_perp_2 - self.means.M_perp ** 2
cov_perp_self = self.means.mm_perp - self.means.m_perp ** 2 * self.means.bin_volume[0]
cov_perp_coll = self.means.cmM_perp - self.means.m_perp * self.means.cM_perp
if not self.is_3d:
self.results.eps_perp = -self._pref * cov_perp
self.results.eps_perp_self = -self._pref * cov_perp_self
self.results.eps_perp_coll = -self._pref * cov_perp_coll
self.results.deps_perp = self._pref * dcov_perp
else:
self.results.eps_perp = -cov_perp / (self._pref ** (-1) + var_perp / self.results.V)
self.results.deps_perp = self._pref * dcov_perp
self.results.eps_perp_self = -self._pref * cov_perp_self / (1 + self._pref / self.results.V * var_perp)
self.results.eps_perp_coll = -self._pref * cov_perp_coll / (1 + self._pref / self.results.V * var_perp)
cov_par = np.zeros(self.n_bins)
dcov_par = np.zeros(self.n_bins)
cov_par_self = np.zeros(self.n_bins)
cov_par_coll = np.zeros(self.n_bins)
cov_par = 0.5 * (self.means.mM_par - np.dot(self.means.m_par, self.means.M_par.T))
dcov_par = 0.5 * np.sqrt(self.sems.mM_par ** 2 + np.dot(self.sems.m_par ** 2, (self.means.M_par ** 2).T) + np.dot(self.means.m_par ** 2, (self.sems.M_par ** 2).T))
cov_par_self = 0.5 * (self.means.mm_par - np.dot(self.means.m_par, self.means.m_par.sum(axis=0)))
cov_par_coll = 0.5 * (self.means.cmM_par - (self.means.m_par * self.means.cM_par).sum(axis=1))
self.results.eps_par = self._pref * cov_par
self.results.deps_par = self._pref * dcov_par
self.results.eps_par_self = self._pref * cov_par_self
self.results.eps_par_coll = self._pref * cov_par_coll
if self.sym:
symmetrize(self.results.eps_perp, axis=0, inplace=True)
symmetrize(self.results.deps_perp, axis=0, inplace=True)
symmetrize(self.results.eps_perp_self, axis=0, inplace=True)
symmetrize(self.results.eps_perp_coll, axis=0, inplace=True)
symmetrize(self.results.eps_par, axis=0, inplace=True)
symmetrize(self.results.deps_par, axis=0, inplace=True)
symmetrize(self.results.eps_par_self, axis=0, inplace=True)
symmetrize(self.results.eps_par_coll, axis=0, inplace=True)
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
columns = ['position [Å]']
columns.append('ε^-1_⟂ - 1')
columns.append('Δε^-1_⟂')
columns.append('self ε^-1_⟂ - 1')
columns.append('coll. ε^-1_⟂ - 1')
outdata_perp = np.vstack([self.results.bin_pos, self.results.eps_perp, self.results.deps_perp, self.results.eps_perp_self, self.results.eps_perp_coll]).T
self.savetxt('{}{}'.format(self.output_prefix, '_perp'), outdata_perp, columns=columns)
columns = ['position [Å]']
columns.append('ε_∥ - 1')
columns.append('Δε_∥')
columns.append('self ε_∥ - 1')
columns.append('coll ε_∥ - 1')
outdata_par = np.vstack([self.results.bin_pos, self.results.eps_par, self.results.deps_par, self.results.eps_par_self, self.results.eps_par_coll]).T
self.savetxt('{}{}'.format(self.output_prefix, '_par'), outdata_par, columns=columns)
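A hedged usage sketch for DielectricPlanar; file names and the selection are placeholders, and `run()` is assumed from the underlying `AnalysisBase`:
import MDAnalysis as mda
from maicos import DielectricPlanar  # top-level re-export assumed

u = mda.Universe('topol.tpr', 'traj.xtc')  # placeholder input files
water = u.select_atoms('resname SOL')      # placeholder selection

# sym=True symmetrizes all profiles in _conclude (see the symmetrize calls above)
ana = DielectricPlanar(water, temperature=300, bin_width=0.5, sym=True)
ana.run()
print(ana.results.eps_par)  # reduced parallel profile, eps_par(z) - 1
ana.save()  # writes the '<output_prefix>_perp' and '<output_prefix>_par' tables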
|
@render_docs
@charge_neutral(filter='error')
class DielectricPlanar(PlanarBase):
'''Planar dielectric profiles.
Computes the parallel :math:`\varepsilon_\parallel(z)` and inverse perpendicular
(:math:`\varepsilon_\perp^{-1}(z)`) components of the planar dielectric tensor
:math:`\varepsilon`. The components are binned along the cartesian :math:`z`
direction yielding the component normal to the surface and defined by the ``dim``
parameter.
For usage please refer to :ref:`How-to: Dielectric constant<howto-dielectric>` and
for details on the theory see :ref:`dielectric-explanations`.
For correlation analysis, the norm of the parallel total dipole moment is used.
${CORRELATION_INFO}
Also, please read and cite
:footcite:t:`schlaichWaterDielectricEffects2016` and Refs.
:footcite:p:`locheUniversalNonuniversalAspects2020`,
:footcite:p:`bonthuisProfileStaticPermittivity2012`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
vcutwidth : float
Spacing of virtual cuts (bins) along the parallel directions.
is_3d : bool
Use 3d-periodic boundary conditions, i.e., include the dipole correction for
the interaction between periodic images
:footcite:p:`sternCalculationDielectricPermittivity2003`.
${PLANAR_CLASS_PARAMETERS}
${SYM_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${PLANAR_CLASS_ATTRIBUTES}
results.eps_par : numpy.ndarray
Reduced parallel dielectric profile
:math:`(\varepsilon_\parallel(z) - 1)` of the selected AtomGroup
results.deps_par : numpy.ndarray
Uncertainty of parallel dielectric profile
results.eps_par_self : numpy.ndarray
Reduced self contribution of parallel dielectric profile
:math:`(\varepsilon_{\parallel,\mathrm{self}}(z) - 1)`
results.eps_par_coll : numpy.ndarray
Reduced collective contribution of parallel dielectric profile
:math:`(\varepsilon_{\parallel,\mathrm{coll}}(z) - 1)`
results.eps_perp : numpy.ndarray
Reduced inverse perpendicular dielectric profile
:math:`(\varepsilon^{-1}_\perp(z) - 1)`
results.deps_perp : numpy.ndarray
Uncertainty of inverse perpendicular dielectric profile
results.eps_perp_self : numpy.ndarray
Reduced self contribution of the inverse perpendicular dielectric
profile :math:`(\varepsilon^{-1}_{\perp,\mathrm{self}}(z) - 1)`
results.eps_perp_coll : numpy.ndarray
Reduced collective contribution of the inverse perpendicular dielectric profile
:math:`(\varepsilon^{-1}_{\perp,\mathrm{coll}}(z) - 1)`
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, vcutwidth: float=0.1, is_3d: bool=False, dim: int=2, zmin: float | None=None, zmax: float | None=None, bin_width: float=0.5, sym: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| total_program_units: 9
| total_doc_str: 2
| AvgCountLine: 56
| AvgCountLineBlank: 9
| AvgCountLineCode: 43
| AvgCountLineComment: 5
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.37
| CountClassBase: 1
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 10
| CountDeclMethod: 5
| CountDeclMethodAll: 23
| CountLine: 350
| CountLineBlank: 56
| CountLineCode: 215
| CountLineCodeDecl: 61
| CountLineCodeExe: 191
| CountLineComment: 79
| CountStmt: 116
| CountStmtDecl: 43
| CountStmtExe: 110
| MaxCyclomatic: 3
| MaxInheritanceTree: 3
| MaxNesting: 1
| SumCyclomatic: 9
|
328,296
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DielectricSpectrum.py
|
maicos.modules.DielectricSpectrum.DielectricSpectrum
|
from ..lib.util import bin, charge_neutral, citation_reminder, get_compound, render_docs
import MDAnalysis as mda
import numpy as np
import scipy.constants
import logging
from ..core import AnalysisBase
from ..lib.math import FT, iFT
from pathlib import Path
@render_docs
@charge_neutral(filter='error')
class DielectricSpectrum(AnalysisBase):
"""Linear dielectric spectrum.
This module, given a molecular dynamics trajectory, produces a `.txt` file
containing the complex dielectric function as a function of the (linear, not radial
- i.e., :math:`\\nu` or :math:`f`, rather than :math:`\\omega`) frequency, along with
the associated standard deviations. The algorithm is based on the Fluctuation
Dissipation Relation: :math:`\\chi(f) = -1/(3 V k_B T \\varepsilon_0)
\\mathcal{L}[\\theta(t) \\langle P(0) dP(t)/dt\\rangle]`, where :math:`\\mathcal{L}` is
the Laplace transformation.
.. note::
The polarization time series and the average system volume are also saved.
Please read and cite :footcite:p:`carlsonExploringAbsorptionSpectrum2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
segs : int
Sets the number of segments the trajectory is broken into.
df : float
The desired frequency spacing in THz. This determines the minimum frequency
for which there is data. Overrides the `segs` option.
bins : int
Determines the number of bins used for data averaging; (this parameter sets the
upper limit). The data are by default binned logarithmically. This helps to
reduce noise, particularly in the high-frequency domain, and also prevents plot
files from being too large.
binafter : int
The number of low-frequency data points that are left unbinned.
nobin : bool
Prevents the data from being binned altogether. This can result in very large
plot files and errors.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
results
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, segs: int=20, df: float | None=None, bins: int=200, binafter: float=20, nobin: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='') -> None:
self._locals = locals()
wrap_compound = get_compound(atomgroup)
super().__init__(atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, concfreq=concfreq, wrap_compound=wrap_compound, jitter=jitter)
self.temperature = temperature
self.output_prefix = output_prefix
self.segs = segs
self.df = df
self.bins = bins
self.binafter = binafter
self.nobin = nobin
def _prepare(self) -> None:
logging.info('Analysis of the linear dielectric spectrum.')
logging.info(citation_reminder('10.1021/acs.jpca.0c04063'))
if len(self.output_prefix) > 0:
self.output_prefix += '_'
self.dt = self._trajectory.dt * self.step
self.V = 0
self.P = np.zeros((self.n_frames, 3))
def _single_frame(self) -> None:
self.V += self._ts.volume
self.P[self._frame_index, :] = np.dot(self.atomgroup.charges, self.atomgroup.positions)
def _conclude(self) -> None:
self.results.t = self._trajectory.dt * self.frames
self.results.V = self.V / self._index
self.results.P = self.P
if self.df is not None:
self.segs = np.max([int(self.n_frames * self.dt * self.df), 2])
self.seglen = int(self.n_frames / self.segs)
pref = scipy.constants.e ** 2 * scipy.constants.angstrom ** 2
pref /= 3 * self.results.V * scipy.constants.angstrom ** 3
pref /= scipy.constants.k * self.temperature
pref /= scipy.constants.epsilon_0
logging.info('Calculating susceptibility and errors...')
if len(self.results.t) < 2 * self.seglen:
self.results.t = np.append(self.results.t, self.results.t + self.results.t[-1] + self.dt)
self.results.t = self.results.t[:2 * self.seglen]
self.results.nu = FT(self.results.t, np.append(self.results.P[:self.seglen, 0], np.zeros(self.seglen)))[0]
self.results.susc = np.zeros(self.seglen, dtype=complex)
self.results.dsusc = np.zeros(self.seglen, dtype=complex)
ss = np.zeros(2 * self.seglen, dtype=complex)
for s in range(0, self.segs):
logging.info(f'Segment {s + 1} of {self.segs}')
ss = 0 + 0j
for self._i in range(3):
FP: np.ndarray = FT(self.results.t, np.append(self.results.P[s * self.seglen:(s + 1) * self.seglen, self._i], np.zeros(self.seglen)), False)
ss += FP.real * FP.real + FP.imag * FP.imag
ss *= self.results.nu * 1j
ift: np.ndarray = iFT(self.results.t, 1j * np.sign(self.results.nu) * FT(self.results.nu, ss, False), False)
ss.real = ift.imag
if s == 0:
self.results.susc += ss[self.seglen:]
else:
ds = ss[self.seglen:] - self.results.susc / s
self.results.susc += ss[self.seglen:]
dif = ss[self.seglen:] - self.results.susc / (s + 1)
ds.real *= dif.real
ds.imag *= dif.imag
self.results.dsusc += ds
self.results.dsusc.real = np.sqrt(self.results.dsusc.real)
self.results.dsusc.imag = np.sqrt(self.results.dsusc.imag)
self.results.susc *= pref / (2 * self.seglen * self.segs * self.dt)
self.results.dsusc *= pref / (2 * self.seglen * self.segs * self.dt)
self.results.nu = self.results.nu[self.seglen:] / (2 * np.pi)
logging.info(f'Length of segments: {self.seglen} frames, {self.seglen * self.dt:.0f} ps')
logging.info(f'Frequency spacing: ~ {self.segs / (self.n_frames * self.dt):.5f} THz')
if not (self.nobin or self.seglen <= self.bins):
bins = np.logspace(np.log(self.binafter) / np.log(10), np.log(len(self.results.susc)) / np.log(10), self.bins - self.binafter + 1).astype(int)
bins = np.unique(np.append(np.arange(self.binafter), bins))[:-1]
self.results.nu_binned = bin(self.results.nu, bins)
self.results.susc_binned = bin(self.results.susc, bins)
self.results.dsusc_binned = bin(self.results.dsusc, bins)
logging.info(f'Binning data above datapoint {self.binafter} in log-spaced bins')
logging.info(f'Binned data consists of {len(self.results.susc_binned)} datapoints')
else:
logging.info(f'Not binning data: there are {len(self.results.susc)} datapoints')
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
np.save(self.output_prefix + 'tseries.npy', self.results.t)
with Path(self.output_prefix + 'V.txt').open(mode='w') as Vfile:
Vfile.write(str(self.results.V))
np.save(self.output_prefix + 'P_tseries.npy', self.results.P)
suscfilename = '{}{}'.format(self.output_prefix, 'susc.dat')
self.savetxt(suscfilename, np.transpose([self.results.nu, self.results.susc.real, self.results.dsusc.real, self.results.susc.imag, self.results.dsusc.imag]), columns=['ν [THz]', 'real(χ)', ' Δ real(χ)', 'imag(χ)', 'Δ imag(χ)'])
logging.info(f'Susceptibility data saved as {suscfilename}')
if not (self.nobin or self.seglen <= self.bins):
suscfilename = '{}{}'.format(self.output_prefix, 'susc_binned.dat')
self.savetxt(suscfilename, np.transpose([self.results.nu_binned, self.results.susc_binned.real, self.results.dsusc_binned.real, self.results.susc_binned.imag, self.results.dsusc_binned.imag]), columns=['ν [THz]', 'real(χ)', ' Δ real(χ)', 'imag(χ)', 'Δ imag(χ)'])
logging.info(f'Binned susceptibility data saved as {suscfilename}')
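A hedged usage sketch for DielectricSpectrum; inputs are placeholders and `run()` is assumed from `AnalysisBase`:
import MDAnalysis as mda
from maicos import DielectricSpectrum  # top-level re-export assumed

u = mda.Universe('topol.tpr', 'traj.xtc')  # placeholder input files
water = u.select_atoms('resname SOL')      # placeholder selection

# segs sets the number of trajectory segments; df (in THz), if given, overrides it
ana = DielectricSpectrum(water, temperature=300, segs=20, bins=200)
ana.run()
nu, susc = ana.results.nu, ana.results.susc  # frequencies (THz) and complex chi(nu)
ana.save()  # writes susc.dat plus the P(t) time series and volume side files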
|
@render_docs
@charge_neutral(filter='error')
class DielectricSpectrum(AnalysisBase):
'''Linear dielectric spectrum.
This module, given a molecular dynamics trajectory, produces a `.txt` file
containing the complex dielectric function as a function of the (linear, not radial
- i.e., :math:`\nu` or :math:`f`, rather than :math:`\omega`) frequency, along with
the associated standard deviations. The algorithm is based on the Fluctuation
Dissipation Relation: :math:`\chi(f) = -1/(3 V k_B T \varepsilon_0)
\mathcal{L}[\theta(t) \langle P(0) dP(t)/dt\rangle]`, where :math:`\mathcal{L}` is
the Laplace transformation.
.. note::
The polarization time series and the average system volume are also saved.
Please read and cite :footcite:p:`carlsonExploringAbsorptionSpectrum2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
segs : int
Sets the number of segments the trajectory is broken into.
df : float
The desired frequency spacing in THz. This determines the minimum frequency
for which there is data. Overrides the `segs` option.
bins : int
Determines the number of bins used for data averaging; (this parameter sets the
upper limit). The data are by default binned logarithmically. This helps to
reduce noise, particularly in the high-frequency domain, and also prevents plot
files from being too large.
binafter : int
The number of low-frequency data points that are left unbinned.
nobin : bool
Prevents the data from being binned altogether. This can result in very large
plot files and errors.
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
results
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, segs: int=20, df: float | None=None, bins: int=200, binafter: float=20, nobin: bool=False, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> None:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| total_program_units: 9
| total_doc_str: 2
| AvgCountLine: 43
| AvgCountLineBlank: 6
| AvgCountLineCode: 33
| AvgCountLineComment: 4
| AvgCyclomatic: 3
| CommentToCodeRatio: 0.35
| CountClassBase: 1
| CountClassCoupled: 8
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 14
| CountDeclMethod: 5
| CountDeclMethodAll: 17
| CountLine: 266
| CountLineBlank: 40
| CountLineCode: 167
| CountLineCodeDecl: 47
| CountLineCodeExe: 145
| CountLineComment: 59
| CountStmt: 87
| CountStmtDecl: 29
| CountStmtExe: 81
| MaxCyclomatic: 7
| MaxInheritanceTree: 2
| MaxNesting: 2
| SumCyclomatic: 13
|
328,297
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DielectricSphere.py
|
maicos.modules.DielectricSphere.DielectricSphere
|
from ..lib.util import charge_neutral, citation_reminder, get_compound, render_docs
import MDAnalysis as mda
from ..core import SphereBase
import logging
import scipy.constants
import numpy as np
@render_docs
@charge_neutral(filter='error')
class DielectricSphere(SphereBase):
"""Spherical dielectric profiles.
Computes the inverse radial :math:`\\varepsilon_r^{-1}(r)` component of the
spherical dielectric tensor :math:`\\varepsilon`. The center of the sphere is either
located at the center of the simulation box (default) or at the center of mass of
the ``refgroup``, if provided.
For usage, please refer to :ref:`How-to: Dielectric
constant<howto-dielectric>` and for details on the theory see
:ref:`dielectric-explanations`.
For correlation analysis, the radial (:math:`r`) component is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`schaafDielectricResponseWater2015`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
${SPHERE_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${RADIAL_CLASS_ATTRIBUTES}
results.eps_rad : numpy.ndarray
Reduced inverse radial dielectric profile (:math:`\\varepsilon^{-1}_r(r) - 1)`
results.deps_rad : numpy.ndarray
Uncertainty of inverse radial dielectric profile
"""
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_sph') -> None:
self._locals = locals()
self.comp = get_compound(atomgroup)
ix = atomgroup._get_compound_indices(self.comp)
_, self.inverse_ix = np.unique(ix, return_inverse=True)
if rmin != 0 or rmax is not None:
logging.warning('Setting `rmin` and `rmax` might cut off molecules. This will lead to severe artifacts in the dielectric profiles.')
super().__init__(atomgroup, concfreq=concfreq, jitter=jitter, refgroup=refgroup, rmin=rmin, rmax=rmax, bin_width=bin_width, unwrap=unwrap, pack=pack, wrap_compound=self.comp)
self.output_prefix = output_prefix
self.bin_width = bin_width
self.temperature = temperature
def _prepare(self) -> None:
logging.info('Analysis of the inverse radial component of the spherical dielectric tensor.')
logging.info(citation_reminder('10.1103/PhysRevE.92.032718'))
super()._prepare()
def _single_frame(self) -> float:
super()._single_frame()
rbins = np.digitize(self.pos_sph[:, 0], self._obs.bin_edges[1:-1])
curQ_rad = np.bincount(rbins[self.atomgroup.ix], weights=self.atomgroup.charges, minlength=self.n_bins)
self._obs.m_r = -np.cumsum(curQ_rad) / 4 / np.pi / self._obs.bin_pos ** 2
curQ_rad_tot = np.bincount(rbins, weights=self._universe.atoms.charges, minlength=self.n_bins)
self._obs.m_r_tot = -np.cumsum(curQ_rad_tot) / 4 / np.pi / self._obs.bin_pos ** 2
self._obs.M_r = np.sum(self._obs.m_r_tot * self._obs.bin_width)
self._obs.mM_r = self._obs.m_r * self._obs.M_r
return self._obs.M_r
def _conclude(self) -> None:
super()._conclude()
self._pref = 1 / scipy.constants.epsilon_0
self._pref /= scipy.constants.Boltzmann * self.temperature
self._pref /= scipy.constants.angstrom / scipy.constants.elementary_charge ** 2
cov_rad = self.means.mM_r - self.means.m_r * self.means.M_r
dcov_rad = np.sqrt(self.sems.mM_r ** 2 + self.sems.m_r ** 2 * self.means.M_r ** 2 + self.means.m_r ** 2 * self.sems.M_r ** 2)
self.results.eps_rad = 1 - 4 * np.pi * self.results.bin_pos ** 2 * self._pref * cov_rad
self.results.deps_rad = 4 * np.pi * self.results.bin_pos ** 2 * self._pref * dcov_rad
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_PREFIX_DESCRIPTION}"""
outdata_rad = np.array([self.results.bin_pos, self.results.eps_rad, self.results.deps_rad]).T
columns = ['positions [Å]', 'eps_rad - 1', 'eps_rad error']
self.savetxt('{}{}'.format(self.output_prefix, '_rad.dat'), outdata_rad, columns=columns)
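A hedged usage sketch for DielectricSphere; inputs are placeholders and `run()` is assumed from the `SphereBase`/`AnalysisBase` machinery:
import MDAnalysis as mda
from maicos import DielectricSphere  # top-level re-export assumed

u = mda.Universe('topol.tpr', 'traj.xtc')  # placeholder input files
droplet = u.select_atoms('resname SOL')    # placeholder selection

ana = DielectricSphere(droplet, temperature=300, bin_width=0.1)
ana.run()
ana.save()  # writes '<output_prefix>_rad.dat' with eps_rad - 1 and its error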
|
@render_docs
@charge_neutral(filter='error')
class DielectricSphere(SphereBase):
'''Spherical dielectric profiles.
Computes the inverse radial :math:`\varepsilon_r^{-1}(r)` component of the
spherical dielectric tensor :math:`\varepsilon`. The center of the sphere is either
located at the center of the simulation box (default) or at the center of mass of
the ``refgroup``, if provided.
For usage, please refer to :ref:`How-to: Dielectric
constant<howto-dielectric>` and for details on the theory see
:ref:`dielectric-explanations`.
For correlation analysis, the radial (:math:`r`) component is used.
${CORRELATION_INFO}
Also, please read and cite :footcite:p:`schaafDielectricResponseWater2015`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${TEMPERATURE_PARAMETER}
${SPHERE_CLASS_PARAMETERS}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PREFIX_PARAMETER}
Attributes
----------
${RADIAL_CLASS_ATTRIBUTES}
results.eps_rad : numpy.ndarray
Reduced inverse radial dielectric profile (:math:`\varepsilon^{-1}_r(r) - 1)`
results.deps_rad : numpy.ndarray
Uncertainty of inverse radial dielectric profile
'''
def __init__(self, atomgroup: mda.AtomGroup, temperature: float=300, rmin: float=0, rmax: float | None=None, bin_width: float=0.1, refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output_prefix: str='eps_sph') -> None:
pass
def _prepare(self) -> None:
pass
def _single_frame(self) -> float:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_PREFIX_DESCRIPTION}'''
pass
| total_program_units: 9
| total_doc_str: 2
| AvgCountLine: 23
| AvgCountLineBlank: 3
| AvgCountLineCode: 18
| AvgCountLineComment: 3
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.43
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 7
| CountDeclMethod: 5
| CountDeclMethodAll: 22
| CountLine: 157
| CountLineBlank: 28
| CountLineCode: 90
| CountLineCodeDecl: 35
| CountLineCodeExe: 70
| CountLineComment: 39
| CountStmt: 39
| CountStmtDecl: 21
| CountStmtExe: 33
| MaxCyclomatic: 2
| MaxInheritanceTree: 3
| MaxNesting: 1
| SumCyclomatic: 6
|
328,298
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DipoleAngle.py
|
maicos.modules.DipoleAngle.DipoleAngle
|
from ..lib.util import get_compound, render_docs, unit_vectors_planar
from ..lib.weights import diporder_weights
import logging
import MDAnalysis as mda
import numpy as np
from ..core import AnalysisBase
@render_docs
class DipoleAngle(AnalysisBase):
"""Angle timeseries of dipole moments with respect to an axis.
The analysis can be applied to study the orientational dynamics of water molecules
during an excitation pulse. For more details read
:footcite:t:`elgabartyEnergyTransferHydrogen2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PDIM_PLANAR_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.t : numpy.ndarray
time (ps).
results.cos_theta_i : numpy.ndarray
Average :math:`\\cos\\theta_i` between each dipole and the axis.
results.cos_theta_ii : numpy.ndarray
Average :math:`\\cos^2\\theta_i` between each dipole and the axis.
results.cos_theta_ij : numpy.ndarray
Average product :math:`\\cos\\theta_i \\cos\\theta_j` over distinct dipole pairs (``i != j``).
"""
def __init__(self, atomgroup: mda.AtomGroup, pdim: int=2, grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='dipangle.dat') -> None:
self._locals = locals()
self.wrap_compound = get_compound(atomgroup)
super().__init__(atomgroup, refgroup=refgroup, unwrap=unwrap, pack=pack, concfreq=concfreq, wrap_compound=self.wrap_compound, jitter=jitter)
self.grouping = grouping
self.pdim = pdim
self.output = output
def _prepare(self) -> None:
logging.info('Analysis of the dipole moment angles (timeseries).')
self.n_residues = self.atomgroup.residues.n_residues
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
return unit_vectors_planar(atomgroup=atomgroup, grouping=grouping, pdim=self.pdim)
self.get_unit_vectors = get_unit_vectors
self.cos_theta_i = np.empty(self.n_frames)
self.cos_theta_ii = np.empty(self.n_frames)
self.cos_theta_ij = np.empty(self.n_frames)
def _single_frame(self) -> None:
cos_theta = diporder_weights(self.atomgroup, grouping=self.grouping, order_parameter='cos_theta', get_unit_vectors=self.get_unit_vectors)
matrix = np.outer(cos_theta, cos_theta)
trace = matrix.trace()
self.cos_theta_i[self._frame_index] = cos_theta.mean()
self.cos_theta_ii[self._frame_index] = trace / self.n_residues
self.cos_theta_ij[self._frame_index] = matrix.sum() - trace
self.cos_theta_ij[self._frame_index] /= self.n_residues ** 2 - self.n_residues
def _conclude(self) -> None:
self.results.t = self.times
self.results.cos_theta_i = self.cos_theta_i[:self._index]
self.results.cos_theta_ii = self.cos_theta_ii[:self._index]
self.results.cos_theta_ij = self.cos_theta_ij[:self._index]
@render_docs
def save(self) -> None:
"""${SAVE_METHOD_DESCRIPTION}"""
self.savetxt(self.output, np.vstack([self.results.t, self.results.cos_theta_i, self.results.cos_theta_ii, self.results.cos_theta_ij]).T, columns=['t', '<cos(θ_i)>', '<cos(θ_i)cos(θ_i)>', '<cos(θ_i)cos(θ_j)>'])
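A hedged usage sketch for DipoleAngle; inputs are placeholders and `run()` is assumed from `AnalysisBase`:
import MDAnalysis as mda
from maicos import DipoleAngle  # top-level re-export assumed

u = mda.Universe('topol.tpr', 'traj.xtc')  # placeholder input files
water = u.select_atoms('resname SOL')      # placeholder selection

ana = DipoleAngle(water, pdim=2, grouping='residues')  # angles w.r.t. the z axis
ana.run()
ana.save()  # writes t, <cos(θ_i)>, <cos(θ_i)cos(θ_i)> and <cos(θ_i)cos(θ_j)> columns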
|
@render_docs
class DipoleAngle(AnalysisBase):
'''Angle timeseries of dipole moments with respect to an axis.
The analysis can be applied to study the orientational dynamics of water molecules
during an excitation pulse. For more details read
:footcite:t:`elgabartyEnergyTransferHydrogen2020`.
Parameters
----------
${ATOMGROUP_PARAMETER}
${PDIM_PLANAR_PARAMETER}
${GROUPING_PARAMETER}
${BASE_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
results.t : numpy.ndarray
time (ps).
results.cos_theta_i : numpy.ndarray
Average :math:`\cos\theta_i` between each dipole and the axis.
results.cos_theta_ii : numpy.ndarray
Average :math:`\cos^2\theta_i` between each dipole and the axis.
results.cos_theta_ij : numpy.ndarray
Average product :math:`\cos\theta_i \cos\theta_j` over distinct dipole pairs (``i != j``).
'''
def __init__(self, atomgroup: mda.AtomGroup, pdim: int=2, grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=False, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='dipangle.dat') -> None:
pass
def _prepare(self) -> None:
pass
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
pass
def _single_frame(self) -> None:
pass
def _conclude(self) -> None:
pass
@render_docs
def save(self) -> None:
'''${SAVE_METHOD_DESCRIPTION}'''
pass
| total_program_units: 9
| total_doc_str: 2
| AvgCountLine: 13
| AvgCountLineBlank: 1
| AvgCountLineCode: 12
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.33
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 11
| CountDeclMethod: 5
| CountDeclMethodAll: 17
| CountLine: 106
| CountLineBlank: 13
| CountLineCode: 70
| CountLineCodeDecl: 33
| CountLineCodeExe: 51
| CountLineComment: 23
| CountStmt: 32
| CountStmtDecl: 20
| CountStmtExe: 25
| MaxCyclomatic: 1
| MaxInheritanceTree: 2
| MaxNesting: 0
| SumCyclomatic: 6
|
328,299
|
maicos-devel/maicos
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/maicos-devel_maicos/src/maicos/modules/DiporderCylinder.py
|
maicos.modules.DiporderCylinder.DiporderCylinder
|
from ..lib.weights import diporder_weights
import MDAnalysis as mda
from ..core import ProfileCylinderBase
import logging
from ..lib.util import render_docs, unit_vectors_cylinder
@render_docs
class DiporderCylinder(ProfileCylinderBase):
"""Cylindrical dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PDIM_RADIAL_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
"""
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: str='r', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_cylinder.dat') -> None:
normalization = 'volume' if order_parameter == 'P0' else 'number'
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
return unit_vectors_cylinder(atomgroup=atomgroup, grouping=grouping, bin_method=bin_method, dim=dim, pdim=pdim)
super().__init__(atomgroup=atomgroup, unwrap=unwrap, pack=pack, refgroup=refgroup, jitter=jitter, concfreq=concfreq, dim=dim, zmin=zmin, zmax=zmax, bin_width=bin_width, rmin=rmin, rmax=rmax, grouping=grouping, bin_method=bin_method, output=output, weighting_function=diporder_weights, weighting_function_kwargs={'order_parameter': order_parameter, 'get_unit_vectors': get_unit_vectors}, normalization=normalization)
def _prepare(self):
logging.info('Analysis of the cylindrical dipolar order parameters.')
super()._prepare()
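A hedged usage sketch for DiporderCylinder; inputs are placeholders and `run()` is assumed from the `ProfileCylinderBase` machinery:
import MDAnalysis as mda
from maicos import DiporderCylinder  # top-level re-export assumed

u = mda.Universe('topol.tpr', 'traj.xtc')  # placeholder input files
water = u.select_atoms('resname SOL')      # placeholder selection

# order_parameter='P0' implies volume normalization (see __init__ above)
ana = DiporderCylinder(water, order_parameter='P0', pdim='r', bin_width=1.0)
ana.run()
ana.save()  # writes 'diporder_cylinder.dat' by default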
|
@render_docs
class DiporderCylinder(ProfileCylinderBase):
'''Cylindrical dipolar order parameters.
${DIPORDER_DESCRIPTION}
${CORRELATION_INFO_RADIAL}
Parameters
----------
${ATOMGROUP_PARAMETER}
${ORDER_PARAMETER_PARAMETER}
${PDIM_RADIAL_PARAMETER}
${PROFILE_CYLINDER_CLASS_PARAMETERS}
${OUTPUT_PARAMETER}
Attributes
----------
${PROFILE_CYLINDER_CLASS_ATTRIBUTES}
'''
def __init__(self, atomgroup: mda.AtomGroup, order_parameter: str='P0', pdim: str='r', dim: int=2, zmin: float | None=None, zmax: float | None=None, rmin: float=0, rmax: float | None=None, bin_width: float=1, bin_method: str='com', grouping: str='residues', refgroup: mda.AtomGroup | None=None, unwrap: bool=True, pack: bool=True, jitter: float=0.0, concfreq: int=0, output: str='diporder_cylinder.dat') -> None:
pass
def get_unit_vectors(atomgroup: mda.AtomGroup, grouping: str):
pass
def _prepare(self):
pass
| total_program_units: 5
| total_doc_str: 1
| AvgCountLine: 22
| AvgCountLineBlank: 1
| AvgCountLineCode: 21
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.25
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 0
| CountDeclMethod: 2
| CountDeclMethodAll: 36
| CountLine: 79
| CountLineBlank: 9
| CountLineCode: 56
| CountLineCodeDecl: 24
| CountLineCodeExe: 33
| CountLineComment: 14
| CountStmt: 9
| CountStmtDecl: 5
| CountStmtExe: 5
| MaxCyclomatic: 2
| MaxInheritanceTree: 5
| MaxNesting: 0
| SumCyclomatic: 4
|