Dataset schema. Each row describes one Python class extracted from a repository: identifying fields, the class source (`human_written_code`), its `class_skeleton`, and per-class code metrics. Ranges are the observed minimum and maximum values reported by the viewer; `class_skeleton` may be null (⌀).

| Column | Type | Observed range |
|---|---|---|
| id | int64 | 0 to 328k |
| repository_name | string | length 7 to 58 |
| file_path | string | length 9 to 302 |
| class_name | string | length 5 to 256 |
| human_written_code | string | length 16 to 2.16M |
| class_skeleton | string (nullable) | length 18 to 1.49M |
| total_program_units | int64 | 1 to 1.76k |
| total_doc_str | int64 | 0 to 771 |
| AvgCountLine | float64 | 0 to 7.89k |
| AvgCountLineBlank | float64 | 0 to 297 |
| AvgCountLineCode | float64 | 0 to 7.89k |
| AvgCountLineComment | float64 | 0 to 7.89k |
| AvgCyclomatic | float64 | 0 to 130 |
| CommentToCodeRatio | float64 | 0 to 168 |
| CountClassBase | float64 | 0 to 40 |
| CountClassCoupled | float64 | 0 to 583 |
| CountClassCoupledModified | float64 | 0 to 575 |
| CountClassDerived | float64 | 0 to 5.35k |
| CountDeclInstanceMethod | float64 | 0 to 529 |
| CountDeclInstanceVariable | float64 | 0 to 296 |
| CountDeclMethod | float64 | 0 to 599 |
| CountDeclMethodAll | float64 | 0 to 1.12k |
| CountLine | float64 | 1 to 40.4k |
| CountLineBlank | float64 | 0 to 8.16k |
| CountLineCode | float64 | 1 to 25.7k |
| CountLineCodeDecl | float64 | 1 to 8.15k |
| CountLineCodeExe | float64 | 0 to 24.2k |
| CountLineComment | float64 | 0 to 16.5k |
| CountStmt | float64 | 1 to 9.71k |
| CountStmtDecl | float64 | 1 to 8.15k |
| CountStmtExe | float64 | 0 to 9.69k |
| MaxCyclomatic | float64 | 0 to 759 |
| MaxInheritanceTree | float64 | 0 to 16 |
| MaxNesting | float64 | 0 to 34 |
| SumCyclomatic | float64 | 0 to 2.9k |

The sample rows below are rendered one record at a time: row id, repository_name, file_path, and class_name first, then the two code fields, then the metric columns as name=value pairs.
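To make the schema concrete, here is a minimal sketch of loading and filtering rows by these columns with pandas. It is illustrative only: the dataset's published name and storage format are not stated above, so the local file name `class_level_dataset.parquet` is a hypothetical export of these rows.

```python
# Illustrative sketch, not the dataset's official loading code.
# Assumes the rows above were exported to a local Parquet file; the file name is hypothetical.
import pandas as pd

df = pd.read_parquet("class_level_dataset.parquet")

# Select small, documented classes from one repository using the metric columns
# described in the schema table above.
subset = df[
    (df["repository_name"] == "flux0-ai/flux0")
    & (df["total_doc_str"] > 0)
    & (df["CountLineCode"] <= 50)
]

# Compare a human-written class with its skeleton for one row.
row = subset.iloc[0]
print(row["class_name"])
print(row["human_written_code"])
print()
print(row["class_skeleton"])
```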
**Row 327,800** · repository_name: `flux0-ai/flux0` · class_name: `flux0_cli.main.Flux0CLIContext`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/cli/src/flux0_cli/main.py`

human_written_code:
from flux0_client import Flux0Client
class Flux0CLIContext:
"""Typed context object for Click CLI."""
def __init__(self) -> None:
self.client: Flux0Client = Flux0Client()
class_skeleton:
class Flux0CLIContext:
'''Typed context object for Click CLI.'''
def __init__(self) -> None:
pass
Metrics: total_program_units=2, total_doc_str=1, AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.33, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=1, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=5, CountLineBlank=1, CountLineCode=3, CountLineCodeDecl=3, CountLineCodeExe=1, CountLineComment=1, CountStmt=3, CountStmtDecl=3, CountStmtExe=1, MaxCyclomatic=1, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=1
**Row 327,801** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.agents.Agent`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/agents.py`

human_written_code:
from typing import List, NewType, Optional, Sequence, TypedDict
from dataclasses import dataclass
from datetime import datetime
@dataclass(frozen=True)
class Agent:
id: AgentId
type: AgentType
name: str
description: Optional[str]
created_at: datetime
class_skeleton:
@dataclass(frozen=True)
class Agent:
pass
Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=6, CountLineBlank=0, CountLineCode=6, CountLineCodeDecl=1, CountLineCodeExe=5, CountLineComment=0, CountStmt=6, CountStmtDecl=1, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0
**Row 327,802** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.agents.AgentStore`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/agents.py`

human_written_code:
from abc import ABC, abstractmethod
from typing import List, NewType, Optional, Sequence, TypedDict
from datetime import datetime
class AgentStore(ABC):
@abstractmethod
async def create_agent(self, name: str, type: AgentType, description: Optional[str]=None, created_at: Optional[datetime]=None) -> Agent:
...
@abstractmethod
async def list_agents(self, offset: int=0, limit: int=10, projection: Optional[List[str]]=None) -> Sequence[Agent]:
...
@abstractmethod
async def read_agent(self, agent_id: AgentId) -> Optional[Agent]:
...
@abstractmethod
async def update_agent(self, agent_id: AgentId, params: AgentUpdateParams) -> Agent:
...
@abstractmethod
async def delete_agent(self, agent_id: AgentId) -> bool:
...
class_skeleton:
class AgentStore(ABC):
@abstractmethod
async def create_agent(self, name: str, type: AgentType, description: Optional[str]=None, created_at: Optional[datetime]=None) -> Agent:
pass
@abstractmethod
async def list_agents(self, offset: int=0, limit: int=10, projection: Optional[List[str]]=None) -> Sequence[Agent]:
pass
@abstractmethod
async def read_agent(self, agent_id: AgentId) -> Optional[Agent]:
pass
@abstractmethod
async def update_agent(self, agent_id: AgentId, params: AgentUpdateParams) -> Agent:
pass
@abstractmethod
async def delete_agent(self, agent_id: AgentId) -> bool:
pass
Metrics: total_program_units=11, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=1, CountDeclInstanceMethod=5, CountDeclInstanceVariable=0, CountDeclMethod=5, CountDeclMethodAll=25, CountLine=36, CountLineBlank=4, CountLineCode=32, CountLineCodeDecl=32, CountLineCodeExe=5, CountLineComment=0, CountStmt=11, CountStmtDecl=6, CountStmtExe=5, MaxCyclomatic=1, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=5
**Row 327,803** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.agents.AgentUpdateParams`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/agents.py`

human_written_code:
from typing import List, NewType, Optional, Sequence, TypedDict
class AgentUpdateParams(TypedDict, total=False):
name: str
description: Optional[str]
class_skeleton:
class AgentUpdateParams(TypedDict, total=False):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=1, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=1, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,804** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.async_utils.RWLock`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/async_utils.py`

human_written_code:
from contextlib import asynccontextmanager
from typing import AsyncContextManager, AsyncIterator
import aiorwlock
class RWLock:
def __init__(self) -> None:
"""
Initializes a new instance of the ReaderWriterLock class.
The constructor creates an underlying `aiorwlock.RWLock` instance and
sets up the reader and writer locks.
"""
_lock = aiorwlock.RWLock()
self._reader_lock = _lock.reader
self._writer_lock = _lock.writer
@property
def reader_lock(self) -> AsyncContextManager[None]:
"""
Provides an asynchronous context manager for acquiring the reader lock.
This lock allows multiple readers to access the shared resource simultaneously.
Writers are blocked while any reader holds the lock.
Returns:
AsyncContextManager[None]: An asynchronous context manager for the reader lock.
Example:
```python
async with lock.reader_lock:
# Read from the shared resource
```
"""
@asynccontextmanager
async def _reader_acm() -> AsyncIterator[None]:
async with self._reader_lock:
yield
return _reader_acm()
@property
def writer_lock(self) -> AsyncContextManager[None]:
"""
Provides an asynchronous context manager for acquiring the writer lock.
This lock ensures exclusive access to the shared resource for writing.
No readers or other writers are allowed while the writer lock is held.
Returns:
AsyncContextManager[None]: An asynchronous context manager for the writer lock.
Example:
```python
async with lock.writer_lock:
# Write to the shared resource
```
"""
@asynccontextmanager
async def _writer_acm() -> AsyncIterator[None]:
async with self._writer_lock:
yield
return _writer_acm()
class_skeleton:
class RWLock:
def __init__(self) -> None:
'''
Initializes a new instance of the ReaderWriterLock class.
The constructor creates an underlying `aiorwlock.RWLock` instance and
sets up the reader and writer locks.
'''
pass
@property
def reader_lock(self) -> AsyncContextManager[None]:
'''
Provides an asynchronous context manager for acquiring the reader lock.
This lock allows multiple readers to access the shared resource simultaneously.
Writers are blocked while any reader holds the lock.
Returns:
AsyncContextManager[None]: An asynchronous context manager for the reader lock.
Example:
```python
async with lock.reader_lock:
# Read from the shared resource
```
'''
pass
@asynccontextmanager
async def _reader_acm() -> AsyncIterator[None]:
pass
@property
def writer_lock(self) -> AsyncContextManager[None]:
'''
Provides an asynchronous context manager for acquiring the writer lock.
This lock ensures exclusive access to the shared resource for writing.
No readers or other writers are allowed while the writer lock is held.
Returns:
AsyncContextManager[None]: An asynchronous context manager for the writer lock.
Example:
```python
async with lock.writer_lock:
# Write to the shared resource
```
'''
pass
@asynccontextmanager
async def _writer_acm() -> AsyncIterator[None]:
pass
Metrics: total_program_units=10, total_doc_str=3, AvgCountLine=13, AvgCountLineBlank=2, AvgCountLineCode=4, AvgCountLineComment=6, AvgCyclomatic=1, CommentToCodeRatio=1.53, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=2, CountDeclMethod=3, CountDeclMethodAll=3, CountLine=62, CountLineBlank=14, CountLineCode=19, CountLineCodeDecl=13, CountLineCodeExe=9, CountLineComment=29, CountStmt=15, CountStmtDecl=9, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=5
**Row 327,805** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.background_tasks_service.BackgroundTaskService`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/background_tasks_service.py`

human_written_code:
from flux0_core.logging import Logger
import traceback
from typing import Any, Coroutine, Dict, Optional, Self, TypeAlias
import asyncio
class BackgroundTaskService:
"""A service for managing background tasks."""
def __init__(self, logger: Logger) -> None:
self._logger: Logger = logger
self._last_garbage_collection: float = 0.0
self._garbage_collection_interval: float = 5.0
self._tasks: Dict[str, Task] = {}
self._lock = asyncio.Lock()
async def __aenter__(self) -> Self:
self._collector_task = asyncio.create_task(self._run_collector())
return self
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], tb: Optional[Any]) -> bool:
if exc_value:
await self.cancel_all(reason='Shutting down due to exception')
self._logger.info(f'{type(self).__name__}: Exiting context, cleaning up tasks')
await self.collect(force=True)
return False
async def cancel(self, *, tag: str, reason: str='(not set)') -> None:
"""
Cancels the task identified by the given tag.
"""
async with self._lock:
task = self._tasks.get(tag)
if task and (not task.done()):
task.cancel(f'Forced cancellation by {type(self).__name__} [reason: {reason}]')
self._logger.info(f"{type(self).__name__}: Cancelled task '{tag}'")
await self.collect()
async def cancel_all(self, *, reason: str='(not set)') -> None:
"""
Cancels all running tasks.
"""
async with self._lock:
self._logger.info(f'{type(self).__name__}: Cancelling all remaining tasks ({len(self._tasks)}) [reason: {reason}]')
for tag, task in self._tasks.items():
if not task.done():
task.cancel(f'Forced cancellation by {type(self).__name__} [reason: {reason}]')
await self.collect()
async def start(self, coro: Coroutine[Any, Any, None], /, *, tag: str) -> Task:
"""
Starts a new background task using the provided coroutine and associates it with the given tag.
Raises an exception if a task with the same tag is already running.
"""
await self.collect()
async with self._lock:
if tag in self._tasks and (not self._tasks[tag].done()):
raise Exception(f"Task '{tag}' is already running; consider calling restart() instead")
self._logger.info(f"{type(self).__name__}: Starting task '{tag}'")
task: Task = asyncio.create_task(coro)
self._tasks[tag] = task
return task
async def restart(self, coro: Coroutine[Any, Any, None], /, *, tag: str) -> Task:
"""
Restarts the background task identified by the given tag.
If an existing task is running, it will be canceled and awaited before starting a new one.
"""
await self.collect()
async with self._lock:
if tag in self._tasks:
existing_task = self._tasks[tag]
if not existing_task.done():
existing_task.cancel(f"Restarting task '{tag}'")
await self._await_task(existing_task)
self._logger.info(f"{type(self).__name__}: Restarting task '{tag}'")
task: Task = asyncio.create_task(coro)
self._tasks[tag] = task
return task
async def collect(self, *, force: bool=False) -> None:
"""
Cleans up finished tasks from the internal registry.
If 'force' is True, it waits for all tasks to finish before cleanup.
"""
now: float = asyncio.get_running_loop().time()
if not force and now - self._last_garbage_collection < self._garbage_collection_interval:
return
async with self._lock:
new_tasks: Dict[str, Task] = {}
for tag, task in self._tasks.items():
if task.done() or force:
if not task.done():
self._logger.info(f"{type(self).__name__}: Waiting for task '{tag}' to finish before cleanup")
await self._await_task(task)
else:
new_tasks[tag] = task
self._tasks = new_tasks
self._last_garbage_collection = now
async def _await_task(self, task: Task) -> None:
"""
Awaits the task and logs exceptions if they occur.
"""
try:
await task
except asyncio.CancelledError:
pass
except Exception as exc:
self._logger.warning(f"{type(self).__name__}: Task raised an exception: {''.join(traceback.format_exception_only(type(exc), exc)).strip()}")
async def _run_collector(self) -> None:
try:
while True:
await asyncio.sleep(self._garbage_collection_interval)
await self.collect()
except asyncio.CancelledError:
pass
class_skeleton:
class BackgroundTaskService:
'''A service for managing background tasks.'''
def __init__(self, logger: Logger) -> None:
pass
async def __aenter__(self) -> Self:
pass
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], tb: Optional[Any]) -> bool:
pass
async def cancel(self, *, tag: str, reason: str='(not set)') -> None:
'''
Cancels the task identified by the given tag.
'''
pass
async def cancel_all(self, *, reason: str='(not set)') -> None:
'''
Cancels all running tasks.
'''
pass
async def start(self, coro: Coroutine[Any, Any, None], /, *, tag: str) -> Task:
'''
Starts a new background task using the provided coroutine and associates it with the given tag.
Raises an exception if a task with the same tag is already running.
'''
pass
async def restart(self, coro: Coroutine[Any, Any, None], /, *, tag: str) -> Task:
'''
Restarts the background task identified by the given tag.
If an existing task is running, it will be canceled and awaited before starting a new one.
'''
pass
async def collect(self, *, force: bool=False) -> None:
'''
Cleans up finished tasks from the internal registry.
If 'force' is True, it waits for all tasks to finish before cleanup.
'''
pass
async def _await_task(self, task: Task) -> None:
'''
Awaits the task and logs exceptions if they occur.
'''
pass
async def _run_collector(self) -> None:
pass
Metrics: total_program_units=11, total_doc_str=7, AvgCountLine=12, AvgCountLineBlank=0, AvgCountLineCode=9, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.26, CountClassBase=0, CountClassCoupled=8, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=10, CountDeclInstanceVariable=6, CountDeclMethod=10, CountDeclMethodAll=10, CountLine=127, CountLineBlank=11, CountLineCode=93, CountLineCodeDecl=31, CountLineCodeExe=77, CountLineComment=24, CountStmt=79, CountStmtDecl=25, CountStmtExe=68, MaxCyclomatic=5, MaxInheritanceTree=0, MaxNesting=4, SumCyclomatic=25
**Row 327,806** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.contextual_correlator.ContextualCorrelator`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/contextual_correlator.py`

human_written_code:
import contextvars
from flux0_core.ids import gen_id
from contextlib import contextmanager
from typing import Iterator
class ContextualCorrelator:
def __init__(self, delimiter: str='::', default: str='<main>') -> None:
self._instance_id: str = gen_id()
self._scopes: contextvars.ContextVar[str] = contextvars.ContextVar(f'correlator_{self._instance_id}_scopes', default='')
self._delimiter: str = delimiter
self._default: str = default
@contextmanager
def scope(self, scope_id: str) -> Iterator[None]:
"""
Enter a new correlation scope.
Each new scope is appended to the current one using the delimiter.
When the context exits, the previous scope is automatically restored.
"""
current: str = self._scopes.get()
new_scope: str = f'{current}{self._delimiter}{scope_id}' if current else scope_id
token = self._scopes.set(new_scope)
try:
yield
finally:
self._scopes.reset(token)
@property
def correlation_id(self) -> str:
"""
Return the current overall correlation string.
If no scope is active, returns the default value.
"""
current: str = self._scopes.get()
return current if current else self._default
class_skeleton:
class ContextualCorrelator:
def __init__(self, delimiter: str='::', default: str='<main>') -> None:
pass
@contextmanager
def scope(self, scope_id: str) -> Iterator[None]:
'''
Enter a new correlation scope.
Each new scope is appended to the current one using the delimiter.
When the context exits, the previous scope is automatically restored.
'''
pass
@property
def correlation_id(self) -> str:
'''
Return the current overall correlation string.
If no scope is active, returns the default value.
'''
pass
Metrics: total_program_units=6, total_doc_str=2, AvgCountLine=9, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=0.43, CountClassBase=0, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=4, CountDeclMethod=3, CountDeclMethodAll=3, CountLine=33, CountLineBlank=3, CountLineCode=21, CountLineCodeDecl=14, CountLineCodeExe=15, CountLineComment=9, CountStmt=16, CountStmtDecl=12, CountStmtExe=12, MaxCyclomatic=2, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=5
**Row 327,807** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.logging.ContextualLogger`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/logging.py`

human_written_code:
import structlog
import asyncio
from flux0_core.contextual_correlator import ContextualCorrelator
import time
import contextvars
from structlog.types import EventDict
from structlog.typing import Processor
from flux0_core.ids import gen_id
from typing import Any, Dict, Iterator, Optional, override
import logging
from contextlib import contextmanager
class ContextualLogger(Logger):
"""
A structured logger with support for correlation and scopes.
Correlation links related logs across systems that are part of the same context (e.g., HTTP Request), while scope provides finer-grained tracking within a process."
What is Corrleation?
Correlation refers to a unique identifier (correlation ID) that links related logs together across different services.
Useful for tracking a request end-to-end across microservices, threads, or async tasks.
Typically, a correlation ID remains the same across multiple logs that are part of the same "request" (e.g., HTTP request), "session" or "transaction."
Example of Correlation:
When a user makes an HTTP request, we generate a correlation ID and attach it to all logs where multiple services
handle this request should log with the same correlation ID.
{
"timestamp": "2025-02-26T10:41:37.917Z",
"level": "info",
"event": "User login request received",
"correlation_id": "request-12345"
}
What is scope?
Scope is more granular and refers to a specific context within a process.
Scopes allow to track sub-operations within a correlated request.
Each scope is added on top of the existing correlation ID, making logs more informative.
Example of Scope:
A user logs in (correlation_id: request-12345):
{
"timestamp": "2025-02-26T10:42:10.917Z",
"level": "info",
"event": "Querying user data",
"correlation_id": "request-12345",
"scope": "[UserLogin]"
}
How Are They Used Together?
(1) A user logs in → Generate a correlation ID
with correlator.scope("UserLogin"):
logger.info("User login request received")
log output:
{
...
"correlation_id": "request-12345",
"scope": "[UserLogin]"
}
(2) Inside the login request, we fetch user details → Add a scope
with logger.scope("FetchUser"):
logger.info("Fetching user details from DB")
log output:
{
...
"correlation_id": "request-12345",
"scope": "[UserLogin][FetchUser]"
}
(3) Validate credentials → Add a scope
with logger.scope("ValidateCredentials"):
logger.info("Validating user credentials")
log output:
{
...
"correlation_id": "request-12345",
"scope": "[UserLogin][FetchUser][ValidateCredentials]"
}
"""
def __init__(self, correlator: ContextualCorrelator, level: LogLevel=LogLevel.DEBUG, logger_id: str | None=None, renderer: Optional[Processor]=None) -> None:
self.correlator = correlator
self._instance_id = gen_id()
self._scopes: contextvars.ContextVar[str] = contextvars.ContextVar(f'logger_{self._instance_id}_scopes', default='')
self.raw_logger = logging.getLogger(logger_id or 'flux0')
self.raw_logger.setLevel(level.logging_level())
def add_context_fields(_: 'ContextualLogger', __: str, event_dict: EventDict) -> EventDict:
"""
Processor to inject correlation_id and scope_id into the structured log.
Since structlog does not pass our StructuredLogger instance, we retrieve the values from context.
"""
event_dict['correlation'] = self.correlator.correlation_id
if self._scopes.get():
event_dict['scope'] = self._scopes.get()
return event_dict
renderer = renderer if renderer is not None else structlog.processors.JSONRenderer()
self._logger = structlog.wrap_logger(self.raw_logger, processors=[structlog.processors.TimeStamper(fmt='iso'), structlog.stdlib.add_log_level, structlog.stdlib.filter_by_level, add_context_fields, structlog.stdlib.PositionalArgumentsFormatter(), structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, renderer], wrapper_class=structlog.make_filtering_bound_logger(logging.DEBUG))
@override
def set_level(self, level: LogLevel) -> None:
self.raw_logger.setLevel(level.logging_level())
def debug(self, message: str, *args: Any, **kwargs: Any) -> None:
self._logger.debug(self._format_message(message), *args, **kwargs)
def info(self, message: str, *args: Any, **kwargs: Any) -> None:
self._logger.info(self._format_message(message), *args, **kwargs)
def warning(self, message: str, *args: Any, **kwargs: Any) -> None:
self._logger.warning(self._format_message(message), *args, **kwargs)
def error(self, message: str, *args: Any, **kwargs: Any) -> None:
self._logger.error(self._format_message(message), *args, **kwargs)
def critical(self, message: str, *args: Any, **kwargs: Any) -> None:
self._logger.critical(self._format_message(message), *args, **kwargs)
@contextmanager
def scope(self, scope_id: str) -> Iterator[None]:
"""
Extend the logging scope with an additional identifier.
"""
current_scope = self._scopes.get()
new_scope = f'{current_scope}[{scope_id}]' if current_scope else f'[{scope_id}]'
token = self._scopes.set(new_scope)
try:
yield
finally:
self._scopes.reset(token)
@property
def current_scope(self) -> str:
return self._scopes.get() if self._scopes.get() else ''
@contextmanager
def operation(self, name: str, props: Optional[Dict[str, Any]]=None) -> Iterator[None]:
"""
Context manager that logs the beginning and end of an operation,
including timing and error handling.
"""
props = props or {}
start_time = time.time()
self.info(f'{name} started', **props)
try:
yield
elapsed = time.time() - start_time
self.info(f'{name} completed in {elapsed:.3f}s', **props)
except asyncio.CancelledError:
elapsed = time.time() - start_time
self.warning(f'{name} cancelled after {elapsed:.3f}s', **props)
raise
except Exception:
self.error(f'{name} failed', **props)
self.error('Exception details', exc_info=True)
raise
def _format_message(self, message: str) -> str:
"""
Prepend the current correlation id and scope information to the message.
"""
correlation = self.correlator.correlation_id
return f'[{correlation}]{self.current_scope} {message}'
class_skeleton:
class ContextualLogger(Logger):
'''
A structured logger with support for correlation and scopes.
Correlation links related logs across systems that are part of the same context (e.g., HTTP Request), while scope provides finer-grained tracking within a process."
What is Corrleation?
Correlation refers to a unique identifier (correlation ID) that links related logs together across different services.
Useful for tracking a request end-to-end across microservices, threads, or async tasks.
Typically, a correlation ID remains the same across multiple logs that are part of the same "request" (e.g., HTTP request), "session" or "transaction."
Example of Correlation:
When a user makes an HTTP request, we generate a correlation ID and attach it to all logs where multiple services
handle this request should log with the same correlation ID.
{
"timestamp": "2025-02-26T10:41:37.917Z",
"level": "info",
"event": "User login request received",
"correlation_id": "request-12345"
}
What is scope?
Scope is more granular and refers to a specific context within a process.
Scopes allow to track sub-operations within a correlated request.
Each scope is added on top of the existing correlation ID, making logs more informative.
Example of Scope:
A user logs in (correlation_id: request-12345):
{
"timestamp": "2025-02-26T10:42:10.917Z",
"level": "info",
"event": "Querying user data",
"correlation_id": "request-12345",
"scope": "[UserLogin]"
}
How Are They Used Together?
(1) A user logs in → Generate a correlation ID
with correlator.scope("UserLogin"):
logger.info("User login request received")
log output:
{
...
"correlation_id": "request-12345",
"scope": "[UserLogin]"
}
(2) Inside the login request, we fetch user details → Add a scope
with logger.scope("FetchUser"):
logger.info("Fetching user details from DB")
log output:
{
...
"correlation_id": "request-12345",
"scope": "[UserLogin][FetchUser]"
}
(3) Validate credentials → Add a scope
with logger.scope("ValidateCredentials"):
logger.info("Validating user credentials")
log output:
{
...
"correlation_id": "request-12345",
"scope": "[UserLogin][FetchUser][ValidateCredentials]"
}
'''
def __init__(self, correlator: ContextualCorrelator, level: LogLevel=LogLevel.DEBUG, logger_id: str | None=None, renderer: Optional[Processor]=None) -> None:
pass
def add_context_fields(_: 'ContextualLogger', __: str, event_dict: EventDict) -> EventDict:
'''
Processor to inject correlation_id and scope_id into the structured log.
Since structlog does not pass our StructuredLogger instance, we retrieve the values from context.
'''
pass
@override
def set_level(self, level: LogLevel) -> None:
pass
def debug(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
def info(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
def warning(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
def error(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
def critical(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
@contextmanager
def scope(self, scope_id: str) -> Iterator[None]:
'''
Extend the logging scope with an additional identifier.
'''
pass
@property
def current_scope(self) -> str:
pass
@contextmanager
def operation(self, name: str, props: Optional[Dict[str, Any]]=None) -> Iterator[None]:
'''
Context manager that logs the beginning and end of an operation,
including timing and error handling.
'''
pass
def _format_message(self, message: str) -> str:
'''
Prepend the current correlation id and scope information to the message.
'''
pass
Metrics: total_program_units=17, total_doc_str=5, AvgCountLine=9, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.94, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=1, CountDeclInstanceMethod=11, CountDeclInstanceVariable=5, CountDeclMethod=11, CountDeclMethodAll=39, CountLine=177, CountLineBlank=22, CountLineCode=80, CountLineCodeDecl=34, CountLineCodeExe=57, CountLineComment=75, CountStmt=54, CountStmtDecl=24, CountStmtExe=41, MaxCyclomatic=3, MaxInheritanceTree=5, MaxNesting=1, SumCyclomatic=18
**Row 327,808** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.logging.LogLevel`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/logging.py`

human_written_code:
import logging
from enum import IntEnum, auto
class LogLevel(IntEnum):
DEBUG = auto()
INFO = auto()
WARNING = auto()
ERROR = auto()
CRITICAL = auto()
def logging_level(self) -> int:
return {LogLevel.DEBUG: logging.DEBUG, LogLevel.INFO: logging.INFO, LogLevel.WARNING: logging.WARNING, LogLevel.ERROR: logging.ERROR, LogLevel.CRITICAL: logging.CRITICAL}[self]
class_skeleton:
class LogLevel(IntEnum):
def logging_level(self) -> int:
pass
Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=8, AvgCountLineBlank=0, AvgCountLineCode=8, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=56, CountLine=15, CountLineBlank=1, CountLineCode=14, CountLineCodeDecl=7, CountLineCodeExe=12, CountLineComment=0, CountStmt=8, CountStmtDecl=7, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=3, MaxNesting=0, SumCyclomatic=1
**Row 327,809** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.logging.Logger`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/logging.py`

human_written_code:
from contextlib import contextmanager
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterator, Optional, override
class Logger(ABC):
"""
Logging interface for structured logs with support for scopes and operation measurement.
"""
@abstractmethod
def set_level(self, level: LogLevel) -> None:
"""Set the logging level."""
...
@abstractmethod
def debug(self, message: str, *args: Any, **kwargs: Any) -> None:
...
@abstractmethod
def info(self, message: str, *args: Any, **kwargs: Any) -> None:
...
@abstractmethod
def warning(self, message: str, *args: Any, **kwargs: Any) -> None:
...
@abstractmethod
def error(self, message: str, *args: Any, **kwargs: Any) -> None:
...
@abstractmethod
def critical(self, message: str, *args: Any, **kwargs: Any) -> None:
...
@abstractmethod
@contextmanager
def scope(self, scope_id: str) -> Iterator[None]:
"""
Create a new logging scope. Any log issued within this context
will include the provided scope id.
"""
...
@abstractmethod
@contextmanager
def operation(self, name: str, props: Optional[Dict[str, Any]]=None) -> Iterator[None]:
"""
Context manager that logs the start and end of an operation.
Measures execution time and handles cancellations/exceptions.
"""
...
class_skeleton:
class Logger(ABC):
'''
Logging interface for structured logs with support for scopes and operation measurement.
'''
@abstractmethod
def set_level(self, level: LogLevel) -> None:
'''Set the logging level.'''
pass
@abstractmethod
def debug(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
@abstractmethod
def info(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
@abstractmethod
def warning(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
@abstractmethod
def error(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
@abstractmethod
def critical(self, message: str, *args: Any, **kwargs: Any) -> None:
pass
@abstractmethod
@contextmanager
def scope(self, scope_id: str) -> Iterator[None]:
'''
Create a new logging scope. Any log issued within this context
will include the provided scope id.
'''
pass
@abstractmethod
@contextmanager
def operation(self, name: str, props: Optional[Dict[str, Any]]=None) -> Iterator[None]:
'''
Context manager that logs the start and end of an operation.
Measures execution time and handles cancellations/exceptions.
'''
pass
Metrics: total_program_units=19, total_doc_str=4, AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=1, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.55, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=1, CountDeclInstanceMethod=8, CountDeclInstanceVariable=0, CountDeclMethod=8, CountDeclMethodAll=28, CountLine=42, CountLineBlank=8, CountLineCode=22, CountLineCodeDecl=17, CountLineCodeExe=8, CountLineComment=12, CountStmt=17, CountStmtDecl=9, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=8
**Row 327,810** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.logging.StdoutLogger`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/logging.py`

human_written_code:
import logging
from flux0_core.contextual_correlator import ContextualCorrelator
import structlog
class StdoutLogger(ContextualLogger):
def __init__(self, correlator: ContextualCorrelator, log_level: LogLevel=LogLevel.DEBUG, logger_id: str | None=None, json: bool=True) -> None:
super().__init__(correlator, log_level, logger_id, structlog.dev.ConsoleRenderer(colors=True) if not json else None)
self.raw_logger.addHandler(logging.StreamHandler())
class_skeleton:
class StdoutLogger(ContextualLogger):
def __init__(self, correlator: ContextualCorrelator, log_level: LogLevel=LogLevel.DEBUG, logger_id: str | None=None, json: bool=True) -> None:
pass
Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=14, AvgCountLineBlank=0, AvgCountLineCode=14, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=40, CountLine=15, CountLineBlank=0, CountLineCode=15, CountLineCodeDecl=8, CountLineCodeExe=7, CountLineComment=0, CountStmt=4, CountStmtDecl=2, CountStmtExe=2, MaxCyclomatic=2, MaxInheritanceTree=6, MaxNesting=0, SumCyclomatic=2
**Row 327,811** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.recordings.RecordedChunkPayload`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/recordings.py`

human_written_code:
from flux0_core.sessions import EventId, EventSource, EventType, MessageEventData, SessionId, StatusEventData, ToolEventData
from flux0_stream.types import JsonPatchOperation
from flux0_core.types import JSONSerializable
from typing import Literal, Mapping, NewType, Optional, Sequence, Tuple, TypedDict, Union
class RecordedChunkPayload(TypedDict):
"""Captured JSON Patch 'chunk' as sent to the client."""
correlation_id: str
event_id: EventId
seq: int
patches: list[JsonPatchOperation]
metadata: Mapping[str, JSONSerializable]
class_skeleton:
class RecordedChunkPayload(TypedDict):
'''Captured JSON Patch 'chunk' as sent to the client.'''
pass
Metrics: total_program_units=1, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.17, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=8, CountLineBlank=1, CountLineCode=6, CountLineCodeDecl=1, CountLineCodeExe=5, CountLineComment=1, CountStmt=6, CountStmtDecl=1, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,812** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.recordings.RecordedEmittedPayload`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/recordings.py`

human_written_code:
from flux0_core.types import JSONSerializable
from flux0_core.sessions import EventId, EventSource, EventType, MessageEventData, SessionId, StatusEventData, ToolEventData
from typing import Literal, Mapping, NewType, Optional, Sequence, Tuple, TypedDict, Union
class RecordedEmittedPayload(TypedDict):
"""Captured non-chunk event (status/message/tool/custom) as sent to the client."""
id: EventId
source: EventSource
type: EventType
correlation_id: str
data: Union[MessageEventData, StatusEventData, ToolEventData]
metadata: Optional[Mapping[str, JSONSerializable]]
class_skeleton:
class RecordedEmittedPayload(TypedDict):
'''Captured non-chunk event (status/message/tool/custom) as sent to the client.'''
pass
Metrics: total_program_units=1, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.43, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=9, CountLineBlank=1, CountLineCode=7, CountLineCodeDecl=1, CountLineCodeExe=6, CountLineComment=3, CountStmt=7, CountStmtDecl=1, CountStmtExe=6, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,813** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.recordings.RecordedEvent`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/recordings.py`

human_written_code:
from datetime import datetime
from dataclasses import dataclass
@dataclass(frozen=True)
class RecordedEvent:
"""
One persisted event from a recorded stream.
Invariants:
• Unique (recording_id, offset)
• Header frame: kind == "header" and offset == 0 (exactly one per recording)
• All other frames: offset >= 1
• created_at is the backend arrival timestamp used for pacing during replay
"""
id: RecordedEventId
recording_id: RecordingId
offset: int
kind: RecordedEventKind
created_at: datetime
payload: RecordedEventPayload
class_skeleton:
@dataclass(frozen=True)
class RecordedEvent:
'''
One persisted event from a recorded stream.
Invariants:
• Unique (recording_id, offset)
• Header frame: kind == "header" and offset == 0 (exactly one per recording)
• All other frames: offset >= 1
• created_at is the backend arrival timestamp used for pacing during replay
'''
pass
Metrics: total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=2, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=17, CountLineBlank=2, CountLineCode=7, CountLineCodeDecl=1, CountLineCodeExe=6, CountLineComment=14, CountStmt=7, CountStmtDecl=1, CountStmtExe=6, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0
**Row 327,814** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.recordings.RecordedHeaderPayload`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/recordings.py`

human_written_code:
from flux0_core.sessions import EventId, EventSource, EventType, MessageEventData, SessionId, StatusEventData, ToolEventData
from typing import Literal, Mapping, NewType, Optional, Sequence, Tuple, TypedDict, Union
class RecordedHeaderPayload(TypedDict):
"""
Sentinel header frame for a recording.
Exactly one per recording at offset == 0.
"""
source_session_id: SessionId
class_skeleton:
class RecordedHeaderPayload(TypedDict):
'''
Sentinel header frame for a recording.
Exactly one per recording at offset == 0.
'''
pass
Metrics: total_program_units=1, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=2, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=7, CountLineBlank=1, CountLineCode=2, CountLineCodeDecl=1, CountLineCodeExe=1, CountLineComment=4, CountStmt=2, CountStmtDecl=1, CountStmtExe=1, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,815** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.recordings.RecordingStore`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/recordings.py`

human_written_code:
from flux0_core.sessions import EventId, EventSource, EventType, MessageEventData, SessionId, StatusEventData, ToolEventData
from abc import ABC, abstractmethod
from typing import Literal, Mapping, NewType, Optional, Sequence, Tuple, TypedDict, Union
from datetime import datetime
class RecordingStore(ABC):
"""
Minimal store interface for recording + interactive replay.
Storage model:
- Single normalized collection/table of RecordedEvent documents.
- Exactly one header per recording at offset == 0.
- All stream frames have offset >= 1.
- Offsets are assigned atomically, strictly increasing per recording_id.
Concurrency:
- Implementations MUST assign offsets with a per-recording atomic counter.
DB:
- Unique: { recording_id: 1, offset: 1 }
- Partial unique (headers): (recording_id) with filter { kind: "header", offset: 0 }
- Lookup by session: payload.source_session_id (partial filter { kind: "header" })
- Should support efficient querying by recording_id and offset.
"""
@abstractmethod
async def create_recording(self, source_session_id: SessionId, *, recording_id: Optional[RecordingId]=None, created_at: Optional[datetime]=None) -> RecordedEvent:
"""
Create the header frame (kind='header', offset=0) for a new recording.
Returns:
The persisted header event.
Must:
- Generate a RecordingId if not provided
- Enforce: exactly one header per recording with offset == 0
"""
@abstractmethod
async def append_emitted(self, recording_id: RecordingId, payload: RecordedEmittedPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
"""
Append an 'emitted' event (status/message/tool/custom) to the recording.
Returns:
The persisted emitted event.
Must:
- Atomically assign the next monotonic offset (>= 1) per recording_id
"""
@abstractmethod
async def append_chunk(self, recording_id: RecordingId, payload: RecordedChunkPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
"""
Append a 'chunk' frame (JSON Patch) to the recording.
Must:
- Same guarantees as append_emitted()
"""
@abstractmethod
async def read_header_by_source_session_id(self, source_session_id: SessionId) -> Optional[RecordedEvent]:
"""
Fetch the header (offset == 0, kind='header') for the recording whose
payload.source_session_id == source_session_id.
Returns:
- The header RecordedEvent if exactly one exists.
- None if not found.
Must:
- avoid having multiple recordings with the same source_session_id.
"""
...
@abstractmethod
async def read_header_by_recording_id(self, recording_id: RecordingId) -> Optional[RecordedEvent]:
"""
Fetch the header frame (offset == 0) for a recording.
Returns None if not found.
"""
@abstractmethod
async def read_next_turn_range_after_offset(self, recording_id: RecordingId, after_offset: int) -> Optional[TurnRange]:
"""
Return the next USER-anchored turn that begins strictly AFTER `after_offset`.
Semantics:
• A turn STARTS at the next 'emitted' frame whose payload.source == 'user'.
• A turn ENDS just before the following such user frame.
• If there is no following user frame, end_offset_exclusive = None (stream to end).
• Header (offset 0) is never part of a turn.
Returns:
• (start_offset_inclusive, end_offset_exclusive) or None if no subsequent user turn.
Notes:
• Pass `after_offset=0` for the very first lookup (header is at 0).
• If you later want to narrow anchors to only user *messages*, you can
implement the predicate as:
kind == 'emitted' AND payload.source == 'user' AND payload.type == 'message'
For v1, source=='user' is sufficient given your recording policy.
"""
@abstractmethod
async def read_frames_range(self, recording_id: RecordingId, start_offset_inclusive: int, end_offset_exclusive: Optional[int]=None, *, limit: Optional[int]=None) -> Sequence[RecordedEvent]:
"""
Read frames ordered by offset for [start_offset_inclusive, end_offset_exclusive).
If end_offset_exclusive is None, read to the end (respect `limit` if provided).
Must:
• Return frames sorted by offset ascending.
• Exclude the header unless start_offset_inclusive == 0 (normally you'll pass >= 1).
• Return <= limit items when 'limit' is provided.
"""
class_skeleton:
class RecordingStore(ABC):
'''
Minimal store interface for recording + interactive replay.
Storage model:
- Single normalized collection/table of RecordedEvent documents.
- Exactly one header per recording at offset == 0.
- All stream frames have offset >= 1.
- Offsets are assigned atomically, strictly increasing per recording_id.
Concurrency:
- Implementations MUST assign offsets with a per-recording atomic counter.
DB:
- Unique: { recording_id: 1, offset: 1 }
- Partial unique (headers): (recording_id) with filter { kind: "header", offset: 0 }
- Lookup by session: payload.source_session_id (partial filter { kind: "header" })
- Should support efficient querying by recording_id and offset.
'''
@abstractmethod
async def create_recording(self, source_session_id: SessionId, *, recording_id: Optional[RecordingId]=None, created_at: Optional[datetime]=None) -> RecordedEvent:
'''
Create the header frame (kind='header', offset=0) for a new recording.
Returns:
The persisted header event.
Must:
- Generate a RecordingId if not provided
- Enforce: exactly one header per recording with offset == 0
'''
pass
@abstractmethod
async def append_emitted(self, recording_id: RecordingId, payload: RecordedEmittedPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
'''
Append an 'emitted' event (status/message/tool/custom) to the recording.
Returns:
The persisted emitted event.
Must:
- Atomically assign the next monotonic offset (>= 1) per recording_id
'''
pass
@abstractmethod
async def append_chunk(self, recording_id: RecordingId, payload: RecordedChunkPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
'''
Append a 'chunk' frame (JSON Patch) to the recording.
Must:
- Same guarantees as append_emitted()
'''
pass
@abstractmethod
async def read_header_by_source_session_id(self, source_session_id: SessionId) -> Optional[RecordedEvent]:
'''
Fetch the header (offset == 0, kind='header') for the recording whose
payload.source_session_id == source_session_id.
Returns:
- The header RecordedEvent if exactly one exists.
- None if not found.
Must:
- avoid having multiple recordings with the same source_session_id.
'''
pass
@abstractmethod
async def read_header_by_recording_id(self, recording_id: RecordingId) -> Optional[RecordedEvent]:
'''
Fetch the header frame (offset == 0) for a recording.
Returns None if not found.
'''
pass
@abstractmethod
async def read_next_turn_range_after_offset(self, recording_id: RecordingId, after_offset: int) -> Optional[TurnRange]:
'''
Return the next USER-anchored turn that begins strictly AFTER `after_offset`.
Semantics:
• A turn STARTS at the next 'emitted' frame whose payload.source == 'user'.
• A turn ENDS just before the following such user frame.
• If there is no following user frame, end_offset_exclusive = None (stream to end).
• Header (offset 0) is never part of a turn.
Returns:
• (start_offset_inclusive, end_offset_exclusive) or None if no subsequent user turn.
Notes:
• Pass `after_offset=0` for the very first lookup (header is at 0).
• If you later want to narrow anchors to only user *messages*, you can
implement the predicate as:
kind == 'emitted' AND payload.source == 'user' AND payload.type == 'message'
For v1, source=='user' is sufficient given your recording policy.
'''
pass
@abstractmethod
async def read_frames_range(self, recording_id: RecordingId, start_offset_inclusive: int, end_offset_exclusive: Optional[int]=None, *, limit: Optional[int]=None) -> Sequence[RecordedEvent]:
'''
Read frames ordered by offset for [start_offset_inclusive, end_offset_exclusive).
If end_offset_exclusive is None, read to the end (respect `limit` if provided).
Must:
• Return frames sorted by offset ascending.
• Exclude the header unless start_offset_inclusive == 0 (normally you'll pass >= 1).
• Return <= limit items when 'limit' is provided.
'''
pass
Metrics: total_program_units=15, total_doc_str=8, AvgCountLine=16, AvgCountLineBlank=2, AvgCountLineCode=6, AvgCountLineComment=8, AvgCyclomatic=1, CommentToCodeRatio=1.48, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=3, CountClassDerived=1, CountDeclInstanceMethod=7, CountDeclInstanceVariable=0, CountDeclMethod=7, CountDeclMethodAll=27, CountLine=146, CountLineBlank=22, CountLineCode=50, CountLineCodeDecl=49, CountLineCodeExe=1, CountLineComment=74, CountStmt=9, CountStmtDecl=8, CountStmtExe=1, MaxCyclomatic=1, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=7
**Row 327,816** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.ContentPart`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from flux0_core.types import JSONSerializable
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
class ContentPart(TypedDict):
type: Literal['content']
content: JSONSerializable
class_skeleton:
class ContentPart(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=1, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=1, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,817** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.ControlOptions`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
class ControlOptions(TypedDict, total=False):
mode: SessionMode
class_skeleton:
class ControlOptions(TypedDict, total=False):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=2, CountLineBlank=0, CountLineCode=2, CountLineCodeDecl=1, CountLineCodeExe=1, CountLineComment=0, CountStmt=2, CountStmtDecl=1, CountStmtExe=1, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,818** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.Event`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from dataclasses import dataclass
from flux0_core.types import JSONSerializable
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from datetime import datetime
@dataclass(frozen=True)
class Event:
id: EventId
source: EventSource
type: EventType
offset: int
correlation_id: str
data: Union[MessageEventData, StatusEventData, ToolEventData]
deleted: bool
created_at: datetime
metadata: Optional[Mapping[str, JSONSerializable]]
class_skeleton:
@dataclass(frozen=True)
class Event:
pass
Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=10, CountLineBlank=0, CountLineCode=10, CountLineCodeDecl=1, CountLineCodeExe=9, CountLineComment=0, CountStmt=10, CountStmtDecl=1, CountStmtExe=9, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0
**Row 327,819** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.MessageEventData`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
class MessageEventData(TypedDict):
type: Literal['message']
participant: Participant
flagged: NotRequired[bool]
tags: NotRequired[list[str]]
parts: List[Union[ContentPart, ReasoningPart, ToolCallPart]]
class_skeleton:
class MessageEventData(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=6, CountLineBlank=0, CountLineCode=6, CountLineCodeDecl=1, CountLineCodeExe=5, CountLineComment=0, CountStmt=6, CountStmtDecl=1, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,820** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.Participant`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from flux0_core.users import UserId
from flux0_core.agents import AgentId
class Participant(TypedDict):
id: NotRequired[AgentId | UserId | None]
name: str
class_skeleton:
class Participant(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=1, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=1, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,821** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.ReasoningPart`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from flux0_core.types import JSONSerializable
class ReasoningPart(TypedDict):
type: Literal['reasoning']
reasoning: JSONSerializable
class_skeleton:
class ReasoningPart(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=1, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=1, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,822** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.Session`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from dataclasses import dataclass
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from flux0_core.users import UserId
from flux0_core.agents import AgentId
from datetime import datetime
from flux0_core.types import JSONSerializable
@dataclass(frozen=True)
class Session:
id: SessionId
agent_id: AgentId
user_id: UserId
mode: SessionMode
title: Optional[str]
consumption_offsets: Mapping[ConsumerId, int]
created_at: datetime
metadata: Optional[Mapping[str, JSONSerializable]]
class_skeleton:
@dataclass(frozen=True)
class Session:
pass
Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=9, CountLineBlank=0, CountLineCode=9, CountLineCodeDecl=1, CountLineCodeExe=8, CountLineComment=0, CountStmt=9, CountStmtDecl=1, CountStmtExe=8, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0
**Row 327,823** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.SessionStore`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from flux0_core.types import JSONSerializable
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from flux0_core.agents import AgentId
from datetime import datetime
from flux0_core.users import UserId
from abc import ABC, abstractmethod
class SessionStore(ABC):
@abstractmethod
async def create_session(self, user_id: UserId, agent_id: AgentId, id: Optional[SessionId]=None, mode: Optional[SessionMode]=None, title: Optional[str]=None, metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Session:
...
@abstractmethod
async def read_session(self, session_id: SessionId) -> Optional[Session]:
...
@abstractmethod
async def delete_session(self, session_id: SessionId) -> bool:
...
@abstractmethod
async def update_session(self, session_id: SessionId, params: SessionUpdateParams) -> Session:
...
@abstractmethod
async def list_sessions(self, agent_id: Optional[AgentId]=None, user_id: Optional[UserId]=None) -> Sequence[Session]:
...
@abstractmethod
async def create_event(self, session_id: SessionId, source: EventSource, type: EventType, correlation_id: str, data: Union[MessageEventData, StatusEventData, ToolEventData], metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Event:
...
@abstractmethod
async def read_event(self, session_id: SessionId, event_id: EventId) -> Optional[Event]:
...
@abstractmethod
async def delete_event(self, event_id: EventId) -> bool:
...
@abstractmethod
async def list_events(self, session_id: SessionId, source: Optional[EventSource]=None, correlation_id: Optional[str]=None, types: Sequence[EventType]=[], min_offset: Optional[int]=None, exclude_deleted: bool=True) -> Sequence[Event]:
...
class_skeleton:
class SessionStore(ABC):
@abstractmethod
async def create_session(self, user_id: UserId, agent_id: AgentId, id: Optional[SessionId]=None, mode: Optional[SessionMode]=None, title: Optional[str]=None, metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Session:
pass
@abstractmethod
async def read_session(self, session_id: SessionId) -> Optional[Session]:
pass
@abstractmethod
async def delete_session(self, session_id: SessionId) -> bool:
pass
@abstractmethod
async def update_session(self, session_id: SessionId, params: SessionUpdateParams) -> Session:
pass
@abstractmethod
async def list_sessions(self, agent_id: Optional[AgentId]=None, user_id: Optional[UserId]=None) -> Sequence[Session]:
pass
@abstractmethod
async def create_event(self, session_id: SessionId, source: EventSource, type: EventType, correlation_id: str, data: Union[MessageEventData, StatusEventData, ToolEventData], metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Event:
pass
@abstractmethod
async def read_event(self, session_id: SessionId, event_id: EventId) -> Optional[Event]:
pass
@abstractmethod
async def delete_event(self, event_id: EventId) -> bool:
pass
@abstractmethod
async def list_events(self, session_id: SessionId, source: Optional[EventSource]=None, correlation_id: Optional[str]=None, types: Sequence[EventType]=[], min_offset: Optional[int]=None, exclude_deleted: bool=True) -> Sequence[Event]:
pass
Metrics: total_program_units=19, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=6, CountClassDerived=1, CountDeclInstanceMethod=9, CountDeclInstanceVariable=0, CountDeclMethod=9, CountDeclMethodAll=29, CountLine=74, CountLineBlank=8, CountLineCode=66, CountLineCodeDecl=66, CountLineCodeExe=9, CountLineComment=0, CountStmt=19, CountStmtDecl=10, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=9
**Row 327,824** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.SessionUpdateParams`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from flux0_core.agents import AgentId
from flux0_core.types import JSONSerializable
from flux0_core.users import UserId
class SessionUpdateParams(TypedDict, total=False):
user_id: UserId
agent_id: AgentId
mode: SessionMode
title: Optional[str]
consumption_offsets: Mapping[ConsumerId, int]
metadata: Optional[Mapping[str, JSONSerializable]]
class_skeleton:
class SessionUpdateParams(TypedDict, total=False):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=7, CountLineBlank=0, CountLineCode=7, CountLineCodeDecl=1, CountLineCodeExe=6, CountLineComment=0, CountStmt=7, CountStmtDecl=1, CountStmtExe=6, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,825** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.StatusEventData`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from flux0_core.types import JSONSerializable
class StatusEventData(TypedDict):
type: Literal['status']
acknowledged_offset: NotRequired[int]
status: SessionStatus
data: NotRequired[JSONSerializable]
class_skeleton:
class StatusEventData(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=5, CountLineBlank=0, CountLineCode=5, CountLineCodeDecl=1, CountLineCodeExe=4, CountLineComment=0, CountStmt=5, CountStmtDecl=1, CountStmtExe=4, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,826** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.ToolCall`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
from flux0_core.types import JSONSerializable
class ToolCall(TypedDict):
tool_call_id: str
tool_name: str
args: Mapping[str, JSONSerializable]
result: NotRequired[ToolResult]
error: NotRequired[str]
class_skeleton:
class ToolCall(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.83, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=6, CountLineBlank=0, CountLineCode=6, CountLineCodeDecl=1, CountLineCodeExe=5, CountLineComment=5, CountStmt=6, CountStmtDecl=1, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,827** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.ToolCallPart`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from flux0_core.types import JSONSerializable
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
class ToolCallPart(TypedDict):
type: ToolCallPartType
tool_call_id: str
tool_name: str
args: JSONSerializable
class_skeleton:
class ToolCallPart(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.2, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=5, CountLineBlank=0, CountLineCode=5, CountLineCodeDecl=1, CountLineCodeExe=4, CountLineComment=1, CountStmt=5, CountStmtDecl=1, CountStmtExe=4, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,828** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.ToolEventData`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
class ToolEventData(TypedDict):
type: ToolCallResultType
tool_calls: list[ToolCall]
class_skeleton:
class ToolEventData(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=1, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=1, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,829** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.sessions.ToolResult`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/sessions.py`

human_written_code:
from flux0_core.types import JSONSerializable
from typing import List, Literal, Mapping, NewType, NotRequired, Optional, Sequence, TypeAlias, TypedDict, Union
class ToolResult(TypedDict):
data: JSONSerializable
metadata: Mapping[str, JSONSerializable]
control: ControlOptions
class_skeleton:
class ToolResult(TypedDict):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=4, CountLineBlank=0, CountLineCode=4, CountLineCodeDecl=1, CountLineCodeExe=3, CountLineComment=0, CountStmt=4, CountStmtDecl=1, CountStmtExe=3, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
**Row 327,830** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.users.User`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/users.py`

human_written_code:
from dataclasses import dataclass
from typing import NewType, Optional, TypedDict
from datetime import datetime
@dataclass(frozen=True)
class User:
id: UserId
sub: str
name: str
email: Optional[str]
created_at: datetime
class_skeleton:
@dataclass(frozen=True)
class User:
pass
Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=6, CountLineBlank=0, CountLineCode=6, CountLineCodeDecl=1, CountLineCodeExe=5, CountLineComment=0, CountStmt=6, CountStmtDecl=1, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0
**Row 327,831** · repository_name: `flux0-ai/flux0` · class_name: `flux0_core.users.UserStore`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/users.py`

human_written_code:
from abc import ABC, abstractmethod
from datetime import datetime
from typing import NewType, Optional, TypedDict
class UserStore(ABC):
@abstractmethod
async def create_user(self, sub: str, name: str, email: Optional[str]=None, created_at: Optional[datetime]=None) -> User:
...
@abstractmethod
async def read_user(self, user_id: UserId) -> Optional[User]:
...
@abstractmethod
async def read_user_by_sub(self, sub: str) -> Optional[User]:
...
@abstractmethod
async def update_user(self, user_id: UserId, params: UserUpdateParams) -> User:
...
class_skeleton:
class UserStore(ABC):
@abstractmethod
async def create_user(self, sub: str, name: str, email: Optional[str]=None, created_at: Optional[datetime]=None) -> User:
pass
@abstractmethod
async def read_user(self, user_id: UserId) -> Optional[User]:
pass
@abstractmethod
async def read_user_by_sub(self, sub: str) -> Optional[User]:
pass
@abstractmethod
async def update_user(self, user_id: UserId, params: UserUpdateParams) -> User:
pass
| 9
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 4
| 2
| 1
| 4
| 0
| 4
| 24
| 28
| 3
| 25
| 25
| 4
| 0
| 9
| 5
| 4
| 1
| 4
| 0
| 4
|
327,832
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/users.py
|
flux0_core.users.UserUpdateParams
|
from typing import NewType, Optional, TypedDict
class UserUpdateParams(TypedDict, total=False):
name: str
|
class UserUpdateParams(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 1
| 0
| 0
|
327,833
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/api.py
|
flux0_nanodb.api.DocumentCollection
|
from typing import Generic, List, Mapping, Optional, Sequence, Tuple, Type
from flux0_nanodb.projection import Projection
from flux0_nanodb.types import DeleteResult, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from flux0_nanodb.query import QueryFilter
from abc import ABC, abstractmethod
class DocumentCollection(ABC, Generic[TDocument]):
@abstractmethod
async def find(self, filters: Optional[QueryFilter], projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
"""
Find all documents that match the optional filters.
Optionally apply a projection, pagination, and sorting.
- `sort` is a list of tuples where:
- The first element is the field name.
        - The second element is the sorting order (`SortingOrder.ASC` for ascending or `SortingOrder.DESC` for descending).
Sorting is applied after filtering but before pagination.
"""
pass
@abstractmethod
async def insert_one(self, document: TDocument) -> InsertOneResult:
"""
Insert a single document into the collection.
"""
pass
@abstractmethod
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
"""
Apply a JSON Patch (RFC 6902) to a single document that matches the provided filters.
If upsert is True and no document matches, insert a new document.
Parameters:
filters (QueryFilter): Query to match a document.
patch (List[JSONPatchOperation]): JSON Patch operations in a type-safe structured format.
upsert (bool): If True, insert a new document if no match is found.
Returns:
UpdateOneResult: Metadata about the update operation.
"""
pass
@abstractmethod
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
"""
Delete the first document that matches the provided filters.
"""
pass
|
class DocumentCollection(ABC, Generic[TDocument]):
@abstractmethod
async def find(self, filters: Optional[QueryFilter], projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
'''
Find all documents that match the optional filters.
Optionally apply a projection, pagination, and sorting.
- `sort` is a list of tuples where:
- The first element is the field name.
        - The second element is the sorting order (`SortingOrder.ASC` for ascending or `SortingOrder.DESC` for descending).
Sorting is applied after filtering but before pagination.
'''
pass
@abstractmethod
async def insert_one(self, document: TDocument) -> InsertOneResult:
'''
Insert a single document into the collection.
'''
pass
@abstractmethod
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
'''
Apply a JSON Patch (RFC 6902) to a single document that matches the provided filters.
If upsert is True and no document matches, insert a new document.
Parameters:
filters (QueryFilter): Query to match a document.
patch (List[JSONPatchOperation]): JSON Patch operations in a type-safe structured format.
upsert (bool): If True, insert a new document if no match is found.
Returns:
UpdateOneResult: Metadata about the update operation.
'''
pass
@abstractmethod
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
'''
Delete the first document that matches the provided filters.
'''
pass
| 9
| 4
| 11
| 1
| 4
| 6
| 1
| 1.09
| 2
| 8
| 5
| 3
| 4
| 0
| 4
| 26
| 53
| 7
| 22
| 18
| 4
| 24
| 9
| 5
| 4
| 1
| 4
| 0
| 4
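A minimal sketch of the argument shapes that the DocumentCollection docstrings above describe; the field names and literal values are illustrative assumptions, while the imported types come from the flux0_nanodb records elsewhere in this file.

from flux0_nanodb.projection import Projection
from flux0_nanodb.query import Comparison
from flux0_nanodb.types import JSONPatchOperation, ReplaceOp, SortingOrder

# A comparison filter, as accepted by find(), update_one(), and delete_one().
filters = Comparison(path="name", op="$eq", value="alice")

# Sorting: (field, SortingOrder) tuples, applied after filtering and before pagination.
sort = [("created_at", SortingOrder.DESC), ("name", SortingOrder.ASC)]

# Projection: include or exclude fields by name.
projection = {"id": Projection.INCLUDE, "name": Projection.INCLUDE}

# A JSON Patch (RFC 6902) for update_one(), built from the typed operation dicts.
patch: list[JSONPatchOperation] = [ReplaceOp(op="replace", path="/name", value="bob")]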
|
327,834
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/api.py
|
flux0_nanodb.api.DocumentDatabase
|
from abc import ABC, abstractmethod
from flux0_nanodb.types import DeleteResult, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from typing import Generic, List, Mapping, Optional, Sequence, Tuple, Type
class DocumentDatabase(ABC):
@abstractmethod
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
"""
Create a new collection with the given name and document schema.
"""
pass
@abstractmethod
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
"""
Retrieve an existing collection by its name and document schema.
"""
pass
@abstractmethod
async def delete_collection(self, name: str) -> None:
"""
Delete a collection by its name.
"""
pass
|
class DocumentDatabase(ABC):
@abstractmethod
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
'''
Create a new collection with the given name and document schema.
'''
pass
@abstractmethod
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
'''
Retrieve an existing collection by its name and document schema.
'''
pass
@abstractmethod
async def delete_collection(self, name: str) -> None:
'''
Delete a collection by its name.
'''
pass
| 7
| 3
| 6
| 0
| 3
| 3
| 1
| 0.64
| 1
| 2
| 1
| 3
| 3
| 0
| 3
| 23
| 25
| 2
| 14
| 11
| 3
| 9
| 7
| 4
| 3
| 1
| 4
| 0
| 3
|
327,835
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/json.py
|
flux0_nanodb.json.JsonDocumentCollection
|
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
from pathlib import Path
from flux0_nanodb.query import QueryFilter, matches_query
from flux0_nanodb.projection import Projection, apply_projection
from typing import Any, List, Mapping, Optional, Protocol, Sequence, Tuple, Type, cast
import jsonpatch
import json
from flux0_nanodb.types import DeleteResult, DocumentID, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from flux0_core.async_utils import RWLock
from flux0_nanodb.common import convert_patch, validate_is_total
class JsonDocumentCollection(DocumentCollection[TDocument]):
def __init__(self, name: str, schema: Type[TDocument], data_dir: Path) -> None:
self._name = name
self._schema = schema
self._data_dir = data_dir
self._file_path = data_dir / f'{name}.json'
self._lock = RWLock()
self._data_dir.mkdir(parents=True, exist_ok=True)
if not self._file_path.exists():
self._save_documents([])
def _load_documents(self) -> List[TDocument]:
"""Load documents from the JSON file."""
try:
with open(self._file_path, 'r', encoding='utf-8') as f:
data = json.load(f, object_hook=_json_object_hook)
return cast(List[TDocument], data)
except (FileNotFoundError, json.JSONDecodeError):
return []
def _save_documents(self, documents: List[TDocument]) -> None:
"""Save documents to the JSON file."""
with open(self._file_path, 'w', encoding='utf-8') as f:
json.dump(documents, f, indent=2, ensure_ascii=False, default=_json_default)
async def find(self, filters: Optional[QueryFilter]=None, projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
async with self._lock.reader_lock:
documents = self._load_documents()
docs: Sequence[TDocument] = []
if filters is None:
docs = documents
else:
docs = [doc for doc in documents if matches_query(filters, doc)]
if sort is not None:
for field, order in reversed(sort):
docs.sort(key=lambda doc: cast(Comparable, doc.get(field, None)), reverse=order == SortingOrder.DESC)
if projection:
docs = [cast(TDocument, apply_projection(doc, projection)) for doc in docs]
if offset is not None:
if offset < 0:
raise ValueError('Offset must be non-negative')
docs = docs[offset:]
if limit is not None:
if limit < 0:
raise ValueError('Limit must be non-negative')
docs = docs[:limit]
return docs
async def insert_one(self, document: TDocument) -> InsertOneResult:
validate_is_total(document, self._schema)
inserted_id: Optional[DocumentID] = document.get('id')
if inserted_id is None:
raise ValueError("Document is missing an 'id' field")
async with self._lock.writer_lock:
documents = self._load_documents()
documents.append(document)
self._save_documents(documents)
return InsertOneResult(acknowledged=True, inserted_id=inserted_id)
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
standard_patch = convert_patch(patch)
async with self._lock.writer_lock:
documents = self._load_documents()
for i, doc in enumerate(documents):
if matches_query(filters, doc):
try:
updated_doc = jsonpatch.apply_patch(doc, standard_patch, in_place=False)
except jsonpatch.JsonPatchException as e:
raise ValueError('Invalid JSON patch') from e
documents[i] = cast(TDocument, updated_doc)
self._save_documents(documents)
return UpdateOneResult(acknowledged=True, matched_count=1, modified_count=1, upserted_id=None)
if upsert:
try:
new_doc = jsonpatch.apply_patch({}, standard_patch, in_place=False)
except jsonpatch.JsonPatchException as e:
raise ValueError('Invalid JSON patch for upsert') from e
if 'id' not in new_doc:
raise ValueError("Upserted document is missing an 'id' field")
validate_is_total(new_doc, self._schema)
documents.append(cast(TDocument, new_doc))
self._save_documents(documents)
return UpdateOneResult(acknowledged=True, matched_count=0, modified_count=0, upserted_id=new_doc['id'])
return UpdateOneResult(acknowledged=True, matched_count=0, modified_count=0, upserted_id=None)
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
async with self._lock.writer_lock:
documents = self._load_documents()
for i, doc in enumerate(documents):
if matches_query(filters, doc):
removed = documents.pop(i)
self._save_documents(documents)
return DeleteResult(acknowledged=True, deleted_count=1, deleted_document=removed)
return DeleteResult(acknowledged=True, deleted_count=0, deleted_document=None)
|
class JsonDocumentCollection(DocumentCollection[TDocument]):
def __init__(self, name: str, schema: Type[TDocument], data_dir: Path) -> None:
pass
def _load_documents(self) -> List[TDocument]:
'''Load documents from the JSON file.'''
pass
def _save_documents(self, documents: List[TDocument]) -> None:
'''Save documents to the JSON file.'''
pass
async def find(self, filters: Optional[QueryFilter]=None, projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
pass
async def insert_one(self, document: TDocument) -> InsertOneResult:
pass
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
pass
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
pass
| 8
| 2
| 19
| 2
| 15
| 2
| 4
| 0.14
| 1
| 16
| 7
| 0
| 7
| 5
| 7
| 33
| 142
| 22
| 106
| 39
| 89
| 15
| 85
| 27
| 77
| 9
| 5
| 4
| 26
|
327,836
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/json.py
|
flux0_nanodb.json.JsonDocumentDatabase
|
from flux0_core.async_utils import RWLock
from pathlib import Path
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
from flux0_nanodb.types import DeleteResult, DocumentID, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from typing import Any, List, Mapping, Optional, Protocol, Sequence, Tuple, Type, cast
class JsonDocumentDatabase(DocumentDatabase):
def __init__(self, data_dir: str='./data') -> None:
"""
Initialize the JSON document database.
Args:
data_dir: Directory where JSON files will be stored. Defaults to "./data"
"""
self._data_dir = Path(data_dir)
self._collections: dict[str, JsonDocumentCollection[Any]] = {}
self._db_lock = RWLock()
self._data_dir.mkdir(parents=True, exist_ok=True)
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
async with self._db_lock.writer_lock:
if name in self._collections:
raise ValueError(f"Collection '{name}' already exists")
collection: JsonDocumentCollection[TDocument] = JsonDocumentCollection(name, schema, self._data_dir)
self._collections[name] = collection
return collection
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
async with self._db_lock.reader_lock:
if name in self._collections:
return cast(JsonDocumentCollection[TDocument], self._collections[name])
file_path = self._data_dir / f'{name}.json'
if not file_path.exists():
raise ValueError(f"Collection '{name}' does not exist")
async with self._db_lock.writer_lock:
if name in self._collections:
return cast(JsonDocumentCollection[TDocument], self._collections[name])
collection: JsonDocumentCollection[TDocument] = JsonDocumentCollection(name, schema, self._data_dir)
self._collections[name] = collection
return collection
async def delete_collection(self, name: str) -> None:
async with self._db_lock.writer_lock:
file_path = self._data_dir / f'{name}.json'
if name in self._collections:
del self._collections[name]
if file_path.exists():
file_path.unlink()
else:
raise ValueError(f"Collection '{name}' does not exist")
|
class JsonDocumentDatabase(DocumentDatabase):
def __init__(self, data_dir: str='./data') -> None:
'''
Initialize the JSON document database.
Args:
data_dir: Directory where JSON files will be stored. Defaults to "./data"
'''
pass
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
pass
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
pass
async def delete_collection(self, name: str) -> None:
pass
| 5
| 1
| 16
| 2
| 11
| 3
| 3
| 0.3
| 1
| 8
| 3
| 0
| 4
| 3
| 4
| 27
| 67
| 11
| 43
| 16
| 34
| 13
| 34
| 12
| 29
| 4
| 5
| 2
| 10
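A short, assumption-laden sketch of the file-backed database above: the collection name, schema, and data directory are made up for illustration, and every schema field is supplied on insert since validate_is_total is called against the schema.

import asyncio

from flux0_nanodb.json import JsonDocumentDatabase
from flux0_nanodb.types import BaseDocument, DocumentID, DocumentVersion

class NoteDocument(BaseDocument, total=False):
    text: str

async def main() -> None:
    db = JsonDocumentDatabase(data_dir="./data")  # JSON files live under ./data
    notes = await db.create_collection("notes", NoteDocument)
    await notes.insert_one(
        NoteDocument(id=DocumentID("n1"), version=DocumentVersion("1"), text="hello")
    )
    # get_collection returns the cached collection, or loads an existing notes.json from disk.
    await db.get_collection("notes", NoteDocument)

asyncio.run(main())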
|
327,837
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/memory.py
|
flux0_nanodb.memory.Comparable
|
from typing import Any, List, Mapping, Optional, Protocol, Sequence, Tuple, Type, cast
class Comparable(Protocol):
def __lt__(self, other: Any) -> bool:
...
|
class Comparable(Protocol):
def __lt__(self, other: Any) -> bool:
pass
| 2
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 25
| 2
| 0
| 2
| 2
| 1
| 0
| 3
| 2
| 1
| 1
| 5
| 0
| 1
|
327,838
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/memory.py
|
flux0_nanodb.memory.MemoryDocumentCollection
|
import jsonpatch
from flux0_nanodb.types import DeleteResult, DocumentID, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from flux0_core.async_utils import RWLock
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
from flux0_nanodb.common import convert_patch, validate_is_total
from flux0_nanodb.query import QueryFilter, matches_query
from flux0_nanodb.projection import Projection, apply_projection
from typing import Any, List, Mapping, Optional, Protocol, Sequence, Tuple, Type, cast
class MemoryDocumentCollection(DocumentCollection[TDocument]):
def __init__(self, name: str, schema: Type[TDocument]) -> None:
self._name = name
self._schema = schema
self._documents: list[TDocument] = []
self._lock = RWLock()
async def find(self, filters: Optional[QueryFilter]=None, projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
async with self._lock.reader_lock:
docs: Sequence[TDocument] = []
if filters is None:
docs = self._documents
else:
docs = [doc for doc in self._documents if matches_query(filters, doc)]
if sort is not None:
for field, order in reversed(sort):
docs.sort(key=lambda doc: cast(Comparable, doc.get(field, None)), reverse=order == SortingOrder.DESC)
if projection:
docs = [cast(TDocument, apply_projection(doc, projection)) for doc in docs]
if offset is not None:
if offset < 0:
raise ValueError('Offset must be non-negative')
docs = docs[offset:]
if limit is not None:
if limit < 0:
raise ValueError('Limit must be non-negative')
docs = docs[:limit]
return docs
async def insert_one(self, document: TDocument) -> InsertOneResult:
async with self._lock.writer_lock:
self._documents.append(document)
validate_is_total(document, self._schema)
inserted_id: Optional[DocumentID] = document.get('id')
if inserted_id is None:
raise ValueError("Document is missing an 'id' field")
return InsertOneResult(acknowledged=True, inserted_id=inserted_id)
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
async with self._lock.writer_lock:
standard_patch = convert_patch(patch)
for i, doc in enumerate(self._documents):
if matches_query(filters, doc):
try:
updated_doc = jsonpatch.apply_patch(doc, standard_patch, in_place=False)
except jsonpatch.JsonPatchException as e:
raise ValueError('Invalid JSON patch') from e
self._documents[i] = cast(TDocument, updated_doc)
return UpdateOneResult(acknowledged=True, matched_count=1, modified_count=1, upserted_id=None)
if upsert:
try:
new_doc = jsonpatch.apply_patch({}, standard_patch, in_place=False)
except jsonpatch.JsonPatchException as e:
raise ValueError('Invalid JSON patch for upsert') from e
if 'id' not in new_doc:
raise ValueError("Upserted document is missing an 'id' field")
validate_is_total(new_doc, self._schema)
self._documents.append(cast(TDocument, new_doc))
return UpdateOneResult(acknowledged=True, matched_count=0, modified_count=0, upserted_id=new_doc['id'])
return UpdateOneResult(acknowledged=True, matched_count=0, modified_count=0, upserted_id=None)
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
async with self._lock.writer_lock:
for i, doc in enumerate(self._documents):
if matches_query(filters, doc):
removed = self._documents.pop(i)
return DeleteResult(acknowledged=True, deleted_count=1, deleted_document=removed)
return DeleteResult(acknowledged=True, deleted_count=0, deleted_document=None)
|
class MemoryDocumentCollection(DocumentCollection[TDocument]):
def __init__(self, name: str, schema: Type[TDocument]) -> None:
pass
async def find(self, filters: Optional[QueryFilter]=None, projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
pass
async def insert_one(self, document: TDocument) -> InsertOneResult:
pass
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
pass
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
pass
| 6
| 0
| 19
| 1
| 17
| 2
| 4
| 0.12
| 1
| 14
| 7
| 0
| 5
| 4
| 5
| 31
| 102
| 9
| 84
| 29
| 69
| 10
| 63
| 19
| 57
| 9
| 5
| 4
| 22
|
327,839
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/memory.py
|
flux0_nanodb.memory.MemoryDocumentDatabase
|
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
from flux0_nanodb.types import DeleteResult, DocumentID, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from typing import Any, List, Mapping, Optional, Protocol, Sequence, Tuple, Type, cast
class MemoryDocumentDatabase(DocumentDatabase):
def __init__(self) -> None:
self._collections: dict[str, MemoryDocumentCollection[Any]] = {}
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
if name in self._collections:
raise ValueError(f"Collection '{name}' already exists")
collection: MemoryDocumentCollection[TDocument] = MemoryDocumentCollection(name, schema)
self._collections[name] = collection
return collection
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
collection = self._collections.get(name)
if collection is None:
raise ValueError(f"Collection '{name}' does not exist")
return cast(MemoryDocumentCollection[TDocument], collection)
async def delete_collection(self, name: str) -> None:
if name in self._collections:
del self._collections[name]
else:
raise ValueError(f"Collection '{name}' does not exist")
|
class MemoryDocumentDatabase(DocumentDatabase):
def __init__(self) -> None:
pass
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
pass
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
pass
async def delete_collection(self, name: str) -> None:
pass
| 5
| 0
| 6
| 0
| 6
| 1
| 2
| 0.09
| 1
| 6
| 2
| 0
| 4
| 1
| 4
| 27
| 28
| 3
| 23
| 12
| 14
| 2
| 18
| 8
| 13
| 2
| 5
| 1
| 7
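An end-to-end sketch of the in-memory implementation above, combining the query and patch types from the other flux0_nanodb records; the TaskDocument schema and all literal values are assumptions for illustration, and matches_query is assumed to evaluate the Comparison filters shown.

import asyncio

from flux0_nanodb.memory import MemoryDocumentDatabase
from flux0_nanodb.query import Comparison
from flux0_nanodb.types import BaseDocument, DocumentID, DocumentVersion, ReplaceOp, SortingOrder

class TaskDocument(BaseDocument, total=False):
    title: str
    done: bool

async def main() -> None:
    db = MemoryDocumentDatabase()
    tasks = await db.create_collection("tasks", TaskDocument)

    await tasks.insert_one(
        TaskDocument(id=DocumentID("t1"), version=DocumentVersion("1"), title="write docs", done=False)
    )

    # Filter + sort + pagination, mirroring the find() docstring on DocumentCollection.
    open_tasks = await tasks.find(
        filters=Comparison(path="done", op="$eq", value=False),
        sort=[("title", SortingOrder.ASC)],
        limit=10,
    )

    # RFC 6902 patch applied to the first matching document.
    await tasks.update_one(
        filters=Comparison(path="id", op="$eq", value="t1"),
        patch=[ReplaceOp(op="replace", path="/done", value=True)],
    )

    result = await tasks.delete_one(Comparison(path="id", op="$eq", value="t1"))
    print(len(open_tasks), result.deleted_count)

asyncio.run(main())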
|
327,840
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/mongodb.py
|
flux0_nanodb.mongodb.MongoDocumentCollection
|
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
from flux0_nanodb.query import And, Comparison, Or, QueryFilter
import uuid
import jsonpatch
from flux0_nanodb.types import DeleteResult, DocumentID, DocumentVersion, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, cast
from flux0_nanodb.projection import Projection, apply_projection
class MongoDocumentCollection(DocumentCollection[TDocument]):
"""MongoDB implementation of DocumentCollection using PyMongo async API."""
def __init__(self, collection: Collection[Any], schema: Type[TDocument], ascending_const: Any, descending_const: Any, duplicate_key_error: Any):
self.collection = collection
self.schema = schema
self._ASCENDING = ascending_const
self._DESCENDING = descending_const
self._DuplicateKeyError = duplicate_key_error
def _convert_query_filter_to_mongo(self, query_filter: QueryFilter) -> Dict[str, Any]:
"""Convert our QueryFilter to MongoDB query format."""
if isinstance(query_filter, Comparison):
field = query_filter.path
if field == 'id':
field = '_id'
if query_filter.op == '$eq':
return {field: query_filter.value}
elif query_filter.op == '$ne':
return {field: {'$ne': query_filter.value}}
elif query_filter.op == '$gt':
return {field: {'$gt': query_filter.value}}
elif query_filter.op == '$gte':
return {field: {'$gte': query_filter.value}}
elif query_filter.op == '$lt':
return {field: {'$lt': query_filter.value}}
elif query_filter.op == '$lte':
return {field: {'$lte': query_filter.value}}
elif query_filter.op == '$in':
return {field: {'$in': query_filter.value}}
else:
raise ValueError(f'Unsupported operator: {query_filter.op}')
elif isinstance(query_filter, And):
return {'$and': [self._convert_query_filter_to_mongo(expr) for expr in query_filter.expressions]}
elif isinstance(query_filter, Or):
return {'$or': [self._convert_query_filter_to_mongo(expr) for expr in query_filter.expressions]}
else:
raise ValueError(f'Unsupported query filter type: {type(query_filter)}')
def _convert_projection_to_mongo(self, projection: Mapping[str, Projection]) -> Dict[str, int]:
"""Convert our Projection to MongoDB projection format."""
mongo_projection = {}
for field, proj_type in projection.items():
mongo_field = '_id' if field == 'id' else field
mongo_projection[mongo_field] = 1 if proj_type == Projection.INCLUDE else 0
return mongo_projection
def _convert_sort_to_mongo(self, sort: Sequence[Tuple[str, SortingOrder]]) -> List[Tuple[str, int]]:
"""Convert our sort specification to MongoDB sort format."""
mongo_sort = []
for field, order in sort:
mongo_field = '_id' if field == 'id' else field
mongo_order = self._ASCENDING if order == SortingOrder.ASC else self._DESCENDING
mongo_sort.append((mongo_field, mongo_order))
return mongo_sort
def _convert_from_mongo_doc(self, mongo_doc: Dict[str, Any]) -> TDocument:
"""Convert MongoDB document to our document format."""
if mongo_doc is None:
return None
if '_id' in mongo_doc:
mongo_doc['id'] = DocumentID(str(mongo_doc['_id']))
del mongo_doc['_id']
return cast(TDocument, mongo_doc)
def _convert_to_mongo_doc(self, document: TDocument) -> Dict[str, Any]:
"""Convert our document format to MongoDB document."""
mongo_doc = dict(document)
if 'id' in mongo_doc:
mongo_doc['_id'] = mongo_doc['id']
del mongo_doc['id']
return mongo_doc
async def find(self, filters: Optional[QueryFilter], projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
"""Find all documents that match the optional filters."""
mongo_query = {}
if filters:
mongo_query = self._convert_query_filter_to_mongo(filters)
mongo_projection = None
if projection:
mongo_projection = self._convert_projection_to_mongo(projection)
cursor = self.collection.find(mongo_query, mongo_projection)
if sort:
mongo_sort = self._convert_sort_to_mongo(sort)
cursor = cursor.sort(mongo_sort)
if offset:
cursor = cursor.skip(offset)
if limit:
cursor = cursor.limit(limit)
results = await cursor.to_list(length=None)
documents = [self._convert_from_mongo_doc(doc) for doc in results]
if projection:
projected_docs = []
for doc in documents:
projected_doc = apply_projection(doc, projection)
projected_docs.append(cast(TDocument, projected_doc))
return projected_docs
return documents
async def insert_one(self, document: TDocument) -> InsertOneResult:
"""Insert a single document into the collection."""
mongo_doc = self._convert_to_mongo_doc(document)
if '_id' not in mongo_doc:
mongo_doc['_id'] = str(uuid.uuid4())
if 'version' not in mongo_doc:
mongo_doc['version'] = DocumentVersion(str(uuid.uuid4()))
try:
result = await self.collection.insert_one(mongo_doc)
return InsertOneResult(acknowledged=result.acknowledged, inserted_id=DocumentID(str(result.inserted_id)))
except self._DuplicateKeyError:
return InsertOneResult(acknowledged=False, inserted_id=DocumentID(str(mongo_doc['_id'])))
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
"""Apply a JSON Patch to a single document that matches the provided filters."""
mongo_query = self._convert_query_filter_to_mongo(filters)
existing_doc = await self.collection.find_one(mongo_query)
if existing_doc is None and (not upsert):
return UpdateOneResult(acknowledged=True, matched_count=0, modified_count=0, upserted_id=None)
if existing_doc is None and upsert:
new_doc: Dict[str, Any] = {}
new_doc['_id'] = str(uuid.uuid4())
new_doc['version'] = DocumentVersion(str(uuid.uuid4()))
else:
assert existing_doc is not None
new_doc = dict(existing_doc)
if '_id' in new_doc:
new_doc['id'] = str(new_doc['_id'])
del new_doc['_id']
patch_obj = jsonpatch.JsonPatch([dict(op) for op in patch])
try:
patched_doc = patch_obj.apply(new_doc)
except jsonpatch.JsonPatchException as e:
raise ValueError(f'Invalid JSON patch: {e}')
mongo_doc = dict(patched_doc)
if 'id' in mongo_doc:
mongo_doc['_id'] = mongo_doc['id']
del mongo_doc['id']
mongo_doc['version'] = DocumentVersion(str(uuid.uuid4()))
if existing_doc is None:
iresult = await self.collection.insert_one(mongo_doc)
return UpdateOneResult(acknowledged=iresult.acknowledged, matched_count=0, modified_count=0, upserted_id=DocumentID(str(iresult.inserted_id)))
else:
uresult = await self.collection.replace_one(mongo_query, mongo_doc)
return UpdateOneResult(acknowledged=uresult.acknowledged, matched_count=uresult.matched_count, modified_count=uresult.modified_count, upserted_id=None)
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
"""Delete the first document that matches the provided filters."""
mongo_query = self._convert_query_filter_to_mongo(filters)
existing_doc = await self.collection.find_one(mongo_query)
deleted_document = None
if existing_doc:
deleted_document = self._convert_from_mongo_doc(existing_doc)
result = await self.collection.delete_one(mongo_query)
return DeleteResult(acknowledged=result.acknowledged, deleted_count=result.deleted_count, deleted_document=deleted_document)
|
class MongoDocumentCollection(DocumentCollection[TDocument]):
'''MongoDB implementation of DocumentCollection using PyMongo async API.'''
def __init__(self, collection: Collection[Any], schema: Type[TDocument], ascending_const: Any, descending_const: Any, duplicate_key_error: Any):
pass
def _convert_query_filter_to_mongo(self, query_filter: QueryFilter) -> Dict[str, Any]:
'''Convert our QueryFilter to MongoDB query format.'''
pass
def _convert_projection_to_mongo(self, projection: Mapping[str, Projection]) -> Dict[str, int]:
'''Convert our Projection to MongoDB projection format.'''
pass
def _convert_sort_to_mongo(self, sort: Sequence[Tuple[str, SortingOrder]]) -> List[Tuple[str, int]]:
'''Convert our sort specification to MongoDB sort format.'''
pass
def _convert_from_mongo_doc(self, mongo_doc: Dict[str, Any]) -> TDocument:
'''Convert MongoDB document to our document format.'''
pass
def _convert_to_mongo_doc(self, document: TDocument) -> Dict[str, Any]:
'''Convert our document format to MongoDB document.'''
pass
async def find(self, filters: Optional[QueryFilter], projection: Optional[Mapping[str, Projection]]=None, limit: Optional[int]=None, offset: Optional[int]=None, sort: Optional[Sequence[Tuple[str, SortingOrder]]]=None) -> Sequence[TDocument]:
'''Find all documents that match the optional filters.'''
pass
async def insert_one(self, document: TDocument) -> InsertOneResult:
'''Insert a single document into the collection.'''
pass
async def update_one(self, filters: QueryFilter, patch: List[JSONPatchOperation], upsert: bool=False) -> UpdateOneResult:
'''Apply a JSON Patch to a single document that matches the provided filters.'''
pass
async def delete_one(self, filters: QueryFilter) -> DeleteResult[TDocument]:
'''Delete the first document that matches the provided filters.'''
pass
| 11
| 10
| 24
| 3
| 18
| 4
| 5
| 0.2
| 1
| 14
| 8
| 0
| 10
| 5
| 10
| 36
| 252
| 36
| 180
| 66
| 151
| 36
| 122
| 47
| 111
| 12
| 5
| 2
| 47
|
327,841
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/mongodb.py
|
flux0_nanodb.mongodb.MongoDocumentDatabase
|
from flux0_nanodb.types import DeleteResult, DocumentID, DocumentVersion, InsertOneResult, JSONPatchOperation, SortingOrder, TDocument, UpdateOneResult
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, cast
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
class MongoDocumentDatabase(DocumentDatabase):
"""MongoDB implementation of DocumentDatabase using PyMongo async API."""
def __init__(self, client: AsyncMongoClient[Any], database_name: str):
self._ASCENDING, self._DESCENDING, _, self._DuplicateKeyError = _import_mongodb_dependencies()
self.client = client
self.database = client[database_name]
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
"""Create a new collection with the given name and document schema."""
collection = self.database[name]
return MongoDocumentCollection(collection, schema, self._ASCENDING, self._DESCENDING, self._DuplicateKeyError)
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
"""Retrieve an existing collection by its name and document schema."""
collection_names = await self.database.list_collection_names()
if name not in collection_names:
raise ValueError(f"Collection '{name}' does not exist")
collection = self.database[name]
return MongoDocumentCollection(collection, schema, self._ASCENDING, self._DESCENDING, self._DuplicateKeyError)
async def delete_collection(self, name: str) -> None:
"""Delete a collection by its name."""
await self.database.drop_collection(name)
|
class MongoDocumentDatabase(DocumentDatabase):
'''MongoDB implementation of DocumentDatabase using PyMongo async API.'''
def __init__(self, client: AsyncMongoClient[Any], database_name: str):
pass
async def create_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
'''Create a new collection with the given name and document schema.'''
pass
async def get_collection(self, name: str, schema: Type[TDocument]) -> DocumentCollection[TDocument]:
'''Retrieve an existing collection by its name and document schema.'''
pass
async def delete_collection(self, name: str) -> None:
'''Delete a collection by its name.'''
pass
| 5
| 4
| 9
| 1
| 6
| 2
| 1
| 0.31
| 1
| 5
| 2
| 0
| 4
| 5
| 4
| 27
| 40
| 6
| 26
| 15
| 17
| 8
| 16
| 11
| 11
| 2
| 5
| 1
| 5
|
327,842
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/projection.py
|
flux0_nanodb.projection.Projection
|
from enum import Enum
class Projection(Enum):
INCLUDE = 1
EXCLUDE = 0
|
class Projection(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
327,843
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/query.py
|
flux0_nanodb.query.And
|
from dataclasses import dataclass
from typing import Any, List, Literal, Mapping, Union
@dataclass(frozen=True)
class And:
"""
A logical 'AND' of a list of query expressions.
"""
expressions: List[QueryFilter]
|
@dataclass(frozen=True)
class And:
'''
A logical 'AND' of a list of query expressions.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 1.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 1
| 2
| 1
| 1
| 3
| 2
| 1
| 1
| 0
| 0
| 0
| 0
|
327,844
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/query.py
|
flux0_nanodb.query.Comparison
|
from typing import Any, List, Literal, Mapping, Union
from dataclasses import dataclass
@dataclass(frozen=True)
class Comparison:
"""
Represents a filter that compares a path to a literal value.
For the "$in" operator, `value` should be a list of literal values.
"""
path: str
op: Operator
value: Union[LiteralValue, List[LiteralValue]]
|
@dataclass(frozen=True)
class Comparison:
'''
Represents a filter that compares a path to a literal value.
For the "$in" operator, `value` should be a list of literal values.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 1
| 4
| 1
| 3
| 4
| 4
| 1
| 3
| 0
| 0
| 0
| 0
|
327,845
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/query.py
|
flux0_nanodb.query.Or
|
from dataclasses import dataclass
from typing import Any, List, Literal, Mapping, Union
@dataclass(frozen=True)
class Or:
"""
A logical 'OR' of a list of query expressions.
"""
expressions: List[QueryFilter]
|
@dataclass(frozen=True)
class Or:
'''
A logical 'OR' of a list of query expressions.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 1.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 1
| 2
| 1
| 1
| 3
| 2
| 1
| 1
| 0
| 0
| 0
| 0
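A small sketch composing the three query filter types above into one nested query; the field names and values are illustrative.

from flux0_nanodb.query import And, Comparison, Or

# (status == "open" OR status == "pending") AND created_by IN ["alice", "bob"]
query = And(expressions=[
    Or(expressions=[
        Comparison(path="status", op="$eq", value="open"),
        Comparison(path="status", op="$eq", value="pending"),
    ]),
    Comparison(path="created_by", op="$in", value=["alice", "bob"]),
])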
|
327,846
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.AddOp
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
class AddOp(TypedDict):
op: Literal['add']
path: str
value: Any
|
class AddOp(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,847
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.BaseDocument
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
class BaseDocument(TypedDict, total=False):
id: DocumentID
version: DocumentVersion
|
class BaseDocument(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 1
| 0
| 0
|
327,848
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.CopyOp
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
class CopyOp(TypedDict):
op: Literal['copy']
from_: str
path: str
|
class CopyOp(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,849
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.DeleteResult
|
from dataclasses import dataclass
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
@dataclass(frozen=True)
class DeleteResult(Generic[TDocument]):
acknowledged: bool
deleted_count: int
deleted_document: Optional[TDocument]
|
@dataclass(frozen=True)
class DeleteResult(Generic[TDocument]):
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,850
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.InsertOneResult
|
from dataclasses import dataclass
@dataclass(frozen=True)
class InsertOneResult:
acknowledged: bool
inserted_id: DocumentID
|
@dataclass(frozen=True)
class InsertOneResult:
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0.33
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 1
| 2
| 1
| 3
| 1
| 2
| 0
| 0
| 0
| 0
|
327,851
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.MoveOp
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
class MoveOp(TypedDict):
op: Literal['move']
from_: str
path: str
|
class MoveOp(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 1
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,852
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.RemoveOp
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
class RemoveOp(TypedDict):
op: Literal['remove']
path: str
|
class RemoveOp(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 1
| 0
| 0
|
327,853
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.ReplaceOp
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
class ReplaceOp(TypedDict):
op: Literal['replace']
path: str
value: Any
|
class ReplaceOp(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,854
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.SortingOrder
|
from enum import Enum
class SortingOrder(Enum):
ASC = True
DESC = False
|
class SortingOrder(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
327,855
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.TestOp
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
class TestOp(TypedDict):
op: Literal['test']
path: str
value: Any
|
class TestOp(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,856
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/nanodb/src/flux0_nanodb/types.py
|
flux0_nanodb.types.UpdateOneResult
|
from typing import Any, Generic, Literal, NewType, Optional, TypedDict, TypeVar, Union
from dataclasses import dataclass
@dataclass(frozen=True)
class UpdateOneResult:
acknowledged: bool
matched_count: int
modified_count: int
upserted_id: Optional[DocumentID]
|
@dataclass(frozen=True)
class UpdateOneResult:
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0
| 5
| 1
| 4
| 0
| 5
| 1
| 4
| 0
| 0
| 0
| 0
|
327,857
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/app.py
|
flux0_server.app.SPAStaticFiles
|
import os
from fastapi import APIRouter, FastAPI, Request, Response, status
from typing import Any, Awaitable, Callable
from fastapi.staticfiles import StaticFiles
class SPAStaticFiles(StaticFiles):
async def get_response(self, path: str, scope: Any) -> Response:
assert isinstance(self.directory, str), 'Static directory must be a string'
full_path = os.path.join(self.directory, path)
if os.path.isfile(full_path):
return await super().get_response(path, scope)
return await super().get_response('index.html', scope)
|
class SPAStaticFiles(StaticFiles):
async def get_response(self, path: str, scope: Any) -> Response:
pass
| 2
| 0
| 11
| 3
| 6
| 2
| 2
| 0.29
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 12
| 3
| 7
| 3
| 5
| 2
| 7
| 3
| 5
| 2
| 1
| 1
| 2
|
327,858
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/container_factory.py
|
flux0_server.container_factory.ContainerAgentRunnerFactory
|
from flux0_core.agent_runners.api import AgentRunner, AgentRunnerFactory
from typing import override
from lagom import Container
from flux0_core.agents import AgentType
class ContainerAgentRunnerFactory(AgentRunnerFactory):
def __init__(self, container: Container) -> None:
self.container = container
@override
def create_runner(self, agent_type: AgentType) -> AgentRunner:
for defi in self.container.defined_types:
if not isinstance(defi, type):
continue
if issubclass(defi, AgentRunner) and getattr(defi, 'agent_type') == agent_type:
return self.container[defi]
raise ValueError(f'No engine found for agent type {agent_type}')
@override
def runner_exists(self, agent_type: AgentType) -> bool:
for defi in self.container.defined_types:
if not isinstance(defi, type):
continue
if issubclass(defi, AgentRunner) and getattr(defi, 'agent_type') == agent_type:
return True
return False
|
class ContainerAgentRunnerFactory(AgentRunnerFactory):
def __init__(self, container: Container) -> None:
pass
@override
def create_runner(self, agent_type: AgentType) -> AgentRunner:
pass
@override
def runner_exists(self, agent_type: AgentType) -> bool:
pass
| 6
| 0
| 5
| 0
| 5
| 0
| 3
| 0
| 1
| 3
| 1
| 0
| 3
| 1
| 3
| 25
| 21
| 2
| 19
| 9
| 13
| 0
| 17
| 7
| 13
| 4
| 5
| 2
| 9
|
327,859
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/main.py
|
flux0_server.main.StartupError
|
class StartupError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
|
class StartupError(Exception):
def __init__(self, message: str) -> None:
pass
| 2
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 11
| 3
| 0
| 3
| 2
| 1
| 0
| 3
| 2
| 1
| 1
| 3
| 0
| 1
|
327,860
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/replay_agent/replay_agent.py
|
flux0_server.replay_agent.replay_agent.ReplayAgentRunner
|
from flux0_core.agent_runners.context import Context
from flux0_core.recordings import RecordedChunkPayload, RecordedEmittedPayload, RecordedEvent, RecordingId
import uuid
from flux0_core.agent_runners.api import AgentRunner, Deps, agent_runner
from flux0_stream.types import ChunkEvent
from flux0_core.sessions import EventId, Session, SessionUpdateParams, StatusEventData
from typing import AsyncIterator, Optional, Required, Sequence, Tuple, TypedDict, cast
@agent_runner('replay')
class ReplayAgentRunner(AgentRunner):
"""
Replays recorded frames for a session turn:
• Finds the next user-anchored turn after session.metadata.replay.last_streamed_offset
• Streams [start, end) as live events via the EventEmitter
• Paces by recorded created_at deltas (with clamping, factor = pacing)
• Updates last_streamed_offset when done
"""
async def run(self, context: Context, deps: Deps) -> bool:
session = await deps.read_session(context.session_id)
if not session:
deps.logger.error('ReplayAgentRunner: session not found')
return False
rmeta = get_replay_meta(session)
if not rmeta:
deps.logger.error('ReplayAgentRunner: missing replay metadata')
return False
recording_id = rmeta['recording_id']
if not recording_id:
deps.logger.error('ReplayAgentRunner: missing recording_id in session.metadata.replay')
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='completed'))
return False
pacing = rmeta['pacing']
after = rmeta['last_streamed_offset']
rng: Optional[Tuple[int, Optional[int]]] = await deps._recording_store.read_next_turn_range_after_offset(recording_id=recording_id, after_offset=after)
if not rng:
deps.logger.info('ReplayAgentRunner: no more turns to replay')
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='completed'))
return True
start, end = rng
frames: Sequence[RecordedEvent] = await deps._recording_store.read_frames_range(recording_id=recording_id, start_offset_inclusive=start, end_offset_exclusive=end)
if not frames:
deps.logger.warning('ReplayAgentRunner: empty frame range for %s [%s,%s)', recording_id, start, end)
return True
current_correlation = deps.correlator.correlation_id
last_offset_streamed = after
async for f in _paced_iter(frames, pacing):
last_offset_streamed = f.offset
if f.kind == 'chunk':
p = cast(RecordedChunkPayload, f.payload)
meta = p.get('metadata') or {}
ce = ChunkEvent(correlation_id=current_correlation, event_id=p['event_id'], seq=p['seq'], patches=p['patches'], metadata=meta, timestamp=_utc_ts(f.created_at))
await deps.event_emitter.enqueue_event_chunk(ce)
elif f.kind == 'emitted':
ep = cast(RecordedEmittedPayload, f.payload)
etype = ep['type']
if etype == 'status':
data = cast(StatusEventData, ep['data'])
raw_id = cast(str, ep.get('id', ''))
eid = EventId(raw_id) if raw_id else EventId(uuid.uuid4().hex)
await deps.event_emitter.enqueue_status_event(correlation_id=current_correlation, data=data, event_id=eid)
else:
deps.logger.debug('ReplayAgentRunner: skipping emitted type=%s', etype)
else:
deps.logger.debug('ReplayAgentRunner: skipping kind=%s at offset=%s', f.kind, f.offset)
rmeta['last_streamed_offset'] = last_offset_streamed
md = getattr(session, 'metadata', {}) or {}
md['replay'] = rmeta
await deps._session_store.update_session(session.id, SessionUpdateParams(metadata=md))
return True
|
@agent_runner('replay')
class ReplayAgentRunner(AgentRunner):
'''
Replays recorded frames for a session turn:
• Finds the next user-anchored turn after session.metadata.replay.last_streamed_offset
• Streams [start, end) as live events via the EventEmitter
• Paces by recorded created_at deltas (with clamping, factor = pacing)
• Updates last_streamed_offset when done
'''
async def run(self, context: Context, deps: Deps) -> bool:
pass
| 3
| 1
| 116
| 15
| 87
| 16
| 11
| 0.26
| 1
| 11
| 8
| 0
| 1
| 0
| 1
| 22
| 125
| 16
| 88
| 22
| 86
| 23
| 50
| 22
| 48
| 11
| 5
| 3
| 11
|
327,861
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/replay_agent/replay_agent.py
|
flux0_server.replay_agent.replay_agent.ReplayMeta
|
from typing import AsyncIterator, Optional, Required, Sequence, Tuple, TypedDict, cast
from flux0_core.recordings import RecordedChunkPayload, RecordedEmittedPayload, RecordedEvent, RecordingId
class ReplayMeta(TypedDict, total=False):
recording_id: Required[RecordingId]
pacing: Required[float]
last_streamed_offset: Required[int]
|
class ReplayMeta(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,862
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/settings.py
|
flux0_server.settings.EnvType
|
import enum
class EnvType(enum.Enum):
PRODUCTION = 'production'
DEVELOPMENT = 'development'
|
class EnvType(enum.Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
327,863
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/settings.py
|
flux0_server.settings.ParsedStoreConfig
|
from typing import List, Optional, Union
from pydantic import BaseModel, Field, field_validator, model_validator
from flux0_core.storage.types import NanoDBStorageType, StorageType
class ParsedStoreConfig(BaseModel):
type: StorageType
mode: Optional[NanoDBStorageType]
uri: Optional[str] = None
database: Optional[str] = None
dir: Optional[str] = None
|
class ParsedStoreConfig(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 6
| 0
| 6
| 3
| 5
| 3
| 6
| 3
| 5
| 0
| 5
| 0
| 0
|
327,864
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/server/src/flux0_server/settings.py
|
flux0_server.settings.Settings
|
from flux0_api.auth import AuthType
from flux0_core.logging import LogLevel
from pydantic_settings import BaseSettings, SettingsConfigDict
from typing import List, Optional, Union
from pydantic import BaseModel, Field, field_validator, model_validator
class Settings(BaseSettings):
model_config = SettingsConfigDict(env_file='.env', env_prefix='FLUX0_', enable_decoding=False, extra='allow')
env: EnvType = Field(default=EnvType.PRODUCTION)
port: int = Field(default=8080)
auth_type: AuthType = Field(default_factory=lambda: AuthType.NOOP)
log_level: LogLevel = Field(default_factory=lambda: LogLevel.INFO)
db_uri: str = Field(default='nanodb://memory')
modules: List[str] = Field(default_factory=list)
@field_validator('modules', mode='before')
@classmethod
def decode_modules(cls, v: Union[str, List[str]]) -> List[str]:
if isinstance(v, str):
return [module.strip() for module in v.split(',') if module.strip()]
return v
@model_validator(mode='after')
def populate_db_config(self) -> 'Settings':
self.db = parse_store_uri(self.db_uri)
return self
|
class Settings(BaseSettings):
@field_validator('modules', mode='before')
@classmethod
def decode_modules(cls, v: Union[str, List[str]]) -> List[str]:
pass
@model_validator(mode='after')
def populate_db_config(self) -> 'Settings':
pass
| 6
| 0
| 4
| 0
| 4
| 0
| 2
| 0
| 1
| 3
| 2
| 0
| 1
| 1
| 2
| 2
| 22
| 2
| 20
| 13
| 14
| 0
| 15
| 11
| 12
| 2
| 1
| 1
| 3
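A hedged sketch of how the modules field is decoded by the Settings class above; it assumes no FLUX0_-prefixed environment variables or .env file override the defaults, and that parse_store_uri (referenced by populate_db_config) accepts the default nanodb://memory URI.

from flux0_server.settings import Settings

# A comma-separated string, as it would arrive from FLUX0_MODULES, is split into a
# list by the decode_modules validator before the model is validated.
settings = Settings(modules="my_pkg.agents, my_pkg.tools")
print(settings.modules)  # ['my_pkg.agents', 'my_pkg.tools']
print(settings.db_uri)   # 'nanodb://memory' (default), parsed into settings.db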
|
327,865
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/frameworks/langchain.py
|
flux0_stream.frameworks.langchain.RunContext
|
from typing import AsyncIterator, Dict, Optional, cast
class RunContext:
def __init__(self, last_known_event_offset: int) -> None:
self.data: Dict[str, Dict[str, bool]] = {}
self.last_known_event_offset = last_known_event_offset
def set_typing_emitted(self, event_id: str, emitted: bool) -> None:
"""Set the typing_emitted status for a given event_id."""
if event_id not in self.data:
self.data[event_id] = {}
self.data[event_id]['typing_emitted'] = emitted
def get_typing_emitted(self, event_id: str) -> bool:
"""Retrieve the typing_emitted status for a given event_id."""
return self.data.get(event_id, {}).get('typing_emitted', False)
def get_last_known_event_offset(self) -> int:
"""Get the last known event offset."""
return self.last_known_event_offset
def clear(self) -> None:
"""Clear the entire run context."""
self.data.clear()
self.last_known_event_offset = 0
|
class RunContext:
def __init__(self, last_known_event_offset: int) -> None:
pass
def set_typing_emitted(self, event_id: str, emitted: bool) -> None:
'''Set the typing_emitted status for a given event_id.'''
pass
def get_typing_emitted(self, event_id: str) -> bool:
'''Retrieve the typing_emitted status for a given event_id.'''
pass
def get_last_known_event_offset(self) -> int:
'''Get the last known event offset.'''
pass
def clear(self) -> None:
'''Clear the entire run context.'''
pass
| 6
| 4
| 4
| 0
| 3
| 1
| 1
| 0.33
| 0
| 3
| 0
| 0
| 5
| 2
| 5
| 5
| 23
| 4
| 15
| 8
| 9
| 5
| 15
| 8
| 9
| 2
| 0
| 1
| 6
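A tiny usage sketch of the RunContext helper above; it assumes the optional LangChain dependencies imported elsewhere in flux0_stream.frameworks.langchain are installed.

from flux0_stream.frameworks.langchain import RunContext

ctx = RunContext(last_known_event_offset=42)
ctx.set_typing_emitted("evt_1", True)
print(ctx.get_typing_emitted("evt_1"))        # True
print(ctx.get_typing_emitted("evt_other"))    # False by default
print(ctx.get_last_known_event_offset())      # 42
ctx.clear()                                   # clears state and resets the offset to 0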
|
327,866
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/types.py
|
flux0_stream.types.AddOperation
|
from flux0_core.types import JSONSerializable
from typing import Literal, Mapping, Optional, TypedDict, Union
class AddOperation(TypedDict):
op: Literal['add']
path: str
value: JSONSerializable
|
class AddOperation(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
327,867
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/types.py
|
flux0_stream.types.ChunkEvent
|
from abc import ABC
from dataclasses import dataclass, field
from typing import Literal, Mapping, Optional, TypedDict, Union
import time
from flux0_core.types import JSONSerializable
from flux0_core.sessions import EventId, EventSource, EventType, MessageEventData, StatusEventData, ToolEventData
@dataclass(frozen=True)
class ChunkEvent(ABC):
"""Represents an incremental update using JSON Patch operations.
Attributes:
correlation_id (str): Unique identifier for the event stream.
event_id (str): Unique identifier for the event.
        seq (int): The sequence number of the chunk.
        patches (list[JsonPatchOperation]): JSON Patch operations carried by this chunk.
timestamp (float): Time at which the chunk was received.
"""
correlation_id: str
event_id: EventId
seq: int
patches: list[JsonPatchOperation]
metadata: Mapping[str, JSONSerializable] = field(default_factory=dict)
timestamp: float = field(default_factory=time.time)
|
@dataclass(frozen=True)
class ChunkEvent(ABC):
'''Represents an incremental update using JSON Patch operations.
Attributes:
correlation_id (str): Unique identifier for the event stream.
event_id (str): Unique identifier for the event.
        seq (int): The sequence number of the chunk.
        patches (list[JsonPatchOperation]): JSON Patch operations carried by this chunk.
timestamp (float): Time at which the chunk was received.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 1.14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 17
| 2
| 7
| 3
| 6
| 8
| 7
| 3
| 6
| 0
| 4
| 0
| 0
|
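A hedged sketch of constructing ChunkEvent instances for one streamed message. The ids are invented, and the agent_id/agent_name metadata keys mirror how MemoryEventStore (further down) reads them during finalization.

from flux0_core.sessions import EventId
from flux0_stream.types import ChunkEvent

chunks = [
    ChunkEvent(
        correlation_id="corr_1",
        event_id=EventId("evt_1"),
        seq=0,
        patches=[{"op": "add", "path": "/-", "value": "Hel"}],
        metadata={"agent_id": "agent_1", "agent_name": "demo"},
    ),
    ChunkEvent(
        correlation_id="corr_1",
        event_id=EventId("evt_1"),
        seq=1,
        patches=[{"op": "add", "path": "/-", "value": "lo"}],
    ),
]
for chunk in chunks:
    print(chunk.seq, chunk.patches, chunk.timestamp)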
327,868
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/types.py
|
flux0_stream.types.EmittedEvent
|
from dataclasses import dataclass, field
from typing import Literal, Mapping, Optional, TypedDict, Union
from flux0_core.sessions import EventId, EventSource, EventType, MessageEventData, StatusEventData, ToolEventData
from flux0_core.types import JSONSerializable
import time
@dataclass(frozen=True)
class EmittedEvent:
"""Represents an event emitted by a source.
This is the final form of an event after all chunks have been processed and
closely related to the `Event` class in core.
Attributes:
source (EventSource): The source of the event.
type (EventType): The type of the event.
correlation_id (str): Unique identifier for the event stream.
data (Union[MessageEventData, StatusEventData, ToolEventData]): The event data.
metadata (Optional[Mapping[str, JSONSerializable]]): Additional metadata.
"""
id: EventId
source: EventSource
type: EventType
correlation_id: str
data: Union[MessageEventData, StatusEventData, ToolEventData]
metadata: Optional[Mapping[str, JSONSerializable]] = None
timestamp: float = field(default_factory=time.time)
| null
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 1.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 2
| 8
| 3
| 7
| 10
| 8
| 3
| 7
| 0
| 0
| 0
| 0
|
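For contrast with the incremental ChunkEvent, a hedged sketch of a fully materialized EmittedEvent for a plain text message. The ids are invented; the MessageEventData, ContentPart and Participant shapes follow how they are constructed in MemoryEventStore further down.

from flux0_core.sessions import ContentPart, EventId, MessageEventData, Participant
from flux0_stream.types import EmittedEvent

event = EmittedEvent(
    id=EventId("evt_1"),
    source="ai_agent",
    type="message",
    correlation_id="corr_1",
    data=MessageEventData(
        type="message",
        parts=[ContentPart(type="content", content="Hello, world")],
        participant=Participant(id="agent_1", name="demo"),
    ),
)
print(event.type, event.correlation_id, event.timestamp)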
327,869
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/types.py
|
flux0_stream.types.ReplaceOperation
|
from flux0_core.types import JSONSerializable
from typing import Literal, Mapping, Optional, TypedDict, Union
class ReplaceOperation(TypedDict):
op: Literal['replace']
path: str
value: JSONSerializable
|
class ReplaceOperation(TypedDict):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 0
|
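The AddOperation and ReplaceOperation TypedDicts above mirror RFC 6902 JSON Patch operations, so they can be applied with the jsonpatch library that the stream store itself imports; a small sketch (the document and paths are illustrative):

from jsonpatch import apply_patch

from flux0_stream.types import AddOperation, ReplaceOperation

add: AddOperation = {"op": "add", "path": "/parts/-", "value": "Hello"}
replace: ReplaceOperation = {"op": "replace", "path": "/parts/0", "value": "Hello, world"}

doc: dict = {"parts": []}
doc = apply_patch(doc, [add])      # {'parts': ['Hello']}
doc = apply_patch(doc, [replace])  # {'parts': ['Hello, world']}
print(doc)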
327,870
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/examples/langchain_simple/agent.py
|
langchain_simple.agent.LangChainAgentRunner
|
from flux0_stream.frameworks.langchain import RunContext, filter_and_map_events, handle_event
from flux0_core.agent_runners.context import Context
from flux0_core.sessions import StatusEventData
from langchain_core.messages import HumanMessage, SystemMessage
from flux0_core.agent_runners.api import AgentRunner, Deps, agent_runner
from examples.utils.utils import read_user_input
from langchain.chat_models import init_chat_model
@agent_runner('langchain_simple')
class LangChainAgentRunner(AgentRunner):
async def run(self, context: Context, deps: Deps) -> bool:
agent = await deps.read_agent(context.agent_id)
if not agent:
deps.logger.error(f'Agent with ID {context.agent_id} not found')
return False
user_input = await read_user_input(deps, context)
model = init_chat_model('gpt-4.1-nano', model_provider='openai')
messages = [SystemMessage('Translate the following from English into Italian'), HumanMessage(user_input)]
try:
model_events = model.astream_events(messages, stream=True, version='v2')
run_ctx: RunContext = RunContext(last_known_event_offset=0)
async for e in filter_and_map_events(model_events, deps.logger):
await handle_event(agent, deps.correlator.correlation_id, e, deps.event_emitter, deps.logger, run_ctx)
except Exception as e:
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='error', data=str(e)))
return False
finally:
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='completed', acknowledged_offset=0))
return True
|
@agent_runner('langchain_simple')
class LangChainAgentRunner(AgentRunner):
async def run(self, context: Context, deps: Deps) -> bool:
pass
| 3
| 0
| 47
| 4
| 39
| 4
| 4
| 0.1
| 1
| 7
| 4
| 0
| 1
| 0
| 1
| 22
| 48
| 4
| 40
| 9
| 38
| 4
| 19
| 9
| 17
| 4
| 5
| 2
| 4
|
327,871
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/examples/langgraph_weather/weather_agent.py
|
langgraph_weather.weather_agent.WeatherAgentRunner
|
from flux0_core.sessions import StatusEventData
from flux0_core.agent_runners.api import AgentRunner, Deps, agent_runner
from examples.utils.utils import read_user_input
from flux0_core.agent_runners.context import Context
from flux0_stream.frameworks.langchain import RunContext, filter_and_map_events, handle_event
from langchain_core.runnables.config import RunnableConfig
@agent_runner('langgraph_weather')
class WeatherAgentRunner(AgentRunner):
async def run(self, context: Context, deps: Deps) -> bool:
agent = await deps.read_agent(context.agent_id)
if not agent:
raise ValueError(f'Agent with id {context.agent_id} not found')
user_input = await read_user_input(deps, context)
if not user_input:
raise ValueError('No user input found in session events')
deps.logger.info(f'User Input: {user_input} for session {context.session_id} and agent {agent.id}')
input = {'messages': [('user', user_input)]}
config = RunnableConfig(configurable={'thread_id': context.session_id, 'agent_id': agent.id})
try:
model_events = weather_agent.astream_events(input=input, config=config, version='v2')
run_ctx: RunContext = RunContext(last_known_event_offset=0)
async for e in filter_and_map_events(model_events, deps.logger):
await handle_event(agent, deps.correlator.correlation_id, e, deps.event_emitter, deps.logger, run_ctx)
return True
except Exception as e:
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='error', data=str(e)))
return False
finally:
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='completed'))
|
@agent_runner('langgraph_weather')
class WeatherAgentRunner(AgentRunner):
async def run(self, context: Context, deps: Deps) -> bool:
pass
| 3
| 0
| 52
| 4
| 45
| 3
| 5
| 0.07
| 1
| 8
| 4
| 0
| 1
| 0
| 1
| 22
| 53
| 4
| 46
| 8
| 44
| 3
| 21
| 8
| 19
| 5
| 5
| 2
| 5
|
327,872
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/store/memory.py
|
memory.DocInProgress
|
from typing import Dict, List, MutableMapping, Optional, Self, TypedDict, Union, cast
from flux0_core.types import JSONSerializable
class DocInProgress(TypedDict, total=False):
metadata: MutableMapping[str, JSONSerializable]
content: JSONSerializable
|
class DocInProgress(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 1
| 0
| 0
|
327,873
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/emitter/memory.py
|
memory.InMemoryEventEmitterFactory
|
from flux0_core.sessions import EventId, SessionId, StatusEventData
from flux0_stream.emitter.api import EventEmitter, FinalSubscriber, ProcessedSubscriber
from flux0_core.agents import AgentId
class InMemoryEventEmitterFactory:
def __init__(self, event_emitter: EventEmitter) -> None:
self._event_emitter = event_emitter
async def create_event_emitter(self, emitting_agent_id: AgentId, session_id: SessionId) -> EventEmitter:
return self._event_emitter
|
class InMemoryEventEmitterFactory:
def __init__(self, event_emitter: EventEmitter) -> None:
pass
async def create_event_emitter(self, emitting_agent_id: AgentId, session_id: SessionId) -> EventEmitter:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0.11
| 0
| 1
| 1
| 0
| 2
| 1
| 2
| 2
| 11
| 1
| 9
| 8
| 2
| 1
| 5
| 4
| 2
| 1
| 0
| 0
| 2
|
327,874
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/emitter/memory.py
|
memory.MemoryEventEmitter
|
from flux0_stream.emitter.api import EventEmitter, FinalSubscriber, ProcessedSubscriber
from flux0_stream.types import ChunkEvent, EmittedEvent
from flux0_stream.store.api import EventStore
import asyncio
from typing import Dict, List, Optional, Self, Union
from flux0_core.sessions import EventId, SessionId, StatusEventData
from flux0_core.logging import Logger
class MemoryEventEmitter(EventEmitter):
"""
In-memory implementation of EventEmitter using asyncio.Queue.
- Uses a **single** `asyncio.Queue[QueueMessage]` for processing both event chunks and status updates.
- Handles **event finalization** when receiving a status event with `"ready"` or `"completed"`.
- Supports **subscriber notifications** for both **processed chunks** and **finalized events**.
- Implements a **worker loop** that continuously processes queued messages.
- Ensures **clean shutdown**, processing remaining messages before stopping.
"""
def __init__(self, event_store: EventStore, logger: Logger) -> None:
"""Initializes the event emitter with an event queue and subscriber management."""
self.event_store: EventStore = event_store
self.logger: Logger = logger
self.queue: asyncio.Queue[QueueMessage] = asyncio.Queue()
self.processed_subscribers: Dict[str, List[ProcessedSubscriber]] = {}
self.final_subscribers: Dict[str, List[FinalSubscriber]] = {}
self._worker_task: asyncio.Task[None] = asyncio.create_task(self._worker_loop())
async def enqueue_status_event(self, correlation_id: str, data: StatusEventData, event_id: Optional[EventId]=None) -> None:
"""Enqueues a status event for a specific execution (correlation_id)."""
await self.queue.put(QueueMessage(correlation_id, event_id, data))
async def enqueue_event_chunk(self, chunk: ChunkEvent) -> None:
"""Enqueues an event chunk for processing."""
await self.queue.put(QueueMessage(chunk.correlation_id, chunk.event_id, chunk))
def subscribe_processed(self, correlation_id: str, subscriber: ProcessedSubscriber) -> None:
"""Registers a subscriber to receive event chunks for a specific execution (correlation_id)."""
self.processed_subscribers.setdefault(correlation_id, []).append(subscriber)
def subscribe_final(self, correlation_id: str, subscriber: FinalSubscriber) -> None:
"""Registers a subscriber to receive finalized events for a specific execution (correlation_id)."""
self.final_subscribers.setdefault(correlation_id, []).append(subscriber)
def unsubscribe_processed(self, correlation_id: str, subscriber: ProcessedSubscriber) -> None:
"""Removes a processed chunk subscriber for a specific execution (correlation_id)."""
if correlation_id in self.processed_subscribers:
self.processed_subscribers[correlation_id].remove(subscriber)
def unsubscribe_final(self, correlation_id: str, subscriber: FinalSubscriber) -> None:
"""Removes a finalized event subscriber for a specific execution (correlation_id)."""
if correlation_id in self.final_subscribers:
self.final_subscribers[correlation_id].remove(subscriber)
async def _worker_loop(self) -> None:
"""Background task that processes messages from the queue."""
while True:
message: Optional[QueueMessage] = None
try:
message = await self.queue.get()
if isinstance(message.data, ChunkEvent):
await self._process_event_chunk(message.correlation_id, message.data)
elif isinstance(message.data, dict) and 'status' in message.data:
await self._process_status_event(message.correlation_id, message.event_id, message.data)
except asyncio.CancelledError:
break
except Exception as e:
self.logger.error(f'Error processing message: {e}', exc_info=True)
finally:
if message is not None:
self.queue.task_done()
async def _process_event_chunk(self, correlation_id: str, chunk: ChunkEvent) -> None:
"""Processes an event chunk and notifies processed subscribers."""
await self.event_store.add_chunk(chunk)
if correlation_id in self.processed_subscribers:
for subscriber in self.processed_subscribers[correlation_id]:
await subscriber(chunk)
async def _process_status_event(self, correlation_id: str, event_id: Optional[EventId], data: StatusEventData) -> None:
"""Processes a status event, finalizing events if required."""
is_final: bool = data['status'] in {'ready'}
if is_final:
if event_id is None:
raise ValueError('Event ID is required for finalization')
finalized_event = await self.event_store.finalize_event(correlation_id, event_id)
if finalized_event is None:
raise ValueError(f'Failed to finalize event for event_id: {event_id}')
if correlation_id in self.final_subscribers:
for subscriber in self.final_subscribers[correlation_id]:
await subscriber(finalized_event)
if correlation_id in self.final_subscribers:
for subscriber in self.final_subscribers[correlation_id]:
await subscriber(EmittedEvent(id=event_id if event_id is not None else EventId(''), correlation_id=correlation_id, source='ai_agent', type='status', data=data))
async def shutdown(self) -> None:
"""Shuts down the event emitter, ensuring all queued events are processed."""
self.logger.debug('Shutting down EventEmitter, processing remaining messages...')
self._worker_task.cancel()
try:
await self._worker_task
except asyncio.CancelledError:
pass
try:
while not self.queue.empty():
self.logger.debug('Processing remaining messages...')
message: QueueMessage = self.queue.get_nowait()
if isinstance(message.data, ChunkEvent):
await self._process_event_chunk(message.correlation_id, message.data)
elif isinstance(message.data, dict) and 'status' in message.data:
await self._process_status_event(message.correlation_id, message.event_id, message.data)
self.queue.task_done()
except asyncio.QueueEmpty:
pass
except Exception as e:
self.logger.error(f'Error processing remaining messages: {e}', exc_info=True)
self.logger.debug('EventEmitter shutdown complete.')
async def __aenter__(self) -> Self:
"""Allows the event emitter to be used with an async context manager."""
return self
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[object]) -> None:
"""Ensures the event emitter shuts down properly when used in an async context manager."""
await self.shutdown()
|
class MemoryEventEmitter(EventEmitter):
'''
In-memory implementation of EventEmitter using asyncio.Queue.
- Uses a **single** `asyncio.Queue[QueueMessage]` for processing both event chunks and status updates.
- Handles **event finalization** when receiving a status event with `"ready"` or `"completed"`.
- Supports **subscriber notifications** for both **processed chunks** and **finalized events**.
- Implements a **worker loop** that continuously processes queued messages.
- Ensures **clean shutdown**, processing remaining messages before stopping.
'''
def __init__(self, event_store: EventStore, logger: Logger) -> None:
'''Initializes the event emitter with an event queue and subscriber management.'''
pass
async def enqueue_status_event(self, correlation_id: str, data: StatusEventData, event_id: Optional[EventId]=None) -> None:
'''Enqueues a status event for a specific execution (correlation_id).'''
pass
async def enqueue_event_chunk(self, chunk: ChunkEvent) -> None:
'''Enqueues an event chunk for processing.'''
pass
def subscribe_processed(self, correlation_id: str, subscriber: ProcessedSubscriber) -> None:
'''Registers a subscriber to receive event chunks for a specific execution (correlation_id).'''
pass
def subscribe_final(self, correlation_id: str, subscriber: FinalSubscriber) -> None:
'''Registers a subscriber to receive finalized events for a specific execution (correlation_id).'''
pass
def unsubscribe_processed(self, correlation_id: str, subscriber: ProcessedSubscriber) -> None:
'''Removes a processed chunk subscriber for a specific execution (correlation_id).'''
pass
def unsubscribe_final(self, correlation_id: str, subscriber: FinalSubscriber) -> None:
'''Removes a finalized event subscriber for a specific execution (correlation_id).'''
pass
async def _worker_loop(self) -> None:
'''Background task that processes messages from the queue.'''
pass
async def _process_event_chunk(self, correlation_id: str, chunk: ChunkEvent) -> None:
'''Processes an event chunk and notifies processed subscribers.'''
pass
async def _process_status_event(self, correlation_id: str, event_id: Optional[EventId], data: StatusEventData) -> None:
'''Processes a status event, finalizing events if required.'''
pass
async def shutdown(self) -> None:
'''Shuts down the event emitter, ensuring all queued events are processed.'''
pass
async def __aenter__(self) -> Self:
'''Allows the event emitter to be used with an async context manager.'''
pass
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[object]) -> None:
'''Ensures the event emitter shuts down properly when used in an async context manager.'''
pass
| 14
| 14
| 11
| 1
| 8
| 2
| 3
| 0.36
| 1
| 12
| 6
| 0
| 13
| 6
| 13
| 39
| 167
| 28
| 105
| 37
| 82
| 38
| 79
| 26
| 65
| 9
| 5
| 3
| 37
|
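A hedged end-to-end sketch of MemoryEventEmitter driving MemoryEventStore. A standard-library logger stands in for flux0_core.logging.Logger (an assumption: the emitter only calls .debug/.error on it), the ids are invented, and the root-append patch path follows the style the store's finalization handles.

import asyncio
import logging

from flux0_core.sessions import EventId, StatusEventData
from flux0_stream.emitter.memory import MemoryEventEmitter
from flux0_stream.store.memory import MemoryEventStore
from flux0_stream.types import ChunkEvent, EmittedEvent


async def main() -> None:
    logger = logging.getLogger("emitter-demo")
    async with MemoryEventStore() as store:
        async with MemoryEventEmitter(event_store=store, logger=logger) as emitter:
            finalized: list[EmittedEvent] = []

            async def on_final(event: EmittedEvent) -> None:
                finalized.append(event)

            emitter.subscribe_final("corr_1", on_final)

            # One streamed chunk for event "evt_1"...
            await emitter.enqueue_event_chunk(
                ChunkEvent(
                    correlation_id="corr_1",
                    event_id=EventId("evt_1"),
                    seq=0,
                    patches=[{"op": "add", "path": "/-", "value": "Hi"}],
                    metadata={"agent_id": "a1", "agent_name": "demo"},
                )
            )
            # ...followed by a "ready" status, which triggers finalization.
            await emitter.enqueue_status_event(
                correlation_id="corr_1",
                data=StatusEventData(type="status", status="ready"),
                event_id=EventId("evt_1"),
            )
            await emitter.queue.join()  # let the worker drain both messages
            print(finalized)


asyncio.run(main())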
327,875
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/store/memory.py
|
memory.MemoryEventStore
|
import copy
from flux0_stream.store.api import EventStore
from flux0_stream.patches import ensure_structure_for_patch
from jsonpatch import JsonPatch, apply_patch
from typing import Dict, List, MutableMapping, Optional, Self, TypedDict, Union, cast
import json
from flux0_core.agents import AgentId
from flux0_core.types import JSONSerializable
from flux0_stream.types import ChunkEvent, EmittedEvent
from flux0_core.sessions import ContentPart, EventId, MessageEventData, Participant, ReasoningPart, ToolCall, ToolCallPart, ToolCallPartType, ToolEventData, ToolResult
class MemoryEventStore(EventStore):
"""
In-memory implementation of EventStore.
- Applies JSON patches incrementally.
- Handles out-of-order chunk arrivals using sequence numbers.
- Finalization is instant, as the document is always up-to-date.
"""
def __init__(self) -> None:
self.in_progress_docs: Dict[EventId, DocInProgress] = {}
self.chunk_buffer: Dict[EventId, Dict[int, List[JsonPatch]]] = {}
self.chunk_index_tracker: Dict[EventId, int] = {}
self.finalized_events: Dict[EventId, EmittedEvent] = {}
async def __aenter__(self) -> Self:
"""Allows the event store to be used with an async context manager."""
return self
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[object]) -> None:
self.in_progress_docs.clear()
self.chunk_buffer.clear()
self.chunk_index_tracker.clear()
        self.finalized_events.clear()
async def add_chunk(self, chunk: ChunkEvent) -> None:
"""
Receives a patch chunk and applies it immediately if it's in order.
If out of order, stores it in a buffer for later application.
"""
event_id = chunk.event_id
if event_id not in self.in_progress_docs:
self.in_progress_docs[event_id] = DocInProgress(metadata={**chunk.metadata})
sequence_number = chunk.seq
expected_index = self.chunk_index_tracker.get(event_id, -1) + 1
jpatch_copy = copy.deepcopy(chunk.patches)
jpatch = JsonPatch(jpatch_copy)
doc_in_progress = self.in_progress_docs[event_id]
contains_append = any((op['path'].endswith('/-') for op in jpatch.patch))
if contains_append or sequence_number == expected_index:
doc_in_progress = self.in_progress_docs[event_id]
for patch in jpatch:
content = ensure_structure_for_patch(doc_in_progress.get('content'), patch)
content = apply_patch(content, [patch])
if content is None:
raise ValueError('Document content is missing')
doc_in_progress['content'] = content
self.chunk_index_tracker[event_id] = sequence_number
self.in_progress_docs[event_id] = doc_in_progress
self._apply_buffered_patches(event_id)
else:
            if sequence_number not in self.chunk_buffer.setdefault(event_id, {}):
self.chunk_buffer[event_id][sequence_number] = []
self.chunk_buffer[event_id][sequence_number].append(jpatch)
def _apply_buffered_patches(self, event_id: EventId) -> None:
"""Applies buffered patches when their missing previous chunks arrive."""
if event_id not in self.chunk_buffer:
return
while self.chunk_index_tracker[event_id] + 1 in self.chunk_buffer.get(event_id, {}):
next_index = self.chunk_index_tracker[event_id] + 1
patches_to_apply = self.chunk_buffer[event_id].pop(next_index)
for patch in patches_to_apply:
self.in_progress_docs[event_id] = apply_patch(self.in_progress_docs[event_id], patch)
self.chunk_index_tracker[event_id] = next_index
if not self.chunk_buffer[event_id]:
del self.chunk_buffer[event_id]
async def finalize_event(self, correlation_id: str, event_id: EventId) -> Optional[EmittedEvent]:
"""
Finalizes an event by returning the fully built document.
Removes it from the in-progress store and marks it as finalized.
"""
if event_id not in self.in_progress_docs:
return None
final_data = self.in_progress_docs.pop(event_id)
self.chunk_buffer.pop(event_id, None)
self.chunk_index_tracker.pop(event_id, None)
meta = final_data.get('metadata', {})
content = final_data.get('content')
agent_id = AgentId(str(meta.pop('agent_id', '')))
agent_name = str(meta.pop('agent_name'))
kind = meta.pop('kind') if 'kind' in meta else None
if not content:
return None
if isinstance(content, str):
str_parts: List[Union[ContentPart, ReasoningPart, ToolCallPart]] = []
if kind and kind == 'reasoning':
str_parts.append(ReasoningPart(type='reasoning', reasoning=content))
else:
str_parts.append(ContentPart(type='content', content=content))
finalized_event = EmittedEvent(correlation_id=correlation_id, id=event_id, source='ai_agent', type='message', data=MessageEventData(type='message', parts=str_parts, participant=Participant(id=agent_id, name=agent_name)), metadata=meta)
elif isinstance(content, list):
if all((isinstance(item, str) for item in content)):
content = ''.join(map(str, content))
list_parts: List[Union[ContentPart, ReasoningPart, ToolCallPart]] = []
if kind and kind == 'reasoning':
list_parts.append(ReasoningPart(type='reasoning', reasoning=content))
else:
list_parts.append(ContentPart(type='content', content=content))
finalized_event = EmittedEvent(correlation_id=correlation_id, id=event_id, source='ai_agent', type='message', data=MessageEventData(type='message', parts=list_parts, participant=Participant(id=agent_id, name=agent_name)), metadata=meta)
elif isinstance(content, dict):
if 'tool_calls' in content:
tool_call_parts: list[dict[str, JSONSerializable]] = cast(list[dict[str, JSONSerializable]], content['tool_calls'])
tcpl: List[Union[ContentPart, ReasoningPart, ToolCallPart]] = []
for tool_call in tool_call_parts:
args = tool_call['args']
if isinstance(args, list):
args_as_list: list[str] = cast(list[str], args)
json_string = ''.join(args_as_list)
final_args = json.loads(json_string)
elif isinstance(args, dict):
final_args = args
else:
raise ValueError('args is not a list or dict')
tool_part = ToolCallPart(type=cast(ToolCallPartType, tool_call['type']), tool_call_id=cast(str, tool_call['tool_call_id']), tool_name=cast(str, tool_call['tool_name']), args=final_args)
tcpl.append(tool_part)
finalized_event = EmittedEvent(correlation_id=correlation_id, id=event_id, source='ai_agent', type='message', data=MessageEventData(type='message', parts=tcpl, participant=Participant(id=agent_id, name=agent_name)))
elif 'tool_call_results' in content:
tool_calls: list[dict[str, JSONSerializable]] = cast(list[dict[str, JSONSerializable]], content['tool_call_results'])
tcl: list[ToolCall] = []
for tool_call in tool_calls:
tc = ToolCall(tool_call_id=cast(str, tool_call['tool_call_id']), tool_name=cast(str, tool_call['tool_name']), args=cast(dict[str, JSONSerializable], tool_call['args']), result=ToolResult(data=tool_call['data']['result'] if isinstance(tool_call['data'], dict) else None, metadata={}, control={'mode': 'auto'}))
tcl.append(tc)
finalized_event = EmittedEvent(correlation_id=correlation_id, id=event_id, source='ai_agent', type='tool', data=ToolEventData(type='tool_call_result', tool_calls=tcl))
else:
parts: List[Union[ContentPart, ReasoningPart, ToolCallPart]] = []
if kind and kind == 'reasoning':
parts.append(ReasoningPart(type='reasoning', reasoning=content))
else:
parts.append(ContentPart(type='content', content=content))
finalized_event = EmittedEvent(correlation_id=correlation_id, id=event_id, source='ai_agent', type='message', data=MessageEventData(type='message', parts=parts, participant=Participant(id=agent_id, name=agent_name)), metadata=meta)
else:
raise ValueError('Finalized event is unrecognized')
self.finalized_events[event_id] = finalized_event
return finalized_event
|
class MemoryEventStore(EventStore):
'''
In-memory implementation of EventStore.
- Applies JSON patches incrementally.
- Handles out-of-order chunk arrivals using sequence numbers.
- Finalization is instant, as the document is always up-to-date.
'''
def __init__(self) -> None:
pass
async def __aenter__(self) -> Self:
'''Allows the event store to be used with an async context manager.'''
pass
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[object]) -> None:
pass
async def add_chunk(self, chunk: ChunkEvent) -> None:
'''
Receives a patch chunk and applies it immediately if it's in order.
If out of order, stores it in a buffer for later application.
'''
pass
def _apply_buffered_patches(self, event_id: EventId) -> None:
'''Applies buffered patches when their missing previous chunks arrive.'''
pass
async def finalize_event(self, correlation_id: str, event_id: EventId) -> Optional[EmittedEvent]:
'''
Finalizes an event by returning the fully built document.
Removes it from the in-progress store and marks it as finalized.
'''
pass
| 7
| 5
| 40
| 2
| 32
| 7
| 5
| 0.23
| 1
| 18
| 11
| 0
| 6
| 4
| 6
| 28
| 254
| 20
| 195
| 51
| 181
| 45
| 102
| 44
| 95
| 18
| 5
| 4
| 32
|
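A minimal sketch of MemoryEventStore on its own: two in-order chunks are applied as JSON patches, then the event is finalized into an EmittedEvent. Ids and metadata keys are illustrative, and the exact behavior of ensure_structure_for_patch on an empty document is assumed to support the root-append path used here.

import asyncio

from flux0_core.sessions import EventId
from flux0_stream.store.memory import MemoryEventStore
from flux0_stream.types import ChunkEvent


async def demo() -> None:
    async with MemoryEventStore() as store:
        eid = EventId("evt_1")
        meta = {"agent_id": "a1", "agent_name": "demo"}
        await store.add_chunk(ChunkEvent("corr_1", eid, 0,
                                         [{"op": "add", "path": "/-", "value": "Hel"}], meta))
        await store.add_chunk(ChunkEvent("corr_1", eid, 1,
                                         [{"op": "add", "path": "/-", "value": "lo"}], meta))
        event = await store.finalize_event("corr_1", eid)
        print(event)  # an EmittedEvent message built from the accumulated chunks


asyncio.run(demo())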
327,876
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/stream/src/flux0_stream/emitter/memory.py
|
memory.QueueMessage
|
from flux0_core.sessions import EventId, SessionId, StatusEventData
from flux0_stream.types import ChunkEvent, EmittedEvent
from typing import Dict, List, Optional, Self, Union
from dataclasses import dataclass
@dataclass
class QueueMessage:
"""Represents a message in the event queue, distinguishing between event chunks and status events."""
correlation_id: str
event_id: Optional[EventId]
data: Union[ChunkEvent, StatusEventData]
|
@dataclass
class QueueMessage:
'''Represents a message in the event queue, distinguishing between event chunks and status events.'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 1
| 4
| 1
| 3
| 1
| 4
| 1
| 3
| 0
| 0
| 0
| 0
|
327,877
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/scripts/psr/custom_parser/monorepo_parser.py
|
monorepo_parser.ConventionalCommitMonorepoParser
|
from semantic_release.commit_parser._base import CommitParser, ParserOptions
from semantic_release.commit_parser.angular import LONG_TYPE_NAMES
from fnmatch import fnmatch
from semantic_release.errors import InvalidParserOptions
import os
from semantic_release.commit_parser.token import ParsedCommit, ParsedMessageResult, ParseError, ParseResult
from pathlib import Path
from semantic_release.commit_parser.util import breaking_re, parse_paragraphs, sort_numerically
from semantic_release.enums import LevelBump
from re import compile as regexp
from functools import reduce
import re
class ConventionalCommitMonorepoParser(CommitParser[ParseResult, ConventionalMonorepoParserOptions]):
""""""
parser_options = ConventionalMonorepoParserOptions
def __init__(self, options: ConventionalMonorepoParserOptions | None=None) -> None:
super().__init__(options)
self.file_selection_filters = []
self.file_ignore_filters = []
for str_path in map(str, self.options.path_filters):
str_filter = str_path[1:] if str_path.startswith('!') else str_path
filter_list = self.file_ignore_filters if str_path.startswith('!') else self.file_selection_filters
filter_list.extend(filter(None, [None if str_path.endswith('*') else f'{str_filter.rstrip(os.sep)}{os.sep}**', None if str_path.endswith(os.sep) else str_filter]))
try:
commit_type_pattern = regexp('(?P<type>%s)' % str.join('|', self.options.allowed_tags))
except re.error as err:
raise InvalidParserOptions(str.join('\n', [f'Invalid options for {self.__class__.__name__}', 'Unable to create regular expression from configured commit-types.', 'Please check the configured commit-types and remove or escape any regular expression characters.'])) from err
self.re_parser = regexp(str.join('', ['^' + commit_type_pattern.pattern, '(?:\\(' + self.options.scope_prefix + '(?P<scope>[^\\n]+)\\))?', '(?P<break>!)?:\\s+', '(?P<subject>[^\\n]+)', '(?:\\n\\n(?P<text>.+))?']), flags=re.DOTALL)
self.mr_selector = regexp('[\\t ]+\\((?:pull request )?(?P<mr_number>[#!]\\d+)\\)[\\t ]*$')
self.issue_selector = regexp(str.join('', ['^(?:clos(?:e|es|ed|ing)|fix(?:es|ed|ing)?|resolv(?:e|es|ed|ing)|implement(?:s|ed|ing)?):', '[\\t ]+(?P<issue_predicate>.+)[\\t ]*$']), flags=re.MULTILINE | re.IGNORECASE)
@staticmethod
def get_default_options() -> ConventionalMonorepoParserOptions:
return ConventionalMonorepoParserOptions()
def commit_body_components_separator(self, accumulator: dict[str, list[str]], text: str) -> dict[str, list[str]]:
if (match := breaking_re.match(text)):
accumulator['breaking_descriptions'].append(match.group(1) or '')
return accumulator
if (match := self.issue_selector.search(text)):
predicate = regexp(',? and | *[,;/& ] *').sub(',', match.group('issue_predicate') or '')
has_number = regexp('\\d+')
new_issue_refs: set[str] = set(filter(lambda issue_str, validator=has_number: validator.search(issue_str), predicate.split(',')))
accumulator['linked_issues'] = sort_numerically(set(accumulator['linked_issues']).union(new_issue_refs))
return accumulator
if text not in accumulator['descriptions']:
accumulator['descriptions'].append(text)
return accumulator
def parse_message(self, message: str) -> ParsedMessageResult | None:
if not (parsed := self.re_parser.match(message)):
return None
parsed_break = parsed.group('break')
parsed_scope = parsed.group('scope')
parsed_subject = parsed.group('subject')
parsed_text = parsed.group('text')
parsed_type = parsed.group('type')
linked_merge_request = ''
if (mr_match := self.mr_selector.search(parsed_subject)):
linked_merge_request = mr_match.group('mr_number')
parsed_subject = self.mr_selector.sub('', parsed_subject).strip()
body_components: dict[str, list[str]] = reduce(self.commit_body_components_separator, [parsed_subject, *parse_paragraphs(parsed_text or '')], {'breaking_descriptions': [], 'descriptions': [], 'linked_issues': []})
level_bump = LevelBump.MAJOR if body_components['breaking_descriptions'] or parsed_break else self.options.tag_to_level.get(parsed_type, self.options.default_bump_level)
return ParsedMessageResult(bump=level_bump, type=parsed_type, category=LONG_TYPE_NAMES.get(parsed_type, parsed_type), scope=parsed_scope, descriptions=tuple(body_components['descriptions']), breaking_descriptions=tuple(body_components['breaking_descriptions']), linked_issues=tuple(body_components['linked_issues']), linked_merge_request=linked_merge_request)
def parse(self, commit: Commit) -> ParseResult:
"""Attempt to parse the commit message with a regular expression into a ParseResult."""
git_root = Path(commit.repo.working_tree_dir or commit.repo.working_dir).absolute().resolve()
relevant_changed_files = []
for rel_git_path in commit.stats.files.keys():
full_path = str(git_root / rel_git_path)
for pass_filter in self.file_selection_filters:
if (select_file := fnmatch(full_path, pass_filter)):
for ignore_filter in self.file_ignore_filters:
if fnmatch(full_path, ignore_filter):
select_file = False
break
if select_file:
relevant_changed_files.append(rel_git_path)
break
pmsg_result = self.parse_message(str(commit.message))
if len(relevant_changed_files) == 0:
if not pmsg_result:
return _logged_parse_error(commit, str.join(' ', [f'Commit {commit.hexsha[:7]} is not scoped with the scope prefix {self.options.scope_prefix}', 'and has no changed files in the path filter(s)', f'relative to the git root {git_root}']) if self.options.scope_prefix and self.options.scope_prefix not in commit.message.split('\n', maxsplit=1)[0] else f'Format Mismatch! Unable to parse commit message: {commit.message!r}')
if not self.options.scope_prefix:
return _logged_parse_error(commit, str.join(' ', [f'Commit {commit.hexsha[:7]} has no changed files in the path filter(s)', f'relative to the git root {git_root}']))
if not pmsg_result:
return _logged_parse_error(commit, f'Commit {commit.hexsha[:7]} scope does not match scope prefix {self.options.scope_prefix}' if self.options.scope_prefix and self.options.scope_prefix not in commit.message.split('\n')[0] else f'Format Mismatch! Unable to parse commit message: {commit.message!r}')
logger.debug('commit %s introduces a %s level_bump', commit.hexsha[:8], pmsg_result.bump)
return ParsedCommit.from_parsed_message_result(commit, pmsg_result)
|
class ConventionalCommitMonorepoParser(CommitParser[ParseResult, ConventionalMonorepoParserOptions]):
''''''
def __init__(self, options: ConventionalMonorepoParserOptions | None=None) -> None:
pass
@staticmethod
def get_default_options() -> ConventionalMonorepoParserOptions:
pass
def commit_body_components_separator(self, accumulator: dict[str, list[str]], text: str) -> dict[str, list[str]]:
pass
def parse_message(self, message: str) -> ParsedMessageResult | None:
pass
    def parse(self, commit: Commit) -> ParseResult:
'''Attempt to parse the commit message with a regular expression into a ParseResult.'''
pass
| 7
| 2
| 49
| 5
| 38
| 8
| 6
| 0.21
| 1
| 10
| 1
| 0
| 4
| 5
| 5
| 5
| 260
| 29
| 194
| 44
| 183
| 40
| 71
| 34
| 65
| 13
| 1
| 5
| 29
|
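A hedged sketch of the parser's parse_message path, which needs no git history. The module path is inferred from the script location above, python-semantic-release is assumed to be installed, and the commit message is invented.

from monorepo_parser import (
    ConventionalCommitMonorepoParser,
    ConventionalMonorepoParserOptions,
)

parser = ConventionalCommitMonorepoParser(
    ConventionalMonorepoParserOptions(scope_prefix="core-", path_filters=("packages/core",))
)
result = parser.parse_message(
    "feat(core-agents): add agent deletion (#42)\n\nCloses: #41"
)
if result is not None:
    # Expect a MINOR bump, scope "agents", linked issue "#41" and merge request "#42".
    print(result.bump, result.scope, result.linked_issues, result.linked_merge_request)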
327,878
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/scripts/psr/custom_parser/monorepo_parser.py
|
monorepo_parser.ConventionalMonorepoParserOptions
|
from pydantic import Field, field_validator
from typing_extensions import Annotated
from semantic_release.commit_parser._base import CommitParser, ParserOptions
from pathlib import Path
from typing import TYPE_CHECKING, Any, Iterable, Tuple
from semantic_release.enums import LevelBump
from itertools import zip_longest
from pydantic.dataclasses import dataclass
@dataclass
class ConventionalMonorepoParserOptions(ParserOptions):
"""Options dataclass for ConventionalCommitMonorepoParser."""
minor_tags: Tuple[str, ...] = ('feat',)
'Commit-type prefixes that should result in a minor release bump.'
patch_tags: Tuple[str, ...] = ('fix', 'perf')
'Commit-type prefixes that should result in a patch release bump.'
other_allowed_tags: Tuple[str, ...] = ('build', 'chore', 'ci', 'docs', 'style', 'refactor', 'test')
'Commit-type prefixes that are allowed but do not result in a version bump.'
default_bump_level: LevelBump = LevelBump.NO_RELEASE
'The minimum bump level to apply to valid commit message.'
path_filters: Annotated[Tuple[Path, ...], Field(validate_default=True)] = (Path('.'),)
'\n A set of relative paths to filter commits by. Only commits with file changes that\n match these file paths or its subdirectories will be considered valid commits.\n\n Syntax is similar to .gitignore with file path globs and inverse file match globs\n via `!` prefix. Paths should be relative to the current working directory.\n '
scope_prefix: str = ''
'\n A prefix that will be striped from the scope when parsing commit messages.\n\n If set, it will cause unscoped commits to be ignored. Use this in tandem with\n the path_filter option to filter commits by directory and scope.\n '
@field_validator('path_filters', mode='before')
@classmethod
def convert_strs_to_paths(cls, value: Any) -> Tuple[Path]:
values = value if isinstance(value, Iterable) else [value]
results = []
for val in values:
if isinstance(val, (str, Path)):
results.append(Path(val))
continue
raise TypeError(f'Invalid type: {type(val)}, expected str or Path.')
return tuple(results)
@field_validator('path_filters', mode='after')
@classmethod
def resolve_path(cls, dir_paths: tuple[Path, ...]) -> Tuple[Path, ...]:
return tuple([Path(f'!{Path(str_path[1:]).expanduser().absolute().resolve()}') if (str_path := str(path)).startswith('!') else path.expanduser().absolute().resolve() for path in dir_paths])
@property
def tag_to_level(self) -> dict[str, LevelBump]:
"""A mapping of commit tags to the level bump they should result in."""
return self._tag_to_level
@property
def allowed_tags(self) -> tuple[str, ...]:
"""
All commit-type prefixes that are allowed.
These are used to identify a valid commit message. If a commit message does not start with
one of these prefixes, it will not be considered a valid commit message.
:return: A tuple of all allowed commit-type prefixes (ordered from most to least significant)
"""
return tuple(list(self.tag_to_level.keys())[::-1])
def __post_init__(self) -> None:
self._tag_to_level: dict[str, LevelBump] = {str(tag): level for tag, level in [*zip_longest(self.other_allowed_tags, (), fillvalue=self.default_bump_level), *zip_longest(self.patch_tags, (), fillvalue=LevelBump.PATCH), *zip_longest(self.minor_tags, (), fillvalue=LevelBump.MINOR)] if '|' not in str(tag)}
|
@dataclass
class ConventionalMonorepoParserOptions(ParserOptions):
'''Options dataclass for ConventionalCommitMonorepoParser.'''
@field_validator('path_filters', mode='before')
@classmethod
def convert_strs_to_paths(cls, value: Any) -> Tuple[Path]:
pass
@field_validator('path_filters', mode='after')
@classmethod
def resolve_path(cls, dir_paths: tuple[Path, ...]) -> Tuple[Path, ...]:
pass
@property
def tag_to_level(self) -> dict[str, LevelBump]:
'''A mapping of commit tags to the level bump they should result in.'''
pass
@property
def allowed_tags(self) -> tuple[str, ...]:
'''
All commit-type prefixes that are allowed.
These are used to identify a valid commit message. If a commit message does not start with
one of these prefixes, it will not be considered a valid commit message.
:return: A tuple of all allowed commit-type prefixes (ordered from most to least significant)
'''
pass
def __post_init__(self) -> None:
pass
| 13
| 3
| 10
| 1
| 7
| 3
| 2
| 0.53
| 1
| 8
| 0
| 0
| 3
| 1
| 5
| 5
| 102
| 18
| 55
| 20
| 43
| 29
| 24
| 16
| 18
| 4
| 1
| 2
| 9
|
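A small sketch of how the default ConventionalMonorepoParserOptions map commit types to bump levels (module path assumed from the file location above).

from monorepo_parser import ConventionalMonorepoParserOptions
from semantic_release.enums import LevelBump

opts = ConventionalMonorepoParserOptions()
assert opts.tag_to_level["feat"] is LevelBump.MINOR
assert opts.tag_to_level["fix"] is LevelBump.PATCH
assert opts.tag_to_level["docs"] is LevelBump.NO_RELEASE  # default_bump_level
print(opts.allowed_tags)  # ('feat', 'perf', 'fix', 'test', 'refactor', 'style', 'docs', 'ci', 'chore', 'build')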
327,879
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory.AgentDocumentStore
|
from flux0_nanodb import projection
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from flux0_core.ids import gen_id
from datetime import datetime, timezone
from flux0_nanodb.query import And, Comparison, QueryFilter
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
from flux0_core.agents import Agent, AgentId, AgentStore, AgentType, AgentUpdateParams
class AgentDocumentStore(AgentStore):
VERSION = DocumentVersion('0.0.1')
def __init__(self, db: DocumentDatabase):
self.db = db
self._agent_col: DocumentCollection[_AgentDocument]
async def __aenter__(self) -> Self:
self._agent_col = await self.db.create_collection('agents', _AgentDocument)
return self
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize_agent(self, agent: Agent) -> _AgentDocument:
return _AgentDocument(id=DocumentID(agent.id), version=self.VERSION, type=agent.type, name=agent.name, description=agent.description, created_at=agent.created_at)
def _deserialize_agent(self, doc: _AgentDocument) -> Agent:
return Agent(id=AgentId(doc['id']), type=doc['type'], name=doc['name'], description=doc['description'], created_at=doc['created_at'])
@override
async def create_agent(self, name: str, type: AgentType, description: Optional[str]=None, created_at: Optional[datetime]=None) -> Agent:
created_at = created_at or datetime.now(timezone.utc)
agent = Agent(id=AgentId(gen_id()), name=name, type=type, description=description, created_at=created_at)
await self._agent_col.insert_one(document=self._serialize_agent(agent))
return agent
@override
async def read_agent(self, agent_id: AgentId) -> Optional[Agent]:
result = await self._agent_col.find(Comparison(path='id', op='$eq', value=agent_id))
return self._deserialize_agent(result[0]) if result else None
@override
async def list_agents(self, offset: int=0, limit: int=10, projection: Optional[List[str]]=None) -> Sequence[Agent]:
if offset != 0 or limit != 10:
raise NotImplementedError('Pagination is not supported')
if projection is not None:
raise NotImplementedError('Projection not supported')
return [self._deserialize_agent(d) for d in await self._agent_col.find(filters=None)]
@override
async def update_agent(self, agent_id: AgentId, params: AgentUpdateParams) -> Agent:
raise NotImplementedError
@override
async def delete_agent(self, agent_id: AgentId) -> bool:
result = await self._agent_col.delete_one(Comparison(path='id', op='$eq', value=agent_id))
return result.deleted_count > 0
|
class AgentDocumentStore(AgentStore):
def __init__(self, db: DocumentDatabase):
pass
async def __aenter__(self) -> Self:
pass
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize_agent(self, agent: Agent) -> _AgentDocument:
pass
def _deserialize_agent(self, doc: _AgentDocument) -> Agent:
pass
@override
async def create_agent(self, name: str, type: AgentType, description: Optional[str]=None, created_at: Optional[datetime]=None) -> Agent:
pass
@override
async def read_agent(self, agent_id: AgentId) -> Optional[Agent]:
pass
@override
async def list_agents(self, offset: int=0, limit: int=10, projection: Optional[List[str]]=None) -> Sequence[Agent]:
pass
@override
async def update_agent(self, agent_id: AgentId, params: AgentUpdateParams) -> Agent:
pass
@override
async def delete_agent(self, agent_id: AgentId) -> bool:
pass
| 16
| 0
| 8
| 0
| 8
| 0
| 1
| 0
| 1
| 13
| 6
| 0
| 10
| 2
| 10
| 35
| 99
| 10
| 89
| 54
| 41
| 0
| 33
| 17
| 22
| 3
| 5
| 1
| 13
|
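A hedged sketch of AgentDocumentStore usage. The import path is inferred from the file location above, and `db` stands for any flux0_nanodb DocumentDatabase instance; constructing one is not shown in these records, so it is left as a parameter rather than invented.

from flux0_core.storage.nanodb_memory import AgentDocumentStore
from flux0_nanodb.api import DocumentDatabase


async def demo(db: DocumentDatabase) -> None:
    async with AgentDocumentStore(db) as agents:
        agent = await agents.create_agent(name="translator", type="langchain_simple")
        print(await agents.read_agent(agent.id))
        print([a.name for a in await agents.list_agents()])
        print(await agents.delete_agent(agent.id))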
327,880
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory.RecordingDocumentStore
|
from flux0_core.sessions import ConsumerId, Event, EventId, EventSource, EventType, MessageEventData, Session, SessionId, SessionMode, SessionStore, SessionUpdateParams, StatusEventData, ToolEventData
from flux0_core.recordings import RecordedChunkPayload, RecordedEmittedPayload, RecordedEvent, RecordedEventId, RecordedEventKind, RecordedEventPayload, RecordedHeaderPayload, RecordingId, RecordingStore, TurnRange
from flux0_nanodb import projection
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from flux0_core.ids import gen_id
from datetime import datetime, timezone
from flux0_nanodb.query import And, Comparison, QueryFilter
import asyncio
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
class RecordingDocumentStore(RecordingStore):
VERSION = DocumentVersion('0.0.1')
def __init__(self, db: DocumentDatabase):
self.db = db
self._col: DocumentCollection[_RecordedEventDocument]
self._locks: dict[RecordingId, asyncio.Lock] = DefaultDict(asyncio.Lock)
async def __aenter__(self) -> Self:
self._col = await self.db.create_collection('recorded_events', _RecordedEventDocument)
return self
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize(self, ev: RecordedEvent) -> _RecordedEventDocument:
return _RecordedEventDocument(id=DocumentID(ev.id), recording_id=ev.recording_id, version=self.VERSION, offset=ev.offset, kind=ev.kind, created_at=ev.created_at, payload=ev.payload)
def _deserialize(self, doc: _RecordedEventDocument) -> RecordedEvent:
return RecordedEvent(id=RecordedEventId(doc['id']), recording_id=RecordingId(doc['recording_id']), offset=doc['offset'], kind=doc['kind'], created_at=doc['created_at'], payload=doc['payload'])
async def _next_offset(self, recording_id: RecordingId) -> int:
docs = await self._col.find(Comparison(path='recording_id', op='$eq', value=recording_id))
return max((d['offset'] for d in docs)) + 1 if docs else 1
async def _ensure_header_exists(self, recording_id: RecordingId) -> None:
header = await self.read_header_by_recording_id(recording_id)
if header is None:
raise ValueError(f'Recording header not found for {recording_id}')
@override
async def create_recording(self, source_session_id: SessionId, *, recording_id: Optional[RecordingId]=None, created_at: Optional[datetime]=None) -> RecordedEvent:
rid = recording_id or RecordingId(gen_id())
created_at = created_at or datetime.now(timezone.utc)
header_payload: RecordedHeaderPayload = {'source_session_id': source_session_id}
header = RecordedEvent(id=RecordedEventId(gen_id()), recording_id=rid, offset=0, kind='header', created_at=created_at, payload=header_payload)
await self._col.insert_one(self._serialize(header))
return header
@override
async def append_emitted(self, recording_id: RecordingId, payload: RecordedEmittedPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
await self._ensure_header_exists(recording_id)
created_at = created_at or datetime.now(timezone.utc)
async with self._locks[recording_id]:
offset = await self._next_offset(recording_id)
ev = RecordedEvent(id=RecordedEventId(gen_id()), recording_id=recording_id, offset=offset, kind='emitted', created_at=created_at, payload=payload)
await self._col.insert_one(self._serialize(ev))
return ev
@override
async def append_chunk(self, recording_id: RecordingId, payload: RecordedChunkPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
await self._ensure_header_exists(recording_id)
created_at = created_at or datetime.now(timezone.utc)
offset = await self._next_offset(recording_id)
ev = RecordedEvent(id=RecordedEventId(gen_id()), recording_id=recording_id, offset=offset, kind='chunk', created_at=created_at, payload=payload)
await self._col.insert_one(self._serialize(ev))
return ev
@override
async def read_header_by_source_session_id(self, source_session_id: SessionId) -> Optional[RecordedEvent]:
result = await self._col.find(And(expressions=[Comparison(path='kind', op='$eq', value='header'), Comparison(path='payload.source_session_id', op='$eq', value=source_session_id), Comparison(path='offset', op='$eq', value=0)]))
return self._deserialize(result[0]) if result else None
@override
async def read_header_by_recording_id(self, recording_id: RecordingId) -> Optional[RecordedEvent]:
docs = await self._col.find(And(expressions=[Comparison(path='recording_id', op='$eq', value=recording_id), Comparison(path='offset', op='$eq', value=0), Comparison(path='kind', op='$eq', value='header')]))
return self._deserialize(docs[0]) if docs else None
@override
async def read_next_turn_range_after_offset(self, recording_id: RecordingId, after_offset: int) -> Optional[TurnRange]:
"""
Return the next user-anchored turn that begins strictly AFTER `after_offset`.
A "user anchor" is: kind=='emitted' AND payload.source=='user'.
"""
start_docs = await self._col.find(And(expressions=[Comparison(path='recording_id', op='$eq', value=recording_id), Comparison(path='kind', op='$eq', value='emitted'), Comparison(path='payload.source', op='$eq', value='user'), Comparison(path='offset', op='$gt', value=after_offset)]), projection={'offset': projection.Projection.INCLUDE}, limit=1, sort=[('offset', SortingOrder.ASC)])
if not start_docs:
return None
start = start_docs[0]['offset']
next_docs = await self._col.find(And(expressions=[Comparison(path='recording_id', op='$eq', value=recording_id), Comparison(path='kind', op='$eq', value='emitted'), Comparison(path='payload.source', op='$eq', value='user'), Comparison(path='offset', op='$gt', value=start)]), projection={'offset': projection.Projection.INCLUDE}, limit=1, sort=[('offset', SortingOrder.ASC)])
end = next_docs[0]['offset'] if next_docs else None
return (start, end)
@override
async def read_frames_range(self, recording_id: RecordingId, start_offset_inclusive: int, end_offset_exclusive: Optional[int]=None, *, limit: Optional[int]=None) -> Sequence[RecordedEvent]:
"""
Read frames ordered by offset for [start_offset_inclusive, end_offset_exclusive).
If end_offset_exclusive is None, read to the end (respect `limit` if provided).
"""
filters: list[QueryFilter] = [Comparison(path='recording_id', op='$eq', value=recording_id), Comparison(path='offset', op='$gte', value=start_offset_inclusive)]
if end_offset_exclusive is not None:
filters.append(Comparison(path='offset', op='$lt', value=end_offset_exclusive))
docs = await self._col.find(And(expressions=filters), limit=limit, sort=[('offset', SortingOrder.ASC)])
return [self._deserialize(d) for d in docs]
|
class RecordingDocumentStore(RecordingStore):
def __init__(self, db: DocumentDatabase):
pass
async def __aenter__(self) -> Self:
pass
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize(self, ev: RecordedEvent) -> _RecordedEventDocument:
pass
def _deserialize(self, doc: _RecordedEventDocument) -> RecordedEvent:
pass
async def _next_offset(self, recording_id: RecordingId) -> int:
pass
async def _ensure_header_exists(self, recording_id: RecordingId) -> None:
pass
@override
async def create_recording(self, source_session_id: SessionId, *, recording_id: Optional[RecordingId]=None, created_at: Optional[datetime]=None) -> RecordedEvent:
pass
@override
async def append_emitted(self, recording_id: RecordingId, payload: RecordedEmittedPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
pass
@override
async def append_chunk(self, recording_id: RecordingId, payload: RecordedChunkPayload, *, created_at: Optional[datetime]=None) -> RecordedEvent:
pass
@override
async def read_header_by_source_session_id(self, source_session_id: SessionId) -> Optional[RecordedEvent]:
pass
@override
async def read_header_by_recording_id(self, recording_id: RecordingId) -> Optional[RecordedEvent]:
pass
@override
async def read_next_turn_range_after_offset(self, recording_id: RecordingId, after_offset: int) -> Optional[TurnRange]:
'''
Return the next user-anchored turn that begins strictly AFTER `after_offset`.
A "user anchor" is: kind=='emitted' AND payload.source=='user'.
'''
pass
@override
async def read_frames_range(self, recording_id: RecordingId, start_offset_inclusive: int, end_offset_exclusive: Optional[int]=None, *, limit: Optional[int]=None) -> Sequence[RecordedEvent]:
'''
Read frames ordered by offset for [start_offset_inclusive, end_offset_exclusive).
If end_offset_exclusive is None, read to the end (respect `limit` if provided).
'''
pass
| 22
| 2
| 15
| 0
| 13
| 1
| 2
| 0.11
| 1
| 18
| 11
| 0
| 14
| 3
| 14
| 41
| 235
| 18
| 197
| 82
| 136
| 22
| 64
| 36
| 49
| 3
| 5
| 1
| 21
|
327,881
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory.SessionDocumentStore
|
from flux0_core.sessions import ConsumerId, Event, EventId, EventSource, EventType, MessageEventData, Session, SessionId, SessionMode, SessionStore, SessionUpdateParams, StatusEventData, ToolEventData
from fastapi import HTTPException
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from flux0_core.ids import gen_id
from datetime import datetime, timezone
from flux0_core.users import User, UserId, UserStore, UserUpdateParams
from flux0_nanodb.query import And, Comparison, QueryFilter
from flux0_core.types import JSONSerializable
import asyncio
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
from flux0_core.agents import Agent, AgentId, AgentStore, AgentType, AgentUpdateParams
class SessionDocumentStore(SessionStore):
VERSION = DocumentVersion('0.0.1')
def __init__(self, db: DocumentDatabase):
self.db = db
self._session_col: DocumentCollection[_SessionDocument]
self._event_col: DocumentCollection[_EventDocument]
async def __aenter__(self) -> Self:
self._session_col = await self.db.create_collection('sessions', _SessionDocument)
self._event_col = await self.db.create_collection('session_events', _EventDocument)
return self
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize_session(self, session: Session) -> _SessionDocument:
return _SessionDocument(id=DocumentID(session.id), version=self.VERSION, agent_id=session.agent_id, user_id=session.user_id, mode=session.mode, title=session.title, consumption_offsets=session.consumption_offsets, created_at=session.created_at, metadata=session.metadata)
def _deserialize_session(self, doc: _SessionDocument) -> Session:
return Session(id=SessionId(doc['id']), agent_id=doc['agent_id'], user_id=doc['user_id'], mode=doc['mode'], title=doc.get('title'), consumption_offsets=doc['consumption_offsets'], created_at=doc['created_at'], metadata=doc.get('metadata', None))
def _serialize_event(self, session_id: SessionId, event: Event) -> _EventDocument:
return _EventDocument(id=DocumentID(event.id), version=self.VERSION, session_id=session_id, source=event.source, type=event.type, offset=event.offset, correlation_id=event.correlation_id, data=event.data, deleted=event.deleted, created_at=event.created_at, metadata=event.metadata)
def _deserialize_event(self, doc: _EventDocument) -> Event:
return Event(id=EventId(doc['id']), source=doc['source'], type=doc['type'], offset=doc['offset'], correlation_id=doc['correlation_id'], data=doc['data'], deleted=doc['deleted'], created_at=doc['created_at'], metadata=doc.get('metadata'))
@override
async def create_session(self, user_id: UserId, agent_id: AgentId, id: Optional[SessionId]=None, mode: Optional[SessionMode]=None, title: Optional[str]=None, metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Session:
if id:
existing = await self.read_session(id)
if existing:
raise HTTPException(status_code=409, detail=f'Session with id {id} already exists')
created_at = created_at or datetime.now(timezone.utc)
consumption_offsets: dict[ConsumerId, int] = {'client': 0}
session = Session(id=id or SessionId(gen_id()), user_id=user_id, agent_id=agent_id, mode=mode or 'auto', title=title, consumption_offsets=consumption_offsets, created_at=created_at, metadata=metadata)
await self._session_col.insert_one(document=self._serialize_session(session))
return session
@override
async def read_session(self, session_id: SessionId) -> Optional[Session]:
result = await self._session_col.find(Comparison(path='id', op='$eq', value=session_id))
return self._deserialize_session(result[0]) if result else None
@override
async def delete_session(self, session_id: SessionId) -> bool:
events = await self.list_events(session_id)
futures = [asyncio.ensure_future(self._event_col.delete_one(Comparison(path='id', op='$eq', value=e.id))) for e in events]
await asyncio.gather(*futures, return_exceptions=False)
result = await self._session_col.delete_one(Comparison(path='id', op='$eq', value=session_id))
return result.deleted_count > 0
@override
async def update_session(self, session_id: SessionId, params: SessionUpdateParams) -> Session:
update_data = {k: v for k, v in params.items() if v is not None}
patch: List[JSONPatchOperation] = [{'op': 'replace', 'path': f'/{k}', 'value': v} for k, v in update_data.items()]
await self._session_col.update_one(Comparison(path='id', op='$eq', value=session_id), patch)
updated = await self.read_session(session_id)
if not updated:
raise ValueError(f'Session not found: {session_id}')
return updated
@override
async def list_sessions(self, agent_id: Optional[AgentId]=None, user_id: Optional[UserId]=None) -> Sequence[Session]:
expressions: List[QueryFilter] = []
if agent_id is not None:
expressions.append(Comparison(path='agent_id', op='$eq', value=str(agent_id)))
if user_id is not None:
expressions.append(Comparison(path='user_id', op='$eq', value=str(user_id)))
query_filter: Optional[QueryFilter] = None
if expressions:
query_filter = And(expressions=expressions)
return [self._deserialize_session(d) for d in await self._session_col.find(query_filter)]
@override
async def create_event(self, session_id: SessionId, source: EventSource, type: EventType, correlation_id: str, data: Union[MessageEventData, StatusEventData, ToolEventData], metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Event:
session = await self.read_session(session_id)
if session is None:
raise ValueError(f'Session not found: {session_id}')
events = await self.list_events(session_id)
offset = len(list(events))
created_at = created_at or datetime.now(timezone.utc)
event = Event(id=EventId(gen_id()), source=source, type=type, offset=offset, correlation_id=correlation_id, data=data, metadata=metadata, deleted=False, created_at=created_at)
await self._event_col.insert_one(document=self._serialize_event(session_id, event))
return event
@override
async def read_event(self, session_id: SessionId, event_id: EventId) -> Optional[Event]:
result = await self._event_col.find(And(expressions=[Comparison(path='id', op='$eq', value=event_id), Comparison(path='session_id', op='$eq', value=session_id)]))
return self._deserialize_event(result[0]) if result else None
@override
async def delete_event(self, event_id: EventId) -> bool:
result = await self._event_col.delete_one(Comparison(path='id', op='$eq', value=event_id))
return result.deleted_count > 0
@override
async def list_events(self, session_id: SessionId, source: Optional[EventSource]=None, correlation_id: Optional[str]=None, types: Sequence[EventType]=[], min_offset: Optional[int]=None, exclude_deleted: bool=True) -> Sequence[Event]:
expressions: List[QueryFilter] = [Comparison(path='session_id', op='$eq', value=session_id)]
if source is not None:
expressions.append(Comparison(path='source', op='$eq', value=source))
if correlation_id is not None:
expressions.append(Comparison(path='correlation_id', op='$eq', value=correlation_id))
if types:
expressions.append(Comparison(path='type', op='$in', value=list(types)))
if min_offset is not None:
expressions.append(Comparison(path='offset', op='$gte', value=min_offset))
if exclude_deleted:
expressions.append(Comparison(path='deleted', op='$eq', value=False))
query_filter: Optional[QueryFilter] = None
if expressions:
query_filter = And(expressions=expressions)
return [self._deserialize_event(d) for d in await self._event_col.find(query_filter)]
|
class SessionDocumentStore(SessionStore):
def __init__(self, db: DocumentDatabase):
pass
async def __aenter__(self) -> Self:
pass
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize_session(self, session: Session) -> _SessionDocument:
pass
def _deserialize_session(self, doc: _SessionDocument) -> Session:
pass
def _serialize_event(self, session_id: SessionId, event: Event) -> _EventDocument:
pass
def _deserialize_event(self, doc: _EventDocument) -> Event:
pass
@override
async def create_session(self, user_id: UserId, agent_id: AgentId, id: Optional[SessionId]=None, mode: Optional[SessionMode]=None, title: Optional[str]=None, metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Session:
pass
@override
async def read_session(self, session_id: SessionId) -> Optional[Session]:
pass
@override
async def delete_session(self, session_id: SessionId) -> bool:
pass
@override
async def update_session(self, session_id: SessionId, params: SessionUpdateParams) -> Session:
pass
@override
async def list_sessions(self, agent_id: Optional[AgentId]=None, user_id: Optional[UserId]=None) -> Sequence[Session]:
pass
@override
async def create_event(self, session_id: SessionId, source: EventSource, type: EventType, correlation_id: str, data: Union[MessageEventData, StatusEventData, ToolEventData], metadata: Optional[Mapping[str, JSONSerializable]]=None, created_at: Optional[datetime]=None) -> Event:
pass
@override
async def read_event(self, session_id: SessionId, event_id: EventId) -> Optional[Event]:
pass
@override
async def delete_event(self, event_id: EventId) -> bool:
pass
@override
async def list_events(self, session_id: SessionId, source: Optional[EventSource]=None, correlation_id: Optional[str]=None, types: Sequence[EventType]=[], min_offset: Optional[int]=None, exclude_deleted: bool=True) -> Sequence[Event]:
pass
| 26
| 0
| 15
| 1
| 14
| 0
| 2
| 0.02
| 1
| 21
| 12
| 0
| 16
| 3
| 16
| 45
| 274
| 31
| 239
| 115
| 148
| 4
| 89
| 41
| 72
| 7
| 5
| 2
| 31
|
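The SessionDocumentStore above is an async context manager over a flux0_nanodb DocumentDatabase. A minimal usage sketch follows; the concrete database construction is left abstract because its in-memory implementation is not shown here, and the ids and title are placeholders.
import asyncio
from flux0_core.agents import AgentId
from flux0_core.users import UserId
from flux0_core.storage.nanodb_memory import SessionDocumentStore  # module path assumed from the file location

async def demo(db) -> None:
    # db is assumed to be any flux0_nanodb DocumentDatabase implementation.
    async with SessionDocumentStore(db) as store:
        # Collections are created in __aenter__, so the store is ready to use here.
        session = await store.create_session(
            user_id=UserId("user-1"), agent_id=AgentId("agent-1"), title="demo session"
        )
        # read_session returns None for unknown ids, the deserialized Session otherwise.
        print(await store.read_session(session.id))
        print(await store.list_sessions(user_id=UserId("user-1")))

# asyncio.run(demo(db))  # db: placeholder for a concrete DocumentDatabase instance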
327,882
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory.UserDocumentStore
|
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from flux0_core.ids import gen_id
from datetime import datetime, timezone
from flux0_core.users import User, UserId, UserStore, UserUpdateParams
from flux0_nanodb.query import And, Comparison, QueryFilter
from flux0_nanodb.api import DocumentCollection, DocumentDatabase
class UserDocumentStore(UserStore):
VERSION = DocumentVersion('0.0.1')
def __init__(self, db: DocumentDatabase):
self.db = db
self._user_col: DocumentCollection[_UserDocument]
async def __aenter__(self) -> Self:
self._user_col = await self.db.create_collection('users', _UserDocument)
return self
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize_user(self, user: User) -> _UserDocument:
return _UserDocument(id=DocumentID(user.id), version=self.VERSION, sub=user.sub, name=user.name, email=user.email, created_at=user.created_at)
def _deserialize_user(self, doc: _UserDocument) -> User:
return User(id=UserId(doc['id']), sub=doc['sub'], name=doc['name'], email=doc.get('email'), created_at=doc['created_at'])
@override
async def create_user(self, sub: str, name: str, email: Optional[str]=None, created_at: Optional[datetime]=None) -> User:
created_at = created_at or datetime.now(timezone.utc)
user = User(id=UserId(gen_id()), sub=sub, name=name, email=email, created_at=created_at)
await self._user_col.insert_one(document=self._serialize_user(user))
return user
@override
async def read_user(self, user_id: UserId) -> Optional[User]:
result = await self._user_col.find(Comparison(path='id', op='$eq', value=user_id))
return self._deserialize_user(result[0]) if result else None
@override
async def read_user_by_sub(self, sub: str) -> Optional[User]:
result = await self._user_col.find(Comparison(path='sub', op='$eq', value=sub))
return self._deserialize_user(result[0]) if result else None
@override
async def update_user(self, user_id: UserId, params: UserUpdateParams) -> User:
raise NotImplementedError
|
class UserDocumentStore(UserStore):
def __init__(self, db: DocumentDatabase):
pass
async def __aenter__(self) -> Self:
pass
async def __aexit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exec_tb: Optional[object]) -> None:
pass
def _serialize_user(self, user: User) -> _UserDocument:
pass
def _deserialize_user(self, doc: _UserDocument) -> User:
pass
@override
async def create_user(self, sub: str, name: str, email: Optional[str]=None, created_at: Optional[datetime]=None) -> User:
pass
@override
async def read_user(self, user_id: UserId) -> Optional[User]:
pass
@override
async def read_user_by_sub(self, sub: str) -> Optional[User]:
pass
@override
async def update_user(self, user_id: UserId, params: UserUpdateParams) -> User:
pass
| 14
| 0
| 8
| 0
| 8
| 0
| 1
| 0
| 1
| 11
| 6
| 0
| 9
| 2
| 9
| 33
| 86
| 9
| 77
| 47
| 36
| 0
| 27
| 16
| 17
| 2
| 5
| 0
| 11
|
327,883
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory._AgentDocument
|
from flux0_core.agents import Agent, AgentId, AgentStore, AgentType, AgentUpdateParams
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from datetime import datetime, timezone
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
class _AgentDocument(TypedDict, total=False):
id: DocumentID
version: DocumentVersion
type: AgentType
name: str
description: Optional[str]
created_at: datetime
|
class _AgentDocument(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0
| 7
| 1
| 6
| 0
| 7
| 1
| 6
| 0
| 1
| 0
| 0
|
327,884
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory._EventDocument
|
from flux0_core.sessions import ConsumerId, Event, EventId, EventSource, EventType, MessageEventData, Session, SessionId, SessionMode, SessionStore, SessionUpdateParams, StatusEventData, ToolEventData
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from datetime import datetime, timezone
from dataclasses import dataclass
from flux0_core.types import JSONSerializable
@dataclass(frozen=True)
class _EventDocument(TypedDict, total=False):
id: DocumentID
version: DocumentVersion
session_id: SessionId
source: EventSource
type: EventType
offset: int
correlation_id: str
data: Union[MessageEventData, StatusEventData, ToolEventData]
deleted: bool
created_at: datetime
metadata: Optional[Mapping[str, JSONSerializable]]
|
@dataclass(frozen=True)
class _EventDocument(TypedDict, total=False):
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0
| 12
| 1
| 11
| 0
| 12
| 1
| 11
| 0
| 1
| 0
| 0
|
327,885
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory._RecordedEventDocument
|
from flux0_core.recordings import RecordedChunkPayload, RecordedEmittedPayload, RecordedEvent, RecordedEventId, RecordedEventKind, RecordedEventPayload, RecordedHeaderPayload, RecordingId, RecordingStore, TurnRange
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from datetime import datetime, timezone
from dataclasses import dataclass
@dataclass(frozen=True)
class _RecordedEventDocument(TypedDict, total=False):
id: DocumentID
version: DocumentVersion
recording_id: Required[RecordingId]
offset: Required[int]
kind: Required[RecordedEventKind]
created_at: Required[datetime]
payload: Required[RecordedEventPayload]
|
@dataclass(frozen=True)
class _RecordedEventDocument(TypedDict, total=False):
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0
| 8
| 1
| 7
| 0
| 8
| 1
| 7
| 0
| 1
| 0
| 0
|
327,886
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory._SessionDocument
|
from flux0_core.sessions import ConsumerId, Event, EventId, EventSource, EventType, MessageEventData, Session, SessionId, SessionMode, SessionStore, SessionUpdateParams, StatusEventData, ToolEventData
from flux0_core.types import JSONSerializable
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from flux0_core.users import User, UserId, UserStore, UserUpdateParams
from datetime import datetime, timezone
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from flux0_core.agents import Agent, AgentId, AgentStore, AgentType, AgentUpdateParams
class _SessionDocument(TypedDict, total=False):
id: DocumentID
version: DocumentVersion
agent_id: AgentId
user_id: UserId
mode: SessionMode
title: Optional[str]
consumption_offsets: Mapping[ConsumerId, int]
created_at: datetime
metadata: Optional[Mapping[str, JSONSerializable]]
|
class _SessionDocument(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0
| 10
| 1
| 9
| 0
| 10
| 1
| 9
| 0
| 1
| 0
| 0
|
327,887
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/nanodb_memory.py
|
nanodb_memory._UserDocument
|
from flux0_nanodb.types import DocumentID, DocumentVersion, JSONPatchOperation, SortingOrder
from typing import DefaultDict, List, Mapping, Optional, Required, Self, Sequence, TypedDict, Union, override
from datetime import datetime, timezone
class _UserDocument(TypedDict, total=False):
id: DocumentID
version: DocumentVersion
sub: str
name: str
email: Optional[str]
created_at: datetime
|
class _UserDocument(TypedDict, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0
| 7
| 1
| 6
| 0
| 7
| 1
| 6
| 0
| 1
| 0
| 0
|
327,888
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/examples/openai_simple/agent.py
|
openai_simple.agent.OpenAIChatAgentRunner
|
import uuid
from flux0_core.agent_runners.api import AgentRunner, Deps, agent_runner
from flux0_core.sessions import EventId, MessageEventData, StatusEventData
from typing import cast
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from openai import AsyncOpenAI
import time
from flux0_stream.types import ChunkEvent, JsonPatchOperation
import asyncio
from flux0_core.agent_runners.context import Context
@agent_runner('openai_simple')
class OpenAIChatAgentRunner(AgentRunner):
def __init__(self) -> None:
self.client: AsyncOpenAI = AsyncOpenAI()
async def run(self, context: Context, deps: Deps) -> bool:
agent = await deps.read_agent(context.agent_id)
if not agent:
raise ValueError(f'Agent {context.agent_id} not found')
events = await deps.list_session_events(context.session_id)
last_event = events[-1]
if last_event.type != 'message':
return False
user_event_data = cast(MessageEventData, last_event.data)
user_input = next((part['content'] for part in user_event_data['parts'] if part['type'] == 'content'), None)
if not user_input:
raise ValueError('No user input content found')
event_id = EventId(uuid.uuid4().hex)
seq = 0
try:
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='processing', data={'detail': 'Thinking...'}))
await asyncio.sleep(0.5)
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='typing'))
messages: list[ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam] = [ChatCompletionSystemMessageParam(role='system', content='Be concise.'), ChatCompletionUserMessageParam(role='user', content=str(user_input))]
stream = await self.client.chat.completions.create(model='gpt-4o', stream=True, messages=messages)
async for chunk in stream:
delta = chunk.choices[0].delta.content
if delta:
patch: JsonPatchOperation = {'op': 'add', 'path': '/-', 'value': delta}
chunk_event = ChunkEvent(correlation_id=deps.correlator.correlation_id, seq=seq, event_id=event_id, patches=[patch], metadata={'agent_id': agent.id, 'agent_name': agent.name}, timestamp=time.time())
await deps.event_emitter.enqueue_event_chunk(chunk_event)
seq += 1
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, event_id=event_id, data=StatusEventData(type='status', status='ready'))
return True
except Exception as e:
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='error', data=str(e)))
return False
finally:
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='completed', acknowledged_offset=0))
|
@agent_runner('openai_simple')
class OpenAIChatAgentRunner(AgentRunner):
def __init__(self) -> None:
pass
async def run(self, context: Context, deps: Deps) -> bool:
pass
| 4
| 0
| 46
| 5
| 40
| 2
| 4
| 0.05
| 1
| 11
| 5
| 0
| 2
| 1
| 2
| 23
| 94
| 11
| 80
| 18
| 77
| 4
| 36
| 17
| 33
| 7
| 5
| 3
| 8
|
327,889
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/cli/src/flux0_cli/utils/output.py
|
output.OutputFormatter
|
from typing import Optional, Sequence, Union
from rich.table import Table
from rich.console import Console
import json
from jsonpath_ng import parse
import click
from pydantic import BaseModel
class OutputFormatter:
@staticmethod
def format(data: Union[BaseModel, Sequence[BaseModel]], output_format: str='table', jsonpath_expr: Optional[str]=None) -> str:
"""
Format one or more Pydantic models using the specified output format.
"""
models = [data] if isinstance(data, BaseModel) else data
data_dicts = [model.model_dump() for model in models]
if output_format == 'json':
return json.dumps(data_dicts[0] if isinstance(data, BaseModel) else data_dicts, default=str, indent=2)
elif output_format == 'table':
if not data_dicts:
return ''
console = Console()
table = Table(show_header=True, header_style='bold')
headers = list(data_dicts[0].keys())
for header in headers:
table.add_column(header)
for d in data_dicts:
table.add_row(*(str(d.get(header, '')) for header in headers))
console.print(table)
return ''
elif output_format == 'jsonpath':
if jsonpath_expr is None:
jsonpath_expr = '$'
expr = parse(jsonpath_expr)
matches = [match.value for d in data_dicts for match in expr.find(d)]
if not matches:
return json.dumps(None)
elif len(matches) == 1:
return json.dumps(matches[0], default=str, indent=2)
else:
return json.dumps(matches, default=str, indent=2)
else:
raise click.ClickException(f'Unknown output format: {output_format}')
|
class OutputFormatter:
@staticmethod
def format(data: Union[BaseModel, Sequence[BaseModel]], output_format: str='table', jsonpath_expr: Optional[str]=None) -> str:
'''
Format one or more Pydantic models using the specified output format.
'''
pass
| 3
| 1
| 48
| 6
| 38
| 8
| 12
| 0.2
| 0
| 5
| 0
| 0
| 0
| 0
| 1
| 1
| 50
| 6
| 40
| 16
| 33
| 8
| 26
| 11
| 24
| 12
| 0
| 2
| 12
|
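OutputFormatter.format accepts either a single Pydantic model or a sequence of them. A hypothetical call might look like the sketch below; the AgentView model is invented purely for illustration, and the module path is assumed from the file location above.
from pydantic import BaseModel
from flux0_cli.utils.output import OutputFormatter  # module path assumed

class AgentView(BaseModel):  # illustrative model, not part of the CLI
    id: str
    name: str

agent = AgentView(id="ag_123", name="demo")
# JSON output serializes a single model as one object and a sequence as a list.
print(OutputFormatter.format(agent, output_format="json"))
# JSONPath output extracts matching values across all models.
print(OutputFormatter.format([agent, agent], output_format="jsonpath", jsonpath_expr="$.name"))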
327,890
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/examples/static/static_agent.py
|
static.static_agent.StaticAgentRunner
|
import asyncio
from flux0_stream.types import ChunkEvent
from flux0_core.sessions import EventId, StatusEventData
import uuid
from examples.utils.utils import read_user_input, send_processing_event
import time
from flux0_core.agent_runners.context import Context
from flux0_core.agent_runners.api import AgentRunner, Deps, agent_runner
@agent_runner('static_agent')
class StaticAgentRunner(AgentRunner):
async def run(self, context: Context, deps: Deps) -> bool:
agent = await deps.read_agent(context.agent_id)
if not agent:
raise ValueError(f'Agent with id {context.agent_id} not found')
user_input = await read_user_input(deps, context)
if not user_input:
raise ValueError('No user input found in session events')
deps.logger.info(f'User Input: {user_input} for session {context.session_id} and agent {agent.id}')
await asyncio.sleep(2)
await send_processing_event(deps, 'Thinking...')
await asyncio.sleep(2)
await send_processing_event(deps, 'Reasoning...')
event_id = EventId(uuid.uuid4().hex)
tool_call = ChunkEvent(correlation_id=deps.correlator.correlation_id, seq=0, event_id=event_id, patches=[{'op': 'add', 'path': '/tool_calls/0', 'value': {'type': 'tool_call', 'tool_call_id': 'id1234', 'tool_name': 'get_weather', 'args': {'city': 'tel aviv'}}}], timestamp=time.time(), metadata={'agent_id': agent.id, 'agent_name': agent.name})
await deps.event_emitter.enqueue_event_chunk(tool_call)
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='ready'), event_id=event_id)
await asyncio.sleep(1)
event_id = EventId(uuid.uuid4().hex)
tool_call_result = ChunkEvent(correlation_id=deps.correlator.correlation_id, seq=0, event_id=event_id, patches=[{'op': 'add', 'path': '/tool_call_results/0', 'value': {'tool_call_id': 'id1234', 'tool_name': 'get_weather', 'data': {'result': "It's always sunny in Israel!"}, 'args': {'city': 'tel aviv'}}}], timestamp=time.time(), metadata={'agent_id': agent.id, 'agent_name': agent.name})
await deps.event_emitter.enqueue_event_chunk(tool_call_result)
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='ready'), event_id=event_id)
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='typing'))
event_id = EventId(uuid.uuid4().hex)
cec = ChunkEvent(correlation_id=deps.correlator.correlation_id, seq=0, event_id=event_id, patches=[{'op': 'add', 'path': '/-', 'value': f'Hey there! I received your input: {user_input}'}], metadata={'agent_id': agent.id, 'agent_name': agent.name}, timestamp=time.time())
print(f'Sending response: {cec}')
await deps.event_emitter.enqueue_event_chunk(cec)
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='ready'), event_id=event_id)
await deps.event_emitter.enqueue_status_event(correlation_id=deps.correlator.correlation_id, data=StatusEventData(type='status', status='completed'))
return True
|
@agent_runner('static_agent')
class StaticAgentRunner(AgentRunner):
async def run(self, context: Context, deps: Deps) -> bool:
pass
| 3
| 0
| 133
| 11
| 107
| 16
| 3
| 0.15
| 1
| 6
| 4
| 0
| 1
| 0
| 1
| 22
| 134
| 11
| 108
| 8
| 106
| 16
| 30
| 8
| 28
| 3
| 5
| 1
| 3
|
327,891
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/types.py
|
types.NanoDBStorageType
|
from enum import Enum
class NanoDBStorageType(Enum):
MEMORY = 'nanodb_memory'
JSON = 'nanodb_json'
|
class NanoDBStorageType(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
327,892
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/packages/core/src/flux0_core/storage/types.py
|
types.StorageType
|
from enum import Enum
class StorageType(Enum):
NANODB = 'nanodb'
MONGODB = 'mongodb'
|
class StorageType(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
327,893
|
flux0-ai/flux0
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/flux0-ai_flux0/scripts/utils.py
|
utils.Package
|
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, List, NoReturn, Optional
import subprocess
import sys
@dataclass(frozen=True)
class Package:
name: str
path: Path
def run_command(self, command: str, env: Optional[dict[str, str]]=None) -> int:
"""
Execute a shell command in the package's directory.
Returns the exit code.
"""
print(f'⏳ Running: {command} (in {self.path})')
result = subprocess.run(command, shell=True, cwd=self.path, capture_output=True, text=True, env=env)
if result.stdout:
print(result.stdout)
if result.returncode != 0:
print(result.stderr, file=sys.stderr)
return result.returncode
|
@dataclass(frozen=True)
class Package:
def run_command(self, command: str, env: Optional[dict[str, str]]=None) -> int:
'''
Execute a shell command in the package's directory.
Returns the exit code.
'''
pass
| 3
| 1
| 19
| 0
| 15
| 4
| 3
| 0.22
| 0
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 23
| 1
| 18
| 3
| 16
| 4
| 11
| 3
| 9
| 3
| 0
| 1
| 3
|
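Package.run_command shells out inside the package directory and returns the exit code. A small, hypothetical invocation (the package name, path, and command are placeholders, and the import path is assumed from the file location):
from pathlib import Path
from scripts.utils import Package  # import path assumed

pkg = Package(name="core", path=Path("packages/core"))
exit_code = pkg.run_command("echo hello")  # stdout is echoed; stderr is printed only on failure
print("ok" if exit_code == 0 else f"failed with {exit_code}")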
327,894
|
Euro-BioImaging/EuBI-Bridge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/data_manager.py
|
Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.data_manager.ArrayManager
|
from dask import array as da
import os
from eubi_bridge.utils.convenience import sensitive_glob, is_zarr_group, is_zarr_array, take_filepaths, autocompute_chunk_shape
from pathlib import Path
from typing import Union
from eubi_bridge.ngff.defaults import unit_map, scale_map, default_axes
import zarr
class ArrayManager:
essential_omexml_fields = {'physical_size_x', 'physical_size_x_unit', 'physical_size_y', 'physical_size_y_unit', 'physical_size_z', 'physical_size_z_unit', 'time_increment', 'time_increment_unit', 'size_x', 'size_y', 'size_z', 'size_t', 'size_c'}
def __init__(self, path: Union[str, Path]=None, series: int=None, metadata_reader='bfio', **kwargs):
self.path = path
self.series = series
if series is not None:
assert isinstance(self.series, (int, str)), f'The series parameter must be either an integer or string. Selection of multiple series from the same image is currently not supported.'
if self.series is None:
self.series = 0
self._seriesattrs = ''
else:
self._seriesattrs = self.series
self._meta_reader = metadata_reader
self.omemeta = None
if not path is None:
if is_zarr_group(path):
self.img = NGFFImageMeta(self.path)
else:
self.img = PFFImageMeta(self.path, self.series, self._meta_reader)
self.axes = self.img.get_axes()
self.array = None
self.pyr = self.img.pyr
self._channels = None
self.set_arraydata()
def fill_default_meta(self):
if self.array is None:
raise Exception(f'Array is missing. An array needs to be assigned.')
new_scaledict = {}
new_unitdict = {}
values = list(self.scaledict.values())
if not None in values:
return
for ax, value in self.scaledict.items():
if value is None:
if (ax == 'z' or ax == 'y') and self.scaledict['x'] is not None:
new_scaledict[ax] = self.scaledict['x']
new_unitdict[ax] = self.unitdict['x']
else:
new_scaledict[ax] = scale_map[ax]
new_unitdict[ax] = unit_map[ax]
else:
if ax in self.scaledict.keys():
new_scaledict[ax] = self.scaledict[ax]
if ax in self.unitdict.keys():
new_unitdict[ax] = self.unitdict[ax]
new_units = [new_unitdict[ax] for ax in self.axes if ax in new_unitdict]
new_scales = [new_scaledict[ax] for ax in self.axes if ax in new_scaledict]
self.set_arraydata(self.array, self.axes, new_units, new_scales)
return self
def get_pixel_size_basemap(self, t=1, z=1, y=1, x=1, **kwargs):
return {'pixel_size_t': t, 'pixel_size_z': z, 'pixel_size_y': y, 'pixel_size_x': x}
def get_unit_basemap(self, t='second', z='micrometer', y='micrometer', x='micrometer', **kwargs):
return {'unit_t': t, 'unit_z': z, 'unit_y': y, 'unit_x': x}
def update_meta(self, new_scaledict={}, new_unitdict={}):
scaledict = self.img.get_scaledict()
for key, val in new_scaledict.items():
if key in scaledict.keys() and val is not None:
scaledict[key] = val
if 'c' in scaledict:
scales = [scaledict[ax] for ax in self.axes]
else:
scales = [scaledict[ax] for ax in self.caxes]
unitdict = self.img.get_unitdict()
for key, val in new_unitdict.items():
if key in unitdict.keys() and val is not None:
unitdict[key] = val
if 'c' in unitdict:
units = [expand_units(unitdict[ax]) for ax in self.axes]
else:
units = [expand_units(unitdict[ax]) for ax in self.caxes]
self.set_arraydata(array=self.array, axes=self.axes, units=units, scales=scales)
def _ensure_correct_channels(self):
if self.array is None:
return
if self.channels is None:
return
shapedict = dict(zip(list(self.axes), self.array.shape))
csize = shapedict['c']
channelsize = len(self.channels)
if channelsize > csize:
self._channels = [channel for channel in self.channels if channel['label'] is not None]
def fix_bad_channels(self):
chn = ChannelIterator()
for i, channel in enumerate(self.channels):
if channel['label'] is None:
channel = chn.__next__()
self.channels[i] = channel
def set_arraydata(self, array=None, axes=None, units=None, scales=None, **kwargs):
axes = axes or self.img.get_axes()
units = units or self.img.get_units()
scales = scales or self.img.get_scales()
self.axes = axes
if array is not None:
self.array = array
self.ndim = self.array.ndim
assert len(self.axes) == self.ndim
self.caxes = ''.join([ax for ax in axes if ax != 'c'])
if self.array is not None:
self.chunkdict = dict(zip(list(self.axes), self.array.chunksize))
self.shapedict = dict(zip(list(self.axes), self.array.shape))
if 'c' in self.shapedict:
self._ensure_correct_channels()
if len(units) == len(self.axes):
self.unitdict = dict(zip(list(self.axes), units))
elif len(units) == len(self.caxes):
self.unitdict = dict(zip(list(self.caxes), units))
else:
raise Exception(f'Unit length is invalid.')
if len(scales) == len(self.axes):
self.scaledict = dict(zip(list(self.axes), scales))
elif len(scales) == len(self.caxes):
self.scaledict = dict(zip(list(self.caxes), scales))
self.scaledict['c'] = 1
else:
raise Exception(f'Scale length is invalid')
@property
def scales(self):
if self.scaledict.__len__() < len(self.axes):
return [self.scaledict[ax] for ax in self.caxes]
elif self.scaledict.__len__() == len(self.axes):
return [self.scaledict[ax] for ax in self.axes]
else:
raise ValueError
@property
def units(self):
if self.unitdict.__len__() < len(self.axes):
return [self.unitdict[ax] for ax in self.caxes]
elif self.unitdict.__len__() == len(self.axes):
return [self.unitdict[ax] for ax in self.axes]
else:
raise ValueError
@property
def channels(self):
if self._channels is not None:
return self._channels
return self.img.get_channels()
@property
def chunks(self):
return [self.chunkdict[ax] for ax in self.axes]
def sync_pyramid(self, create_omexml_if_not_exists=False):
"""
Synchronizes the scale and unit metadata with the Pyramid (if a Pyramid exists).
The scale metadata is recalculated for all layers based on the shape of each layer.
Also updates the ome-xml metadata to the pyramid.
:return:
"""
if self.pyr is None:
raise Exception(f'No pyramid exists.')
self.pyr.update_scales(**self.scaledict)
self.pyr.update_units(**self.unitdict)
if self.omemeta is None:
self.omemeta = create_ome_xml(image_shape=self.pyr.base_array.shape, axis_order=self.pyr.axes, pixel_size_x=self.pyr.meta.scaledict.get('0', {}).get('x'), pixel_size_y=self.pyr.meta.scaledict.get('0', {}).get('y'), pixel_size_z=self.pyr.meta.scaledict.get('0', {}).get('z'), pixel_size_t=self.pyr.meta.scaledict.get('0', {}).get('t'), unit_x=self.pyr.meta.unit_dict.get('x'), unit_y=self.pyr.meta.unit_dict.get('y'), unit_z=self.pyr.meta.unit_dict.get('z'), unit_t=self.pyr.meta.unit_dict.get('t'), dtype=str(self.pyr.base_array.dtype), image_name=self.pyr.meta.multiscales.get('name', 'Default image'), channel_names=[channel['label'] for channel in self.channels])
if 'OME' in list(self.pyr.gr.keys()) or create_omexml_if_not_exists:
self.save_omexml(self.pyr.gr.store.root, overwrite=True)
self.pyr.meta.save_changes()
def create_omemeta(self):
self.fill_default_meta()
pixel_size_basemap = self.get_pixel_size_basemap(**self.scaledict)
unit_basemap = self.get_unit_basemap(**self.unitdict)
self.omemeta = create_ome_xml(image_shape=self.array.shape, axis_order=self.axes, **pixel_size_basemap, **unit_basemap, dtype=str(self.array.dtype), channel_names=[channel['label'] for channel in self.channels])
self.pixels = self.omemeta.images[0].pixels
missing_fields = self.essential_omexml_fields - self.pixels.model_fields_set
self.pixels.model_fields_set.update(missing_fields)
self.omemeta.images[0].pixels = self.pixels
return self
def save_omexml(self, base_path: str, overwrite=False):
assert self.omemeta is not None, f'No ome-xml exists.'
gr = zarr.group(base_path)
gr.create_group('OME', overwrite=overwrite)
path = os.path.join(gr.store.root, 'OME/METADATA.ome.xml')
with open(path, 'w', encoding='utf-8') as f:
f.write(self.omemeta.to_xml())
if gr.info._zarr_format == 2:
gr['OME'].attrs['series'] = [self._seriesattrs]
else:
gr['OME'].attrs['ome'] = dict(version='0.5', series=[str(self._seriesattrs)])
def squeeze(self):
singlet_axes = [ax for ax, size in self.shapedict.items() if size == 1]
newaxes = ''.join((ax for ax in self.axes if ax not in singlet_axes))
newunits, newscales = ([], [])
assert self.scaledict.__len__() - self.unitdict.__len__() <= 1
for ax in self.axes:
if ax not in singlet_axes:
if ax in self.unitdict.keys():
newunits.append(self.unitdict[ax])
if ax in self.scaledict.keys():
newscales.append(self.scaledict[ax])
newarray = da.squeeze(self.array)
self.set_arraydata(newarray, newaxes, newunits, newscales)
def transpose(self, newaxes):
newaxes = ''.join((ax for ax in newaxes if ax in self.axes))
new_ids = [self.axes.index(ax) for ax in newaxes]
newunits, newscales = ([], [])
assert self.scaledict.__len__() - self.unitdict.__len__() <= 1
for ax in newaxes:
if ax in self.unitdict:
newunits.append(self.unitdict[ax])
if ax in self.scaledict.keys():
newscales.append(self.scaledict[ax])
newarray = self.array.transpose(*new_ids)
self.set_arraydata(newarray, newaxes, newunits, newscales)
def crop(self, trange=None, crange=None, zrange=None, yrange=None, xrange=None):
slicedict = {'t': slice(*trange) if trange is not None else slice(None), 'c': slice(*crange) if crange is not None else slice(None), 'z': slice(*zrange) if zrange is not None else slice(None), 'y': slice(*yrange) if yrange is not None else slice(None), 'x': slice(*xrange) if xrange is not None else slice(None)}
slicedict = {ax: r for ax, r in slicedict.items() if ax in self.axes}
slices = tuple([slicedict[ax] for ax in self.axes])
array = self.array[slices]
self.set_arraydata(array, self.axes, self.units, self.scales)
def to_cupy(self):
try:
import cupy
except:
raise Exception('Cupy not installed but required for this operation.')
array = self.array.map_blocks(cupy.asarray)
self.set_arraydata(array, self.axes, self.units, self.scales)
def split(self):
pass
def get_autocomputed_chunks(self, dtype=None):
array_shape = self.array.shape
dtype = dtype or self.array.dtype
axes = self.axes
chunk_shape = autocompute_chunk_shape(array_shape=array_shape, axes=axes, dtype=dtype)
return chunk_shape
|
class ArrayManager:
def __init__(self, path: Union[str, Path]=None, series: int=None, metadata_reader='bfio', **kwargs):
pass
def fill_default_meta(self):
pass
def get_pixel_size_basemap(self, t=1, z=1, y=1, x=1, **kwargs):
pass
def get_unit_basemap(self, t='second', z='micrometer', y='micrometer', x='micrometer', **kwargs):
pass
def update_meta(self, new_scaledict={}, new_unitdict={}):
pass
def _ensure_correct_channels(self):
pass
def fix_bad_channels(self):
pass
def set_arraydata(self, array=None, axes=None, units=None, scales=None, **kwargs):
pass
@property
def scales(self):
pass
@property
def units(self):
pass
@property
def channels(self):
pass
@property
def chunks(self):
pass
def sync_pyramid(self, create_omexml_if_not_exists=False):
'''
Synchronizes the scale and unit metadata with the Pyramid (if a Pyramid exists).
The scale metadata is recalculated for all layers based on the shape of each layer.
Also updates the ome-xml metadata to the pyramid.
:return:
'''
pass
def create_omemeta(self):
pass
def save_omexml(self, base_path: str, overwrite=False):
pass
def squeeze(self):
pass
def transpose(self, newaxes):
pass
def crop(self, trange=None, crange=None, zrange=None, yrange=None, xrange=None):
pass
def to_cupy(self):
pass
def split(self):
pass
def get_autocomputed_chunks(self, dtype=None):
pass
| 26
| 1
| 16
| 1
| 14
| 1
| 3
| 0.06
| 0
| 14
| 3
| 0
| 21
| 17
| 21
| 21
| 366
| 41
| 312
| 120
| 246
| 18
| 197
| 76
| 174
| 8
| 0
| 3
| 72
|
327,895
|
Euro-BioImaging/EuBI-Bridge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/data_manager.py
|
Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.data_manager.BatchManager
|
class BatchManager:
def __init__(self, managers):
self.managers = managers
def _collect_scaledict(self, **kwargs):
"""
Retrieves pixel sizes for image dimensions.
Args:
**kwargs: Pixel sizes for time, channel, z, y, and x dimensions.
Returns:
list: Pixel sizes.
"""
t = kwargs.get('time_scale', None)
c = kwargs.get('channel_scale', None)
y = kwargs.get('y_scale', None)
x = kwargs.get('x_scale', None)
z = kwargs.get('z_scale', None)
fulldict = dict(zip('tczyx', [t, c, z, y, x]))
final = {key: val for key, val in fulldict.items() if val is not None}
return final
def _collect_unitdict(self, **kwargs):
"""
Retrieves unit specifications for image dimensions.
Args:
**kwargs: Unit values for time, channel, z, y, and x dimensions.
Returns:
list: Unit values.
"""
t = kwargs.get('time_unit', None)
c = kwargs.get('channel_unit', None)
y = kwargs.get('y_unit', None)
x = kwargs.get('x_unit', None)
z = kwargs.get('z_unit', None)
fulldict = dict(zip('tczyx', [t, c, z, y, x]))
final = {key: val for key, val in fulldict.items() if val is not None}
return final
def _collect_chunks(self, **kwargs):
"""
Retrieves chunk specifications for image dimensions.
Args:
**kwargs: Chunk sizes for time, channel, z, y, and x dimensions.
Returns:
list: Chunk shape.
"""
t = kwargs.get('time_chunk', None)
c = kwargs.get('channel_chunk', None)
y = kwargs.get('y_chunk', None)
x = kwargs.get('x_chunk', None)
z = kwargs.get('z_chunk', None)
fulldict = dict(zip('tczyx', [t, c, z, y, x]))
final = {key: val for key, val in fulldict.items() if val is not None}
return final
def fill_default_meta(self):
for key, manager in self.managers.items():
manager.fill_default_meta()
def squeeze(self):
for key, manager in self.managers.items():
manager.squeeze()
def to_cupy(self):
for key, manager in self.managers.items():
manager.to_cupy()
def crop(self, time_range=None, channel_range=None, z_range=None, y_range=None, x_range=None, **kwargs):
if any([item is not None for item in (time_range, channel_range, z_range, y_range, x_range)]):
for key, manager in self.managers.items():
manager.crop(time_range, channel_range, z_range, y_range, x_range)
def transpose(self, newaxes):
for key, manager in self.managers.items():
manager.transpose(newaxes)
def sync_pyramids(self):
pass
|
class BatchManager:
def __init__(self, managers):
pass
def _collect_scaledict(self, **kwargs):
'''
Retrieves pixel sizes for image dimensions.
Args:
**kwargs: Pixel sizes for time, channel, z, y, and x dimensions.
Returns:
list: Pixel sizes.
'''
pass
def _collect_unitdict(self, **kwargs):
'''
Retrieves unit specifications for image dimensions.
Args:
**kwargs: Unit values for time, channel, z, y, and x dimensions.
Returns:
list: Unit values.
'''
pass
def _collect_chunks(self, **kwargs):
'''
Retrieves chunk specifications for image dimensions.
Args:
**kwargs: Chunk sizes for time, channel, z, y, and x dimensions.
Returns:
list: Chunk shape.
'''
pass
def fill_default_meta(self):
pass
def squeeze(self):
pass
def to_cupy(self):
pass
def crop(self, time_range=None, channel_range=None, z_range=None, y_range=None, x_range=None, **kwargs):
pass
def transpose(self, newaxes):
pass
def sync_pyramids(self):
pass
| 11
| 3
| 9
| 1
| 6
| 2
| 2
| 0.37
| 0
| 2
| 0
| 0
| 10
| 1
| 10
| 10
| 98
| 15
| 62
| 47
| 42
| 23
| 48
| 38
| 37
| 3
| 0
| 2
| 16
|
327,896
|
Euro-BioImaging/EuBI-Bridge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/data_manager.py
|
Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.data_manager.ChannelIterator
|
class ChannelIterator:
"""
Iterator for generating and managing channel colors.
This class provides a way to iterate through a sequence of channel colors,
generating new colors in a visually distinct sequence when needed.
"""
DEFAULT_COLORS = ['FF0000', '00FF00', '0000FF', 'FF00FF', '00FFFF', 'FFFF00', 'FFFFFF']
def __init__(self, num_channels=0):
"""
Initialize the channel iterator.
Args:
num_channels: Initial number of channels to pre-generate
"""
self._channels = []
self._current_index = 0
self._generate_channels(num_channels)
def _generate_channels(self, count):
"""Generate the specified number of unique channel colors."""
for i in range(len(self._channels), count):
if i < len(self.DEFAULT_COLORS):
color = self.DEFAULT_COLORS[i]
else:
hue = int(i * 137.5 % 360)
r, g, b = self._hsv_to_rgb(hue / 360.0, 1.0, 1.0)
color = f'{int(r * 255):02X}{int(g * 255):02X}{int(b * 255):02X}'
self._channels.append({'label': f'Channel {i + 1}', 'color': color})
@staticmethod
def _hsv_to_rgb(h, s, v):
"""Convert HSV color space to RGB color space."""
h = h * 6.0
i = int(h)
f = h - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
if i == 0:
return (v, t, p)
elif i == 1:
return (q, v, p)
elif i == 2:
return (p, v, t)
elif i == 3:
return (p, q, v)
elif i == 4:
return (t, p, v)
else:
return (v, p, q)
def __iter__(self):
"""Return the iterator object itself."""
self._current_index = 0
return self
def __next__(self):
"""Return the next channel color."""
if self._current_index >= len(self._channels):
self._generate_channels(len(self._channels) + 1)
if self._current_index < len(self._channels):
result = self._channels[self._current_index]
self._current_index += 1
return result
raise StopIteration
def get_channel(self, index):
"""
Get channel color by index.
Args:
index: Index of the channel to retrieve
Returns:
dict: Channel information with 'label' and 'color' keys
"""
if index >= len(self._channels):
self._generate_channels(index + 1)
return self._channels[index]
def __len__(self):
"""Return the number of generated channels."""
return len(self._channels)
|
class ChannelIterator:
'''
Iterator for generating and managing channel colors.
This class provides a way to iterate through a sequence of channel colors,
generating new colors in a visually distinct sequence when needed.
'''
def __init__(self, num_channels=0):
'''
Initialize the channel iterator.
Args:
num_channels: Initial number of channels to pre-generate
'''
pass
def _generate_channels(self, count):
'''Generate the specified number of unique channel colors.'''
pass
@staticmethod
def _hsv_to_rgb(h, s, v):
'''Convert HSV color space to RGB color space.'''
pass
def __iter__(self):
'''Return the iterator object itself.'''
pass
def __next__(self):
'''Return the next channel color.'''
pass
def get_channel(self, index):
'''
Get channel color by index.
Args:
index: Index of the channel to retrieve
Returns:
dict: Channel information with 'label' and 'color' keys
'''
pass
def __len__(self):
'''Return the number of generated channels.'''
pass
| 9
| 8
| 10
| 1
| 7
| 3
| 2
| 0.52
| 0
| 3
| 0
| 0
| 6
| 2
| 7
| 7
| 96
| 13
| 60
| 22
| 51
| 31
| 45
| 21
| 37
| 6
| 0
| 2
| 17
|
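ChannelIterator hands out the seven default colors first and then derives additional hues, and get_channel grows the list on demand. An illustrative run (import path assumed from the file location above):
from itertools import islice
from eubi_bridge.base.data_manager import ChannelIterator  # import path assumed

chn = ChannelIterator(num_channels=2)
print(chn.get_channel(0))        # {'label': 'Channel 1', 'color': 'FF0000'}
print(chn.get_channel(9))        # indexes past the defaults trigger HSV-derived colors
print(len(chn))                  # 10 channels generated so far
# __next__ generates a new channel whenever the list is exhausted, so bound the
# iteration explicitly instead of relying on StopIteration.
print(list(islice(iter(chn), 3)))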
327,897
|
Euro-BioImaging/EuBI-Bridge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/data_manager.py
|
Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.data_manager.NGFFImageMeta
|
from eubi_bridge.utils.convenience import sensitive_glob, is_zarr_group, is_zarr_array, take_filepaths, autocompute_chunk_shape
from eubi_bridge.ngff.multiscales import Pyramid
class NGFFImageMeta:
def __init__(self, path):
if is_zarr_group(path):
self.pyr = Pyramid().from_ngff(path)
meta = self.pyr.meta
self._meta = meta
self._base_path = self._meta.resolution_paths[0]
else:
raise Exception(f'The given path does not contain an NGFF group.')
def get_axes(self):
return self._meta.axis_order
def get_scales(self):
return self._meta.get_scale(self._base_path)
def get_scaledict(self):
return self._meta.get_scaledict(self._base_path)
def get_units(self):
return self._meta.unit_list
def get_unitdict(self):
return self._meta.unit_dict
def get_channels(self):
if not hasattr(self._meta, 'channels'):
return None
return self._meta.channels
|
class NGFFImageMeta:
def __init__(self, path):
pass
def get_axes(self):
pass
def get_scales(self):
pass
def get_scaledict(self):
pass
def get_units(self):
pass
def get_unitdict(self):
pass
def get_channels(self):
pass
| 8
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 0
| 2
| 1
| 0
| 7
| 3
| 7
| 7
| 31
| 6
| 25
| 14
| 15
| 0
| 22
| 12
| 14
| 2
| 0
| 1
| 9
|
327,898
|
Euro-BioImaging/EuBI-Bridge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/data_manager.py
|
Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.data_manager.PFFImageMeta
|
from eubi_bridge.base.readers import read_metadata_via_bioio_bioformats, read_metadata_via_extension, read_metadata_via_bfio
from ome_types.model import OME, Image, Pixels, Channel
class PFFImageMeta:
essential_omexml_fields = {'physical_size_x', 'physical_size_x_unit', 'physical_size_y', 'physical_size_y_unit', 'physical_size_z', 'physical_size_z_unit', 'time_increment', 'time_increment_unit', 'size_x', 'size_y', 'size_z', 'size_t', 'size_c'}
def __init__(self, path, series, meta_reader='bioio'):
if path.endswith('ome') or path.endswith('xml'):
from ome_types import OME
omemeta = OME().from_xml(path)
elif meta_reader == 'bioio':
try:
omemeta = read_metadata_via_extension(path, series=series)
except:
omemeta = read_metadata_via_bioio_bioformats(path, series=series)
elif meta_reader == 'bfio':
try:
omemeta = read_metadata_via_bfio(path)
except:
omemeta = read_metadata_via_bioio_bioformats(path, series=series)
else:
raise ValueError(f'Unsupported metadata reader: {meta_reader}')
if series is None:
series = 0
images = [omemeta.images[series]]
omemeta.images = images
self.omemeta = omemeta
self.pixels = self.omemeta.images[0].pixels
missing_fields = self.essential_omexml_fields - self.pixels.model_fields_set
self.pixels.model_fields_set.update(missing_fields)
self.omemeta.images[0].pixels = self.pixels
self.pyr = None
def get_axes(self):
return 'tczyx'
def get_scaledict(self):
return {'t': self.pixels.time_increment, 'z': self.pixels.physical_size_z, 'y': self.pixels.physical_size_y, 'x': self.pixels.physical_size_x}
def get_scales(self):
scaledict = self.get_scaledict()
caxes = [ax for ax in self.get_axes() if ax != 'c']
return [scaledict[ax] for ax in caxes]
def get_unitdict(self):
return {'t': self.pixels.time_increment_unit.name.lower(), 'z': self.pixels.physical_size_z_unit.name.lower(), 'y': self.pixels.physical_size_y_unit.name.lower(), 'x': self.pixels.physical_size_x_unit.name.lower()}
def get_units(self):
unitdict = self.get_unitdict()
caxes = [ax for ax in self.get_axes() if ax != 'c']
return [unitdict[ax] for ax in caxes]
def get_channels(self):
if not hasattr(self.pixels, 'channels'):
return None
if len(self.pixels.channels) == 0:
return None
if len(self.pixels.channels) < self.pixels.size_c:
chn = ChannelIterator(num_channels=self.pixels.size_c)
channels = chn._channels
elif len(self.pixels.channels) == self.pixels.size_c:
channels = []
for _, channel in enumerate(self.pixels.channels):
color = channel.color.as_hex().upper()
color = expand_hex_shorthand(color)
name = channel.name
channels.append(dict(label=name, color=color))
return channels
|
class PFFImageMeta:
def __init__(self, path, series, meta_reader='bioio'):
pass
def get_axes(self):
pass
def get_scaledict(self):
pass
def get_scales(self):
pass
def get_unitdict(self):
pass
def get_units(self):
pass
def get_channels(self):
pass
| 8
| 0
| 11
| 0
| 11
| 1
| 3
| 0.07
| 0
| 4
| 1
| 0
| 7
| 3
| 7
| 7
| 93
| 6
| 82
| 29
| 69
| 6
| 55
| 25
| 46
| 7
| 0
| 3
| 18
|
327,899
|
Euro-BioImaging/EuBI-Bridge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/scale.py
|
Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.scale.DownscaleManager
|
import dataclasses
import numpy as np
@dataclasses.dataclass
class DownscaleManager:
base_shape: (list, tuple)
scale_factor: (list, tuple)
n_layers: (list, tuple)
scale: (list, tuple) = None
def __post_init__(self):
ndim = len(self.base_shape)
assert len(self.scale_factor) == ndim
@property
def _scale_ids(self):
return np.arange(self.n_layers).reshape(-1, 1)
@property
def _theoretical_scale_factors(self):
return np.power(self.scale_factor, self._scale_ids)
@property
def output_shapes(self):
shapes = np.floor_divide(self.base_shape, self._theoretical_scale_factors)
shapes[shapes == 0] = 1
return shapes
@property
def scale_factors(self):
return np.true_divide(self.output_shapes[0], self.output_shapes)
@property
def scales(self):
return np.multiply(self.scale, self.scale_factors)
|
@dataclasses.dataclass
class DownscaleManager:
def __post_init__(self):
pass
@property
def _scale_ids(self):
pass
@property
def _theoretical_scale_factors(self):
pass
@property
def output_shapes(self):
pass
@property
def scale_factors(self):
pass
@property
def scales(self):
pass
| 13
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 6
| 0
| 6
| 6
| 31
| 6
| 25
| 15
| 13
| 0
| 20
| 10
| 13
| 1
| 0
| 0
| 6
|
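DownscaleManager's properties derive per-layer output shapes and scales from the base shape via NumPy broadcasting. A minimal sketch, assuming the class is importable from eubi_bridge.base.scale and using placeholder shapes and pixel sizes:
from eubi_bridge.base.scale import DownscaleManager  # import path assumed

mgr = DownscaleManager(
    base_shape=(1, 2, 64, 512, 512),    # tczyx
    scale_factor=(1, 1, 2, 2, 2),       # halve z, y and x at every layer
    n_layers=3,
    scale=(1.0, 1.0, 1.0, 0.25, 0.25),  # base pixel sizes
)
print(mgr.output_shapes)   # floor-divided shape per layer, zero sizes clamped to 1
print(mgr.scale_factors)   # effective factor of each layer relative to layer 0
print(mgr.scales)          # base pixel sizes multiplied by those factors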