Dataset columns (string ranges are value lengths):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length, nullable) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
328,100
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/repository/_sync.py
|
strawchemy.strawberry.repository._sync.StrawchemySyncRepository
|
from strawchemy.sqlalchemy.repository import SQLAlchemyGraphQLSyncRepository
from ._base import GraphQLResult, StrawchemyRepository
from typing import TYPE_CHECKING, Any, Optional, TypeVar
from dataclasses import dataclass
from strawchemy.strawberry._utils import default_session_getter, dto_model_from_type, strawberry_contained_user_type
@dataclass
class StrawchemySyncRepository(StrawchemyRepository[T]):
"""Asynchronous repository implementation for GraphQL data access.
This class provides asynchronous methods for querying and mutating data
through GraphQL, using SQLAlchemy's asynchronous API under the hood.
Args:
type: The Strawberry GraphQL type this repository works with
info: The GraphQL resolver info object
session_getter: Callable to get a database session
session: Optional explicit database session to use
filter_statement: Optional base SQLAlchemy select statement to apply to all queries
execution_options: Optional execution options for SQLAlchemy
deterministic_ordering: Whether to ensure deterministic ordering of results
"""
type: type[T]
info: Info[Any, Any]
session_getter: SyncSessionGetter = default_session_getter
session: Optional[AnySyncSession] = None
filter_statement: Optional[Select[tuple[Any]]] = None
execution_options: Optional[dict[str, Any]] = None
deterministic_ordering: bool = False
def graphql_repository(self) -> SQLAlchemyGraphQLSyncRepository[Any]:
"""Create and configure the underlying async SQLAlchemy GraphQL repository.
Returns:
A configured SQLAlchemyGraphQLAsyncRepository instance
"""
return SQLAlchemyGraphQLSyncRepository(model=dto_model_from_type(strawberry_contained_user_type(self.type)), session=self.session or self.session_getter(self.info), statement=self.filter_statement, execution_options=self.execution_options, deterministic_ordering=self.deterministic_ordering)
def get_one_or_none(self, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> GraphQLResult[Any, T]:
"""Asynchronously get at most one result matching the criteria or None.
Args:
filter_input: Optional filter conditions
order_by: Optional ordering criteria
distinct_on: Optional fields to apply DISTINCT on
limit: Optional maximum number of results to return
offset: Optional number of results to skip
Returns:
A GraphQLResult containing the result or None
"""
query_results = self.graphql_repository().get_one(selection=self._tree, dto_filter=filter_input or None, order_by=list(order_by or []), distinct_on=distinct_on, limit=limit, offset=offset, query_hooks=self._query_hooks)
return GraphQLResult(query_results, self._tree)
def get_one(self, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> GraphQLResult[Any, T]:
"""Asynchronously get exactly one result matching the criteria.
Args:
filter_input: Optional filter conditions
order_by: Optional ordering criteria
distinct_on: Optional fields to apply DISTINCT on
limit: Optional maximum number of results to return
offset: Optional number of results to skip
Returns:
A GraphQLResult containing the single result
Raises:
NoResultFound: If no results are found
MultipleResultsFound: If multiple results are found
"""
query_results = self.graphql_repository().get_one(selection=self._tree, dto_filter=filter_input or None, order_by=list(order_by or []), distinct_on=distinct_on, limit=limit, offset=offset, query_hooks=self._query_hooks)
return GraphQLResult(query_results, self._tree)
def get_by_id(self, **kwargs: Any) -> GraphQLResult[Any, T]:
"""Asynchronously get an entity by its primary key.
Args:
**kwargs: Primary key field names and values
Returns:
A GraphQLResult containing the found entity
Raises:
NoResultFound: If no entity with the given ID exists
"""
query_results = self.graphql_repository().get_by_id(selection=self._tree, query_hooks=self._query_hooks, **kwargs)
return GraphQLResult(query_results, self._tree)
def list(self, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> GraphQLResult[Any, T]:
"""Asynchronously get a list of entities matching the criteria.
Args:
filter_input: Optional filter conditions
order_by: Optional ordering criteria
distinct_on: Optional fields to apply DISTINCT on
limit: Optional maximum number of results to return
offset: Optional number of results to skip
Returns:
A GraphQLResult containing the list of matching entities
"""
query_results = self.graphql_repository().list(selection=self._tree, dto_filter=filter_input or None, order_by=list(order_by or []), distinct_on=distinct_on, limit=limit, offset=offset, query_hooks=self._query_hooks)
return GraphQLResult(query_results, self._tree)
def create(self, data: Input[InputModel]) -> GraphQLResult[InputModel, T]:
"""Asynchronously create a new entity.
Args:
data: The input data for the new entity
Returns:
A GraphQLResult containing the created entity
"""
query_results = self.graphql_repository().create(data, self._tree)
return GraphQLResult(query_results, self._tree)
def upsert(self, data: Input[InputModel], filter_input: Optional[BooleanFilterDTO]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None) -> GraphQLResult[InputModel, T]:
"""Asynchronously insert or update an entity.
Args:
data: The input data for the entity
filter_input: Optional filter to find existing entity
update_fields: Optional fields to update if entity exists
conflict_fields: Optional fields to detect conflicts on
Returns:
A GraphQLResult containing the upserted entity
"""
query_results = self.graphql_repository().upsert(data, self._tree, update_fields, conflict_fields, filter_input)
return GraphQLResult(query_results, self._tree)
def update_by_id(self, data: Input[InputModel]) -> GraphQLResult[InputModel, T]:
"""Asynchronously update an entity by its ID.
Args:
data: The input data containing the ID and fields to update
Returns:
A GraphQLResult containing the updated entity
Raises:
NoResultFound: If no entity with the given ID exists
"""
query_results = self.graphql_repository().update_by_ids(data, self._tree)
return GraphQLResult(query_results, self._tree)
def update_by_filter(self, data: Input[InputModel], filter_input: BooleanFilterDTO) -> GraphQLResult[InputModel, T]:
"""Asynchronously update entities matching the given filter.
Args:
data: The input data containing fields to update
filter_input: The filter criteria to select entities to update
Returns:
A GraphQLResult containing the updated entities
"""
query_results = self.graphql_repository().update_by_filter(data, filter_input, self._tree)
return GraphQLResult(query_results, self._tree)
def delete(self, filter_input: Optional[BooleanFilterDTO]) -> GraphQLResult[Any, T]:
"""Asynchronously delete entities matching the given filter.
Args:
filter_input: The filter criteria to select entities to delete
Returns:
A GraphQLResult containing the deleted entities
"""
query_results = self.graphql_repository().delete(self._tree, filter_input or None)
return GraphQLResult(query_results, self._tree)
|
@dataclass
class StrawchemySyncRepository(StrawchemyRepository[T]):
'''Synchronous repository implementation for GraphQL data access.
This class provides synchronous methods for querying and mutating data
through GraphQL, using SQLAlchemy's synchronous API under the hood.
Args:
type: The Strawberry GraphQL type this repository works with
info: The GraphQL resolver info object
session_getter: Callable to get a database session
session: Optional explicit database session to use
filter_statement: Optional base SQLAlchemy select statement to apply to all queries
execution_options: Optional execution options for SQLAlchemy
deterministic_ordering: Whether to ensure deterministic ordering of results
'''
def graphql_repository(self) -> SQLAlchemyGraphQLSyncRepository[Any]:
'''Create and configure the underlying synchronous SQLAlchemy GraphQL repository.
Returns:
A configured SQLAlchemyGraphQLSyncRepository instance
'''
pass
def get_one_or_none(self, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> GraphQLResult[Any, T]:
'''Get at most one result matching the criteria, or None.
Args:
filter_input: Optional filter conditions
order_by: Optional ordering criteria
distinct_on: Optional fields to apply DISTINCT on
limit: Optional maximum number of results to return
offset: Optional number of results to skip
Returns:
A GraphQLResult containing the result or None
'''
pass
def get_one(self, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> GraphQLResult[Any, T]:
'''Get exactly one result matching the criteria.
Args:
filter_input: Optional filter conditions
order_by: Optional ordering criteria
distinct_on: Optional fields to apply DISTINCT on
limit: Optional maximum number of results to return
offset: Optional number of results to skip
Returns:
A GraphQLResult containing the single result
Raises:
NoResultFound: If no results are found
MultipleResultsFound: If multiple results are found
'''
pass
def get_by_id(self, **kwargs: Any) -> GraphQLResult[Any, T]:
'''Get an entity by its primary key.
Args:
**kwargs: Primary key field names and values
Returns:
A GraphQLResult containing the found entity
Raises:
NoResultFound: If no entity with the given ID exists
'''
pass
def list(self, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> GraphQLResult[Any, T]:
'''Get a list of entities matching the criteria.
Args:
filter_input: Optional filter conditions
order_by: Optional ordering criteria
distinct_on: Optional fields to apply DISTINCT on
limit: Optional maximum number of results to return
offset: Optional number of results to skip
Returns:
A GraphQLResult containing the list of matching entities
'''
pass
def create(self, data: Input[InputModel]) -> GraphQLResult[InputModel, T]:
'''Create a new entity.
Args:
data: The input data for the new entity
Returns:
A GraphQLResult containing the created entity
'''
pass
def upsert(self, data: Input[InputModel], filter_input: Optional[BooleanFilterDTO]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None) -> GraphQLResult[InputModel, T]:
'''Insert or update an entity.
Args:
data: The input data for the entity
filter_input: Optional filter to find existing entity
update_fields: Optional fields to update if entity exists
conflict_fields: Optional fields to detect conflicts on
Returns:
A GraphQLResult containing the upserted entity
'''
pass
def update_by_id(self, data: Input[InputModel]) -> GraphQLResult[InputModel, T]:
'''Update an entity by its ID.
Args:
data: The input data containing the ID and fields to update
Returns:
A GraphQLResult containing the updated entity
Raises:
NoResultFound: If no entity with the given ID exists
'''
pass
def update_by_filter(self, data: Input[InputModel], filter_input: BooleanFilterDTO) -> GraphQLResult[InputModel, T]:
'''Update entities matching the given filter.
Args:
data: The input data containing fields to update
filter_input: The filter criteria to select entities to update
Returns:
A GraphQLResult containing the updated entities
'''
pass
def delete(self, filter_input: Optional[BooleanFilterDTO]) -> GraphQLResult[Any, T]:
'''Delete entities matching the given filter.
Args:
filter_input: The filter criteria to select entities to delete
Returns:
A GraphQLResult containing the deleted entities
'''
pass
| 12
| 11
| 19
| 2
| 9
| 8
| 1
| 0.98
| 1
| 8
| 5
| 0
| 10
| 2
| 10
| 17
| 226
| 36
| 96
| 54
| 58
| 94
| 37
| 25
| 26
| 1
| 2
| 0
| 10
|
328,101
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/types.py
|
strawchemy.types.DefaultOffsetPagination
|
from dataclasses import dataclass
@dataclass(eq=True, frozen=True)
class DefaultOffsetPagination:
limit: int = 100
offset: int = 0
|
@dataclass(eq=True, frozen=True)
class DefaultOffsetPagination:
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 0
| 0
| 0
|
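Because the dataclass above is declared with `eq=True, frozen=True`, its instances behave as immutable, hashable value objects. A minimal sketch of that behavior (the class is re-declared locally so the snippet runs standalone; it mirrors `strawchemy.types.DefaultOffsetPagination`):

```python
from dataclasses import FrozenInstanceError, dataclass


# Local re-declaration mirroring strawchemy.types.DefaultOffsetPagination above.
@dataclass(eq=True, frozen=True)
class DefaultOffsetPagination:
    limit: int = 100
    offset: int = 0


a = DefaultOffsetPagination()
b = DefaultOffsetPagination(limit=100, offset=0)
assert a == b                  # eq=True gives value-based equality
assert hash(a) == hash(b)      # frozen=True makes instances hashable
try:
    a.limit = 50               # frozen=True forbids mutation
except FrozenInstanceError:
    pass
```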
328,102
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/typing.py
|
strawchemy.typing.DataclassProtocol
|
from typing import TYPE_CHECKING, Any, ClassVar, Literal, Protocol, Union
class DataclassProtocol(Protocol):
__dataclass_fields__: ClassVar[dict[str, Any]]
|
class DataclassProtocol(Protocol):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 24
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
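`DataclassProtocol` is a structural type: any class decorated with `@dataclass` gains a `__dataclass_fields__` class attribute and therefore matches it. A small illustration (the protocol is re-declared locally so the snippet runs without strawchemy; `Point` is an invented example class):

```python
from dataclasses import dataclass, fields
from typing import Any, ClassVar, Protocol


# Local re-declaration mirroring strawchemy.typing.DataclassProtocol above.
class DataclassProtocol(Protocol):
    __dataclass_fields__: ClassVar[dict[str, Any]]


@dataclass
class Point:
    x: int
    y: int


def field_names(obj: DataclassProtocol) -> list[str]:
    # Any dataclass instance matches structurally via __dataclass_fields__.
    return [f.name for f in fields(obj)]


print(field_names(Point(1, 2)))  # ['x', 'y']
```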
328,103
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/validation/base.py
|
strawchemy.validation.base.InputValidationError
|
from typing import TYPE_CHECKING, Any, Generic, Protocol, TypeVar
class InputValidationError(Exception):
"""Exception raised when input validation fails.
This exception wraps the original validation error and provides a method to convert
it to a GraphQL-compatible error type.
Attributes:
validation: The validation protocol instance that failed
exception: The original exception that was raised during validation
"""
def __init__(self, validation: ValidationProtocol[Any], exception: Exception) -> None:
"""Initialize with the validation instance and original exception.
Args:
validation: The validation protocol instance that failed
exception: The original exception that was raised
"""
self.validation = validation
self.exception = exception
def graphql_type(self) -> ValidationErrorType:
"""Convert the validation error to a GraphQL-compatible error type.
Returns:
A GraphQL-compatible error type that can be returned in a response
"""
return self.validation.to_error(self.exception)
|
class InputValidationError(Exception):
'''Exception raised when input validation fails.
This exception wraps the original validation error and provides a method to convert
it to a GraphQL-compatible error type.
Attributes:
validation: The validation protocol instance that failed
exception: The original exception that was raised during validation
'''
def __init__(self, validation: ValidationProtocol[Any], exception: Exception) -> None:
'''Initialize with the validation instance and original exception.
Args:
validation: The validation protocol instance that failed
exception: The original exception that was raised
'''
pass
def graphql_type(self) -> ValidationErrorType:
'''Convert the validation error to a GraphQL-compatible error type.
Returns:
A GraphQL-compatible error type that can be returned in a response
'''
pass
| 3
| 3
| 8
| 1
| 3
| 5
| 1
| 2.67
| 1
| 3
| 2
| 0
| 2
| 2
| 2
| 12
| 28
| 6
| 6
| 5
| 3
| 16
| 6
| 5
| 3
| 1
| 3
| 0
| 2
|
328,104
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/validation/base.py
|
strawchemy.validation.base.ValidationProtocol
|
from typing import TYPE_CHECKING, Any, Generic, Protocol, TypeVar
class ValidationProtocol(Protocol, Generic[T]):
"""Protocol defining the interface for validation classes.
This protocol specifies the required methods that validation classes must implement
to be compatible with Strawchemy's validation system.
"""
def validate(self, **kwargs: Any) -> MappedDTO[T]:
"""Validate the input data and return a mapped DTO if successful.
Args:
**kwargs: The input data to validate
Returns:
A mapped DTO containing the validated data
Raises:
Exception: If validation fails
"""
raise NotImplementedError
def to_error(self, exception: Any) -> ValidationErrorType:
"""Convert a validation exception to a GraphQL-compatible error type.
Args:
exception: The exception to convert
Returns:
A GraphQL-compatible error type
"""
raise NotImplementedError
|
class ValidationProtocol(Protocol, Generic[T]):
'''Protocol defining the interface for validation classes.
This protocol specifies the required methods that validation classes must implement
to be compatible with Strawchemy's validation system.
'''
def validate(self, **kwargs: Any) -> MappedDTO[T]:
'''Validate the input data and return a mapped DTO if successful.
Args:
**kwargs: The input data to validate
Returns:
A mapped DTO containing the validated data
Raises:
Exception: If validation fails
'''
pass
def to_error(self, exception: Any) -> ValidationErrorType:
'''Convert a validation exception to a GraphQL-compatible error type.
Args:
exception: The exception to convert
Returns:
A GraphQL-compatible error type
'''
pass
| 3
| 3
| 12
| 3
| 2
| 7
| 1
| 3.6
| 2
| 4
| 2
| 1
| 2
| 0
| 2
| 26
| 31
| 8
| 5
| 3
| 2
| 18
| 5
| 3
| 2
| 1
| 5
| 0
| 2
|
328,105
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/validation/pydantic.py
|
strawchemy.validation.pydantic.MappedPydanticGraphQLDTO
|
from strawchemy.dto.base import ModelT
from strawchemy.dto.backend.pydantic import MappedPydanticDTO, PydanticDTOBackend
from strawchemy.strawberry.dto import StrawchemyDTOAttributes
from typing import TYPE_CHECKING, Any, ClassVar, Optional
class MappedPydanticGraphQLDTO(StrawchemyDTOAttributes, MappedPydanticDTO[ModelT]):
__strawchemy_filter__: ClassVar[Optional[type[Any]]] = None
__strawchemy_order_by__: ClassVar[Optional[type[Any]]] = None
|
class MappedPydanticGraphQLDTO(StrawchemyDTOAttributes, MappedPydanticDTO[ModelT]):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 7
| 0
| 0
|
328,106
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/validation/pydantic.py
|
strawchemy.validation.pydantic.PydanticMapper
|
from functools import partial
from strawchemy.dto.backend.pydantic import MappedPydanticDTO, PydanticDTOBackend
class PydanticMapper:
"""Provides methods to generate Pydantic models for input validation.
This class leverages a `StrawchemyInputValidationFactory` to create
Pydantic models tailored for different input scenarios such as creation,
update by primary key, and update by filter.
"""
def __init__(self, strawchemy: Strawchemy) -> None:
"""Initializes the PydanticMapper.
Args:
strawchemy: An instance of the Strawchemy class.
"""
pydantic_backend = PydanticDTOBackend(MappedPydanticGraphQLDTO)
self._strawchemy: Strawchemy = strawchemy
'The Strawchemy instance used for schema introspection.'
self._validation_factory: StrawchemyInputValidationFactory = StrawchemyInputValidationFactory(self._strawchemy, pydantic_backend)
'Factory for creating input validation Pydantic models.'
self.create = partial(self._validation_factory.input, mode='create_input')
"Generates a Pydantic model for 'create' input validation."
self.pk_update = partial(self._validation_factory.input, mode='update_by_pk_input')
"Generates a Pydantic model for 'update_by_pk' input validation."
self.filter_update = partial(self._validation_factory.input, mode='update_by_filter_input')
"Generates a Pydantic model for 'update_by_filter' input validation."
|
class PydanticMapper:
'''Provides methods to generate Pydantic models for input validation.
This class leverages a `StrawchemyInputValidationFactory` to create
Pydantic models tailored for different input scenarios such as creation,
update by primary key, and update by filter.
'''
def __init__(self, strawchemy: Strawchemy) -> None:
'''Initializes the PydanticMapper.
Args:
strawchemy: An instance of the Strawchemy class.
'''
pass
| 2
| 2
| 20
| 2
| 9
| 9
| 1
| 1.4
| 0
| 4
| 3
| 0
| 1
| 5
| 1
| 1
| 28
| 4
| 10
| 8
| 8
| 14
| 8
| 8
| 6
| 1
| 0
| 0
| 1
|
328,107
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/validation/pydantic.py
|
strawchemy.validation.pydantic.PydanticValidation
|
from pydantic import ValidationError
from strawchemy.strawberry.mutation.types import LocalizedErrorType, ValidationErrorType
from .base import InputValidationError, T, ValidationProtocol
from strawchemy.utils import snake_to_lower_camel_case
from typing import TYPE_CHECKING, Any, ClassVar, Optional
from dataclasses import dataclass
from typing_extensions import override
from strawchemy.dto.backend.pydantic import MappedPydanticDTO, PydanticDTOBackend
@dataclass
class PydanticValidation(ValidationProtocol[T]):
model: type[MappedPydanticDTO[T]]
to_camel: bool = True
@classmethod
def _to_localized_error(cls, errors: ErrorDetails, to_camel: bool) -> LocalizedErrorType:
return LocalizedErrorType(loc=[snake_to_lower_camel_case(str(loc)) if to_camel else str(loc) for loc in errors['loc']], message=errors['msg'], type=errors['type'])
@override
def validate(self, **kwargs: Any) -> MappedDTO[T]:
try:
return self.model.model_validate(kwargs)
except ValidationError as error:
raise InputValidationError(self, error) from error
@override
def to_error(self, exception: ValidationError) -> ValidationErrorType:
return ValidationErrorType(errors=[self._to_localized_error(err, self.to_camel) for err in exception.errors()])
|
@dataclass
class PydanticValidation(ValidationProtocol[T]):
@classmethod
def _to_localized_error(cls, errors: ErrorDetails, to_camel: bool) -> LocalizedErrorType:
pass
@override
def validate(self, **kwargs: Any) -> MappedDTO[T]:
pass
@override
def to_error(self, exception: ValidationError) -> ValidationErrorType:
pass
| 8
| 0
| 4
| 0
| 4
| 0
| 2
| 0
| 1
| 8
| 4
| 0
| 2
| 0
| 3
| 29
| 22
| 3
| 19
| 9
| 12
| 0
| 12
| 5
| 8
| 2
| 6
| 1
| 5
|
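A hedged sketch of how the pieces above fit together in a mutation resolver: `UserCreateValidator` stands in for a `MappedPydanticDTO` subclass (hypothetical name; in practice produced by `StrawchemyInputValidationFactory`), and only `PydanticValidation`, `InputValidationError`, and `graphql_type()` are taken from the code shown here. It is not runnable without strawchemy and a generated validator model.

```python
from strawchemy.validation.base import InputValidationError
from strawchemy.validation.pydantic import PydanticValidation


def create_user(**input_data):
    # UserCreateValidator is hypothetical: a MappedPydanticDTO generated elsewhere.
    validation = PydanticValidation(model=UserCreateValidator)
    try:
        dto = validation.validate(**input_data)
    except InputValidationError as err:
        # Returns a ValidationErrorType with LocalizedErrorType entries; the
        # `loc` parts are camelCased because to_camel defaults to True.
        return err.graphql_type()
    return dto
```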
328,108
|
gazorby/strawchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/validation/pydantic.py
|
strawchemy.validation.pydantic.StrawchemyInputValidationFactory
|
from strawchemy.strawberry.factories.types import InputFactory
from typing_extensions import override
from sqlalchemy.orm import DeclarativeBase
from typing import TYPE_CHECKING, Any, ClassVar, Optional
from strawchemy.dto.utils import read_partial
from collections.abc import Callable
from .base import InputValidationError, T, ValidationProtocol
class StrawchemyInputValidationFactory(InputFactory[MappedPydanticGraphQLDTO[Any]]):
@override
def _resolve_type(self, field: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]], dto_config: DTOConfig, node: Node[Relation[DeclarativeBase, MappedPydanticGraphQLDTO[Any]], None], *, mode: GraphQLPurpose, **factory_kwargs: Any) -> Any:
if not field.is_relation:
return self._resolve_basic_type(field, dto_config)
return self._resolve_relation_type(field, dto_config, node, mode=mode, **factory_kwargs)
if TYPE_CHECKING:
@override
def input(self, model: type[DeclarativeT], *, mode: GraphQLPurpose, include: Optional[IncludeFields]=None, exclude: Optional[ExcludeFields]=None, partial: Optional[bool]=None, type_map: Optional[Mapping[Any, Any]]=None, aliases: Optional[Mapping[str, str]]=None, alias_generator: Optional[Callable[[str], str]]=None, name: Optional[str]=None, description: Optional[str]=None, directives: Optional[Sequence[object]]=(), override: bool=False, purpose: Purpose=Purpose.WRITE, **kwargs: Any) -> Callable[[type[Any]], type[MappedPydanticGraphQLDTO[DeclarativeT]]]:
...
@override
def factory(self, model: type[DeclarativeT], dto_config: DTOConfig=read_partial, base: Optional[type[Any]]=None, name: Optional[str]=None, parent_field_def: Optional[DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]=None, current_node: Optional[Node[Relation[Any, MappedPydanticGraphQLDTO[T]], None]]=None, raise_if_no_fields: bool=False, tags: Optional[set[str]]=None, backend_kwargs: Optional[dict[str, Any]]=None, *, description: Optional[str]=None, mode: GraphQLPurpose, **kwargs: Any) -> type[MappedPydanticGraphQLDTO[DeclarativeT]]:
return super().factory(model, dto_config, base, name, parent_field_def, current_node, raise_if_no_fields, tags, backend_kwargs=backend_kwargs, description=description or f'{mode.capitalize()} validation type', mode=mode, register_type=False, **kwargs)
|
class StrawchemyInputValidationFactory(InputFactory[MappedPydanticGraphQLDTO[Any]]):
@override
def _resolve_type(self, field: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]], dto_config: DTOConfig, node: Node[Relation[DeclarativeBase, MappedPydanticGraphQLDTO[Any]], None], *, mode: GraphQLPurpose, **factory_kwargs: Any) -> Any:
pass
@override
def input(self, model: type[DeclarativeT], *, mode: GraphQLPurpose, include: Optional[IncludeFields]=None, exclude: Optional[ExcludeFields]=None, partial: Optional[bool]=None, type_map: Optional[Mapping[Any, Any]]=None, aliases: Optional[Mapping[str, str]]=None, alias_generator: Optional[Callable[[str], str]]=None, name: Optional[str]=None, description: Optional[str]=None, directives: Optional[Sequence[object]]=(), override: bool=False, purpose: Purpose=Purpose.WRITE, **kwargs: Any) -> Callable[[type[Any]], type[MappedPydanticGraphQLDTO[DeclarativeT]]]:
pass
@override
def factory(self, model: type[DeclarativeT], dto_config: DTOConfig=read_partial, base: Optional[type[Any]]=None, name: Optional[str]=None, parent_field_def: Optional[DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]=None, current_node: Optional[Node[Relation[Any, MappedPydanticGraphQLDTO[T]], None]]=None, raise_if_no_fields: bool=False, tags: Optional[set[str]]=None, backend_kwargs: Optional[dict[str, Any]]=None, *, description: Optional[str]=None, mode: GraphQLPurpose, **kwargs: Any) -> type[MappedPydanticGraphQLDTO[DeclarativeT]]:
pass
| 7
| 0
| 20
| 0
| 20
| 0
| 1
| 0
| 1
| 16
| 6
| 0
| 3
| 0
| 3
| 55
| 67
| 3
| 64
| 46
| 19
| 0
| 10
| 4
| 6
| 2
| 6
| 1
| 4
|
328,109
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/Problem.py
|
iohblade.Problem.Problem
|
import shutil
from .utils import TimeoutException
from abc import ABC, abstractmethod
import tempfile
import subprocess
from .solution import Solution
import multiprocessing
from pathlib import Path
import os
import numpy as np
class Problem(ABC):
"""
Abstract problem class.
"""
def __init__(self, logger=None, training_instances=None, test_instances=None, name='Problem', eval_timeout=6000, dependencies=None, imports=None):
"""
Initializes a problem instance with logging and dataset references.
Args:
logger (Logger, optional): Logger object for tracking solutions.
training_instances (list, optional): List of training problem instances.
test_instances (list, optional): List of test problem instances.
name (str, optional): Name of the problem.
eval_timeout (int, optional): Number of seconds before a timeout error is raised.
budget (int): Number of algorithms allowed to be generated per run.
dependencies (list, optional): a list of pypi packages to install before evaluation.
imports (string, optional): the python string to manage imports in the evaluation file.
"""
self.logger = logger
self.logger_dir = ''
self.training_instances = training_instances if training_instances else []
self.test_instances = test_instances if test_instances else []
self.task_prompt = 'Write the problem description part here.'
self.example_prompt = 'Write an example code here.'
self.format_prompt = 'Write the format description part here.'
self.name = name
self.eval_timeout = eval_timeout
self.dependencies = BASE_DEPENDENCIES.copy()
if dependencies:
self.dependencies.extend(dependencies)
if imports is None:
self.imports = 'import numpy as np\n'
else:
self.imports = imports
self._env_path: Path | None = None
self._python_bin: Path | None = None
self.func_name = '__call__'
self.init_inputs = ['budget', 'dim']
self.func_inputs = ['func']
self.func_outputs = ['f_opt', 'x_opt']
def __call__(self, solution: Solution, logger=None):
"""
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
logger (RunLogger, optional): The RunLogger object attached to the problem to keep track of evaluations.
Returns:
Solution: The evaluated solution with updated fitness and scores.
"""
if logger != None:
print('LOGGER is NOT NONE (UNEXPECTED)')
self.logger = logger
if self.logger != None:
if self.logger.budget_exhausted():
solution.set_scores(-np.inf, feedback='Budget is exhausted.', error='Budget is exhausted.')
return solution
try:
self._ensure_env()
parent_conn, child_conn = multiprocessing.Pipe()
process = multiprocessing.Process(target=evaluate_in_subprocess, args=(self, child_conn, solution))
process.start()
process.join(timeout=self.eval_timeout)
if process.is_alive():
raise TimeoutException(f'Evaluation timed out after {self.eval_timeout} seconds.')
if parent_conn.poll():
result = parent_conn.recv()
if isinstance(result, Exception):
raise result
elif isinstance(result, Solution):
solution = result
elif isinstance(result, str):
solution.set_scores(-np.inf, feedback=f'An error occurred: {result}.', error=result)
else:
raise Exception('No Solution object or string returned.')
else:
raise Exception('Evaluation failed without an exception.')
except Exception as e:
solution.set_scores(-np.inf, feedback=f'An exception occurred: {e}.', error=f'An exception occurred: {e}.')
finally:
try:
process.terminate()
process.join()
except Exception:
pass
if self.logger is not None:
self.logger.log_individual(solution)
return solution
def _ensure_env(self):
"""Create the virtual environment for evaluations if it does not exist."""
if self._env_path is not None:
return
import virtualenv
env_dir = tempfile.mkdtemp(prefix='blade_env_')
self._env_path = Path(env_dir)
virtualenv.cli_run([env_dir])
self._python_bin = self._env_path / ('Scripts' if os.name == 'nt' else 'bin') / 'python'
deps = getattr(self, 'dependencies', [])
if deps:
subprocess.run([str(self._python_bin), '-m', 'pip', 'install', *deps], check=True, capture_output=True, text=True)
def cleanup(self):
try:
if self._env_path and self._env_path.exists():
shutil.rmtree(self._env_path)
except Exception:
pass
def set_logger(self, logger):
"""
Sets the logger for this problem.
"""
self.logger = logger
if logger != None:
self.logger_dir = logger.get_log_dir()
def get_prompt(self):
"""
Get the full prompt describing the problem and how to format the answer.
"""
return self.task_prompt + self.example_prompt + self.format_prompt
@abstractmethod
def evaluate(self, solution: Solution):
"""
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
"""
pass
@abstractmethod
def test(self, solution: Solution):
"""
Performs a complete evaluation on test instances and returns the fitness score.
Args:
solution (Solution): Solution object to be tested.
"""
pass
@abstractmethod
def to_dict(self):
"""
Returns a dictionary representation of the problem including all parameters.
Returns:
dict: Dictionary representation of the problem.
"""
pass
|
class Problem(ABC):
'''
Abstract problem class.
'''
def __init__(self, logger=None, training_instances=None, test_instances=None, name='Problem', eval_timeout=6000, dependencies=None, imports=None):
'''
Initializes a problem instance with logging and dataset references.
Args:
logger (Logger, optional): Logger object for tracking solutions.
training_instances (list, optional): List of training problem instances.
test_instances (list, optional): List of test problem instances.
name (str, optional): Name of the problem.
eval_timeout (int, optional): Number of seconds before a timeout error is raised.
budget (int): Number of algorithms allowed to be generated per run.
dependencies (list, optional): a list of pypi packages to install before evaluation.
imports (string, optional): the python string to manage imports in the evaluation file.
'''
pass
def __call__(self, solution: Solution, logger=None):
'''
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
logger (RunLogger, optional): The RunLogger object attached to the problem to keep track of evaluations.
Returns:
Solution: The evaluated solution with updated fitness and scores.
'''
pass
def _ensure_env(self):
'''Create the virtual environment for evaluations if it does not exist.'''
pass
def cleanup(self):
pass
def set_logger(self, logger):
'''
Sets the logger for this problem.
'''
pass
def get_prompt(self):
'''
Get the full prompt describing the problem and how to format the answer.
'''
pass
@abstractmethod
def evaluate(self, solution: Solution):
'''
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
'''
pass
@abstractmethod
def test(self, solution: Solution):
'''
Performs a complete evaluation on test instances and returns the fitness score.
Args:
solution (Solution): Solution object to be tested.
'''
pass
@abstractmethod
def to_dict(self):
'''
Returns a dictionary representation of the problem including all parameters.
Returns:
dict: Dictionary representation of the problem.
'''
pass
| 13
| 9
| 21
| 2
| 14
| 6
| 3
| 0.44
| 1
| 5
| 2
| 0
| 9
| 17
| 9
| 29
| 205
| 23
| 128
| 47
| 105
| 56
| 86
| 33
| 75
| 12
| 4
| 3
| 30
|
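A hedged sketch of a concrete `Problem` subclass implementing the three abstract methods. The scoring rule is invented for illustration, and the import paths are assumed from the module layout shown above.

```python
import numpy as np

from iohblade.problem import Problem    # import path assumed
from iohblade.solution import Solution  # import path assumed


class ToyProblem(Problem):
    """Illustrative problem: scores a solution by a made-up rule."""

    def evaluate(self, solution: Solution):
        # Invented rule: reward solutions whose code defines a class.
        if "class" in solution.code:
            return solution.set_scores(1.0, feedback="Code defines a class.")
        return solution.set_scores(-np.inf, feedback="No class definition found.")

    def test(self, solution: Solution):
        return self.evaluate(solution)

    def to_dict(self):
        return {"name": self.name, "eval_timeout": self.eval_timeout}
```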
328,110
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/Solution.py
|
iohblade.Solution.Solution
|
import json
import uuid
import numpy as np
class Solution:
"""
Represents a candidate solution (an individual) in the evolutionary algorithm.
Each individual has properties such as code, fitness, feedback, and metadata for additional information.
"""
def __init__(self, code='', name='', description='', configspace=None, generation=0, parent_ids=[], operator=None, task_prompt=''):
"""
Initializes an individual with optional attributes.
Args:
code (str): The code of the individual.
name (str): The name of the individual (typically the class name in the code).
description (str): A short description of the individual (e.g., algorithm's purpose or behavior).
configspace (Optional[ConfigSpace]): Optional configuration space for HPO.
generation (int): The generation this individual belongs to.
parent_ids (list): UUID of the parent individuals in a list.
operator (str): Optional identifier of the LLM operation that created this individual.
task_prompt (str): The task prompt used to generate this solution.
"""
self.id = str(uuid.uuid4())
self.code = code
self.name = name
self.description = description
self.configspace = configspace
self.generation = generation
self.fitness = -np.inf
self.feedback = ''
self.error = ''
self.parent_ids = parent_ids
self.metadata = {}
self.operator = operator
self.task_prompt = task_prompt
def __getstate__(self):
return self.to_dict()
def __setstate__(self, state):
self.__dict__.update(state)
if self.configspace == '':
self.configspace = None
def set_operator(self, operator):
"""
Sets the operator name that generated this individual.
Args:
operator (str): The name of the operator (for logging purposes).
"""
self.operator = operator
def add_metadata(self, key, value):
"""
Adds key-value pairs to the metadata dictionary.
Args:
key (str): The key for the metadata.
value: The value associated with the key.
"""
self.metadata[key] = value
def get_metadata(self, key):
"""
Get a metadata item from the dictionary.
Args:
key (str): The key for the metadata to obtain.
"""
return self.metadata[key] if key in self.metadata.keys() else None
def set_scores(self, fitness, feedback='', error=''):
self.fitness = fitness
self.feedback = feedback
self.error = error
return self
def get_summary(self):
"""
Returns a string summary of this solution's key attributes.
Returns:
str: A string representing the solution in a summary format.
"""
return f'{self.name}: {self.description} (Score: {self.fitness})'
def copy(self):
"""
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent.
Returns:
Solution: A new Solution instance with the same attributes but a different ID.
"""
new_solution = Solution(code=self.code, name=self.name, description=self.description, configspace=self.configspace, generation=self.generation + 1, parent_ids=[self.id], operator=self.operator, task_prompt=self.task_prompt)
new_solution.metadata = self.metadata.copy()
return new_solution
def empty_copy(self):
"""
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent but without other fields.
Returns:
Solution: A new Solution instance with a new ID and empty code, name, and description fields.
"""
new_solution = Solution(code='', name='', description='', configspace=None, generation=self.generation + 1, parent_ids=[self.id], operator=self.operator)
return new_solution
def to_dict(self):
"""
Converts the individual to a dictionary.
Returns:
dict: A dictionary representation of the individual.
"""
try:
cs = self.configspace
cs = cs.to_serialized_dict()
except Exception:
cs = ''
return {'id': self.id, 'fitness': self.fitness, 'name': self.name, 'description': self.description, 'code': self.code, 'configspace': cs, 'generation': self.generation, 'feedback': self.feedback, 'error': self.error, 'parent_ids': self.parent_ids, 'operator': self.operator, 'metadata': self.metadata, 'task_prompt': self.task_prompt}
def from_dict(self, data):
"""
Updates the Solution instance from a dictionary.
Args:
data (dict): A dictionary representation of the individual.
Returns:
None
"""
configspace = data.get('configspace', None)
if isinstance(configspace, dict):
try:
configspace = ConfigSpace()
configspace.from_serialized_dict(data['configspace'])
except Exception as e:
print(f'Warning: Failed to deserialize configspace - {e}')
configspace = None
self.id = data.get('id')
self.fitness = data.get('fitness')
self.name = data.get('name')
self.description = data.get('description')
self.code = data.get('code')
self.configspace = configspace
self.generation = data.get('generation')
self.feedback = data.get('feedback')
self.error = data.get('error')
self.parent_ids = data.get('parent_ids', [])
self.operator = data.get('operator')
self.metadata = data.get('metadata', {})
def to_json(self):
"""
Converts the individual to a JSON string.
Returns:
str: A JSON string representation of the individual.
"""
return json.dumps(self.to_dict(), default=str, indent=4)
|
class Solution:
'''
Represents a candidate solution (an individual) in the evolutionary algorithm.
Each individual has properties such as code, fitness, feedback, and metadata for additional information.
'''
def __init__(self, code='', name='', description='', configspace=None, generation=0, parent_ids=[], operator=None, task_prompt=''):
'''
Initializes an individual with optional attributes.
Args:
code (str): The code of the individual.
name (str): The name of the individual (typically the class name in the code).
description (str): A short description of the individual (e.g., algorithm's purpose or behavior).
configspace (Optional[ConfigSpace]): Optional configuration space for HPO.
generation (int): The generation this individual belongs to.
parent_ids (list): UUID of the parent individuals in a list.
operator (str): Optional identifier of the LLM operation that created this individual.
task_prompt (str): The task prompt used to generate this solution.
'''
pass
def __getstate__(self):
pass
def __setstate__(self, state):
pass
def set_operator(self, operator):
'''
Sets the operator name that generated this individual.
Args:
operator (str): The name of the operator (for logging purposes).
'''
pass
def add_metadata(self, key, value):
'''
Adds key-value pairs to the metadata dictionary.
Args:
key (str): The key for the metadata.
value: The value associated with the key.
'''
pass
def get_metadata(self, key):
'''
Get a metadata item from the dictionary.
Args:
key (str): The key for the metadata to obtain.
'''
pass
def set_scores(self, fitness, feedback='', error=''):
pass
def get_summary(self):
'''
Returns a string summary of this solution's key attributes.
Returns:
str: A string representing the solution in a summary format.
'''
pass
def copy(self):
'''
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent.
Returns:
Solution: A new Solution instance with the same attributes but a different ID.
'''
pass
def empty_copy(self):
'''
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent but without other fields.
Returns:
Solution: A new Solution instance with a new ID and empty code, name, and description fields.
'''
pass
def to_dict(self):
'''
Converts the individual to a dictionary.
Returns:
dict: A dictionary representation of the individual.
'''
pass
def from_dict(self, data):
'''
Updates the Solution instance from a dictionary.
Args:
data (dict): A dictionary representation of the individual.
Returns:
None
'''
pass
def to_json(self):
'''
Converts the individual to a JSON string.
Returns:
str: A JSON string representation of the individual.
'''
pass
| 14
| 11
| 16
| 1
| 9
| 6
| 1
| 0.71
| 0
| 3
| 0
| 0
| 10
| 12
| 10
| 10
| 172
| 22
| 91
| 37
| 71
| 65
| 61
| 26
| 50
| 3
| 0
| 2
| 14
|
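A minimal usage sketch of the `Solution` container above (assumes iohblade is installed and the import path resolves; the code string and score are illustrative):

```python
from iohblade.solution import Solution  # import path assumed

sol = Solution(code="class RandomSearch: ...", name="RandomSearch",
               description="Uniform random sampling baseline")
sol.add_metadata("auc", 0.42)
sol.set_scores(-1.23, feedback="Ran on 5 training instances without errors.")

print(sol.get_summary())  # RandomSearch: Uniform random sampling baseline (Score: -1.23)
child = sol.copy()        # new id, generation + 1, parent_ids == [sol.id]
print(sol.to_json())      # JSON string with id, fitness, code, metadata, ...
```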
328,111
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/baselines/modcma.py
|
iohblade.baselines.modcma.ModularCMAES
|
from modcma import c_maes
class ModularCMAES:
"""
Baseline implementation of CMA-ES with active update.
Can be extended later to cover all Modular CMAES options.
"""
def __init__(self, budget=10000, dim=10, **kwargs):
self.budget = budget
self.dim = dim
self.modules = c_maes.parameters.Modules()
self.modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.COVARIANCE
self.modules.active = True
self.settings = c_maes.parameters.Settings(dim, self.modules, **kwargs)
self.parameters = c_maes.Parameters(self.settings)
self.cma = c_maes.ModularCMAES(self.parameters)
def __call__(self, func):
return self.cma.run(func)
|
class ModularCMAES:
'''
Baseline implementation of CMA-ES with active update.
Can be extended later to cover all Modular CMAES options.
'''
def __init__(self, budget=10000, dim=10, **kwargs):
pass
def __call__(self, func):
pass
| 3
| 1
| 8
| 0
| 6
| 2
| 1
| 0.67
| 0
| 0
| 0
| 0
| 2
| 6
| 2
| 2
| 22
| 2
| 12
| 9
| 9
| 8
| 12
| 9
| 9
| 1
| 0
| 0
| 2
|
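A hedged usage sketch of the baseline on a BBOB function via the `ioh` package (assumes `ioh` and `modcma` are installed; the function id and budget are illustrative):

```python
import ioh

from iohblade.baselines.modcma import ModularCMAES

func = ioh.get_problem(1, instance=1, dimension=10)  # BBOB f1 (sphere)
es = ModularCMAES(budget=2000, dim=10)
es(func)                        # delegates to c_maes.ModularCMAES.run(func)
print(func.state.current_best)  # best point/value tracked by ioh
```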
328,112
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/experiment.py
|
iohblade.experiment.Experiment
|
import numpy as np
from abc import ABC, abstractmethod
import logging
import copy
from .loggers import ExperimentLogger
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
import contextlib
class Experiment(ABC):
"""
Abstract class for an entire experiment, running multiple algorithms on multiple problems.
"""
def __init__(self, methods: list, problems: list, runs=5, budget=100, seeds=None, show_stdout=False, exp_logger=None, n_jobs=1):
"""
Initializes an experiment with multiple methods and problems.
Args:
methods (list): List of method instances.
problems (list): List of problem instances.
runs (int): Number of runs for each method.
budget (int): Number of evaluations per run for each method.
seeds (list, optional): The exact seeds to use for the runs; len(seeds) overrides the number of runs if set.
show_stdout (bool): Whether to show stdout and stderr (standard output) or not.
exp_logger (ExperimentLogger, optional): The logger object; can be a standard file logger or a WandB or MLFlow logger.
n_jobs (int): Number of runs to execute in parallel.
"""
self.methods = methods
self.problems = problems
self.runs = runs
self.budget = budget
if seeds is None:
self.seeds = np.arange(runs)
else:
self.seeds = seeds
self.runs = len(seeds)
self.show_stdout = show_stdout
self.n_jobs = n_jobs
if exp_logger is None:
exp_logger = ExperimentLogger('results/experiment')
self.exp_logger = exp_logger
def _clear_console(self) -> None:
"""Clear the console using ANSI escape codes."""
print('\x1bc', end='')
def _print_run_overview(self) -> None:
"""Pretty print the planned runs and their status."""
runs = getattr(self.exp_logger, 'progress', {}).get('runs', [])
header = f"{'Method':<15} {'Problem':<15} {'Seed':<5} Status"
lines = ['Run overview:', header, '-' * len(header)]
for r in runs:
if r.get('end_time'):
status = '✅'
elif r.get('start_time'):
status = '🔄'
else:
status = '⏳'
lines.append(f"{r['method_name']:<15} {r['problem_name']:<15} {r['seed']:<5} {status}")
print('\n'.join(lines))
def _refresh_console(self) -> None:
"""Clear the console and show the banner and run overview."""
with contextlib.redirect_stdout(sys.__stdout__):
self._clear_console()
self._print_welcome_message()
self._print_run_overview()
sys.__stdout__.flush()
def _print_welcome_message(self) -> None:
"""Print a welcome banner with instructions."""
message = f'\n{BLADE_ASCII}\nWelcome to BLADE!\nYou can inspect this experiment in your browser by running:\n uv run iohblade-webapp\n\nWhile BLADE hides most output from experiments by default, some logs or warnings may still appear.\n'
print(message)
def __call__(self):
"""
Runs the experiment by executing each method on each problem.
"""
total_runs = len(self.problems) * len(self.methods) * len(self.seeds)
if hasattr(self.exp_logger, 'start_progress'):
self.exp_logger.start_progress(total_runs, methods=self.methods, problems=self.problems, seeds=self.seeds, budget=self.budget)
if not self.show_stdout:
logging.disable(logging.CRITICAL)
self._refresh_console()
else:
self._print_welcome_message()
self._print_run_overview()
tasks = {}
with ThreadPoolExecutor(max_workers=self.n_jobs) as executor:
for problem in self.problems:
for method in self.methods:
for seed in self.seeds:
np.random.seed(seed)
if hasattr(self.exp_logger, 'is_run_pending') and (not self.exp_logger.is_run_pending(method, problem, seed)):
continue
m_copy = copy.deepcopy(method)
p_copy = copy.deepcopy(problem)
logger = self.exp_logger.open_run(m_copy, p_copy, self.budget, seed)
future = executor.submit(self._run_single, m_copy, p_copy, logger, seed)
tasks[future] = (m_copy, p_copy, logger, seed)
for fut in as_completed(tasks):
method, problem, logger, seed = tasks[fut]
solution = fut.result()
self.exp_logger.add_run(method, problem, method.llm, solution, log_dir=logger.dirname, seed=seed)
problem.cleanup()
if not self.show_stdout:
self._refresh_console()
else:
self._print_run_overview()
return
def _run_single(self, method, problem, logger, seed):
np.random.seed(seed)
method.llm.set_logger(logger)
if hasattr(logger, 'start_run'):
logger.start_run(method.llm)
if self.show_stdout:
problem._ensure_env()
result = method(problem)
else:
with contextlib.redirect_stdout(None):
with contextlib.redirect_stderr(None):
problem._ensure_env()
result = method(problem)
if hasattr(logger, 'finish_run'):
logger.finish_run(result)
return result
|
class Experiment(ABC):
'''
Abstract class for an entire experiment, running multiple algorithms on multiple problems.
'''
def __init__(self, methods: list, problems: list, runs=5, budget=100, seeds=None, show_stdout=False, exp_logger=None, n_jobs=1):
'''
Initializes an experiment with multiple methods and problems.
Args:
methods (list): List of method instances.
problems (list): List of problem instances.
runs (int): Number of runs for each method.
budget (int): Number of evaluations per run for each method.
seeds (list, optional): The exact seeds to use for the runs; len(seeds) overrides the number of runs if set.
show_stdout (bool): Whether to show stdout and stderr (standard output) or not.
exp_logger (ExperimentLogger, optional): The logger object; can be a standard file logger or a WandB or MLFlow logger.
n_jobs (int): Number of runs to execute in parallel.
'''
pass
def _clear_console(self) -> None:
'''Clear the console using ANSI escape codes.'''
pass
def _print_run_overview(self) -> None:
'''Pretty print the planned runs and their status.'''
pass
def _refresh_console(self) -> None:
'''Clear the console and show the banner and run overview.'''
pass
def _print_welcome_message(self) -> None:
'''Print a welcome banner with instructions.'''
pass
def __call__(self):
'''
Runs the experiment by executing each method on each problem.
'''
pass
def _run_single(self, method, problem, logger, seed):
pass
| 8
| 7
| 21
| 1
| 18
| 3
| 3
| 0.18
| 1
| 4
| 1
| 2
| 7
| 8
| 7
| 27
| 158
| 11
| 125
| 44
| 107
| 23
| 78
| 33
| 70
| 9
| 4
| 5
| 21
|
328,113
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/experiment.py
|
iohblade.experiment.MA_BBOB_Experiment
|
from .problems import MA_BBOB
class MA_BBOB_Experiment(Experiment):
def __init__(self, methods: list, show_stdout=False, runs=5, budget=100, seeds=None, dims=[2, 5], budget_factor=2000, exp_logger=None, n_jobs=1, **kwargs):
"""
Initializes an experiment on MA-BBOB.
Args:
methods (list): List of method instances.
show_stdout (bool): Whether to show stdout and stderr (standard output) or not.
runs (int): Number of runs for each method.
budget (int): Number of algorithm evaluations per run per method.
seeds (list, optional): Seeds for each run.
dims (list): List of problem dimensions.
budget_factor (int): Budget factor for the problems.
**kwargs: Additional keyword arguments for the MA_BBOB problem.
exp_logger (ExperimentLogger): The logger to store the data.
n_jobs (int): Number of runs to execute in parallel.
"""
super().__init__(methods, [MA_BBOB(dims=dims, budget_factor=budget_factor, name='MA_BBOB', **kwargs)], runs=runs, budget=budget, seeds=seeds, show_stdout=show_stdout, exp_logger=exp_logger, n_jobs=n_jobs)
|
class MA_BBOB_Experiment(Experiment):
def __init__(self, methods: list, show_stdout=False, runs=5, budget=100, seeds=None, dims=[2, 5], budget_factor=2000, exp_logger=None, n_jobs=1, **kwargs):
'''
Initializes an experiment on MA-BBOB.
Args:
methods (list): List of method instances.
show_stdout (bool): Whether to show stdout and stderr (standard output) or not.
runs (int): Number of runs for each method.
budget (int): Number of algorithm evaluations per run per method.
seeds (list, optional): Seeds for each run.
dims (list): List of problem dimensions.
budget_factor (int): Budget factor for the problems.
**kwargs: Additional keyword arguments for the MA_BBOB problem.
exp_logger (ExperimentLogger): The logger to store the data.
n_jobs (int): Number of runs to execute in parallel.
'''
pass
| 2
| 1
| 45
| 1
| 30
| 14
| 1
| 0.45
| 1
| 3
| 1
| 0
| 1
| 0
| 1
| 28
| 46
| 1
| 31
| 14
| 17
| 14
| 3
| 2
| 1
| 1
| 5
| 0
| 1
|
328,114
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/llm.py
|
iohblade.llm.Claude_LLM
|
import time
import anthropic
import logging
import copy
class Claude_LLM(LLM):
"""A manager class for handling requests to Anthropic's Claude models."""
def __init__(self, api_key, model='claude-3-haiku-20240307', base_url=None, temperature=0.8, max_tokens=4096, **kwargs):
"""Initializes the LLM manager with an API key and model name."""
super().__init__(api_key, model, base_url, **kwargs)
self.temperature = temperature
self.max_tokens = max_tokens
self._client_kwargs = {'api_key': api_key}
if base_url:
self._client_kwargs['base_url'] = base_url
self.client = anthropic.Anthropic(**self._client_kwargs)
logging.getLogger('anthropic').setLevel(logging.ERROR)
def _query(self, session_messages, max_retries: int=5, default_delay: int=10):
"""Sends a conversation history to the configured model and returns the response text."""
attempt = 0
while True:
try:
response = self.client.messages.create(model=self.model, messages=session_messages, temperature=self.temperature, max_tokens=self.max_tokens)
content = response.content
if isinstance(content, list):
parts = []
for block in content:
parts.append(getattr(block, 'text', block.get('text', '')))
return ''.join(parts)
return content
except anthropic.RateLimitError as err:
attempt += 1
if attempt > max_retries:
raise
retry_after = None
if getattr(err, 'response', None) is not None:
retry_after = err.response.headers.get('Retry-After')
wait = int(retry_after) if retry_after else default_delay * attempt
time.sleep(wait)
except (anthropic.APITimeoutError, anthropic.APIConnectionError, anthropic.APIError) as err:
attempt += 1
if attempt > max_retries:
raise
time.sleep(default_delay * attempt)
def __getstate__(self):
state = self.__dict__.copy()
state.pop('client', None)
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.client = anthropic.Anthropic(**self._client_kwargs)
def __deepcopy__(self, memo):
cls = self.__class__
new = cls.__new__(cls)
memo[id(self)] = new
for k, v in self.__dict__.items():
if k == 'client':
continue
setattr(new, k, copy.deepcopy(v, memo))
new.client = anthropic.Anthropic(**new._client_kwargs)
return new
|
class Claude_LLM(LLM):
'''A manager class for handling requests to Anthropic's Claude models.'''
def __init__(self, api_key, model='claude-3-haiku-20240307', base_url=None, temperature=0.8, max_tokens=4096, **kwargs):
'''Initializes the LLM manager with an API key and model name.'''
pass
def _query(self, session_messages, max_retries: int=5, default_delay: int=10):
'''Sends a conversation history to the configured model and returns the response text.'''
pass
def __getstate__(self):
pass
def __setstate__(self, state):
pass
def __deepcopy__(self, memo):
pass
| 6
| 3
| 15
| 1
| 14
| 0
| 3
| 0.06
| 1
| 8
| 0
| 0
| 5
| 5
| 5
| 35
| 84
| 10
| 70
| 31
| 56
| 4
| 53
| 21
| 47
| 10
| 5
| 4
| 17
|
328,115
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/llm.py
|
iohblade.llm.DeepSeek_LLM
|
import openai
class DeepSeek_LLM(OpenAI_LLM):
"""A manager class for the DeepSeek chat models."""
def __init__(self, api_key, model='deepseek-chat', temperature=0.8, **kwargs):
"""Initializes DeepSeek LLM with required base URL."""
super().__init__(api_key, model=model, temperature=temperature, **kwargs)
self.base_url = 'https://api.deepseek.com'
self._client_kwargs['base_url'] = self.base_url
self.client = openai.OpenAI(**self._client_kwargs)
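Instantiation sketch (hypothetical key): the subclass only swaps in the DeepSeek endpoint; everything else comes from OpenAI_LLM.
from iohblade.llm import DeepSeek_LLM

llm = DeepSeek_LLM(api_key='sk-placeholder', temperature=0.5)  # hypothetical key
print(llm.base_url)   # 'https://api.deepseek.com'
print(llm.to_dict())  # model defaults to 'deepseek-chat'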
|
class DeepSeek_LLM(OpenAI_LLM):
'''A manager class for the DeepSeek chat models.'''
def __init__(self, api_key, model='deepseek-chat', temperature=0.8, **kwargs):
'''Initializes DeepSeek LLM with required base URL.'''
pass
| 2
| 2
| 6
| 0
| 5
| 1
| 1
| 0.33
| 1
| 2
| 0
| 0
| 1
| 2
| 1
| 36
| 9
| 1
| 6
| 4
| 4
| 2
| 6
| 4
| 4
| 1
| 6
| 0
| 1
|
328,116
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/llm.py
|
iohblade.llm.Dummy_LLM
|
class Dummy_LLM(LLM):
def __init__(self, model='DUMMY', **kwargs):
"""
Initializes the DUMMY LLM manager with a model name. This is a placeholder
and does not connect to any LLM provider. It is used for testing purposes only.
Args:
model (str, optional): model abbreviation. Defaults to "DUMMY".
Has no effect, just a placeholder.
"""
super().__init__('', model, None, **kwargs)
def _query(self, session_messages):
"""
        Sends a conversation history to the DUMMY model and returns a fixed, canned response text.
Args:
session_messages (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
Returns:
str: The text content of the LLM's response.
"""
big_message = ''
for msg in session_messages:
big_message += msg['content'] + '\n'
response = "This is a dummy response from the DUMMY LLM. It does not connect to any LLM provider.\nIt is used for testing purposes only.\n# Description: A simple random search algorithm that samples points uniformly in the search space and returns the best found solution.\n# Code:\n```python\nimport numpy as np\n\nclass RandomSearch:\n def __init__(self, budget=10000, dim=10):\n self.budget = budget\n self.dim = dim\n self.f_opt = np.inf\n self.x_opt = None\n\n def __call__(self, func):\n for i in range(self.budget):\n x = np.random.uniform(func.bounds.lb, func.bounds.ub)\n f = func(x)\n if f < self.f_opt:\n self.f_opt = f\n self.x_opt = x\n return self.f_opt, self.x_opt\n```\n# Configuration Space:\n```python\n{\n 'budget': {'type': 'int', 'lower': 1000, 'upper': 100000},\n 'dim': {'type': 'int', 'lower': 1, 'upper': 100}\n}\n```\n"
return response
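Usage sketch: because the canned response already contains a description and a fenced code block, Dummy_LLM can exercise the full sample_solution pipeline offline, without contacting any provider.
from iohblade.llm import Dummy_LLM

llm = Dummy_LLM()
solution = llm.sample_solution([{'role': 'user', 'content': 'Design a black-box optimizer.'}])
print(solution.name)         # 'RandomSearch', extracted from the canned response
print(solution.description)  # the '# Description:' line of the canned response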
|
class Dummy_LLM(LLM):
def __init__(self, model='DUMMY', **kwargs):
'''
Initializes the DUMMY LLM manager with a model name. This is a placeholder
and does not connect to any LLM provider. It is used for testing purposes only.
Args:
model (str, optional): model abbreviation. Defaults to "DUMMY".
Has no effect, just a placeholder.
'''
pass
def _query(self, session_messages):
'''
        Sends a conversation history to the DUMMY model and returns a fixed, canned response text.
Args:
session_messages (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
Returns:
str: The text content of the LLM's response.
'''
pass
| 3
| 2
| 30
| 4
| 17
| 10
| 2
| 0.56
| 1
| 1
| 0
| 0
| 2
| 0
| 2
| 32
| 61
| 8
| 34
| 6
| 31
| 19
| 9
| 6
| 6
| 2
| 5
| 1
| 3
|
328,117
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/llm.py
|
iohblade.llm.Gemini_LLM
|
import copy
import re
import time
from google import genai
class Gemini_LLM(LLM):
"""
A manager class for handling requests to Google's Gemini models.
"""
def __init__(self, api_key, model='gemini-2.0-flash', generation_config=None, **kwargs):
"""
Initializes the LLM manager with an API key and model name.
Args:
api_key (str): api key for authentication.
model (str, optional): model abbreviation. Defaults to "gemini-2.0-flash".
Options are: "gemini-1.5-flash","gemini-2.0-flash", and others from Googles models library.
"""
super().__init__(api_key, model, None, **kwargs)
if generation_config is None:
generation_config = {'temperature': 1, 'top_p': 0.95, 'top_k': 64, 'max_output_tokens': 65536, 'response_mime_type': 'text/plain'}
self.client = genai.Client(api_key=api_key)
self.api_key = api_key
self.generation_config = generation_config
def _query(self, session_messages, max_retries: int=5, default_delay: int=10, **kwargs):
"""
Sends the conversation history to Gemini, retrying on 429 ResourceExhausted exceptions.
Args:
session_messages (list[dict]): [{"role": str, "content": str}, …]
max_retries (int): how many times to retry before giving up.
default_delay (int): fallback sleep when the error has no retry_delay.
            kwargs: The generation_config is provided in __init__; to override any of
                those settings or to add extra parameters, pass them as additional
                keyword arguments to this function.
Returns:
str: model's reply.
"""
history = [{'role': m['role'], 'parts': [m['content']]} for m in session_messages[:-1]]
last = session_messages[-1]['content']
attempt = 0
while True:
try:
config = self.generation_config.copy()
config.update(**kwargs)
chat = self.client.chats.create(model=self.model, history=history, config=config)
response = chat.send_message(last)
return response.text
except Exception as err:
attempt += 1
if attempt > max_retries:
raise
delay = getattr(err, 'retry_delay', None)
if delay is not None:
wait = delay.seconds + 1
else:
m = re.search('retry_delay\\s*{\\s*seconds:\\s*(\\d+)', str(err))
wait = int(m.group(1)) if m else default_delay * attempt
time.sleep(wait)
def __getstate__(self):
state = self.__dict__.copy()
state.pop('client', None)
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.client = genai.Client(api_key=self.api_key)
def __deepcopy__(self, memo):
cls = self.__class__
new = cls.__new__(cls)
memo[id(self)] = new
for k, v in self.__dict__.items():
if k == 'client':
continue
setattr(new, k, copy.deepcopy(v, memo))
new.client = genai.Client(api_key=new.api_key)
return new
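Usage sketch (hypothetical key, remote call commented out): per-call keyword arguments override the generation_config set in __init__.
from iohblade.llm import Gemini_LLM

llm = Gemini_LLM(api_key='AIza-placeholder')  # hypothetical key
messages = [{'role': 'user', 'content': 'Propose a mutation operator for permutations.'}]
# reply = llm._query(messages, temperature=0.3)  # would query Gemini with the overridden temperature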
|
class Gemini_LLM(LLM):
'''
A manager class for handling requests to Google's Gemini models.
'''
def __init__(self, api_key, model='gemini-2.0-flash', generation_config=None, **kwargs):
'''
Initializes the LLM manager with an API key and model name.
Args:
api_key (str): api key for authentication.
model (str, optional): model abbreviation. Defaults to "gemini-2.0-flash".
Options are: "gemini-1.5-flash","gemini-2.0-flash", and others from Googles models library.
'''
pass
def _query(self, session_messages, max_retries: int=5, default_delay: int=10, **kwargs):
'''
Sends the conversation history to Gemini, retrying on 429 ResourceExhausted exceptions.
Args:
session_messages (list[dict]): [{"role": str, "content": str}, …]
max_retries (int): how many times to retry before giving up.
default_delay (int): fallback sleep when the error has no retry_delay.
            kwargs: The generation_config is provided in __init__; to override any of
                those settings or to add extra parameters, pass them as additional
                keyword arguments to this function.
Returns:
str: model's reply.
'''
pass
def __getstate__(self):
pass
def __setstate__(self, state):
pass
def __deepcopy__(self, memo):
pass
| 6
| 3
| 32
| 4
| 19
| 10
| 4
| 0.59
| 1
| 4
| 0
| 0
| 2
| 3
| 2
| 32
| 70
| 10
| 39
| 16
| 34
| 23
| 26
| 12
| 23
| 6
| 5
| 3
| 8
|
328,118
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/llm.py
|
iohblade.llm.LLM
|
from abc import ABC, abstractmethod
from .solution import Solution
import logging
from tokencost import calculate_completion_cost, calculate_prompt_cost, count_message_tokens, count_string_tokens
import re
class LLM(ABC):
def __init__(self, api_key, model='', base_url='', code_pattern=None, name_pattern=None, desc_pattern=None, cs_pattern=None, logger=None):
"""
Initializes the LLM manager with an API key, model name and base_url.
Args:
api_key (str): api key for authentication.
model (str, optional): model abbreviation.
base_url (str, optional): The url to call the API from.
code_pattern (str, optional): The regex pattern to extract code from the response.
name_pattern (str, optional): The regex pattern to extract the class name from the response.
desc_pattern (str, optional): The regex pattern to extract the description from the response.
cs_pattern (str, optional): The regex pattern to extract the configuration space from the response.
logger (Logger, optional): A logger object to log the conversation.
"""
self.base_url = base_url
self.api_key = api_key
self.model = model
self.logger = logger
        self.log = self.logger is not None
        self.code_pattern = code_pattern if code_pattern is not None else '```(?:python)?\\n(.*?)\\n```'
        self.name_pattern = name_pattern if name_pattern is not None else 'class\\s*(\\w*)(?:\\(\\w*\\))?\\:'
        self.desc_pattern = desc_pattern if desc_pattern is not None else '#\\s*Description\\s*:\\s*(.*)'
        self.cs_pattern = cs_pattern if cs_pattern is not None else 'space\\s*:\\s*\\n*```\\n*(?:python)?\\n(.*?)\\n```'
logging.getLogger('tokencost').setLevel(logging.ERROR)
@abstractmethod
def _query(self, session: list, **kwargs):
"""
Sends a conversation history to the configured model and returns the response text.
Args:
session (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
Returns:
str: The text content of the LLM's response.
"""
pass
    def query(self, session: list, **kwargs):
        """
        Sends a conversation history to the configured model and returns the response text.
        Args:
            session (list of dict): A list of message dictionaries with keys
                "role" (e.g. "user", "assistant") and "content" (the message text).
            **kwargs: Additional settings forwarded to the underlying _query call.
        Returns:
            str: The text content of the LLM's response.
        """
if self.log:
try:
cost = calculate_prompt_cost(session, self.model)
except Exception:
cost = 0
try:
tokens = count_message_tokens(session, model=self.model)
except Exception:
tokens = 0
self.logger.log_conversation('client', '\n'.join([d['content'] for d in session]), cost, tokens)
        message = self._query(session, **kwargs)
if self.log:
try:
cost = calculate_completion_cost(message, self.model)
except Exception:
cost = 0
try:
tokens = count_string_tokens(prompt=message, model=self.model)
except Exception:
tokens = 0
self.logger.log_conversation(self.model, message, cost, tokens)
return message
def set_logger(self, logger):
"""
Sets the logger object to log the conversation.
Args:
logger (Logger): A logger object to log the conversation.
"""
self.logger = logger
self.log = True
def sample_solution(self, session_messages: list, parent_ids=[], HPO=False, base_code: str | None=None, diff_mode: bool=False, **kwargs):
"""
Interacts with a language model to generate or mutate solutions based on the provided session messages.
Args:
session_messages (list): A list of dictionaries with keys 'role' and 'content' to simulate a conversation with the language model.
parent_ids (list, optional): The id of the parent the next sample will be generated from (if any).
HPO (boolean, optional): If HPO is enabled, a configuration space will also be extracted (if possible).
            base_code and diff_mode currently only exist to support the latest LLaMEA; they are not implemented yet.
            **kwargs: Additional LLM settings that can be used at query time.
        Returns:
            Solution: A new Solution instance containing the generated code, its class name, its description and an optional configuration space object.
Raises:
NoCodeException: If the language model fails to return any code.
Exception: Captures and logs any other exceptions that occur during the interaction.
"""
message = self.query(session_messages, **kwargs)
code = self.extract_algorithm_code(message)
name = self.extract_classname(code)
desc = self.extract_algorithm_description(message)
cs = None
if HPO:
cs = self.extract_configspace(message)
new_individual = Solution(name=name, description=desc, configspace=cs, code=code, parent_ids=parent_ids)
return new_individual
def extract_classname(self, code):
"""Extract the Python class name from a given code string (if possible).
Args:
code (string): The code string to extract from.
Returns:
classname (string): The Python class name or empty string.
"""
try:
return re.findall('class\\s*(\\w*)(?:\\(\\w*\\))?\\:', code, re.IGNORECASE)[0]
except Exception as e:
return ''
def extract_configspace(self, message):
"""
Extracts the configuration space definition in json from a given message string using regular expressions.
Args:
message (str): The message string containing the algorithm code.
Returns:
ConfigSpace: Extracted configuration space object.
"""
        try:
            from ConfigSpace import ConfigurationSpace
        except ImportError:
            raise Exception('Please install the ConfigSpace package first.')
        pattern = 'space\\s*:\\s*\\n*```\\n*(?:python)?\\n(.*?)\\n```'
        c = None
        for m in re.finditer(pattern, message, re.DOTALL | re.IGNORECASE):
            try:
                c = ConfigurationSpace(eval(m.group(1)))
except Exception:
pass
return c
def extract_algorithm_code(self, message):
"""
Extracts algorithm code from a given message string using regular expressions.
Args:
message (str): The message string containing the algorithm code.
Returns:
str: Extracted algorithm code.
Raises:
NoCodeException: If no code block is found within the message.
"""
pattern = '```(?:python)?\\n(.*?)\\n```'
match = re.search(pattern, message, re.DOTALL | re.IGNORECASE)
if match:
return match.group(1)
else:
return 'raise Exception("Could not extract generated code. The code should be encapsulated with ``` in your response.")'
def extract_algorithm_description(self, message):
"""
Extracts algorithm description from a given message string using regular expressions.
Args:
message (str): The message string containing the algorithm name and code.
Returns:
str: Extracted algorithm name or empty string.
"""
pattern = '#\\s*Description\\s*:\\s*(.*)'
match = re.search(pattern, message, re.IGNORECASE)
if match:
return match.group(1)
else:
return ''
def to_dict(self):
"""
Returns a dictionary representation of the LLM including all parameters.
Returns:
dict: Dictionary representation of the LLM.
"""
return {'model': self.model, 'code_pattern': self.code_pattern, 'name_pattern': self.name_pattern, 'desc_pattern': self.desc_pattern, 'cs_pattern': self.cs_pattern}
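Subclassing sketch: only _query has to be implemented; query(), sample_solution() and the extract_* helpers then work unchanged. EchoLLM is a hypothetical offline subclass used purely to illustrate the contract.
from iohblade.llm import LLM

class EchoLLM(LLM):
    """Hypothetical offline subclass illustrating the abstract interface."""

    def __init__(self, **kwargs):
        super().__init__(api_key='', model='echo', base_url=None, **kwargs)

    def _query(self, session, **kwargs):
        # Return a fixed, well-formed answer so the regex extractors succeed.
        return (
            "# Description: Echo of the last user message.\n"
            "```python\nclass EchoAlgorithm:\n    pass\n```"
        )

llm = EchoLLM()
solution = llm.sample_solution([{'role': 'user', 'content': 'anything'}])
print(solution.name)  # 'EchoAlgorithm'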
|
class LLM(ABC):
def __init__(self, api_key, model='', base_url='', code_pattern=None, name_pattern=None, desc_pattern=None, cs_pattern=None, logger=None):
'''
Initializes the LLM manager with an API key, model name and base_url.
Args:
api_key (str): api key for authentication.
model (str, optional): model abbreviation.
base_url (str, optional): The url to call the API from.
code_pattern (str, optional): The regex pattern to extract code from the response.
name_pattern (str, optional): The regex pattern to extract the class name from the response.
desc_pattern (str, optional): The regex pattern to extract the description from the response.
cs_pattern (str, optional): The regex pattern to extract the configuration space from the response.
logger (Logger, optional): A logger object to log the conversation.
'''
pass
@abstractmethod
def _query(self, session: list, **kwargs):
'''
Sends a conversation history to the configured model and returns the response text.
Args:
session (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
Returns:
str: The text content of the LLM's response.
'''
pass
    def query(self, session: list, **kwargs):
        '''
        Sends a conversation history to the configured model and returns the response text.
        Args:
            session (list of dict): A list of message dictionaries with keys
                "role" (e.g. "user", "assistant") and "content" (the message text).
            **kwargs: Additional settings forwarded to the underlying _query call.
        Returns:
            str: The text content of the LLM's response.
        '''
pass
def set_logger(self, logger):
'''
Sets the logger object to log the conversation.
Args:
logger (Logger): A logger object to log the conversation.
'''
pass
def sample_solution(self, session_messages: list, parent_ids=[], HPO=False, base_code: str | None=None, diff_mode: bool=False, **kwargs):
'''
Interacts with a language model to generate or mutate solutions based on the provided session messages.
Args:
session_messages (list): A list of dictionaries with keys 'role' and 'content' to simulate a conversation with the language model.
parent_ids (list, optional): The id of the parent the next sample will be generated from (if any).
HPO (boolean, optional): If HPO is enabled, a configuration space will also be extracted (if possible).
            base_code and diff_mode currently only exist to support the latest LLaMEA; they are not implemented yet.
            **kwargs: Additional LLM settings that can be used at query time.
        Returns:
            Solution: A new Solution instance containing the generated code, its class name, its description and an optional configuration space object.
Raises:
NoCodeException: If the language model fails to return any code.
Exception: Captures and logs any other exceptions that occur during the interaction.
'''
pass
def extract_classname(self, code):
'''Extract the Python class name from a given code string (if possible).
Args:
code (string): The code string to extract from.
Returns:
classname (string): The Python class name or empty string.
'''
pass
def extract_configspace(self, message):
'''
Extracts the configuration space definition in json from a given message string using regular expressions.
Args:
message (str): The message string containing the algorithm code.
Returns:
ConfigSpace: Extracted configuration space object.
'''
pass
def extract_algorithm_code(self, message):
'''
Extracts algorithm code from a given message string using regular expressions.
Args:
message (str): The message string containing the algorithm code.
Returns:
str: Extracted algorithm code.
Raises:
NoCodeException: If no code block is found within the message.
'''
pass
def extract_algorithm_description(self, message):
'''
Extracts algorithm description from a given message string using regular expressions.
Args:
message (str): The message string containing the algorithm name and code.
Returns:
str: Extracted algorithm name or empty string.
'''
pass
def to_dict(self):
'''
Returns a dictionary representation of the LLM including all parameters.
Returns:
dict: Dictionary representation of the LLM.
'''
pass
| 12
| 10
| 23
| 3
| 13
| 8
| 3
| 0.65
| 1
| 3
| 1
| 13
| 10
| 9
| 10
| 30
| 241
| 34
| 127
| 49
| 104
| 83
| 81
| 37
| 69
| 7
| 4
| 2
| 27
|
328,119
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/llm.py
|
iohblade.llm.Ollama_LLM
|
import time
import ollama
class Ollama_LLM(LLM):
def __init__(self, model='llama3.2', **kwargs):
"""
Initializes the Ollama LLM manager with a model name. See https://ollama.com/search for models.
Args:
model (str, optional): model abbreviation. Defaults to "llama3.2".
See for options: https://ollama.com/search.
"""
super().__init__('', model, None, **kwargs)
def _query(self, session_messages, max_retries: int=5, default_delay: int=10, **kwargs):
"""
Sends a conversation history to the configured model and returns the response text.
Args:
session_messages (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
            **kwargs: Can be used to pass additional `chat` parameters with the query.
                They are forwarded under the `options` parameter as
                documented in https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
Returns:
str: The text content of the LLM's response.
"""
big_message = ''
for msg in session_messages:
big_message += msg['content'] + '\n'
attempt = 0
while True:
try:
response = ollama.chat(model=self.model, messages=[{'role': 'user', 'content': big_message}], options=kwargs)
return response['message']['content']
except ollama.ResponseError as err:
attempt += 1
if attempt > max_retries or err.status_code not in (429, 500, 503):
raise
time.sleep(default_delay * attempt)
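Usage sketch, assuming a local Ollama server with the llama3.2 model pulled; extra keyword arguments are forwarded as Ollama options.
from iohblade.llm import Ollama_LLM

llm = Ollama_LLM(model='llama3.2')
messages = [{'role': 'user', 'content': 'Suggest a crossover operator for real-valued vectors.'}]
reply = llm._query(messages, temperature=0.2, num_predict=256)  # forwarded via options=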
|
class Ollama_LLM(LLM):
def __init__(self, model='llama3.2', **kwargs):
'''
Initializes the Ollama LLM manager with a model name. See https://ollama.com/search for models.
Args:
model (str, optional): model abbreviation. Defaults to "llama3.2".
See for options: https://ollama.com/search.
'''
pass
def _query(self, session_messages, max_retries: int=5, default_delay: int=10, **kwargs):
'''
Sends a conversation history to the configured model and returns the response text.
Args:
session_messages (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
            **kwargs: Can be used to pass additional `chat` parameters with the query.
                They are forwarded under the `options` parameter as
                documented in https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
Returns:
str: The text content of the LLM's response.
'''
pass
| 3
| 2
| 20
| 3
| 10
| 8
| 3
| 0.75
| 1
| 2
| 0
| 0
| 2
| 1
| 2
| 32
| 41
| 6
| 20
| 9
| 17
| 15
| 17
| 7
| 14
| 5
| 5
| 3
| 6
|
328,120
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/llm.py
|
iohblade.llm.OpenAI_LLM
|
import time
import openai
import copy
import logging
class OpenAI_LLM(LLM):
"""
A manager class for handling requests to OpenAI's GPT models.
"""
def __init__(self, api_key, model='gpt-4-turbo', temperature=0.8, **kwargs):
"""
Initializes the LLM manager with an API key and model name.
Args:
api_key (str): api key for authentication.
model (str, optional): model abbreviation. Defaults to "gpt-4-turbo".
                Options are: gpt-3.5-turbo, gpt-4-turbo, gpt-4o, and others from OpenAI's model library.
"""
super().__init__(api_key, model, None, **kwargs)
self._client_kwargs = dict(api_key=api_key)
self.client = openai.OpenAI(**self._client_kwargs)
logging.getLogger('openai').setLevel(logging.ERROR)
logging.getLogger('httpx').setLevel(logging.ERROR)
self.temperature = temperature
def _query(self, session_messages, max_retries: int=5, default_delay: int=10, **kwargs):
"""
Sends a conversation history to the configured model and returns the response text.
Args:
session_messages (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
            **kwargs: Additional parameters that are available in openai's
                client.chat.completions.create can be passed through here as
                extra keyword arguments.
Returns:
str: The text content of the LLM's response.
"""
attempt = 0
while True:
try:
temperature = self.temperature
if 'temperature' in kwargs:
temperature = kwargs['temperature']
kwargs.pop('temperature')
response = self.client.chat.completions.create(model=self.model, messages=session_messages, temperature=temperature, **kwargs)
return response.choices[0].message.content
except openai.RateLimitError as err:
attempt += 1
if attempt > max_retries:
raise
retry_after = None
if getattr(err, 'response', None) is not None:
retry_after = err.response.headers.get('Retry-After')
wait = int(retry_after) if retry_after else default_delay * attempt
time.sleep(wait)
except (openai.APITimeoutError, openai.APIConnectionError, openai.APIError) as err:
attempt += 1
if attempt > max_retries:
raise
time.sleep(default_delay * attempt)
def __getstate__(self):
"""Return the picklable part of the instance."""
state = self.__dict__.copy()
state.pop('client', None)
return state
def __setstate__(self, state):
"""Restore from a pickled state."""
self.__dict__.update(state)
self.client = openai.OpenAI(**self._client_kwargs)
def __deepcopy__(self, memo):
"""Explicit deepcopy that skips the client and recreates it."""
cls = self.__class__
new = cls.__new__(cls)
memo[id(self)] = new
for k, v in self.__dict__.items():
if k == 'client':
continue
setattr(new, k, copy.deepcopy(v, memo))
new.client = openai.OpenAI(**new._client_kwargs)
return new
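Usage sketch (hypothetical key, remote calls commented out): the default temperature from __init__ is used unless one is passed per call.
from iohblade.llm import OpenAI_LLM

llm = OpenAI_LLM(api_key='sk-placeholder', model='gpt-4o')  # hypothetical key
messages = [{'role': 'user', 'content': 'Name one metaheuristic and its key parameter.'}]
# reply = llm.query(messages)                         # uses the default temperature (0.8)
# reply_cold = llm._query(messages, temperature=0.0)  # per-call override popped from kwargs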
|
class OpenAI_LLM(LLM):
'''
A manager class for handling requests to OpenAI's GPT models.
'''
def __init__(self, api_key, model='gpt-4-turbo', temperature=0.8, **kwargs):
'''
Initializes the LLM manager with an API key and model name.
Args:
api_key (str): api key for authentication.
model (str, optional): model abbreviation. Defaults to "gpt-4-turbo".
            Options are: gpt-3.5-turbo, gpt-4-turbo, gpt-4o, and others from OpenAI's model library.
'''
pass
def _query(self, session_messages, max_retries: int=5, default_delay: int=10, **kwargs):
'''
Sends a conversation history to the configured model and returns the response text.
Args:
session_messages (list of dict): A list of message dictionaries with keys
"role" (e.g. "user", "assistant") and "content" (the message text).
            **kwargs: Additional parameters that are available in openai's
                client.chat.completions.create can be passed through here as
                extra keyword arguments.
Returns:
str: The text content of the LLM's response.
'''
pass
def __getstate__(self):
'''Return the picklable part of the instance.'''
pass
def __setstate__(self, state):
'''Restore from a pickled state.'''
pass
def __deepcopy__(self, memo):
'''Explicit deepcopy that skips the client and recreates it.'''
pass
| 6
| 6
| 16
| 2
| 11
| 5
| 3
| 0.49
| 1
| 8
| 0
| 1
| 5
| 4
| 5
| 35
| 91
| 13
| 55
| 19
| 49
| 27
| 45
| 17
| 39
| 8
| 5
| 3
| 14
|
328,121
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/loggers/base.py
|
iohblade.loggers.base.ExperimentLogger
|
from ..solution import Solution
from ..utils import convert_to_serializable
import json
import jsonlines
from ..problem import Problem
import os
from ..method import Method
from ConfigSpace.read_and_write import json as cs_json
from threading import Lock
from ..llm import LLM
from datetime import datetime
import pandas as pd
from functools import partial
class ExperimentLogger:
"""
Logs an entire experiment of multiple runs.
"""
def __init__(self, name='', read=False):
"""Create or load an experiment logging directory.
If ``read`` is ``True`` the logger is opened in read only mode.
Otherwise a new directory is created unless ``name`` already exists, in
which case the existing directory is used so runs can be restarted.
Parameters
----------
name: str
Path to the experiment directory.
read: bool
If ``True`` open the directory for reading only.
"""
self.dirs = []
self._lock = Lock()
if read:
self.dirs.append(name)
self.dirname = name
self._load_progress()
return
if os.path.exists(name):
self.dirname = name
self.dirs.append(self.dirname)
self._load_progress()
if not hasattr(self, 'progress') or not self.progress:
self.progress = {'start_time': datetime.now().isoformat(), 'end_time': None, 'current': 0, 'total': 0}
self._write_progress()
else:
self.dirname = self.create_log_dir(name)
self.dirs.append(self.dirname)
self.progress = {'start_time': datetime.now().isoformat(), 'end_time': None, 'current': 0, 'total': 0}
self._write_progress()
def add_read_dir(self, dir_path: str):
"""
Register another finished experiment so that *this* logger will read it
when you call get_data(), get_problem_data(), etc.
"""
if not os.path.isdir(dir_path):
raise ValueError(f'{dir_path} is not a directory')
if not os.path.isfile(os.path.join(dir_path, 'experimentlog.jsonl')):
raise ValueError('No experimentlog.jsonl found in the given directory')
if dir_path not in self.dirs:
self.dirs.append(dir_path)
def create_log_dir(self, name=''):
"""Create a unique directory for a new experiment."""
dirname = f'{name}'
tempi = 0
while os.path.exists(dirname):
tempi += 1
dirname = f'{name}-{tempi}'
os.mkdir(dirname)
return dirname
def _before_open_run(self, run_name, method, problem, budget, seed):
"""Hook executed before a run is opened."""
return None
def _create_run_logger(self, run_name, budget, progress_cb):
"""Create and return the run logger for a run."""
from .base import RunLogger
return RunLogger(name=run_name, root_dir=self.dirname, budget=budget, progress_callback=progress_cb)
def open_run(self, method, problem, budget=100, seed=0):
"""
Opens (starts) a new run for logging.
Typically call this right before your run, so that the RunLogger can log step data.
"""
run_name = f'{method.name}-{problem.name}-{seed}'
self._before_open_run(run_name, method, problem, budget, seed)
progress_cb = partial(self.increment_eval, method.name, problem.name, seed)
self.run_logger = self._create_run_logger(run_name, budget, progress_cb)
problem.set_logger(self.run_logger)
with self._lock:
entry = self._get_run_entry(method.name, problem.name, seed)
if entry is None:
entry = {'method_name': method.name, 'problem_name': problem.name, 'seed': seed, 'budget': int(budget), 'evaluations': 0, 'start_time': None, 'end_time': None, 'log_dir': None}
self.progress.setdefault('runs', []).append(entry)
if entry.get('start_time') and (not entry.get('end_time')) and entry.get('log_dir'):
prev = os.path.join(self.dirname, entry['log_dir'])
if os.path.exists(prev):
import shutil
shutil.rmtree(prev)
entry['evaluations'] = 0
entry['start_time'] = datetime.now().isoformat()
entry['log_dir'] = os.path.relpath(self.run_logger.dirname, self.dirname)
entry['end_time'] = None
self._write_progress()
return self.run_logger
def add_run(self, method: Method, problem: Problem, llm: LLM, solution: Solution, log_dir='', seed=None):
"""
Adds a run to the experiment log.
Args:
method (Method): The method used in the run.
problem (Problem): The problem used in the run.
llm (LLM): The llm used in the run.
solution (Solution): The solution found in the run.
log_dir (str): The directory where the run is logged.
seed (int): The seed used in the run.
"""
rel_log_dir = os.path.relpath(log_dir, self.dirname)
run_object = {'method_name': method.name, 'problem_name': problem.name, 'llm_name': llm.model, 'method': method.to_dict(), 'problem': problem.to_dict(), 'llm': llm.to_dict(), 'solution': solution.to_dict(), 'log_dir': rel_log_dir, 'seed': seed}
with jsonlines.open(f'{self.dirname}/experimentlog.jsonl', 'a') as file:
file.write(convert_to_serializable(run_object))
with self._lock:
entry = self._get_run_entry(method.name, problem.name, seed)
if entry is not None:
entry['end_time'] = datetime.now().isoformat()
entry['log_dir'] = rel_log_dir
self._write_progress()
self.increment_progress()
def get_data(self):
"""
Retrieves the data from the experiment log and returns a pandas dataframe.
Returns:
dataframe: Pandas DataFrame of the experimentlog.
"""
frames = []
for d in self.dirs:
path = os.path.join(d, 'experimentlog.jsonl')
if os.path.exists(path):
frames.append(pd.read_json(path, lines=True))
return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
def get_problem_data(self, problem_name):
"""
Retrieves the data for a specific method and problem from the experiment log.
Args:
problem_name (str): The name of the problem.
Returns:
list: List of run data for the specified method and problem.
"""
bigdf = pd.DataFrame()
for d in self.dirs:
exp_log = os.path.join(d, 'experimentlog.jsonl')
if not os.path.exists(exp_log):
continue
with jsonlines.open(exp_log) as file:
for line in file:
if line['problem_name'] != problem_name:
continue
logdir = os.path.join(d, line['log_dir'])
run_log = os.path.join(logdir, 'log.jsonl')
if os.path.exists(run_log):
df = pd.read_json(run_log, lines=True)
df['method_name'] = line['method_name']
df['problem_name'] = line['problem_name']
df['seed'] = line['seed']
df['_id'] = df.index
bigdf = pd.concat([bigdf, df], ignore_index=True)
return bigdf
def get_methods_problems(self):
"""
Retrieves the list of methods and problems used in the experiment.
Returns:
tuple: Tuple of lists containing the method and problem names.
"""
methods, problems = (set(), set())
for d in self.dirs:
exp_log = os.path.join(d, 'experimentlog.jsonl')
if not os.path.exists(exp_log):
continue
with jsonlines.open(exp_log) as file:
for line in file:
methods.add(line['method_name'])
problems.add(line['problem_name'])
return (list(methods), list(problems))
def _get_run_entry(self, method_name, problem_name, seed):
"""Return the run progress entry matching the identifiers."""
for r in self.progress.get('runs', []):
if r.get('method_name') == method_name and r.get('problem_name') == problem_name and (r.get('seed') == seed):
return r
return None
def _progress_path(self):
return os.path.join(self.dirname, 'progress.json')
def _write_progress(self):
with open(self._progress_path(), 'w') as f:
json.dump(self.progress, f)
def _load_progress(self):
path = self._progress_path()
if os.path.exists(path):
with open(path) as f:
self.progress = json.load(f)
else:
self.progress = {}
def start_progress(self, total_runs: int, methods=None, problems=None, seeds=None, budget=None):
"""Initialize progress tracking with experiment configuration."""
with self._lock:
if os.path.exists(self._progress_path()):
self._load_progress()
existing = {(r['method_name'], r['problem_name'], r['seed']) for r in self.progress.get('runs', [])}
expected = {(m.name, p.name, int(s)) for m in methods for p in problems for s in seeds}
if existing and existing != expected:
raise ValueError('Existing progress does not match experiment setup')
            self.progress = {'start_time': datetime.now().isoformat(), 'end_time': None, 'current': 0, 'total': int(total_runs), 'runs': []}
            for m in methods:
                for p in problems:
                    for s in seeds:
                        self.progress['runs'].append({'method_name': m.name, 'problem_name': p.name, 'seed': int(s), 'budget': int(budget), 'evaluations': 0, 'start_time': None, 'end_time': None, 'log_dir': None})
self._write_progress()
def increment_progress(self):
"""Recalculate and write progress based on run entries."""
with self._lock:
finished = sum((1 for r in self.progress.get('runs', []) if r.get('end_time')))
self.progress['current'] = finished
total = self.progress.get('total', 0)
if total and finished >= total and (self.progress.get('end_time') is None):
self.progress['end_time'] = datetime.now().isoformat()
self._write_progress()
def increment_eval(self, method_name, problem_name, seed):
with self._lock:
entry = self._get_run_entry(method_name, problem_name, seed)
if entry is not None:
entry['evaluations'] = entry.get('evaluations', 0) + 1
self._write_progress()
def is_run_pending(self, method, problem, seed):
entry = self._get_run_entry(method.name, problem.name, seed)
if entry is None:
return True
return entry.get('end_time') is None
def __getstate__(self):
state = self.__dict__.copy()
state['_lock'] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._lock = Lock()
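Lifecycle sketch: _StubProblem and _StubMethod are hypothetical stand-ins that duck-type the Problem and Method interfaces, so the start_progress / open_run / add_run sequence can be followed end to end with the offline Dummy_LLM.
from iohblade.llm import Dummy_LLM
from iohblade.loggers.base import ExperimentLogger

class _StubProblem:
    name = 'stub-problem'
    def set_logger(self, logger):
        self.logger = logger
    def to_dict(self):
        return {'problem_name': self.name}

class _StubMethod:
    name = 'stub-method'
    def __init__(self, llm, budget):
        self.llm, self.budget = llm, budget
    def __call__(self, problem):
        # One LLM call; Dummy_LLM returns its canned RandomSearch solution.
        return self.llm.sample_solution([{'role': 'user', 'content': 'generate an optimizer'}])
    def to_dict(self):
        return {'method_name': self.name, 'budget': self.budget}

llm, problem = Dummy_LLM(), _StubProblem()
method = _StubMethod(llm, budget=1)
logger = ExperimentLogger('results/demo-experiment')
logger.start_progress(total_runs=1, methods=[method], problems=[problem], seeds=[0], budget=1)
run_logger = logger.open_run(method, problem, budget=1, seed=0)
best = method(problem)
run_logger.log_individual(best)
logger.add_run(method, problem, llm, best, log_dir=run_logger.get_log_dir(), seed=0)
print(logger.get_data())  # one row per finished run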
|
class ExperimentLogger:
'''
Logs an entire experiment of multiple runs.
'''
def __init__(self, name='', read=False):
'''Create or load an experiment logging directory.
If ``read`` is ``True`` the logger is opened in read only mode.
Otherwise a new directory is created unless ``name`` already exists, in
which case the existing directory is used so runs can be restarted.
Parameters
----------
name: str
Path to the experiment directory.
read: bool
If ``True`` open the directory for reading only.
'''
pass
def add_read_dir(self, dir_path: str):
'''
Register another finished experiment so that *this* logger will read it
when you call get_data(), get_problem_data(), etc.
'''
pass
def create_log_dir(self, name=''):
'''Create a unique directory for a new experiment.'''
pass
def _before_open_run(self, run_name, method, problem, budget, seed):
'''Hook executed before a run is opened.'''
pass
def _create_run_logger(self, run_name, budget, progress_cb):
'''Create and return the run logger for a run.'''
pass
def open_run(self, method, problem, budget=100, seed=0):
'''
Opens (starts) a new run for logging.
Typically call this right before your run, so that the RunLogger can log step data.
'''
pass
def add_run(self, method: Method, problem: Problem, llm: LLM, solution: Solution, log_dir='', seed=None):
'''
Adds a run to the experiment log.
Args:
method (Method): The method used in the run.
problem (Problem): The problem used in the run.
llm (LLM): The llm used in the run.
solution (Solution): The solution found in the run.
log_dir (str): The directory where the run is logged.
seed (int): The seed used in the run.
'''
pass
def get_data(self):
'''
Retrieves the data from the experiment log and returns a pandas dataframe.
Returns:
dataframe: Pandas DataFrame of the experimentlog.
'''
pass
def get_problem_data(self, problem_name):
'''
Retrieves the data for a specific method and problem from the experiment log.
Args:
problem_name (str): The name of the problem.
Returns:
list: List of run data for the specified method and problem.
'''
pass
def get_methods_problems(self):
'''
Retrieves the list of methods and problems used in the experiment.
Returns:
tuple: Tuple of lists containing the method and problem names.
'''
pass
def _get_run_entry(self, method_name, problem_name, seed):
'''Return the run progress entry matching the identifiers.'''
pass
def _progress_path(self):
pass
def _write_progress(self):
pass
def _load_progress(self):
pass
def start_progress(self, total_runs: int, methods=None, problems=None, seeds=None, budget=None):
'''Initialize progress tracking with experiment configuration.'''
pass
def increment_progress(self):
'''Recalculate and write progress based on run entries.'''
pass
def increment_eval(self, method_name, problem_name, seed):
pass
def is_run_pending(self, method, problem, seed):
pass
def __getstate__(self):
pass
def __setstate__(self, state):
pass
| 21
| 14
| 19
| 1
| 15
| 3
| 3
| 0.23
| 0
| 12
| 5
| 1
| 18
| 5
| 18
| 18
| 364
| 33
| 271
| 73
| 241
| 61
| 168
| 57
| 148
| 10
| 0
| 6
| 55
|
328,122
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/loggers/base.py
|
iohblade.loggers.base.RunLogger
|
import os
import jsonlines
from ConfigSpace.read_and_write import json as cs_json
from ..utils import convert_to_serializable
from datetime import datetime
class RunLogger:
"""
Logs an LLM-driven optimization run.
"""
def __init__(self, name='', root_dir='', budget=100, progress_callback=None):
"""
Initializes an instance of the RunLogger.
        Sets up a new logging directory named after the run inside the given root directory.
Args:
name (str): The name of the experiment.
root_dir (str): The directory to create the log folder in.
budget (int): The evaluation budget (how many algorithms can be generated and evaluated per run).
"""
self.dirname = self.create_log_dir(name, root_dir)
self.attempt = 0
self.budget = budget
self._progress_callback = progress_callback
def get_log_dir(self):
"""
Returns the directory where the run is logged.
"""
return self.dirname
def create_log_dir(self, name='', root_dir=''):
"""
        Creates a new directory for logging the run based on the run name,
        appending a numeric suffix if the directory already exists.
Args:
name (str): The name of the run.
root_dir (str): The directory to create the log folder in.
Returns:
str: The name of the created directory.
"""
model_name = name.split('/')[-1]
dirname = f'run-{name}'
dirname = os.path.join(root_dir, dirname)
if not os.path.exists(root_dir):
os.mkdir(root_dir)
tempi = 0
while os.path.exists(dirname):
tempi += 1
dirname = f'run-{name}-{tempi}'
dirname = os.path.join(root_dir, dirname)
os.mkdir(dirname)
return dirname
def budget_exhausted(self):
"""
Get the number of lines in the log file and return True if the number of lines matches or exceeded the budget.
"""
count = 0
if not os.path.isfile(f'{self.dirname}/log.jsonl'):
return False
with open(f'{self.dirname}/log.jsonl', 'r') as f:
for _ in f:
count += 1
return count >= self.budget
def log_conversation(self, role, content, cost=0.0, tokens=0):
"""
Logs the given conversation content into a conversation log file.
Args:
role (str): Who (the llm or user) said the content.
content (str): The conversation content to be logged.
cost (float, optional): The cost of the conversation.
tokens (int, optional): Number of tokens used.
"""
conversation_object = {'role': role, 'time': f'{datetime.now()}', 'content': content, 'cost': float(cost), 'tokens': int(tokens)}
with jsonlines.open(f'{self.dirname}/conversationlog.jsonl', 'a') as file:
file.write(conversation_object)
def log_population(self, population):
"""
Logs the given population to code, configspace and the general log file.
Args:
population (list): List of individual solutions
"""
for p in population:
self.log_individual(p)
def log_individual(self, individual):
"""
Logs the given individual in a general logfile.
Args:
individual (Solution): potential solution to be logged.
"""
ind_dict = individual.to_dict()
with jsonlines.open(f'{self.dirname}/log.jsonl', 'a') as file:
file.write(convert_to_serializable(ind_dict))
if self._progress_callback:
self._progress_callback()
def log_code(self, individual):
"""
Logs the provided code into a file, uniquely named based on the attempt number and algorithm name.
Args:
individual (Solution): potential solution to be logged.
"""
if not os.path.exists(f'{self.dirname}/code'):
os.makedirs(f'{self.dirname}/code')
with open(f'{self.dirname}/code/{individual.id}-{individual.name}.py', 'w') as file:
file.write(individual.code)
def log_configspace(self, individual):
"""
Logs the provided configuration space (str) into a file, uniquely named based on the attempt number and algorithm name.
Args:
individual (Solution): potential solution to be logged.
"""
with open(f'{self.dirname}/configspace/{individual.id}-{individual.name}.py', 'w') as file:
if individual.configspace != None:
file.write(cs_json.write(individual.configspace))
else:
file.write('Failed to extract config space')
self.attempt += 1
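Direct-use sketch (normally the logger is created via ExperimentLogger.open_run and attached to the problem); Dummy_LLM supplies a Solution-like object to log.
from iohblade.llm import Dummy_LLM
from iohblade.loggers.base import RunLogger

run_logger = RunLogger(name='demo', root_dir='results', budget=5)
solution = Dummy_LLM().sample_solution([{'role': 'user', 'content': 'generate an optimizer'}])
run_logger.log_conversation('client', 'prompt text', cost=0.0, tokens=12)
run_logger.log_individual(solution)   # appends one JSON line to log.jsonl
run_logger.log_code(solution)         # writes code/<id>-<name>.py
print(run_logger.budget_exhausted())  # False until 5 individuals are logged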
|
class RunLogger:
'''
Logs an LLM-driven optimization run.
'''
def __init__(self, name='', root_dir='', budget=100, progress_callback=None):
'''
Initializes an instance of the RunLogger.
        Sets up a new logging directory named after the run inside the given root directory.
Args:
name (str): The name of the experiment.
root_dir (str): The directory to create the log folder in.
budget (int): The evaluation budget (how many algorithms can be generated and evaluated per run).
'''
pass
def get_log_dir(self):
'''
Returns the directory where the run is logged.
'''
pass
def create_log_dir(self, name='', root_dir=''):
'''
        Creates a new directory for logging the run based on the run name,
        appending a numeric suffix if the directory already exists.
Args:
name (str): The name of the run.
root_dir (str): The directory to create the log folder in.
Returns:
str: The name of the created directory.
'''
pass
def budget_exhausted(self):
'''
Get the number of lines in the log file and return True if the number of lines matches or exceeded the budget.
'''
pass
def log_conversation(self, role, content, cost=0.0, tokens=0):
'''
Logs the given conversation content into a conversation log file.
Args:
role (str): Who (the llm or user) said the content.
content (str): The conversation content to be logged.
cost (float, optional): The cost of the conversation.
tokens (int, optional): Number of tokens used.
'''
pass
def log_population(self, population):
'''
Logs the given population to code, configspace and the general log file.
Args:
population (list): List of individual solutions
'''
pass
def log_individual(self, individual):
'''
Logs the given individual in a general logfile.
Args:
individual (Solution): potential solution to be logged.
'''
pass
def log_code(self, individual):
'''
Logs the provided code into a file, uniquely named based on the attempt number and algorithm name.
Args:
individual (Solution): potential solution to be logged.
'''
pass
def log_configspace(self, individual):
'''
Logs the provided configuration space (str) into a file, uniquely named based on the attempt number and algorithm name.
Args:
individual (Solution): potential solution to be logged.
'''
pass
| 10
| 10
| 14
| 1
| 7
| 6
| 2
| 0.88
| 0
| 3
| 0
| 1
| 9
| 4
| 9
| 9
| 138
| 19
| 64
| 27
| 54
| 56
| 53
| 22
| 43
| 3
| 0
| 2
| 17
|
328,123
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/loggers/mlflow.py
|
iohblade.loggers.mlflow.MLFlowExperimentLogger
|
from .base import ExperimentLogger, RunLogger
import os
from ..llm import LLM
from ..solution import Solution
from ..utils import convert_to_serializable
import json
from ..method import Method
from ..problem import Problem
from ConfigSpace.read_and_write import json as cs_json
try:
    import mlflow
    _import_error = None
except ImportError as err:  # mlflow is an optional dependency; the constructor raises a clearer error
    mlflow = None
    _import_error = err
class MLFlowExperimentLogger(ExperimentLogger):
"""
An ExperimentLogger subclass that keeps the original file-based logging,
and also logs runs to MLflow. The idea is:
- Call open_run() at the start of a run (this calls mlflow.start_run()).
- Run your optimization, logging step-level data via MLFlowRunLogger.
- Call add_run() at the end, which logs final info and ends the MLflow run.
"""
def __init__(self, name='', read=False, mlflow_tracking_uri=None):
"""
Args:
name (str): The name of the experiment (used as the MLflow experiment name).
read (bool): If True, read the existing log directory for file-based logs only.
mlflow_tracking_uri (str): The MLflow Tracking URI (e.g. 'file:/tmp/mlruns',
or your remote server).
"""
if mlflow is None:
raise ImportError('MLflow is not installed. Install with `pip install mlflow`.') from _import_error
super().__init__(name=name, read=read)
if mlflow_tracking_uri:
mlflow.set_tracking_uri(mlflow_tracking_uri)
try:
self.experiment_id = mlflow.create_experiment(name)
except mlflow.exceptions.MlflowException:
self.experiment_id = mlflow.get_experiment_by_name(name).experiment_id
self._mlflow_run_active = False
self._current_run_id = None
def _before_open_run(self, run_name, method, problem, budget, seed):
run = mlflow.start_run(experiment_id=self.experiment_id, run_name=run_name)
self._mlflow_run_active = True
self._current_run_id = run.info.run_id
def _create_run_logger(self, run_name, budget, progress_cb):
return MLFlowRunLogger(name=run_name, root_dir=self.dirname, budget=budget, progress_callback=progress_cb)
def open_run(self, method, problem, budget=100, seed=0):
return super().open_run(method, problem, budget, seed)
def add_run(self, method: Method, problem: Problem, llm: LLM, solution: Solution, log_dir='', seed=None):
"""
Normally called at the end of a run.
1) Logs final run metadata to MLflow
2) Ends the MLflow run
3) Calls super().add_run(...) so we keep the file-based logs
"""
if not self._mlflow_run_active:
run_name = f'{method.name}_{problem.name}_seed{seed}'
run = mlflow.start_run(experiment_id=self.experiment_id, run_name=run_name)
self._mlflow_run_active = True
self._current_run_id = run.info.run_id
mlflow.log_param('method_name', method.name)
mlflow.log_param('problem_name', problem.name)
mlflow.log_param('llm_name', llm.model)
if seed is not None:
mlflow.log_param('seed', seed)
final_fitness = solution.fitness if solution.fitness is not None else float('nan')
mlflow.log_metric('final_fitness', final_fitness)
rel_log_dir = os.path.relpath(log_dir, self.dirname)
run_object = {'method_name': method.name, 'problem_name': problem.name, 'llm_name': llm.model, 'method': method.to_dict(), 'problem': problem.to_dict(), 'llm': llm.to_dict(), 'solution': solution.to_dict(), 'log_dir': rel_log_dir, 'seed': seed}
mlflow.log_text(json.dumps(convert_to_serializable(run_object), indent=2), artifact_file='final_run_object.json')
mlflow.end_run()
self._mlflow_run_active = False
self._current_run_id = None
super().add_run(method=method, problem=problem, llm=llm, solution=solution, log_dir=log_dir, seed=seed)
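Lifecycle sketch, assuming mlflow is installed and a local file-based tracking store; method, problem and llm are placeholders for objects implementing the interfaces used elsewhere in this file set (see the ExperimentLogger sketch for duck-typed stand-ins).
from iohblade.loggers.mlflow import MLFlowExperimentLogger

logger = MLFlowExperimentLogger('results/demo-mlflow', mlflow_tracking_uri='file:./mlruns')
run_logger = logger.open_run(method, problem, budget=10, seed=0)   # starts the MLflow run
best = method(problem)
logger.add_run(method, problem, llm, best, log_dir=run_logger.get_log_dir(), seed=0)  # logs params/metrics, ends the run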
|
class MLFlowExperimentLogger(ExperimentLogger):
'''
An ExperimentLogger subclass that keeps the original file-based logging,
and also logs runs to MLflow. The idea is:
- Call open_run() at the start of a run (this calls mlflow.start_run()).
- Run your optimization, logging step-level data via MLFlowRunLogger.
- Call add_run() at the end, which logs final info and ends the MLflow run.
'''
def __init__(self, name='', read=False, mlflow_tracking_uri=None):
'''
Args:
name (str): The name of the experiment (used as the MLflow experiment name).
read (bool): If True, read the existing log directory for file-based logs only.
mlflow_tracking_uri (str): The MLflow Tracking URI (e.g. 'file:/tmp/mlruns',
or your remote server).
'''
pass
def _before_open_run(self, run_name, method, problem, budget, seed):
pass
def _create_run_logger(self, run_name, budget, progress_cb):
pass
def open_run(self, method, problem, budget=100, seed=0):
pass
def add_run(self, method: Method, problem: Problem, llm: LLM, solution: Solution, log_dir='', seed=None):
'''
Normally called at the end of a run.
1) Logs final run metadata to MLflow
2) Ends the MLflow run
3) Calls super().add_run(...) so we keep the file-based logs
'''
pass
| 6
| 3
| 38
| 3
| 26
| 10
| 3
| 0.46
| 1
| 7
| 5
| 0
| 3
| 5
| 3
| 21
| 126
| 12
| 79
| 24
| 67
| 36
| 42
| 15
| 38
| 4
| 1
| 1
| 9
|
328,124
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/loggers/mlflow.py
|
iohblade.loggers.mlflow.MLFlowRunLogger
|
from .base import ExperimentLogger, RunLogger
import json
from ..utils import convert_to_serializable
from datetime import datetime
from ConfigSpace.read_and_write import json as cs_json
import mlflow
class MLFlowRunLogger(RunLogger):
"""
A RunLogger subclass that logs data to MLflow *and* to file,
relying on the fact that the MLFlowExperimentLogger has opened a run.
"""
def __init__(self, name='', root_dir='', budget=100, progress_callback=None):
super().__init__(name, root_dir, budget, progress_callback=progress_callback)
def log_conversation(self, role, content, cost=0.0, tokens=0):
"""
Logs conversation details to MLflow, plus calls super() to keep the local file logs if you wish.
"""
conversation = {'role': role, 'time': str(datetime.now()), 'content': content, 'cost': float(cost), 'tokens': int(tokens)}
mlflow.log_text(json.dumps(conversation), artifact_file=f'conversation_{self.attempt}.json')
self.attempt += 1
super().log_conversation(role, content, cost, tokens)
def log_individual(self, individual):
"""
Logs an individual solution to MLflow, then calls super() to keep the normal file logging.
"""
ind_dict = individual.to_dict()
if 'fitness' in ind_dict:
mlflow.log_metric('fitness', ind_dict['fitness'])
mlflow.log_text(json.dumps(convert_to_serializable(ind_dict)), artifact_file=f'solution_{individual.id}.json')
super().log_individual(individual)
def log_code(self, individual):
"""
Log code as a text artifact in MLflow, plus the normal .py file in your local run folder.
"""
mlflow.log_text(individual.code, artifact_file=f'code_{individual.id}.py')
super().log_code(individual)
def log_configspace(self, individual):
"""
If there's a config space, log it to MLflow as well as the local file.
"""
if individual.configspace is not None:
cs_text = cs_json.write(individual.configspace)
else:
cs_text = 'Failed to extract config space'
mlflow.log_text(cs_text, artifact_file=f'configspace_{individual.id}.json')
super().log_configspace(individual)
def budget_exhausted(self):
"""
Optionally still rely on the file-based approach for counting lines in log.jsonl
or store a separate counter. For now, we call super() to preserve the old logic.
"""
return super().budget_exhausted()
|
class MLFlowRunLogger(RunLogger):
'''
A RunLogger subclass that logs data to MLflow *and* to file,
relying on the fact that the MLFlowExperimentLogger has opened a run.
'''
def __init__(self, name='', root_dir='', budget=100, progress_callback=None):
pass
def log_conversation(self, role, content, cost=0.0, tokens=0):
'''
Logs conversation details to MLflow, plus calls super() to keep the local file logs if you wish.
'''
pass
def log_individual(self, individual):
'''
Logs an individual solution to MLflow, then calls super() to keep the normal file logging.
'''
pass
def log_code(self, individual):
'''
Log code as a text artifact in MLflow, plus the normal .py file in your local run folder.
'''
pass
def log_configspace(self, individual):
'''
If there's a config space, log it to MLflow as well as the local file.
'''
pass
def budget_exhausted(self):
'''
Optionally still rely on the file-based approach for counting lines in log.jsonl
or store a separate counter. For now, we call super() to preserve the old logic.
'''
pass
| 7
| 6
| 11
| 1
| 6
| 4
| 1
| 0.76
| 1
| 5
| 0
| 0
| 6
| 0
| 6
| 15
| 76
| 11
| 37
| 10
| 30
| 28
| 25
| 10
| 18
| 2
| 1
| 1
| 8
|
328,125
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/method.py
|
iohblade.method.Method
|
from .problem import Problem
from abc import ABC, abstractmethod
from .llm import LLM
class Method(ABC):
def __init__(self, llm: LLM, budget, name='Method'):
"""
Initializes a method (optimization algorithm) instance.
Args:
llm (LLM): LLM instance to be used.
budget (int): Budget of evaluations.
name (str): Name of the method (or variation).
"""
self.llm = llm
self.budget = budget
self.name = name
@abstractmethod
def __call__(self, problem: Problem):
"""
Executes the search algorithm and returns the best found solution.
Args:
problem (Problem): Problem instance being optimized.
"""
pass
@abstractmethod
def to_dict(self):
"""
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
"""
pass
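Subclassing sketch: RandomSamplingMethod is hypothetical and assumes the Problem exposes task_prompt/format_prompt, is callable on a Solution, and sets its fitness; only __call__ and to_dict must be provided.
from iohblade.llm import Dummy_LLM
from iohblade.method import Method

class RandomSamplingMethod(Method):
    """Hypothetical Method subclass: sample `budget` candidates and keep the best one."""

    def __call__(self, problem):
        session = [{'role': 'user', 'content': problem.task_prompt + problem.format_prompt}]
        best = None
        for _ in range(self.budget):
            candidate = self.llm.sample_solution(session)
            candidate = problem(candidate)  # assumed: the Problem evaluates and scores the Solution
            if best is None or candidate.fitness > best.fitness:
                best = candidate
        return best

    def to_dict(self):
        return {'method_name': self.name, 'budget': self.budget}

method = RandomSamplingMethod(Dummy_LLM(), budget=3, name='RandomSampling')
print(method.to_dict())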
|
class Method(ABC):
def __init__(self, llm: LLM, budget, name='Method'):
'''
Initializes a method (optimization algorithm) instance.
Args:
llm (LLM): LLM instance to be used.
budget (int): Budget of evaluations.
name (str): Name of the method (or variation).
'''
pass
@abstractmethod
def __call__(self, problem: Problem):
'''
Executes the search algorithm and returns the best found solution.
Args:
problem (Problem): Problem instance being optimized.
'''
pass
@abstractmethod
def to_dict(self):
'''
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
'''
pass
| 6
| 3
| 9
| 1
| 3
| 6
| 1
| 1.55
| 1
| 2
| 2
| 14
| 3
| 3
| 3
| 23
| 33
| 5
| 11
| 9
| 5
| 17
| 9
| 7
| 5
| 1
| 4
| 0
| 3
|
328,126
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/LLaMEA.py
|
iohblade.methods.LLaMEA.LLaMEA
|
from ..method import Method
from ..problem import Problem
from ..llm import LLM
from llamea import LLaMEA as LLAMEA_Algorithm
class LLaMEA(Method):
def __init__(self, llm: LLM, budget: int, name='LLaMEA', **kwargs):
"""
Initializes the LLaMEA algorithm within the benchmarking framework.
Args:
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring LLaMEA.
"""
super().__init__(llm, budget, name)
self.kwargs = kwargs
def __call__(self, problem: Problem):
"""
Executes the evolutionary search process via LLaMEA.
Returns:
Solution: The best solution found.
"""
self.llamea_instance = LLAMEA_Algorithm(f=problem, llm=self.llm, role_prompt='You are an excellent Python programmer.', task_prompt=problem.task_prompt, example_prompt=problem.example_prompt, output_format_prompt=problem.format_prompt, log=None, budget=self.budget, max_workers=1, **self.kwargs)
return self.llamea_instance.run()
def to_dict(self):
"""
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
"""
return {'method_name': self.name if self.name != None else 'LLaMEA', 'budget': self.budget, 'kwargs': self.kwargs}
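Usage sketch: constructing the wrapper only stores the LLM, budget and extra kwargs; calling it on a Problem instance would run the underlying llamea.LLaMEA search.
from iohblade.llm import Dummy_LLM
from iohblade.methods.LLaMEA import LLaMEA

method = LLaMEA(Dummy_LLM(), budget=50)  # extra kwargs are forwarded to llamea.LLaMEA
print(method.to_dict())                  # {'method_name': 'LLaMEA', 'budget': 50, 'kwargs': {}}
# best = method(problem)                 # would run the search on a Problem instance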
|
class LLaMEA(Method):
def __init__(self, llm: LLM, budget: int, name='LLaMEA', **kwargs):
'''
Initializes the LLaMEA algorithm within the benchmarking framework.
Args:
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring LLaMEA.
'''
pass
def __call__(self, problem: Problem):
'''
Executes the evolutionary search process via LLaMEA.
Returns:
Solution: The best solution found.
'''
pass
def to_dict(self):
'''
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
'''
pass
| 4
| 3
| 15
| 1
| 8
| 8
| 1
| 0.96
| 1
| 4
| 2
| 0
| 3
| 4
| 3
| 26
| 48
| 5
| 24
| 8
| 20
| 23
| 9
| 6
| 5
| 2
| 5
| 0
| 4
|
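A hedged usage sketch for the LLaMEA wrapper recorded above; llm and problem are placeholders for concrete iohblade LLM and Problem instances, and the module path in the import is an assumption based on this record's file path.

from iohblade.methods.LLaMEA import LLaMEA  # assumed module path

llm = ...      # placeholder: any iohblade LLM instance
problem = ...  # placeholder: any iohblade Problem instance

method = LLaMEA(llm, budget=100, name="LLaMEA-1+1")
best = method(problem)   # runs LLAMEA_Algorithm.run() and returns the best Solution found
print(method.to_dict())  # {'method_name': 'LLaMEA-1+1', 'budget': 100, 'kwargs': {}}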
328,127
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/eoh.py
|
iohblade.methods.eoh.EoH
|
import re
from ..solution import Solution
import json
from ..problem import Problem
from ..method import Method
from ..llm import LLM
import os
class EoH(Method):
def __init__(self, llm: LLM, budget: int, name='EoH', **kwargs):
"""
Initializes the EoH algorithm within the benchmarking framework.
Args:
problem (Problem): The problem instance to optimize.
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring EoH.
"""
super().__init__(llm, budget, name)
self.kwargs = kwargs
def __call__(self, problem: Problem):
"""
Executes the evolutionary search process via EoH.
Returns:
Solution: The best solution found.
"""
if eoh_main is None:
raise ImportError('EoH package is not installed, please install it using `poetry install --with methods`.')
if eoh_evolution is not None:
eoh_evolution.InterfaceLLM = lambda *args, **kwargs: _BladeInterfaceLLM(self.llm)
paras = Paras()
paras.set_paras(method='eoh', problem=_BladeProblemAdapter(problem), llm_api_endpoint='unused', llm_api_key='unused', llm_model=self.llm.model, ec_pop_size=self.kwargs.get('pop_size', 4), ec_n_pop=max(1, self.budget // self.kwargs.get('pop_size', 4)), exp_output_path=self.kwargs.get('output_path', './'), exp_n_proc=1, exp_debug_mode=False)
evol = eoh_main.EVOL(paras, prob=paras.problem)
evol.run()
result_file = os.path.join(paras.exp_output_path, 'results', 'pops_best', f'population_generation_{paras.ec_n_pop}.json')
if not os.path.exists(result_file):
raise FileNotFoundError(result_file)
with open(result_file) as f:
best = json.load(f)
if isinstance(best, list):
best = best[0]
code = best.get('code', '')
desc = best.get('algorithm', '')
name_match = re.search('class\\s+(\\w+)', code)
name = name_match.group(1) if name_match else 'OptimizationAlgorithm'
solution = Solution(code=code, name=name, description=desc)
solution.set_scores(-best.get('objective', 0))
return solution
def to_dict(self):
"""
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
"""
return {'method_name': self.name if self.name != None else 'EoH', 'budget': self.budget, 'kwargs': self.kwargs}
|
class EoH(Method):
def __init__(self, llm: LLM, budget: int, name='EoH', **kwargs):
'''
Initializes the EoH algorithm within the benchmarking framework.
Args:
problem (Problem): The problem instance to optimize.
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring EoH.
'''
pass
def __call__(self, problem: Problem):
'''
Executes the evolutionary search process via EoH.
Returns:
Solution: The best solution found.
'''
pass
def to_dict(self):
'''
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
'''
pass
| 4
| 3
| 27
| 3
| 17
| 7
| 3
| 0.38
| 1
| 10
| 5
| 0
| 3
| 1
| 3
| 26
| 85
| 12
| 53
| 15
| 49
| 20
| 29
| 14
| 25
| 6
| 5
| 1
| 9
|
328,128
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/eoh.py
|
iohblade.methods.eoh._BladeInterfaceLLM
|
from ..llm import LLM
class _BladeInterfaceLLM:
"""Provide EoH with the BLADE LLM interface."""
def __init__(self, llm: LLM):
self.llm = llm
def get_response(self, prompt_content):
return self.llm.query([{'role': 'user', 'content': prompt_content}])
|
class _BladeInterfaceLLM:
'''Provide EoH with the BLADE LLM interface.'''
def __init__(self, llm: LLM):
pass
def get_response(self, prompt_content):
pass
| 3
| 1
| 2
| 0
| 2
| 0
| 1
| 0.2
| 0
| 1
| 1
| 0
| 2
| 1
| 2
| 2
| 8
| 2
| 5
| 4
| 2
| 1
| 5
| 4
| 2
| 1
| 0
| 0
| 2
|
328,129
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/eoh.py
|
iohblade.methods.eoh._BladeProblemAdapter
|
from ..solution import Solution
from ..utils import class_info, first_class_name
from ..problem import Problem
class _BladeProblemAdapter:
"""Wraps a BLADE problem for use with EoH."""
def __init__(self, problem: Problem):
self.problem = problem
self.prompts = _BladePrompts(problem)
def evaluate(self, code_string):
solution = Solution(code=code_string, name=first_class_name(code_string) or 'AlgorithmName', description=class_info(code_string)[1] or 'No description provided.')
solution = self.problem(solution)
return -solution.fitness
|
class _BladeProblemAdapter:
'''Wraps a BLADE problem for use with EoH.'''
def __init__(self, problem: Problem):
pass
def evaluate(self, code_string):
pass
| 3
| 1
| 7
| 0
| 7
| 1
| 1
| 0.14
| 0
| 3
| 3
| 0
| 2
| 2
| 2
| 2
| 17
| 2
| 14
| 6
| 11
| 2
| 8
| 6
| 5
| 1
| 0
| 0
| 2
|
328,130
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/eoh.py
|
iohblade.methods.eoh._BladePrompts
|
from ..problem import Problem
class _BladePrompts:
"""Adapter exposing the prompt interface expected by EoH."""
def __init__(self, problem: Problem):
self.problem = problem
def get_task(self):
return self.problem.task_prompt
def get_func_name(self):
return self.problem.func_name
def get_func_inputs(self):
return self.problem.func_inputs
def get_init_inputs(self):
return self.problem.init_inputs
def get_func_outputs(self):
return self.problem.func_outputs
def get_inout_inf(self):
return f"Implement a Python class called `AlgorithmName` with an __init__(self, {', '.join(self.get_init_inputs())}) and a function {self.get_func_name()}(self, {', '.join(self.get_func_inputs())}) returning {', '.join(self.get_func_outputs())}."
def get_other_inf(self):
return self.problem.task_prompt + self.problem.example_prompt + self.problem.format_prompt
|
class _BladePrompts:
'''Adapter exposing the prompt interface expected by EoH.'''
def __init__(self, problem: Problem):
pass
def get_task(self):
pass
def get_func_name(self):
pass
def get_func_inputs(self):
pass
def get_init_inputs(self):
pass
def get_func_outputs(self):
pass
def get_inout_inf(self):
pass
def get_other_inf(self):
pass
| 9
| 1
| 3
| 0
| 3
| 0
| 1
| 0.04
| 0
| 1
| 1
| 0
| 8
| 1
| 8
| 8
| 34
| 8
| 25
| 10
| 16
| 1
| 17
| 10
| 8
| 1
| 0
| 0
| 8
|
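A short, hedged sketch of how the adapters recorded above (_BladeInterfaceLLM, _BladeProblemAdapter, _BladePrompts) glue a BLADE Problem onto the interface EoH expects; problem and candidate_code are placeholders.

adapter = _BladeProblemAdapter(problem)

# EoH reads the task description and the required call signature through the prompt adapter.
task = adapter.prompts.get_task()           # -> problem.task_prompt
signature = adapter.prompts.get_inout_inf()

# EoH minimises its objective while BLADE maximises fitness, hence the sign flip in evaluate().
objective = adapter.evaluate(candidate_code)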
328,131
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/funsearch.py
|
iohblade.methods.funsearch.funsearch
|
from ..method import Method
from ..llm import LLM
from ..problem import Problem
class funsearch(Method):
def __init__(self, llm: LLM, budget: int, name='funsearch', **kwargs):
"""
Initializes the funsearch algorithm within the benchmarking framework.
Args:
problem (Problem): The problem instance to optimize.
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring funsearch.
"""
super().__init__(llm, budget, name)
self.kwargs = kwargs
def __call__(self, problem: Problem):
"""
Executes the evolutionary search process via funsearch.
Returns:
Solution: The best solution found.
"""
raise NotImplementedError('funsearch is not implemented yet')
def to_dict(self):
"""
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
"""
return {'method_name': self.name if self.name != None else 'funsearch', 'budget': self.budget, 'kwargs': self.kwargs}
|
class funsearch(Method):
def __init__(self, llm: LLM, budget: int, name='funsearch', **kwargs):
'''
Initializes the funsearch algorithm within the benchmarking framework.
Args:
problem (Problem): The problem instance to optimize.
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring funsearch.
'''
pass
def __call__(self, problem: Problem):
'''
Executes the evolutionary search process via funsearch.
Returns:
Solution: The best solution found.
'''
pass
def to_dict(self):
'''
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
'''
pass
| 4
| 3
| 11
| 1
| 4
| 6
| 1
| 1.58
| 1
| 5
| 2
| 0
| 3
| 1
| 3
| 26
| 36
| 5
| 12
| 5
| 8
| 19
| 8
| 5
| 4
| 2
| 5
| 0
| 4
|
328,132
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/llamea.py
|
iohblade.methods.llamea.LLaMEA
|
from ..llm import LLM
from ..method import Method
from llamea import LLaMEA as LLAMEA_Algorithm
from ..problem import Problem
class LLaMEA(Method):
def __init__(self, llm: LLM, budget: int, name='LLaMEA', **kwargs):
"""
Initializes the LLaMEA algorithm within the benchmarking framework.
Args:
problem (Problem): The problem instance to optimize.
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring LLaMEA.
"""
super().__init__(llm, budget, name)
self.kwargs = kwargs
def __call__(self, problem: Problem):
"""
Executes the evolutionary search process via LLaMEA.
Returns:
Solution: The best solution found.
"""
self.llamea_instance = LLAMEA_Algorithm(f=problem, llm=self.llm, role_prompt='You are an excellent Python programmer.', task_prompt=problem.task_prompt, example_prompt=problem.example_prompt, output_format_prompt=problem.format_prompt, log=None, budget=self.budget, max_workers=1, **self.kwargs)
return self.llamea_instance.run()
def to_dict(self):
"""
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
"""
return {'method_name': self.name if self.name != None else 'LLaMEA', 'budget': self.budget, 'kwargs': self.kwargs}
|
class LLaMEA(Method):
def __init__(self, llm: LLM, budget: int, name='LLaMEA', **kwargs):
'''
Initializes the LLaMEA algorithm within the benchmarking framework.
Args:
problem (Problem): The problem instance to optimize.
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
name (str): The name of the method.
kwargs: Additional arguments for configuring LLaMEA.
'''
pass
def __call__(self, problem: Problem):
'''
Executes the evolutionary search process via LLaMEA.
Returns:
Solution: The best solution found.
'''
pass
def to_dict(self):
'''
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
'''
pass
| 4
| 3
| 15
| 1
| 8
| 8
| 1
| 0.96
| 1
| 4
| 2
| 0
| 3
| 4
| 3
| 26
| 48
| 5
| 24
| 8
| 20
| 23
| 9
| 6
| 5
| 2
| 5
| 0
| 4
|
328,133
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/random_search.py
|
iohblade.methods.random_search.RandomSearch
|
from ..llm import LLM
from ..problem import Problem
from ..method import Method
class RandomSearch(Method):
def __init__(self, llm: LLM, budget, name='RandomSearch', **kwargs):
"""
Initializes the RandomSearch method within the benchmarking framework.
Args:
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
kwargs: Additional arguments for configuring RandomSearch.
"""
super().__init__(llm, budget, name)
def __call__(self, problem: Problem):
"""
Executes a random search over LLM-generated solutions.
Returns:
Solution: The best solution found.
"""
best_solution = None
for i in range(self.budget):
solution = self.llm.sample_solution([{'role': 'client', 'content': problem.get_prompt()}])
solution = problem(solution)
if best_solution is None or solution.fitness > best_solution.fitness:
best_solution = solution
return best_solution
def to_dict(self):
"""
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
"""
return {'method_name': self.name if self.name != None else 'RandomSearch', 'budget': self.budget}
|
class RandomSearch(Method):
def __init__(self, llm: LLM, budget, name='RandomSearch', **kwargs):
'''
Initializes the RandomSearch method within the benchmarking framework.
Args:
llm (LLM): The LLM instance to use for solution generation.
budget (int): The maximum number of evaluations.
kwargs: Additional arguments for configuring RandomSearch.
'''
pass
def __call__(self, problem: Problem):
'''
Executes a random search over LLM-generated solutions.
Returns:
Solution: The best solution found.
'''
pass
def to_dict(self):
'''
Returns a dictionary representation of the method including all parameters.
Returns:
dict: Dictionary representation of the method.
'''
pass
| 4
| 3
| 12
| 1
| 6
| 6
| 2
| 0.94
| 1
| 4
| 2
| 0
| 3
| 0
| 3
| 26
| 40
| 5
| 18
| 7
| 14
| 17
| 13
| 7
| 9
| 3
| 5
| 2
| 6
|
328,134
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/reevo.py
|
iohblade.methods.reevo.ReEvo
|
from typing import Any
from ..llm import LLM
from ..method import Method
from ..problem import Problem
from ..utils import class_info, first_class_name
from ..solution import Solution
class ReEvo(Method):
"""Wrapper for the ReEvo baseline."""
def __init__(self, llm: LLM, budget: int, name: str='ReEvo', **kwargs: Any):
super().__init__(llm, budget, name)
self.kwargs = kwargs
def _eval_population(self, reevo: Any, population: list[dict], problem: Problem):
for response_id in range(len(population)):
individual = population[response_id]
reevo.function_evals += 1
if individual.get('code') is None:
individual['exec_success'] = False
individual['obj'] = float('inf')
continue
solution = Solution(code=individual['code'], name=first_class_name(individual['code']) or 'AlgorithmName', description=class_info(individual['code'])[1] or 'No description provided.')
solution = problem(solution)
if solution.error != '':
individual['exec_success'] = False
individual['obj'] = -solution.fitness
population[response_id] = reevo.mark_invalid_individual(individual, solution.error)
continue
individual['obj'] = -solution.fitness
individual['exec_success'] = True
population[response_id] = individual
return population
def __call__(self, problem: Problem):
if ReEvoAlgorithm is None:
raise ImportError('reevo package is not installed, please install it using `poetry install --with methods`.')
from omegaconf import OmegaConf
cfg_dict = {'max_fe': self.budget, 'pop_size': self.kwargs.get('pop_size', 10), 'init_pop_size': self.kwargs.get('init_pop_size', 20), 'mutation_rate': self.kwargs.get('mutation_rate', 0.5), 'timeout': self.kwargs.get('timeout', 20), 'problem': {'problem_name': problem.name, 'description': problem.task_prompt, 'problem_size': getattr(problem, 'dim', 1), 'func_name': 'AlgorithmName', 'seed_func': problem.example_prompt, 'func_signature': f'{problem.func_name}(self)', 'obj_type': 'max', 'problem_type': 'blade', 'func_desc': '', 'external_knowledge': ''}}
cfg = OmegaConf.create(cfg_dict)
client = _BladeReEvoClient(self.llm)
reevo = ReEvoAlgorithm(cfg, root_dir=self.kwargs.get('output_path', './'), generator_llm=client)
reevo.evaluate_population = lambda pop: self._eval_population(reevo, pop, problem)
reevo.init_population()
code, _ = reevo.evolve()
name = first_class_name(code) or 'AlgorithmName'
sol = Solution(code=code, name=name)
sol.set_scores(-reevo.best_obj_overall, '', '')
return sol
def to_dict(self):
return {'method_name': self.name if self.name is not None else 'ReEvo', 'budget': self.budget, 'kwargs': self.kwargs}
|
class ReEvo(Method):
'''Wrapper for the ReEvo baseline.'''
def __init__(self, llm: LLM, budget: int, name: str='ReEvo', **kwargs: Any):
pass
def _eval_population(self, reevo: Any, population: list[dict], problem: Problem):
pass
def __call__(self, problem: Problem):
pass
def to_dict(self):
pass
| 5
| 1
| 20
| 1
| 19
| 1
| 2
| 0.05
| 1
| 13
| 4
| 0
| 4
| 1
| 4
| 27
| 87
| 8
| 75
| 17
| 69
| 4
| 40
| 17
| 34
| 4
| 5
| 2
| 9
|
328,135
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/methods/reevo.py
|
iohblade.methods.reevo._BladeReEvoClient
|
from ..llm import LLM
class _BladeReEvoClient(BaseClient):
"""Adapter that exposes the interface expected by ReEvo."""
def __init__(self, llm: LLM, temperature: float=1.0) -> None:
super().__init__(model=llm.model, temperature=temperature)
self.llm = llm
def _chat_completion_api(self, messages: list[dict], temperature: float, n: int=1):
responses = []
for _ in range(n):
content = self.llm.query(messages)
responses.append({'message': {'role': 'assistant', 'content': content}})
return responses
def multi_chat_completion(self, messages_list, n=1, temperature=None):
"""Sequentially generate responses for many independent conversations.
Parameters
----------
messages_list : list[list[dict]] | list[dict]
Either *one* conversation (list[dict]) or a list of conversations.
n : int, default=1
Number of completions **per conversation**. For multiple
conversations `n` must remain 1.
temperature : float | None, default=None
Sampling temperature.
Returns
-------
list[str]
The content field of each returned message, flattened.
"""
if not isinstance(messages_list, list):
raise TypeError('messages_list must be a list')
if messages_list and (not isinstance(messages_list[0], list)):
messages_list = [messages_list]
if len(messages_list) > 1 and n != 1:
raise ValueError('Currently, only n=1 is supported for multi‑chat completion.')
contents = []
for msgs in messages_list:
for choice in self.chat_completion(n=n, messages=msgs, temperature=temperature):
contents.append(choice['message']['content'])
return contents
|
class _BladeReEvoClient(BaseClient):
'''Adapter that exposes the interface expected by ReEvo.'''
def __init__(self, llm: LLM, temperature: float=1.0) -> None:
pass
def _chat_completion_api(self, messages: list[dict], temperature: float, n: int=1):
pass
def multi_chat_completion(self, messages_list, n=1, temperature=None):
'''Sequentially generate responses for many independent conversations.
Parameters
----------
messages_list : list[list[dict]] | list[dict]
Either *one* conversation (list[dict]) or a list of conversations.
n : int, default=1
Number of completions **per conversation**. For multiple
conversations `n` must remain 1.
temperature : float | None, default=None
Sampling temperature.
Returns
-------
list[str]
The content field of each returned message, flattened.
'''
pass
| 4
| 2
| 19
| 1
| 11
| 7
| 3
| 0.67
| 1
| 9
| 1
| 0
| 3
| 1
| 3
| 3
| 61
| 7
| 33
| 18
| 22
| 22
| 22
| 11
| 18
| 6
| 1
| 2
| 9
|
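A hedged usage sketch for the client recorded above; llm is a placeholder iohblade LLM instance and BaseClient is provided by the ReEvo package.

client = _BladeReEvoClient(llm)

# One conversation, several completions (n > 1 is only allowed in this case).
single = [{"role": "user", "content": "Propose a crossover operator."}]
replies = client.multi_chat_completion(single, n=3)

# Several independent conversations, one completion each (n must stay 1).
batch = [
    [{"role": "user", "content": "Write a short-term reflection."}],
    [{"role": "user", "content": "Write a long-term reflection."}],
]
one_each = client.multi_chat_completion(batch)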
328,136
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/misc/ast.py
|
iohblade.misc.ast.BuildAST
|
import ast
import networkx as nx
class BuildAST(ast.NodeVisitor):
def __init__(self):
"""
Class to build a directed graph representation (networkx.DiGraph) of a Python Abstract
Syntax Tree (AST). Each AST node is represented as a graph node, and edges indicate
parent-child relationships in the AST.
"""
self.graph = nx.DiGraph()
self.current_node = 0
self.node_stack = []
def generic_visit(self, node):
"""
Visits each node in the AST. Adds the node to the graph, and connects it with an edge to
its parent node. Uses a stack to keep track of the parent-child relationship.
"""
node_id = self.current_node
self.graph.add_node(node_id, label=type(node).__name__)
if self.node_stack:
parent_id = self.node_stack[-1]
self.graph.add_edge(parent_id, node_id)
self.node_stack.append(node_id)
self.current_node += 1
super().generic_visit(node)
self.node_stack.pop()
def build_graph(self, root):
"""
Builds and returns the directed graph (networkx.DiGraph) from the AST root node by
visiting each node in the tree.
Args:
root (ast.AST): The root of the AST from which to build the graph.
Returns:
networkx.DiGraph: A directed graph representing the AST.
"""
self.visit(root)
return self.graph
|
class BuildAST(ast.NodeVisitor):
def __init__(self):
'''
Class to build a directed graph representation (networkx.DiGraph) of a Python Abstract
Syntax Tree (AST). Each AST node is represented as a graph node, and edges indicate
parent-child relationships in the AST.
'''
pass
def generic_visit(self, node):
'''
Visits each node in the AST. Adds the node to the graph, and connects it with an edge to
its parent node. Uses a stack to keep track of the parent-child relationship.
'''
pass
def build_graph(self, root):
'''
Builds and returns the directed graph (networkx.DiGraph) from the AST root node by
visiting each node in the tree.
Args:
root (ast.AST): The root of the AST from which to build the graph.
Returns:
networkx.DiGraph: A directed graph representing the AST.
'''
pass
| 4
| 3
| 13
| 2
| 6
| 6
| 1
| 0.94
| 1
| 2
| 0
| 0
| 3
| 3
| 3
| 6
| 43
| 8
| 18
| 9
| 14
| 17
| 18
| 9
| 14
| 2
| 2
| 1
| 4
|
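A minimal usage sketch for the BuildAST visitor recorded above.

import ast

source = "def add(a, b):\n    return a + b\n"
graph = BuildAST().build_graph(ast.parse(source))

print(graph.number_of_nodes(), graph.number_of_edges())
print([data["label"] for _, data in graph.nodes(data=True)])  # e.g. ['Module', 'FunctionDef', 'arguments', ...]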
328,137
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problem.py
|
iohblade.problem.Problem
|
from abc import ABC, abstractmethod
import tempfile
import os
from .utils import TimeoutException
import multiprocessing
from .solution import Solution
from pathlib import Path
import numpy as np
import subprocess
import shutil
class Problem(ABC):
"""
Abstract problem class.
"""
def __init__(self, logger=None, training_instances=None, test_instances=None, name='Problem', eval_timeout=6000, dependencies=None, imports=None):
"""
Initializes a problem instance with logging and dataset references.
Args:
logger (Logger, optional): Logger object for tracking solutions.
training_instances (list, optional): List of training problem instances.
test_instances (list, optional): List of test problem instances.
name (str, optional): Name of the problem.
eval_timeout (int, optional): Number of seconds before a timeout error is raised.
budget (int): number of algorithms allowed to be generated per run.
dependencies (list, optional): a list of pypi packages to install before evaluation.
imports (string, optional): the python string to manage imports in the evaluation file.
"""
self.logger = logger
self.logger_dir = ''
self.training_instances = training_instances if training_instances else []
self.test_instances = test_instances if test_instances else []
self.task_prompt = 'Write the problem description part here.'
self.example_prompt = 'Write an example code here.'
self.format_prompt = 'Write the format description part here.'
self.name = name
self.eval_timeout = eval_timeout
self.dependencies = BASE_DEPENDENCIES.copy()
if dependencies:
self.dependencies.extend(dependencies)
if imports is None:
self.imports = 'import numpy as np\n'
else:
self.imports = imports
self._env_path: Path | None = None
self._python_bin: Path | None = None
self.func_name = '__call__'
self.init_inputs = ['budget', 'dim']
self.func_inputs = ['func']
self.func_outputs = ['f_opt', 'x_opt']
def __call__(self, solution: Solution, logger=None):
"""
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
logger (RunLogger, optional): The RunLogger object attached to the problem to keep track of evaluations.
Returns:
Solution: The evaluated solution with updated fitness and scores.
"""
if logger != None:
print('LOGGER is NOT NONE (UNEXPECTED)')
self.logger = logger
if self.logger != None:
if self.logger.budget_exhausted():
solution.set_scores(-np.inf, feedback='Budget is exhausted.', error='Budget is exhausted.')
return solution
try:
self._ensure_env()
parent_conn, child_conn = multiprocessing.Pipe()
process = multiprocessing.Process(target=evaluate_in_subprocess, args=(self, child_conn, solution))
process.start()
process.join(timeout=self.eval_timeout)
if process.is_alive():
raise TimeoutException(f'Evaluation timed out after {self.eval_timeout} seconds.')
if parent_conn.poll():
result = parent_conn.recv()
if isinstance(result, Exception):
raise result
elif isinstance(result, Solution):
solution = result
elif isinstance(result, str):
solution.set_scores(-np.inf, feedback=f'An error occurred: {result}.', error=result)
else:
raise Exception('No Solution object or string returned.')
else:
raise Exception('Evaluation failed without an exception.')
except Exception as e:
solution.set_scores(-np.inf, feedback=f'An exception occurred: {e}.', error=f'An exception occurred: {e}.')
finally:
try:
process.terminate()
process.join()
except Exception:
pass
if self.logger is not None:
self.logger.log_individual(solution)
return solution
def _ensure_env(self):
"""Create the virtual environment for evaluations if it does not exist."""
if self._env_path is not None:
return
import virtualenv
env_dir = tempfile.mkdtemp(prefix='blade_env_')
self._env_path = Path(env_dir)
virtualenv.cli_run([env_dir])
self._python_bin = self._env_path / ('Scripts' if os.name == 'nt' else 'bin') / 'python'
deps = getattr(self, 'dependencies', [])
if deps:
subprocess.run([str(self._python_bin), '-m', 'pip', 'install', *deps], check=True, capture_output=True, text=True)
def cleanup(self):
try:
if self._env_path and self._env_path.exists():
shutil.rmtree(self._env_path)
except Exception:
pass
def set_logger(self, logger):
"""
Sets the logger for this problem.
"""
self.logger = logger
if logger != None:
self.logger_dir = logger.get_log_dir()
def get_prompt(self):
"""
Get the full prompt describing the problem and how to format the answer.
"""
return self.task_prompt + self.example_prompt + self.format_prompt
@abstractmethod
def evaluate(self, solution: Solution):
"""
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
"""
pass
@abstractmethod
def test(self, solution: Solution):
"""
Performs a complete evaluation on test instances and returns the fitness score.
Args:
solution (Solution): Solution object to be tested.
"""
pass
@abstractmethod
def to_dict(self):
"""
Returns a dictionary representation of the problem including all parameters.
Returns:
dict: Dictionary representation of the problem.
"""
pass
|
class Problem(ABC):
'''
Abstract problem class.
'''
def __init__(self, logger=None, training_instances=None, test_instances=None, name='Problem', eval_timeout=6000, dependencies=None, imports=None):
'''
Initializes a problem instance with logging and dataset references.
Args:
logger (Logger, optional): Logger object for tracking solutions.
training_instances (list, optional): List of training problem instances.
test_instances (list, optional): List of test problem instances.
name (str, optional): Name of the problem.
eval_timeout (int, optional): Number of seconds before a timeout error is raised.
budget (int): number of algorithms allowed to be generated per run.
dependencies (list, optional): a list of pypi packages to install before evaluation.
imports (string, optional): the python string to manage imports in the evaluation file.
'''
pass
def __call__(self, solution: Solution, logger=None):
'''
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
logger (RunLogger, optional): The RunLogger object attached to the problem to keep track of evaluations.
Returns:
Solution: The evaluated solution with updated fitness and scores.
'''
pass
def _ensure_env(self):
'''Create the virtual environment for evaluations if it does not exist.'''
pass
def cleanup(self):
pass
def set_logger(self, logger):
'''
Sets the logger for this problem.
'''
pass
def get_prompt(self):
'''
Get the full prompt describing the problem and how to format the answer.
'''
pass
@abstractmethod
def evaluate(self, solution: Solution):
'''
Evaluates a solution on training instances and updates its fitness and feedback.
Args:
solution (Solution): Solution object to be evaluated.
'''
pass
@abstractmethod
def test(self, solution: Solution):
'''
Performs a complete evaluation on test instances and returns the fitness score.
Args:
solution (Solution): Solution object to be tested.
'''
pass
@abstractmethod
def to_dict(self):
'''
Returns a dictionary representation of the problem including all parameters.
Returns:
dict: Dictionary representation of the problem.
'''
pass
| 13
| 9
| 21
| 2
| 14
| 6
| 3
| 0.44
| 1
| 5
| 2
| 14
| 9
| 17
| 9
| 29
| 205
| 23
| 128
| 47
| 105
| 56
| 86
| 33
| 75
| 12
| 4
| 3
| 30
|
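A hypothetical minimal subclass illustrating the abstract Problem interface recorded above; the objective is a toy stand-in and the import paths are assumptions based on the iohblade layout.

from iohblade.problem import Problem
from iohblade.solution import Solution

class ShortestCodeProblem(Problem):
    def __init__(self, logger=None):
        super().__init__(logger=logger, name="ShortestCode", eval_timeout=60)

    def evaluate(self, solution: Solution):
        # Toy objective: prefer shorter candidate programs (higher score is better).
        score = -len(solution.code)
        solution.set_scores(score, f"Candidate is {len(solution.code)} characters long.")
        return solution

    def test(self, solution: Solution):
        return self.evaluate(solution)

    def to_dict(self):
        return {"name": self.name}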
328,138
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/AutoML.py
|
iohblade.problems.AutoML.AutoML
|
import sklearn
from sklearn.datasets import load_breast_cancer
from ..solution import Solution
from ..problem import BASE_DEPENDENCIES, Problem
from sklearn.metrics import accuracy_score
class AutoML(Problem):
"""
Problem class for evaluating AutoML pipelines (sample).
"""
def __init__(self, logger=None, datasets=None, name='AutoML-breast_cancer', eval_timeout=360, dependencies=None, imports=None):
if dependencies is None:
dependencies = ['pandas==2.2.3', 'polars==1.31.0', 'scikit-learn==1.3.0']
if imports is None:
imports = 'import pandas as pd\nimport polars\nimport sklearn\n'
X, y = load_breast_cancer(return_X_y=True)
self.X_train, self.X_test, self.y_train, self.y_test = sklearn.model_selection.train_test_split(X, y, random_state=1)
super().__init__(logger, [(self.X_train, self.y_train)], [(self.X_test, self.y_test)], name, eval_timeout, dependencies, imports)
self.func_name = '__call__'
self.init_inputs = ['X', 'y']
self.func_inputs = ['X']
self.func_outputs = ['y_pred']
self.task_prompt = f'\nYou are a highly skilled computer scientist in the field machine learning. Your task is to design novel machine learning pipelines for a given dataset and task.\nThe pipeline in this case should handle a breast cancer classification task. Your task is to write the Python code. The code should contain an `__init__(self, X, y)` function that trains a machine learning model and the function `def __call__(self, X)`, which should predict the samples in X and return the predictions.\nThe training data X has shape {self.X_train.shape} and y has shape {self.y_train.shape}.\n'
self.example_prompt = '\nAn example code structure is as follows:\n```python\nimport numpy as np\nimport sklearn\n\nclass AlgorithmName:\n "Template for a ML pipeline"\n\n def __init__(self, X, y):\n self.train(X, y)\n\n def train(self, X, y):\n # Standardize the feature data\n scaler = sklearn.preprocessing.StandardScaler()\n X_train = scaler.fit_transform(X)\n\n # Let\'s create and train a logistic regression model\n lr_model = sklearn.linear_model.LogisticRegression()\n lr_model.fit(X_train, y)\n self.model = lr_model\n \n def __call__(self, X):\n # predict using the trained model\n return self.model.predict(X)\n```\n'
self.format_prompt = '\n\nGive an excellent and novel ML pipeline to solve this task and also give it a one-line description, describing the main idea. Give the response in the format:\n# Description: <short-description>\n# Code: \n```python\n<code>\n```\n'
def get_prompt(self):
"""
Returns the problem description and answer format.
"""
return self.task_prompt + self.example_prompt + self.format_prompt
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
"""
Evaluates a solution on the breast cancer classification task using accuracy.
"""
code = solution.code
algorithm_name = solution.name
exec(code, globals())
algorithm = None
algorithm = globals()[algorithm_name](self.X_train, self.y_train)
y_pred = algorithm(self.X_test)
score = accuracy_score(self.y_test, y_pred)
solution.set_scores(score, f'The algorithm {algorithm_name} scored {score:.3f} on accuracy (higher is better, 1.0 is the best).')
return solution
def test(self, solution: Solution, ioh_dir=''):
"""
Runs the solution on test instances and returns the fitness score.
"""
return self.evaluate(solution, True, ioh_dir)
def to_dict(self):
"""
Converts the problem to a dictionary.
"""
return {'name': self.name}
|
class AutoML(Problem):
'''
Problem class for evaluating AutoML pipelines (sample).
'''
def __init__(self, logger=None, datasets=None, name='AutoML-breast_cancer', eval_timeout=360, dependencies=None, imports=None):
pass
def get_prompt(self):
'''
Returns the problem description and answer format.
'''
pass
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
'''
Evaluates a solution on the breast cancer classification task using accuracy.
'''
pass
def test(self, solution: Solution, ioh_dir=''):
'''
Runs the solution on test instances and returns the fitness score.
'''
pass
def to_dict(self):
'''
Converts the problem to a dictionary.
'''
pass
| 6
| 5
| 23
| 3
| 17
| 4
| 1
| 0.25
| 1
| 2
| 1
| 0
| 5
| 11
| 5
| 34
| 125
| 19
| 85
| 31
| 71
| 21
| 32
| 20
| 26
| 3
| 5
| 1
| 7
|
328,139
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/automl.py
|
iohblade.problems.automl.AutoML
|
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_breast_cancer
from ..solution import Solution
import sklearn
from ..problem import BASE_DEPENDENCIES, Problem
class AutoML(Problem):
"""
Problem class for evaluating AutoML pipelines (sample).
"""
def __init__(self, logger=None, datasets=None, name='AutoML-breast_cancer', eval_timeout=360, dependencies=None, imports=None):
if dependencies is None:
dependencies = ['pandas==2.2.3', 'polars==1.31.0', 'scikit-learn==1.3.0']
if imports is None:
imports = 'import pandas as pd\nimport polars\nimport sklearn\n'
X, y = load_breast_cancer(return_X_y=True)
self.X_train, self.X_test, self.y_train, self.y_test = sklearn.model_selection.train_test_split(X, y, random_state=1)
super().__init__(logger, [(self.X_train, self.y_train)], [(self.X_test, self.y_test)], name, eval_timeout, dependencies, imports)
self.func_name = '__call__'
self.init_inputs = ['X', 'y']
self.func_inputs = ['X']
self.func_outputs = ['y_pred']
self.task_prompt = f'\nYou are a highly skilled computer scientist in the field machine learning. Your task is to design novel machine learning pipelines for a given dataset and task.\nThe pipeline in this case should handle a breast cancer classification task. Your task is to write the Python code. The code should contain an `__init__(self, X, y)` function that trains a machine learning model and the function `def __call__(self, X)`, which should predict the samples in X and return the predictions.\nThe training data X has shape {self.X_train.shape} and y has shape {self.y_train.shape}.\n'
self.example_prompt = '\nAn example code structure is as follows:\n```python\nimport numpy as np\nimport sklearn\n\nclass AlgorithmName:\n "Template for a ML pipeline"\n\n def __init__(self, X, y):\n self.train(X, y)\n\n def train(self, X, y):\n # Standardize the feature data\n scaler = sklearn.preprocessing.StandardScaler()\n X_train = scaler.fit_transform(X)\n\n # Let\'s create and train a logistic regression model\n lr_model = sklearn.linear_model.LogisticRegression()\n lr_model.fit(X_train, y)\n self.model = lr_model\n \n def __call__(self, X):\n # predict using the trained model\n return self.model.predict(X)\n```\n'
self.format_prompt = '\n\nGive an excellent and novel ML pipeline to solve this task and also give it a one-line description, describing the main idea. Give the response in the format:\n# Description: <short-description>\n# Code: \n```python\n<code>\n```\n'
def get_prompt(self):
"""
Returns the problem description and answer format.
"""
return self.task_prompt + self.example_prompt + self.format_prompt
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
"""
Evaluates a solution on the breast cancer classification task using accuracy.
"""
code = solution.code
algorithm_name = solution.name
exec(code, globals())
algorithm = None
algorithm = globals()[algorithm_name](self.X_train, self.y_train)
y_pred = algorithm(self.X_test)
score = accuracy_score(self.y_test, y_pred)
solution.set_scores(score, f'The algorithm {algorithm_name} scored {score:.3f} on accuracy (higher is better, 1.0 is the best).')
return solution
def test(self, solution: Solution, ioh_dir=''):
"""
Runs the solution on test instances and returns the fitness score.
"""
return self.evaluate(solution, True, ioh_dir)
def to_dict(self):
"""
Converts the problem to a dictionary.
"""
return {'name': self.name}
|
class AutoML(Problem):
'''
Problem class for evaluating AutoML pipelines (sample).
'''
def __init__(self, logger=None, datasets=None, name='AutoML-breast_cancer', eval_timeout=360, dependencies=None, imports=None):
pass
def get_prompt(self):
'''
Returns the problem description and answer format.
'''
pass
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
'''
Evaluates a solution on the breast cancer classification task using accuracy.
'''
pass
def test(self, solution: Solution, ioh_dir=''):
'''
Runs the solution on test instances and returns the fitness score.
'''
pass
def to_dict(self):
'''
Converts the problem to a dictionary.
'''
pass
| 6
| 5
| 23
| 3
| 17
| 4
| 1
| 0.25
| 1
| 2
| 1
| 0
| 5
| 11
| 5
| 34
| 125
| 19
| 85
| 31
| 71
| 21
| 32
| 20
| 26
| 3
| 5
| 1
| 7
|
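A hedged sketch of the round trip that evaluate() in the AutoML records above expects: the candidate code must define a class whose name matches solution.name, exposing __init__(self, X, y) and __call__(self, X). The LogisticRegression pipeline and the Solution import path are illustrative assumptions; note this calls evaluate() directly, bypassing the subprocess wrapper in Problem.__call__.

from iohblade.solution import Solution  # assumed import path

candidate_code = '''
from sklearn.linear_model import LogisticRegression

class SimplePipeline:
    def __init__(self, X, y):
        self.model = LogisticRegression(max_iter=1000).fit(X, y)

    def __call__(self, X):
        return self.model.predict(X)
'''

problem = AutoML()
solution = problem.evaluate(Solution(code=candidate_code, name="SimplePipeline"))
print(solution.fitness)  # held-out accuracy on the breast-cancer split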
328,140
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/bbob_sboxcost.py
|
iohblade.problems.bbob_sboxcost.BBOB_SBOX
|
from ..problem import BASE_DEPENDENCIES, Problem
from ioh import logger as ioh_logger
import numpy as np
from ..utils import OverBudgetException, aoc_logger, correct_aoc
from ioh import get_problem
from ..solution import Solution
import ioh
class BBOB_SBOX(Problem):
"""
Problem class for evaluating optimization algorithms on the SBOX-COST or BBOB benchmark. See also https://github.com/sbox-cost/Examples
Black-Box Optimization Benchmarking (BBOB) problem set, which contains 24 noiseless real-valued test functions supported on [-5, 5]^n, where n is the dimensionality.
These problems were originally proposed by Hansen et al. in [FinckHRA10] and were implemented as the core component of the COmparing Continuous Optimizer (COCO) platform [HansenARMTB20].
We took the implementation of those 24 functions from https://github.com/numbbo/coco/tree/master/code-experiments/src (v2.2) and adapted them to our framework.
[HansenARMTB20] Nikolaus Hansen, Anne Auger, Raymond Ros, Olaf Mersmann, Tea Tusar, and Dimo Brockhoff. “COCO: A platform for comparing continuous optimizers in a black-box setting.” Optimization Methods and Software (2020): 1-31.
[FinckHRA10] Steffen Finck, Nikolaus Hansen, Raymond Ros, and Anne Auger. “Real-parameter black-box optimization benchmarking 2009: Presentation of the noiseless functions.” Technical Report 2009/20, Research Center PPE, 2009. Updated February, 2010.
"""
def __init__(self, logger=None, training_instances=None, test_instances=None, name='SBOX_COST', eval_timeout=120, dims=[2, 5], budget_factor=2000, specific_fid=None, specific_group=None, problem_type=ioh.ProblemClass.SBOX, full_ioh_log=False, ioh_dir='', dependencies=None, imports=None):
"""
Initializes the BBOB / SBOX-COST problem instance.
Args:
logger (RunLogger): The logger to use for logging.
training_instances (list): A list of tuples with (fid=function id, iid=instance id) for training instances to use.
test_instances (list): The indices of test instances to use. A list of tuples with (fid=function id, iid=instance id).
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
dims (list): The dimensionalities of the problem instances to run on.
budget_factor (int): The factor to multiply the dimensionality with to get the budget.
specific_fid (int): The specific function id to use. If not None, additional information is added to the prompt about the function.
specific_group (int): The specific function group (1,2,3,4,5) to use. If not None, additional information is added to the prompt about the function group.
problem_type (ioh.ProblemClass): The type of problem to use. Can be SBOX or BBOB.
full_ioh_log (bool): If set to True, additional IOH logs are being kept for each run and each algorithm.
dependencies (list, optional): a list of pypi packages to install before evaluation.
imports (string, optional): the python string to manage imports in the evaluation file.
"""
if dependencies is None:
dependencies = ['pandas==2.2.3', 'ioh==0.3.19', 'configspace==1.2.1', 'smac==2.3.1']
if imports is None:
imports = 'import numpy as np\nimport ioh\nimport pandas as pd\n'
if training_instances is None:
training_instances = [(f, i) for f in range(1, 25) for i in range(1, 6)]
if test_instances is None:
test_instances = [(f, i) for f in range(1, 25) for i in range(5, 16)]
super().__init__(logger, training_instances, test_instances, name, eval_timeout, dependencies)
self.dims = dims
self.budget_factor = budget_factor
self.specific_fid = specific_fid
self.specific_group = specific_group
self.full_ioh_log = full_ioh_log
self.ioh_dir = ioh_dir
self.func_name = '__call__'
self.init_inputs = ['budget', 'dim']
self.func_inputs = ['func']
self.func_outputs = ['f_opt', 'x_opt']
function_groups = ['Separable Functions', 'Functions with low or moderate conditioning', 'Functions with high conditioning and unimodal', 'Multi-modal functions with adequate global structure', 'Multi-modal functions with weak global structure']
functions = ['f1: Sphere Function', 'f2: Separable Ellipsoidal Function', 'f3: Rastrigin Function', 'f4: Büche-Rastrigin Function', 'f5: Linear Slope', 'f6: Attractive Sector Function', 'f7: Step Ellipsoidal Function', 'f8: Rosenbrock Function, original', 'f9: Rosenbrock Function, rotated', 'f10: Ellipsoidal Function', 'f11: Discus Function', 'f12: Bent Cigar Function', 'f13: Sharp Ridge Function', 'f14: Different Powers Function', 'f15: Rastrigin Function', 'f16: Weierstrass Function', "f17: Schaffer's F7 Function", "f18: Schaffer's F7 Function, moderately ill-conditioned", 'f19: Composite Griewank-Rosenbrock Function F8F2', 'f20: Schwefel Function', "f21: Gallagher's Gaussian 101-me Peaks Function", "f22: Gallagher's Gaussian 21-hi Peaks Function", 'f23: Katsuura Function', 'f24: Lunacek bi-Rastrigin Function']
self.problem_type = problem_type
self.benchmark_name = 'SBOX-COST test suite of noiseless box-constrained functions.' if problem_type == ioh.ProblemClass.SBOX else 'BBOB test suite of noiseless functions.'
box_constrained = 'box-constrained' if problem_type == ioh.ProblemClass.SBOX else 'unconstrained'
extra_prompt = f'The optimization algorithm should handle a wide range of tasks, which is evaluated on the {self.benchmark_name}'
if self.specific_fid is not None and self.specific_fid < 25 and (self.specific_fid > 0):
extra_prompt = f'The optimization algorithm should work on different instances of noiseless {box_constrained} functions. Specifically function: {functions[self.specific_fid - 1]}.'
elif self.specific_group is not None and self.specific_group < 6 and (self.specific_group > 0):
extra_prompt = f'The optimization algorithm should work on different instances of noiseless {box_constrained} functions. Specifically it should work well for {function_groups[self.specific_group - 1]}.'
else:
extra_prompt = f'The optimization algorithm should work on different instances of noiseless {box_constrained} functions.'
self.task_prompt = f'\nYou are a Python expert working on a new optimization algorithm.\nYour task is to develop a novel heuristic optimization algorithm for continuous optimization problems.\n{extra_prompt} Your task is to write the optimization algorithm in Python code. \nEach of the optimization functions has a search space between -5.0 (lower bound) and 5.0 (upper bound). The dimensionality can be varied.\nThe code should contain an `__init__(self, budget, dim)` function with optional additional arguments and the function `def __call__(self, func)`, which should optimize the black box function `func` using `self.budget` function evaluations.\nThe func() can only be called as many times as the budget allows, not more. \n'
self.example_prompt = '\nAn example of such code (a simple random search), is as follows:\n```python\nimport numpy as np\n\nclass RandomSearch:\n def __init__(self, budget=10000, dim=10):\n self.budget = budget\n self.dim = dim\n\n def __call__(self, func):\n self.f_opt = np.inf\n self.x_opt = None\n for i in range(self.budget):\n x = np.random.uniform(func.bounds.lb, func.bounds.ub)\n \n f = func(x)\n if f < self.f_opt:\n self.f_opt = f\n self.x_opt = x\n \n return self.f_opt, self.x_opt\n```\n '
self.format_prompt = '\nGive an excellent and novel heuristic algorithm to solve this task and also give it a one-line description, describing the main idea. Give the response in the format:\n# Description: <short-description>\n# Code: \n```python\n<code>\n```\n'
def get_prompt(self):
"""
Returns the problem description and answer format.
"""
return self.task_prompt + self.example_prompt + self.format_prompt
def evaluate(self, solution: Solution, test=False):
"""
Evaluates a solution on the SBOX or BBOB benchmark using AOCC.
"""
auc_mean = 0
auc_std = 0
code = solution.code
algorithm_name = solution.name
algorithm_id = solution.id
safe_globals = {'np': np}
local_env = {}
exec(code, safe_globals, local_env)
algorithm = None
try:
l2_temp = aoc_logger(100, upper=100.0, triggers=[ioh_logger.trigger.ALWAYS])
problem = get_problem(1, instance=1, dimension=2, problem_class=self.problem_type)
problem.attach_logger(l2_temp)
algorithm = local_env[algorithm_name](budget=100, dim=2)
algorithm(problem)
except OverBudgetException:
pass
instances = self.test_instances if test else self.training_instances
aucs = []
for dim in self.dims:
for instance in instances:
fid, iid = instance
budget = self.budget_factor * dim
f_new = get_problem(fid, instance=iid, dimension=dim, problem_class=self.problem_type)
l2 = aoc_logger(budget, upper=100.0, triggers=[ioh_logger.trigger.ALWAYS])
if test or self.full_ioh_log:
l1 = ioh.logger.Analyzer(root=self.ioh_dir, folder_name=algorithm_id, algorithm_name=algorithm_id, store_positions=True, triggers=[ioh_logger.trigger.ALWAYS])
combined_logger = ioh.logger.Combine([l1, l2])
f_new.attach_logger(combined_logger)
else:
f_new.attach_logger(l2)
try:
algorithm = local_env[algorithm_name](budget=budget, dim=dim)
algorithm(f_new)
except OverBudgetException:
pass
aucs.append(correct_aoc(f_new, l2, budget))
l2.reset(f_new)
f_new.reset()
auc_mean = np.mean(aucs)
auc_std = np.std(aucs)
solution.add_metadata('aucs', aucs)
solution.set_scores(auc_mean, f'The algorithm {algorithm_name} scored {auc_mean:.3f} on AOCC (higher is better, 1.0 is the best).')
return solution
def test(self, solution: Solution):
"""
Runs the solution on test instances and returns the fitness score.
"""
return self.evaluate(solution, True)
def to_dict(self):
"""
Converts the problem to a dictionary.
"""
return {'name': self.name, 'dims': self.dims, 'training_instances': self.training_instances, 'test_instances': self.test_instances, 'budget_factor': self.budget_factor, 'problem_type': 'SBOX' if self.problem_type == ioh.ProblemClass.SBOX else 'BBOB', 'specific_fid': self.specific_fid, 'specific_group': self.specific_group}
|
class BBOB_SBOX(Problem):
'''
Problem class for evaluating optimization algorithms on the SBOX-COST or BBOB benchmark. See also https://github.com/sbox-cost/Examples
Black-Box Optimization Benchmarking (BBOB) problem set, which contains 24 noiseless real-valued test functions supported on [-5, 5]^n, where n is the dimensionality.
These problems were originally proposed by Hansen et al. in [FinckHRA10] and were implemented as the core component of the COmparing Continuous Optimizer (COCO) platform [HansenARMTB20].
We took the implementation of those 24 functions from https://github.com/numbbo/coco/tree/master/code-experiments/src (v2.2) and adapted them to our framework.
[HansenARMTB20] Nikolaus Hansen, Anne Auger, Raymond Ros, Olaf Mersmann, Tea Tusar, and Dimo Brockhoff. “COCO: A platform for comparing continuous optimizers in a black-box setting.” Optimization Methods and Software (2020): 1-31.
[FinckHRA10] Steffen Finck, Nikolaus Hansen, Raymond Ros, and Anne Auger. “Real-parameter black-box optimization benchmarking 2009: Presentation of the noiseless functions.” Technical Report 2009/20, Research Center PPE, 2009. Updated February, 2010.
'''
def __init__(self, logger=None, training_instances=None, test_instances=None, name='SBOX_COST', eval_timeout=120, dims=[2, 5], budget_factor=2000, specific_fid=None, specific_group=None, problem_type=ioh.ProblemClass.SBOX, full_ioh_log=False, ioh_dir='', dependencies=None, imports=None):
'''
Initializes the BBOB / SBOX-COST problem instance.
Args:
logger (RunLogger): The logger to use for logging.
training_instances (list): A list of tuples with (fid=function id, iid=instance id) for training instances to use.
test_instances (list): The indices of test instances to use. A list of tuples with (fid=function id, iid=instance id).
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
dims (list): The dimensionalities of the problem instances to run on.
budget_factor (int): The factor to multiply the dimensionality with to get the budget.
specific_fid (int): The specific function id to use. If not None, additional information is added to the prompt about the function.
specific_group (int): The specific function group (1,2,3,4,5) to use. If not None, additional information is added to the prompt about the function group.
problem_type (ioh.ProblemClass): The type of problem to use. Can be SBOX or BBOB.
full_ioh_log (bool): If set to True, additional IOH logs are being kept for each run and each algorithm.
dependencies (list, optional): a list of pypi packages to install before evaluation.
imports (string, optional): the python string to manage imports in the evaluation file.
'''
pass
def get_prompt(self):
'''
Returns the problem description and answer format.
'''
pass
def evaluate(self, solution: Solution, test=False):
'''
Evaluates a solution on the SBOX or BBOB benchmark using AOCC.
'''
pass
def test(self, solution: Solution):
'''
Runs the solution on test instances and returns the fitness score.
'''
pass
def to_dict(self):
'''
Converts the problem to a dictionary.
'''
pass
| 6
| 6
| 53
| 3
| 43
| 8
| 4
| 0.22
| 1
| 5
| 3
| 0
| 5
| 15
| 5
| 34
| 284
| 27
| 214
| 61
| 192
| 47
| 84
| 45
| 78
| 9
| 5
| 3
| 20
|
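A hedged sketch of what evaluate() in the BBOB_SBOX record above does for a single (function, instance, dimension) triple. aoc_logger, correct_aoc and OverBudgetException come from iohblade.utils as in the record; RandomSearch is a placeholder for any generated algorithm exposing __init__(budget, dim) and __call__(func), such as the example shown in the record's prompt.

import ioh
from ioh import get_problem, logger as ioh_logger
from iohblade.utils import OverBudgetException, aoc_logger, correct_aoc

dim = 5
budget = 2000 * dim  # budget_factor * dim
func = get_problem(3, instance=1, dimension=dim, problem_class=ioh.ProblemClass.SBOX)
log = aoc_logger(budget, upper=1e2, triggers=[ioh_logger.trigger.ALWAYS])
func.attach_logger(log)

try:
    RandomSearch(budget=budget, dim=dim)(func)  # placeholder generated algorithm
except OverBudgetException:
    pass  # evaluate() above swallows over-budget evaluations the same way

aocc = correct_aoc(func, log, budget)  # AOCC score, higher is better (1.0 is best)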
328,141
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/kerneltuner.py
|
iohblade.problems.kerneltuner.Kerneltuner
|
import os
from ..problem import BASE_DEPENDENCIES, Problem
from pathlib import Path
from ..solution import Solution
class Kerneltuner(Problem):
"""
Problem class for evaluating optimization algorithms on the kernel tuner real-world benchmark.
Note that this problem requires additional installation steps.
"""
def __init__(self, logger=None, gpus=None, kernels=None, name='kerneltuner', eval_timeout=600, budget=1000, cache_dir='/data/neocortex/repos/benchmark_hub/', extra_info=False, dependencies=None, imports=None):
"""
Initializes the Kerneltuner problem instance.
Args:
logger (RunLogger): The logger to use for logging.
gpus (list): The gpus to train on.
kernels (list): The kernels (applications) to train on.
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
budget (int): The budget for the optimization algorithms.
cache_dir (str): The directory that contains the kernel tuner data files.
extra_info (bool): If True, additional information about the problem is added to the prompt. Only works for one kernel.
"""
if dependencies is None:
dependencies = ['kernel-tuner @ git+https://github.com/XAI-liacs/kernel_tuner.git@hyperparametertuning_custom_strategies', 'autotuning-methodology @ git+https://github.com/AutoTuningAssociation/autotuning_methodology.git@6a9a50a5a49bc104469b3b753fd43a5324241702', 'pandas==2.2.3', 'ioh==0.3.19', 'configspace==1.2.1', 'smac==2.3.1']
if imports is None:
imports = 'import numpy as np'
self.applications = ['gemm', 'convolution', 'dedispersion', 'hotspot']
if gpus is None:
self.gpus = ['A100', 'A4000', 'A6000', 'MI250X', 'W6600', 'W7800']
else:
self.gpus = gpus
if kernels is None:
self.kernels = self.applications
else:
self.kernels = kernels
self.training_instances = []
self.test_instances = []
for gpu in self.gpus:
for kernel in self.kernels:
self.training_instances.append(f'{kernel}-{gpu}')
self.test_instances.append(f'{kernel}-{gpu}')
self.cache_dir = cache_dir
super().__init__(logger, self.training_instances, self.test_instances, name, eval_timeout, dependencies)
self.budget = budget
self.task_prompt = '\nYou are a highly skilled computer scientist in the field of natural computing and hardware kernel tuning. Your task is to design novel metaheuristic algorithms to solve kernel tuner problems (integer, variable dimension, contraint).\nThe optimization algorithm should handle a kernel tuning task. Your task is to write the optimization algorithm in Python code. The code should inherit the `OptAlg` class and contain an `__init__(self, budget=5000)` function with optional arguments and the function `def __call__(self, func, searchspace)`, which should optimize the black box function `func` till the `func.budget_spent_fraction` is 1.0.\nThe `searchspace` object can be used to sample random instances, neighbouring instances using `searchspace.get_neighbors(param_config: tuple, neighbor_method=\'Hamming\')` where neighbor_method can be any of ["strictly-adjacent", "adjacent", "Hamming"] and to check validity of parameter settings using `searchspace.is_param_config_valid(tuple(instance))`, nothing else. The dimensionality can be varied.\nIn addition, the variable `tune_params` is a dictionary containing the tuning parameters with their ranges and constraints, it can be obtained directly from the searchspace object `searchspace.tune_params`. The algorithm should be able to handle any number of tuning parameters, and the search space can be continuous or discrete. \n\n'
if len(self.kernels) == 1 and extra_info:
input_filepath = Path(f'{self.cache_dir}kernels/{self.kernels[0]}_milo.json')
self.task_prompt += '\nThe kernel to tune is ' + self.kernels[0] + '. The search space specification is as follows:\n'
with open(input_filepath, 'r') as f:
self.task_prompt += f.read()
else:
self.task_prompt += 'The algorithm should be able to handle any type of kernel tuning problem, including but not limited to vector addition, matrix multiplication, and convolution.\n'
self.example_prompt = '\nAn example code structure with helper functions is as follows:\n```python\nimport numpy as np\nimport random\n\nclass AlgorithmName(OptAlg):\n "Template for a kernel-tune algorithm"\n\n def __init__(self, budget=5000):\n # any parameters used in the search algorithm.\n self.param = None\n\n def __call__(self, func, searchspace):\n #this is not really the budget, but the size of the search space. The budget is dynamic and we can see how much fraction we used with `func.budget_spent_fraction`.\n self.budget = searchspace.size\n self.searchspace = searchspace\n self.tune_params = searchspace.tune_params.copy()\n\n self.f_opt = np.inf\n self.x_opt = None\n # create initial population and run the search till func.budget_spent_fraction is 1.0.\n # evaluate a solution using `func(x)` where `x` is a list of parameter values.\n # then return the best solution found (tuple, x_opt, f_opt) at the end of the search.\n return self.x_opt, self.f_opt\n\n def generate_population(self, pop_size=10):\n "We can use a constraint-aware random sampling method (optional), get_random_sample always returns valid configurations."\n pop = list(list(p) for p in self.searchspace.get_random_sample(pop_size))\n return pop\n\n def get_neighbour(self, solution):\n "We can easily get a random neighbour with hamming distance 1 using the searchspace provided method (for example)."\n neighbors = self.searchspace.get_neighbors(tuple(solution), neighbor_method="Hamming")\n if len(neighbors) > 0:\n return list(random.choice(neighbors))\n return solution\n\n def repair(self, solution):\n "It is possible that at some point a configuration is not valid (due to mutation, crossover etc). "\n if not self.searchspace.is_param_config_valid(tuple(solution)):\n # solution is not valid, try to repair it\n # search for valid configurations neighboring this config\n # start from strictly-adjacent to increasingly allowing more neighbors\n for neighbor_method in ["strictly-adjacent", "adjacent", "Hamming"]:\n neighbors = self.searchspace.get_neighbors_no_cache(tuple(solution), neighbor_method=neighbor_method)\n # if we have found valid neighboring configurations, select one at random\n if len(neighbors) > 0:\n new_solution = list(random.choice(neighbors))\n return new_solution\n return solution\n```\n'
self.format_prompt = '\n\nGive an excellent and novel heuristic algorithm to solve this task and also give it a one-line description, describing the main idea. Give the response in the format:\n# Description: <short-description>\n# Code: \n```python\n<code>\n```\n'
def get_prompt(self):
"""
Returns the problem description and answer format.
"""
return self.task_prompt + self.example_prompt + self.format_prompt
def evaluate(self, solution: Solution, test=False):
repeats = 5
path = Path(os.path.join(self.logger_dir, 'evaluation', solution.id))
path.mkdir(parents=True, exist_ok=True)
code = solution.code
algorithm_name = solution.name
exec(code, globals())
strategy = globals()[algorithm_name]()
gpus = self.gpus
folder = f'{self.cache_dir}kernels'
applications = []
for app in self.kernels:
applications.append({'name': f'{app}_milo', 'folder': folder, 'input_file': f'{app}_milo.json'})
alg_code = f'\nimport os\nimport numpy as np\nimport random\nimport re\nimport json\nimport time\nimport traceback\nimport math\n\nfrom kernel_tuner import util\nfrom kernel_tuner.searchspace import Searchspace\nfrom kernel_tuner.strategies.common import CostFunc\nfrom kernel_tuner.strategies.wrapper import OptAlg\n\n{solution.code}\n\n'
solution_path = os.path.join(self.logger_dir, 'evaluation', solution.id, 'code.py')
with open(solution_path, 'w') as f:
f.write(alg_code)
strategy: str = solution.name
hyperparams = []
searchspace_strategies = [{'autotuner': 'KernelTuner', 'name': strategy, 'display_name': strategy.replace('_', ' ').capitalize(), 'search_method': strategy, 'search_method_hyperparameters': hyperparams, 'custom_search_method_path': solution_path}]
override = {'experimental_groups_defaults': {'parent_folder': str(path), 'repeats': repeats, 'samples': 32, 'minimum_fraction_of_budget_valid': 0.01, 'pattern_for_full_search_space_filenames': {'regex': '/data/neocortex/repos/benchmark_hub/cachefiles/${applications}/${gpus}_T4.json'}}}
name = solution.id
experiments_filepath = generate_experiment_file(name, path, searchspace_strategies, applications, gpus, override=override, generate_unique_file=False, overwrite_existing_file=True)
scores = get_strategy_scores(str(experiments_filepath))
score = scores[list(scores.keys())[0]]['score']
solution.set_scores(score, f'The algorithm {solution.name} scored {score:.3f} (higher is better).')
return solution
def test(self, solution: Solution, ioh_dir=''):
"""
Runs the solution on test instances and returns the fitness score.
To evaluate kernel tuner solutions, use `autotuning_visualize <path to test file>`.
"""
return self.evaluate(solution, True)
def to_dict(self):
"""
Converts the problem to a dictionary.
"""
return {'name': self.name, 'training_instances': self.training_instances, 'test_instances': self.test_instances, 'budget': self.budget}
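The task prompt and example template embedded above fully specify the `OptAlg` interface that generated algorithms must follow. As a hedged illustration of what a submission could look like, here is a minimal restart hill climber written only against the `searchspace` methods and the `func.budget_spent_fraction` counter named in that prompt; the class name and strategy choices are illustrative assumptions, and running it presumes a working `kernel_tuner` installation.
```python
import random

import numpy as np
from kernel_tuner.strategies.wrapper import OptAlg


class RestartHillClimber(OptAlg):
    """Greedy Hamming-neighbourhood hill climber with random restarts (illustrative sketch)."""

    def __init__(self, budget=5000):
        self.budget = budget

    def __call__(self, func, searchspace):
        self.f_opt = np.inf
        self.x_opt = None
        while func.budget_spent_fraction < 1.0:
            # restart from a random valid configuration
            current = [list(p) for p in searchspace.get_random_sample(1)][0]
            f_current = func(current)
            if f_current < self.f_opt:
                self.f_opt, self.x_opt = f_current, current
            improved = True
            while improved and func.budget_spent_fraction < 1.0:
                improved = False
                neighbors = list(searchspace.get_neighbors(tuple(current), neighbor_method="Hamming"))
                random.shuffle(neighbors)
                for neighbor in neighbors:
                    if func.budget_spent_fraction >= 1.0:
                        break
                    f_neighbor = func(list(neighbor))
                    if f_neighbor < f_current:
                        # accept the first improving neighbour and keep climbing from it
                        current, f_current = list(neighbor), f_neighbor
                        improved = True
                        if f_current < self.f_opt:
                            self.f_opt, self.x_opt = f_current, current
                        break
        return self.x_opt, self.f_opt
```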
|
class Kerneltuner(Problem):
'''
Problem class for evaluating optimization algorithms on kernel tuner real world benchmark.
Note that this problem requires additional installation steps.
'''
def __init__(self, logger=None, gpus=None, kernels=None, name='kerneltuner', eval_timeout=600, budget=1000, cache_dir='/data/neocortex/repos/benchmark_hub/', extra_info=False, dependencies=None, imports=None):
'''
Initializes the Kerneltuner problem instance.
Args:
logger (RunLogger): The logger to use for logging.
gpus (list): The gpus to train on.
kernels (list): The kernels (applications) to train on.
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
budget (int): The budget for the optimization algorithms.
cache_dir (str): The directory that contains the kernel tuner data files.
extra_info (bool): If True, additional information about the problem is added to the prompt. Only works for one kernel.
'''
pass
def get_prompt(self):
'''
Returns the problem description and answer format.
'''
pass
def evaluate(self, solution: Solution, test=False):
pass
def test(self, solution: Solution, ioh_dir=''):
'''
Runs the solution on test instances and returns the fitness score.
To evaluate kernel tuner solutions, use `autotuning_visualize <path to test file>`.
'''
pass
def to_dict(self):
'''
Converts the problem to a dictionary.
'''
pass
| 6
| 5
| 54
| 6
| 40
| 9
| 3
| 0.24
| 1
| 5
| 1
| 0
| 5
| 10
| 5
| 34
| 282
| 34
| 203
| 51
| 185
| 49
| 64
| 37
| 58
| 8
| 5
| 2
| 13
|
328,142
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/mabbob.py
|
iohblade.problems.mabbob.MA_BBOB
|
import numpy as np
from ..problem import BASE_DEPENDENCIES, Problem
from ioh import get_problem
import os
from ..solution import Solution
from ioh import logger as ioh_logger
from ..utils import OverBudgetException, aoc_logger, correct_aoc
import ioh
import pandas as pd
class MA_BBOB(Problem):
"""
Problem class for evaluating optimization algorithms on the MA-BBOB benchmark.
"""
def __init__(self, logger=None, training_instances=None, test_instances=None, name='MA_BBOB', eval_timeout=60, dims=[2, 5], budget_factor=2000, dependencies=None, imports=None):
"""
Initializes the MA-BBOB problem instance.
Args:
logger (RunLogger): The logger to use for logging.
training_instances (list): The indices of training instances to use.
test_instances (list): The indices of test instances to use.
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
dims (list): The dimensionalities of the problem instances to run on.
budget_factor (int): The factor to multiply the dimensionality with to get the budget.
"""
if dependencies is None:
dependencies = ['pandas==2.2.3', 'ioh==0.3.19', 'configspace==1.2.1', 'smac==2.3.1']
if imports is None:
imports = 'import numpy as np\nimport ioh\n'
if training_instances is None:
training_instances = range(0, 20)
if test_instances is None:
test_instances = range(20, 120)
super().__init__(logger, training_instances, test_instances, name, eval_timeout, dependencies)
self.dims = dims
self.budget_factor = budget_factor
self.func_name = '__call__'
self.init_inputs = ['budget', 'dim']
self.func_inputs = ['func']
self.func_outputs = ['f_opt', 'x_opt']
self.task_prompt = '\nYou are a Python developer working on a new optimization algorithm.\nYour task is to develop a novel heuristic optimization algorithm for continuous optimization problems.\nThe optimization algorithm should handle a wide range of tasks, which is evaluated on the Many Affine BBOB test suite of noiseless functions. Your task is to write the optimization algorithm in Python code. \nEach of the optimization functions has a search space between -5.0 (lower bound) and 5.0 (upper bound). The dimensionality can be varied.\nThe code should contain an `__init__(self, budget, dim)` function with optional additional arguments and the function `def __call__(self, func)`, which should optimize the black box function `func` using `self.budget` function evaluations.\nThe func() can only be called as many times as the budget allows, not more. \n'
self.example_prompt = '\nAn example of such code (a simple random search), is as follows:\n```python\nimport numpy as np\n\nclass RandomSearch:\n def __init__(self, budget=10000, dim=10):\n self.budget = budget\n self.dim = dim\n\n def __call__(self, func):\n self.f_opt = np.inf\n self.x_opt = None\n for i in range(self.budget):\n x = np.random.uniform(func.bounds.lb, func.bounds.ub)\n \n f = func(x)\n if f < self.f_opt:\n self.f_opt = f\n self.x_opt = x\n \n return self.f_opt, self.x_opt\n```\n'
self.format_prompt = '\nGive an excellent and novel heuristic algorithm to solve this task and also give it a one-line description, describing the main idea. Give the response in the format:\n# Description: <short-description>\n# Code: \n```python\n<code>\n```\n'
base_path = os.path.dirname(__file__)
self.weights = pd.read_csv(os.path.join(base_path, 'mabbob', 'weights.csv'), index_col=0)
self.iids = pd.read_csv(os.path.join(base_path, 'mabbob', 'iids.csv'), index_col=0)
self.opt_locs = pd.read_csv(os.path.join(base_path, 'mabbob', 'opt_locs.csv'), index_col=0)
def get_prompt(self):
"""
Returns the problem description and answer format.
"""
return self.task_prompt + self.example_prompt + self.format_prompt
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
"""
Evaluates a solution on the MA-BBOB benchmark using AOCC.
"""
auc_mean = 0
auc_std = 0
code = solution.code
algorithm_name = solution.name
safe_globals = {'np': np}
local_env = {}
exec(code, safe_globals, local_env)
algorithm = None
try:
l2_temp = aoc_logger(100, upper=100.0, triggers=[ioh_logger.trigger.ALWAYS])
problem = get_problem(11, 1, 2)
problem.attach_logger(l2_temp)
algorithm = local_env[algorithm_name](budget=100, dim=2)
algorithm(problem)
except OverBudgetException:
pass
instances = self.test_instances if test else self.training_instances
aucs = []
for dim in self.dims:
for idx in instances:
budget = self.budget_factor * dim
f_new = ioh.problem.ManyAffine(xopt=np.array(self.opt_locs.iloc[idx])[:dim], weights=np.array(self.weights.iloc[idx]), instances=np.array(self.iids.iloc[idx], dtype=int), n_variables=dim)
f_new.set_id(100)
f_new.set_instance(idx)
l2 = aoc_logger(budget, upper=100.0, triggers=[ioh_logger.trigger.ALWAYS])
if test:
l1 = ioh.logger.Analyzer(root=ioh_dir, folder_name=algorithm_name, algorithm_name=algorithm_name)
combined_logger = ioh.logger.Combine([l1, l2])
f_new.attach_logger(combined_logger)
else:
f_new.attach_logger(l2)
try:
algorithm = local_env[algorithm_name](budget=budget, dim=dim)
algorithm(f_new)
except OverBudgetException:
aucs.append(0)
break
aucs.append(correct_aoc(f_new, l2, budget))
l2.reset(f_new)
if test:
l1.reset(f_new)
f_new.reset()
auc_mean = np.mean(aucs)
auc_std = np.std(aucs)
solution.add_metadata('aucs', aucs)
solution.set_scores(auc_mean, f'The algorithm {algorithm_name} scored {auc_mean:.3f} on AOCC (higher is better, 1.0 is the best).')
return solution
def test(self, solution: Solution, ioh_dir=''):
"""
Runs the solution on test instances and returns the fitness score.
"""
return self.evaluate(solution, True, ioh_dir)
def to_dict(self):
"""
Converts the problem to a dictionary.
"""
return {'name': self.name, 'dims': self.dims, 'training_instances': self.training_instances, 'test_instances': self.test_instances, 'budget_factor': self.budget_factor}
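For context on how `evaluate` above is driven, the following sketch wires a candidate algorithm (the random search from the example prompt) into `MA_BBOB` via the `Solution` class defined elsewhere in this package. It is a minimal sketch only: it assumes the package's bundled `mabbob` CSV files and the `ioh`/`pandas` dependencies are installed, and the small `dims`/`budget_factor` values are illustrative.
```python
from iohblade.problems.mabbob import MA_BBOB
from iohblade.solution import Solution

# Candidate algorithm in exactly the form the prompt asks for
# (the random search from the example prompt above).
CANDIDATE_CODE = """
import numpy as np

class RandomSearch:
    def __init__(self, budget=10000, dim=10):
        self.budget = budget
        self.dim = dim

    def __call__(self, func):
        self.f_opt = np.inf
        self.x_opt = None
        for _ in range(self.budget):
            x = np.random.uniform(func.bounds.lb, func.bounds.ub)
            f = func(x)
            if f < self.f_opt:
                self.f_opt, self.x_opt = f, x
        return self.f_opt, self.x_opt
"""

problem = MA_BBOB(dims=[2], budget_factor=100)          # tiny budget for a quick check
solution = Solution(code=CANDIDATE_CODE, name="RandomSearch")
solution = problem.evaluate(solution)                    # runs all training instances
print(solution.fitness, solution.feedback)               # mean AOCC and textual feedback
```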
|
class MA_BBOB(Problem):
'''
Problem class for evaluating optimization algorithms on the MA-BBOB benchmark.
'''
def __init__(self, logger=None, training_instances=None, test_instances=None, name='MA_BBOB', eval_timeout=60, dims=[2, 5], budget_factor=2000, dependencies=None, imports=None):
'''
Initializes the MA-BBOB problem instance.
Args:
logger (RunLogger): The logger to use for logging.
training_instances (list): The indices of training instances to use.
test_instances (list): The indices of test instances to use.
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
dims (list): The dimensionalities of the problem instances to run on.
budget_factor (int): The factor to multiply the dimensionality with to get the budget.
'''
pass
def get_prompt(self):
'''
Returns the problem description and answer format.
'''
pass
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
'''
Evaluates a solution on the MA-BBOB benchmark using AOCC.
'''
pass
def test(self, solution: Solution, ioh_dir=''):
'''
Runs the solution on test instances and returns the fitness score.
'''
pass
def to_dict(self):
'''
Converts the problem to a dictionary.
'''
pass
| 6
| 6
| 39
| 3
| 30
| 6
| 3
| 0.23
| 1
| 6
| 3
| 0
| 5
| 12
| 5
| 34
| 203
| 20
| 151
| 48
| 134
| 34
| 77
| 37
| 71
| 8
| 5
| 3
| 16
|
328,143
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/photonics.py
|
iohblade.problems.photonics.Photonics
|
from ioh import get_problem
from ..solution import Solution
from .photonic_instances import algorithmic_insights, get_photonic_instance, problem_descriptions
import ioh
from ioh import logger as ioh_logger
import numpy as np
from ..problem import BASE_DEPENDENCIES, Problem
from ..utils import OverBudgetException, aoc_logger, correct_aoc
class Photonics(Problem):
"""
Problem class for evaluating optimization algorithms on different real-world photonics applications.
"""
def __init__(self, logger, problem_type='bragg', test_instances=None, name='Bragg', eval_timeout=3600, budget_factor=500, seeds=5, dependencies=None, imports=None):
"""
Initializes the Photonics problem instance.
Args:
logger (RunLogger): The logger to use for logging.
problem_type (str): The name of the problem instance, can be one of: "bragg", "ellipsometry" or "photovoltaic".
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
budget_factor (int): The factor to multiply the dimensionality with to get the budget.
seeds (int): Number of random runs.
"""
if dependencies is None:
dependencies = ['ioh==0.3.19', 'pandas==2.2.3']
if imports is None:
imports = 'import numpy as np\nimport ioh\n'
if problem_type not in ['bragg', 'ellipsometry', 'photovoltaic']:
raise Exception("problem_type should be either 'bragg', 'ellipsometry' or 'photovoltaic'.")
self.problem_type = problem_type
self.problem = get_photonic_instance(self.problem_type)
self.func_name = '__call__'
self.init_inputs = ['budget', 'dim']
self.func_inputs = ['func']
self.func_outputs = ['f_opt', 'x_opt']
super().__init__(logger, [self.problem], [self.problem], name, eval_timeout, dependencies)
self.budget_factor = budget_factor
self.description_prompt = problem_descriptions[self.problem_type]
self.extra_prompt = algorithmic_insights[self.problem_type]
self.seeds = seeds
self.task_prompt = '\nYou are a Python developer and AI and physics researcher.\nYour task is to develop a novel heuristic optimization algorithm for photonic optimization problems.\nThe code should contain an `__init__(self, budget, dim)` function with optional additional arguments and the function `def __call__(self, func)`, which should optimize the black box function `func` using `self.budget` function evaluations.\nThe func() can only be called as many times as the budget allows, not more. \n'
self.example_prompt = '\nAn example of such code (a simple random search), is as follows:\n```python\nimport numpy as np\n\nclass RandomSearch:\n def __init__(self, budget=10000, dim=10):\n self.budget = budget\n self.dim = dim\n\n def __call__(self, func):\n self.f_opt = np.inf\n self.x_opt = None\n for i in range(self.budget):\n x = np.random.uniform(func.bounds.lb, func.bounds.ub)\n \n f = func(x)\n if f < self.f_opt:\n self.f_opt = f\n self.x_opt = x\n \n return self.f_opt, self.x_opt\n```\n '
self.format_prompt = '\nGive an excellent and novel heuristic algorithm to solve this task and also give it a one-line description, describing the main idea. Give the response in the format:\n# Description: <short-description>\n# Code: \n```python\n<code>\n```\n'
def get_prompt(self):
"""
Returns the problem description and answer format.
"""
return self.task_prompt + self.description_prompt + self.example_prompt + self.extra_prompt + self.format_prompt
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
"""
Evaluates a solution on the different problems.
"""
if self.problem_type == 'bragg':
auc_lower = 0.1648
auc_upper = 1.0
elif self.problem_type == 'ellipsometry':
auc_lower = 1e-08
auc_upper = 40.0
elif self.problem_type == 'photovoltaic':
auc_lower = 0.1
auc_upper = 1.0
auc_mean = 0
auc_std = 0
dim = self.problem.meta_data.n_variables
code = solution.code
algorithm_name = solution.name
safe_globals = {'np': np}
local_env = {}
exec(code, safe_globals, local_env)
algorithm = None
try:
l2_temp = aoc_logger(100, upper=100.0, triggers=[ioh_logger.trigger.ALWAYS])
problem = get_problem(11, 1, 2)
problem.attach_logger(l2_temp)
algorithm = local_env[algorithm_name](budget=100, dim=2)
algorithm(problem)
except OverBudgetException:
pass
aucs = []
budget = self.budget_factor * dim
l2 = aoc_logger(budget, upper=100.0, triggers=[ioh_logger.trigger.ALWAYS])
if test:
l1 = ioh.logger.Analyzer(root=ioh_dir, folder_name=algorithm_name, algorithm_name=algorithm_name)
combined_logger = ioh.logger.Combine([l1, l2])
self.problem.attach_logger(combined_logger)
else:
self.problem.attach_logger(l2)
for seed in range(self.seeds):
try:
algorithm = local_env[algorithm_name](budget=budget, dim=dim)
algorithm(self.problem)
except OverBudgetException:
aucs.append(0)
break
aucs.append(correct_aoc(self.problem, l2, budget))
l2.reset(self.problem)
if test:
l1.reset(self.problem)
self.problem.reset()
auc_mean = np.mean(aucs)
auc_std = np.std(aucs)
solution.add_metadata('aucs', aucs)
solution.set_scores(auc_mean, f'The algorithm {algorithm_name} scored {auc_mean:.3f} on AOCC (higher is better, 1.0 is the best).')
return solution
def test(self, solution: Solution, ioh_dir=''):
"""
Runs the solution on test instances and returns the fitness score.
"""
return self.evaluate(solution, True, ioh_dir)
def to_dict(self):
"""
Converts the problem to a dictionary.
"""
return {'name': self.name, 'problem_type': self.problem_type, 'budget_factor': self.budget_factor}
|
class Photonics(Problem):
'''
Problem class for evaluating optimization algorithms on different real-world photonics applications.
'''
def __init__(self, logger, problem_type='bragg', test_instances=None, name='Bragg', eval_timeout=3600, budget_factor=500, seeds=5, dependencies=None, imports=None):
'''
Initializes the Photonics problem instance.
Args:
logger (RunLogger): The logger to use for logging.
problem_type (str): The name of the problem instance, can be one of: "bragg", "ellipsometry" or "photovoltaic".
name (str): The name of the problem.
eval_timeout (int): The evaluation timeout in seconds.
budget_factor (int): The factor to multiply the dimensionality with to get the budget.
seeds (int): Number of random runs.
'''
pass
def get_prompt(self):
'''
Returns the problem description and answer format.
'''
pass
def evaluate(self, solution: Solution, test=False, ioh_dir=''):
'''
Evaluates a solution on the different problems.
'''
pass
def test(self, solution: Solution, ioh_dir=''):
'''
Runs the solution on test instances and returns the fitness score.
'''
pass
def to_dict(self):
'''
Converts the problem to a dictionary.
'''
pass
| 6
| 6
| 37
| 3
| 28
| 6
| 3
| 0.22
| 1
| 5
| 3
| 0
| 5
| 13
| 5
| 34
| 195
| 22
| 143
| 47
| 126
| 31
| 78
| 36
| 72
| 9
| 5
| 2
| 16
|
328,144
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/photonics_instances/problems/brag_mirror.py
|
iohblade.problems.photonics_instances.problems.brag_mirror.brag_mirror
|
import PyMoosh as pm
import numpy as np
from .photonic_problem import photonic_problem
class brag_mirror(photonic_problem):
def __init__(self, nb_layers, target_wl, mat_env, mat1, mat2):
"""
Args:
mat_env (float): environment ref. index
mat1 (float): material 1 ref. index
mat2 (float): material 2 ref. index
"""
super().__init__()
self.n = nb_layers
self.nb_layers = nb_layers
self.target_wl = target_wl
self.mat_env = mat_env
self.mat1 = mat1
self.mat2 = mat2
self.min_thick = 0
self.max_thick = target_wl / (2 * mat1)
self.lb = np.array([self.min_thick] * self.n)
self.ub = np.array([self.max_thick] * self.n)
def setup_structure(self, x):
"""helper to create pymoosh structure object, alternating 2 materials
Args:
x (list): list of thicknesses, top layer first
Returns:
PyMoosh.structure: multi-layer structure object
"""
x = list(x)
materials = [self.mat_env ** 2, self.mat1 ** 2, self.mat2 ** 2]
stack = [0] + [2, 1] * (self.n // 2) + [2]
thicknesses = [0.0] + x + [0.0]
structure = pm.Structure(materials, stack, np.array(thicknesses), verbose=False)
return structure
def __call__(self, x):
"""cost function: maximize reflectance of a layer-stack
Args:
x (list): thicknesses of all the layers, starting with the upper one.
Returns:
float: 1 - Reflectivity at target wavelength
"""
x = np.clip(x, self.lb, self.ub)
structure = self.setup_structure(x)
_, R = pm.coefficient_I(structure, self.target_wl, 0.0, 0)
cost = 1 - R
return cost
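A short usage sketch for the class above: the layer count, target wavelength and refractive indices are illustrative values, and a working PyMoosh installation is assumed.
```python
import numpy as np

from iohblade.problems.photonics_instances.problems.brag_mirror import brag_mirror

# 20 alternating layers of n1=1.4 / n2=1.8 in air, targeting 600 nm (illustrative values)
problem = brag_mirror(nb_layers=20, target_wl=600.0, mat_env=1.0, mat1=1.4, mat2=1.8)

rng = np.random.default_rng(0)
x = rng.uniform(problem.lb, problem.ub)   # one thickness per layer, top layer first
print("1 - R at 600 nm:", problem(x))     # lower is better, 0 means perfect reflection
```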
|
class brag_mirror(photonic_problem):
def __init__(self, nb_layers, target_wl, mat_env, mat1, mat2):
'''
Args:
mat_env (float): environment ref. index
mat1 (float): material 1 ref. index
mat2 (float): material 2 ref. index
'''
pass
def setup_structure(self, x):
'''helper to create pymoosh structure object, alternating 2 materials
Args:
x (list): list of thicknesses, top layer first
Returns:
PyMoosh.structure: multi-layer structure object
'''
pass
def __call__(self, x):
'''cost function: maximize reflectance of a layer-stack
Args:
x (list): thicknesses of all the layers, starting with the upper one.
Returns:
float: 1 - Reflectivity at target wavelength
'''
pass
| 4
| 3
| 18
| 2
| 10
| 7
| 1
| 0.73
| 1
| 2
| 0
| 0
| 3
| 10
| 3
| 6
| 58
| 7
| 30
| 21
| 26
| 22
| 26
| 21
| 22
| 1
| 1
| 0
| 3
|
328,145
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/photonics_instances/problems/ellipsometry_inverse.py
|
iohblade.problems.photonics_instances.problems.ellipsometry_inverse.ellipsometry
|
from .photonic_problem import photonic_problem
import numpy as np
import PyMoosh as pm
class ellipsometry(photonic_problem):
def __init__(self, mat_env, mat_substrate, nb_layers, min_thick, max_thick, min_eps, max_eps, wavelengths, angle):
self.n = nb_layers * 2
self.mat_env = mat_env
self.mat_substrate = mat_substrate
self.nb_layers = nb_layers
self.min_thick = min_thick
self.max_thick = max_thick
self.min_eps = min_eps
self.max_eps = max_eps
self.wavelengths = wavelengths
self.angle = angle
self.lb = np.array([self.min_eps] * self.nb_layers + [self.min_thick] * self.nb_layers)
self.ub = np.array([self.max_eps] * self.nb_layers + [self.max_thick] * self.nb_layers)
x_ref = np.random.uniform(self.lb, self.ub, self.n)
struct_ref = self.setup_structure(x_ref)
self.ref_ellipso = np.zeros(len(self.wavelengths), dtype=complex)
for i, wav in enumerate(self.wavelengths):
r_s, _, _, _ = pm.coefficient(struct_ref, wav, self.angle, 0)
r_p, _, _, _ = pm.coefficient(struct_ref, wav, self.angle, 1)
self.ref_ellipso[i] = r_p / r_s
def setup_structure(self, x):
"""helper to create pymoosh structure object with user-defined
thicknesses and materials
Args:
X (list): long list of material permittivities and thicknesses
(first half / second half)
mat_env (float, str): material of environment (above stack)
mat_substrate (float, str): material of substrate (below stack)
Returns:
PyMoosh.structure: multi-layer structure object
"""
x = list(x)
materials = [self.mat_env] + [_m for _m in x[:self.nb_layers]] + [self.mat_substrate]
stack = [i for i in range(self.nb_layers + 2)]
thicknesses = np.array([0] + [_t for _t in x[self.nb_layers:]] + [0])
structure = pm.Structure(materials, stack, np.array(thicknesses), verbose=False)
return structure
def __call__(self, x):
"""cost function: MAE between simulated and measured (ref) spectrum
Args:
x (list): materials (first half) & thicknesses (second half) of all
layers
ref_ellipso (np.ndarray): reference spectrum at `eval_wls`
wavelengths (np.ndarray): wavelengths to evaluate
Returns:
float: sum of absolute differences between the simulated and reference ellipsometric spectra (lower is better)
"""
x = np.clip(x, self.lb, self.ub)
structure = self.setup_structure(x)
ellips = np.zeros(len(self.wavelengths), dtype=complex)
for i, wav in enumerate(self.wavelengths):
r_s, _, _, _ = pm.coefficient(structure, wav, self.angle, 0)
r_p, _, _, _ = pm.coefficient(structure, wav, self.angle, 1)
ellips[i] = r_p / r_s
cost = np.sum(np.abs(ellips - self.ref_ellipso))
return cost
|
class ellipsometry(photonic_problem):
def __init__(self, mat_env, mat_substrate, nb_layers, min_thick, max_thick, min_eps, max_eps, wavelengths, angle):
pass
def setup_structure(self, x):
'''helper to create pymoosh structure object with user-defined
thicknesses and materials
Args:
X (list): long list of material permittivities and thicknesses
(first half / second half)
mat_env (float, str): material of environment (above stack)
mat_substrate (float, str): material of substrate (below stack)
Returns:
PyMoosh.structure: multi-layer structure object
'''
pass
def __call__(self, x):
'''cost function: MAE between simulated and measured (ref) spectrum
Args:
x (list): materials (first half) & thicknesses (second half) of all
layers
ref_ellipso (np.ndarray): reference spectrum at `eval_wls`
wavelengths (np.ndarray): wavelengths to evaluate
Returns:
float: sum of absolute differences between the simulated and reference ellipsometric spectra (lower is better)
'''
pass
| 4
| 2
| 27
| 1
| 18
| 8
| 2
| 0.44
| 1
| 4
| 0
| 0
| 3
| 13
| 3
| 6
| 85
| 6
| 55
| 43
| 40
| 24
| 38
| 32
| 34
| 2
| 1
| 1
| 5
|
328,146
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/photonics_instances/problems/grating2D.py
|
iohblade.problems.photonics_instances.problems.grating2D.grating2D
|
from .photonic_problem import photonic_problem
import numpy as np
from scipy.linalg import toeplitz
class grating2D(photonic_problem):
def __init__(self, nb_layers, min_w, max_w, min_thick, max_thick, min_p, max_p):
super().__init__()
self.nb_layers = nb_layers
self.n = 3 * nb_layers
self.min_w = min_w
self.max_w = max_w
self.min_thick = min_thick
self.max_thick = max_thick
self.min_p = min_p
self.max_p = max_p
self.lb = np.array([min_w] * nb_layers + [min_thick] * nb_layers + [min_p] * nb_layers)
self.ub = np.array([max_w] * nb_layers + [max_thick] * nb_layers + [max_p] * nb_layers)
def cascade(self, T, U):
n = int(T.shape[1] / 2)
J = np.linalg.inv(np.eye(n) - np.matmul(U[0:n, 0:n], T[n:2 * n, n:2 * n]))
K = np.linalg.inv(np.eye(n) - np.matmul(T[n:2 * n, n:2 * n], U[0:n, 0:n]))
S = np.block([[T[0:n, 0:n] + np.matmul(np.matmul(np.matmul(T[0:n, n:2 * n], J), U[0:n, 0:n]), T[n:2 * n, 0:n]), np.matmul(np.matmul(T[0:n, n:2 * n], J), U[0:n, n:2 * n])], [np.matmul(np.matmul(U[n:2 * n, 0:n], K), T[n:2 * n, 0:n]), U[n:2 * n, n:2 * n] + np.matmul(np.matmul(np.matmul(U[n:2 * n, 0:n], K), T[n:2 * n, n:2 * n]), U[0:n, n:2 * n])]])
return S
def c_bas(self, A, V, h):
n = int(A.shape[1] / 2)
D = np.diag(np.exp(1j * V * h))
S = np.block([[A[0:n, 0:n], np.matmul(A[0:n, n:2 * n], D)], [np.matmul(D, A[n:2 * n, 0:n]), np.matmul(np.matmul(D, A[n:2 * n, n:2 * n]), D)]])
return S
def marche(self, a, b, p, n, x):
l = np.zeros(n, dtype=np.complex128)
m = np.zeros(n, dtype=np.complex128)
tmp = 1 / (2 * np.pi * np.arange(1, n)) * (np.exp(-2 * 1j * np.pi * p * np.arange(1, n)) - 1) * np.exp(-2 * 1j * np.pi * np.arange(1, n) * x)
l[1:n] = 1j * (a - b) * tmp
l[0] = p * a + (1 - p) * b
m[0] = l[0]
m[1:n] = 1j * (b - a) * np.conj(tmp)
T = toeplitz(l, m)
return T
def creneau(self, k0, a0, pol, e1, e2, a, n, x0):
nmod = int(n / 2)
alpha = np.diag(a0 + 2 * np.pi * np.arange(-nmod, nmod + 1))
if pol == 0:
M = alpha * alpha - k0 * k0 * self.marche(e1, e2, a, n, x0)
L, E = np.linalg.eig(M)
L = np.sqrt(-L + 0j)
L = (1 - 2 * (np.imag(L) < -1e-15)) * L
P = np.block([[E], [np.matmul(E, np.diag(L))]])
else:
U = self.marche(1 / e1, 1 / e2, a, n, x0)
T = np.linalg.inv(U)
M = np.matmul(np.matmul(np.matmul(T, alpha), np.linalg.inv(self.marche(e1, e2, a, n, x0))), alpha) - k0 * k0 * T
L, E = np.linalg.eig(M)
L = np.sqrt(-L + 0j)
L = (1 - 2 * (np.imag(L) < -1e-15)) * L
P = np.block([[E], [np.matmul(np.matmul(U, E), np.diag(L))]])
return (P, L)
def homogene(self, k0, a0, pol, epsilon, n):
nmod = int(n / 2)
valp = np.sqrt(epsilon * k0 * k0 - (a0 + 2 * np.pi * np.arange(-nmod, nmod + 1)) ** 2 + 0j)
valp = valp * (1 - 2 * (valp < 0)) * (pol / epsilon + (1 - pol))
P = np.block([[np.eye(n)], [np.diag(valp)]])
return (P, valp)
def interface(self, P, Q):
n = int(P.shape[1])
S = np.matmul(np.linalg.inv(np.block([[P[0:n, 0:n], -Q[0:n, 0:n]], [P[n:2 * n, 0:n], Q[n:2 * n, 0:n]]])), np.block([[-P[0:n, 0:n], Q[0:n, 0:n]], [P[n:2 * n, 0:n], Q[n:2 * n, 0:n]]]))
return S
def __call__(self, x):
lam_blue = 449.5897
pol = 1
d = 600.521475
nmod = 25
e2 = 2.4336
n = 2 * nmod + 1
n_motifs = int(x.size / 3)
x = x / d
h = x[n_motifs:2 * n_motifs]
x0 = x[2 * n_motifs:3 * n_motifs]
a = x[0:n_motifs]
spacers = np.zeros(a.size)
l = lam_blue / d
k0 = 2 * np.pi / l
P, V = self.homogene(k0, 0, pol, 1, n)
S = np.block([[np.zeros([n, n]), np.eye(n, dtype=np.complex128)], [np.eye(n), np.zeros([n, n])]])
for j in range(0, n_motifs):
Pc, Vc = self.creneau(k0, 0, pol, e2, 1, a[j], n, x0[j])
S = self.cascade(S, self.interface(P, Pc))
S = self.c_bas(S, Vc, h[j])
S = self.cascade(S, self.interface(Pc, P))
S = self.c_bas(S, V, spacers[j])
Pc, Vc = self.homogene(k0, 0, pol, e2, n)
S = self.cascade(S, self.interface(P, Pc))
R = np.zeros(3, dtype=float)
for j in range(-1, 2):
R[j] = abs(S[j + nmod, nmod]) ** 2 * np.real(V[j + nmod]) / k0
cost = 1 - (R[-1] + R[1]) / 2 + R[0] / 2
return cost
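As with the other photonic problems, the cost function above can be probed directly. The sketch below uses illustrative bounds (in nanometres) and assumes only numpy/scipy, which the class already depends on.
```python
import numpy as np

from iohblade.problems.photonics_instances.problems.grating2D import grating2D

# 3 grating layers; the width/thickness/position bounds are illustrative only
problem = grating2D(nb_layers=3, min_w=0.0, max_w=600.0,
                    min_thick=0.0, max_thick=200.0, min_p=0.0, max_p=600.0)

rng = np.random.default_rng(0)
x = rng.uniform(problem.lb, problem.ub)  # flat vector: widths | thicknesses | positions
print("cost:", problem(x))               # lower is better
```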
|
class grating2D(photonic_problem):
def __init__(self, nb_layers, min_w, max_w, min_thick, max_thick, min_p, max_p):
pass
def cascade(self, T, U):
pass
def c_bas(self, A, V, h):
pass
def marche(self, a, b, p, n, x):
pass
def creneau(self, k0, a0, pol, e1, e2, a, n, x0):
pass
def homogene(self, k0, a0, pol, epsilon, n):
pass
def interface(self, P, Q):
pass
def __call__(self, x):
pass
| 9
| 0
| 20
| 0
| 20
| 0
| 1
| 0.03
| 1
| 4
| 0
| 0
| 8
| 10
| 8
| 11
| 172
| 10
| 158
| 61
| 149
| 4
| 91
| 61
| 82
| 3
| 1
| 1
| 11
|
328,147
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/photonics_instances/problems/photonic_problem.py
|
iohblade.problems.photonics_instances.problems.photonic_problem.photonic_problem
|
class photonic_problem:
def __init__(self):
self.n = None
def setup_structure(self):
pass
def __call__(self):
pass
|
class photonic_problem:
def __init__(self):
pass
def setup_structure(self):
pass
def __call__(self):
pass
| 4
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 0
| 0
| 0
| 5
| 3
| 1
| 3
| 3
| 9
| 2
| 7
| 5
| 3
| 0
| 7
| 5
| 3
| 1
| 0
| 0
| 3
|
328,148
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/photonics_instances/problems/plasmonic_nanostructure.py
|
iohblade.problems.photonics_instances.problems.plasmonic_nanostructure.plasmonic_nanostructure
|
import warnings
from pyGDM2 import linear, structures, tools, visu
from .photonic_problem import photonic_problem
import numpy as np
class plasmonic_nanostructure(photonic_problem):
def __init__(self, element_sim, method, verbose=0):
"""
Args:
mat_env (float): environment ref. index
mat_substrate (str): substrate material
nb_layers (int): number of layers
min_thick (float): minimum thickness
max_thick (float): maximum thickness
min_eps (float): minimum permittivity
max_eps (float): maximum permittivity
wavelengths (np.array): wavelengths
angle (float): angle
"""
super().__init__()
self.n = 20
self.element_sim = element_sim
self.method = method
self.verbose = verbose
def setup_structure(self, x):
"""helper to create structure, from positions of gold elements
each position in units of discretization steps
Args:
XY_coords_blocks (list): list gold element positions (x1,x2,x3,...,y1,y2,....)
element_sim (`pyGDM2.core.simulation`): single element simulation
Returns:
pyGDM2.structures.struct: instance of nano-geometry class
"""
x_new = x * 5.0
n = len(x_new) // 2
x_list = x_new[:n]
y_list = x_new[n:]
pos = np.transpose([x_list, y_list])
struct_list = []
for _p in pos:
x_new, y = _p
_s = self.element_sim.struct.copy()
DX = _s.geometry[:, 0].max() - _s.geometry[:, 0].min() + _s.step
DY = _s.geometry[:, 1].max() - _s.geometry[:, 1].min() + _s.step
_s = structures.shift(_s, np.array([DX * int(x_new), DY * int(y), 0.0]))
if np.abs(x_new) >= 1 or np.abs(y) >= 1:
struct_list.append(_s)
if len(struct_list) == 0:
struct_list.append(_s + [DX, DY, 0])
full_struct = structures.combine_geometries(struct_list, step=self.element_sim.struct.step)
full_sim = self.element_sim.copy()
full_sim.struct = full_struct
return full_sim
def __call__(self, x):
"""cost function: maximize scattering towards small solid angle
Args:
x (list): optimization params --> pos of elements
element_sim (`pyGDM2.core.simulation`): single element simulation
method (str): pyGDM2 solver method
Returns:
float: negative ratio of far-field intensity scattered into the target angular window to the total scattered intensity (lower is better)
"""
sim = self.setup_structure(x)
sim.scatter(method=self.method, verbose=self.verbose)
warnings.filterwarnings('ignore')
Nteta, Nphi = (18, 32)
NtetaW, NphiW = (4, 5)
Delta_angle = np.pi * 10 / 180
I_full = linear.farfield(sim, field_index=0, return_value='int_Etot', phimin=0, phimax=2 * np.pi, tetamin=0, tetamax=np.pi / 2, Nteta=Nteta, Nphi=Nphi)
I_window = linear.farfield(sim, field_index=0, return_value='int_Etot', phimin=-np.pi / 6, phimax=np.pi / 6 + np.pi / 3 / NphiW, tetamin=np.pi / 2 - Delta_angle, tetamax=np.pi / 2 + Delta_angle, Nteta=NtetaW, Nphi=NphiW)
cost = -1 * (I_window / I_full)
if self.verbose:
print('cost: {:.5f}'.format(cost))
return cost
|
class plasmonic_nanostructure(photonic_problem):
def __init__(self, element_sim, method, verbose=0):
'''
Args:
element_sim (`pyGDM2.core.simulation`): simulation of a single gold element, used as building block
method (str): pyGDM2 solver method
verbose (int): verbosity level (0 = silent)
'''
pass
def setup_structure(self, x):
'''helper to create structure, from positions of gold elements
each position in units of discretization steps
Args:
XY_coords_blocks (list): list gold element positions (x1,x2,x3,...,y1,y2,....)
element_sim (`pyGDM2.core.simulation`): single element simulation
Returns:
pyGDM2.structures.struct: instance of nano-geometry class
'''
pass
def __call__(self, x):
'''cost function: maximize scattering towards small solid angle
Args:
x (list): optimization params --> pos of elements
element_sim (`pyGDM2.core.simulation`): single element simulation
method (str): pyGDM2 solver method
Returns:
float: negative ratio of far-field intensity scattered into the target angular window to the total scattered intensity (lower is better)
'''
pass
| 4
| 3
| 35
| 4
| 21
| 11
| 2
| 0.56
| 1
| 2
| 0
| 0
| 3
| 4
| 3
| 6
| 110
| 14
| 63
| 28
| 59
| 35
| 41
| 28
| 37
| 4
| 1
| 2
| 7
|
328,149
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/problems/photonics_instances/problems/sophisticated_antireflection_design.py
|
iohblade.problems.photonics_instances.problems.sophisticated_antireflection_design.sophisticated_antireflection_design
|
import PyMoosh as pm
from .photonic_problem import photonic_problem
import numpy as np
class sophisticated_antireflection_design(photonic_problem):
def __init__(self, nb_layers, min_thick, max_thick, wl_min, wl_max, thick_aSi=30000, number_pts=300, pola=0, incidence=0):
super().__init__()
self.n = nb_layers
self.nb_layers = nb_layers
self.min_thick = min_thick
self.max_thick = max_thick
self.lb = np.array([self.min_thick] * self.n)
self.ub = np.array([self.max_thick] * self.n)
self.wl_min = wl_min
self.wl_max = wl_max
self.thick_aSi = thick_aSi
self.number_pts = number_pts
self.pola = pola
self.incidence = incidence
def setup_structure(self, x):
"""helper to create pymoosh structure object, alternating 2 materials
the substrate is amorphous silicon and the light is incident through air (n=1).
The structure is made of alternating layers of eps=2 and eps=3.
Args:
X (list): long list of thicknesses
Returns:
PyMoosh.structure: multi-layer structure object
"""
x = list(x)
materials = [1.0, 2.0, 3.0, 'SiA']
stack = [0] + [1, 2] * (self.n // 2) + [3]
thicknesses = [0] + x + [self.thick_aSi]
structure = pm.Structure(materials, stack, np.array(thicknesses), verbose=False)
return structure
def __call__(self, x):
"""cost function: (negative) efficiency of solar cell
Args:
x (list): materials (first half) & thicknesses (second half) of all
layers
wl_min, wl_max (float): spectral limits of efficiency evaluation
Returns:
float: 1 - solar-cell efficiency over [wl_min, wl_max] (lower is better)
"""
x = np.clip(x, self.lb, self.ub)
structure = self.setup_structure(x)
active_lay = len(x) + 1
eff, _, _, _, _, _ = pm.photo(structure, self.incidence, self.pola, self.wl_min, self.wl_max, active_lay, self.number_pts)
cost = 1 - eff
return cost
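A matching usage sketch for the anti-reflection problem: the thickness bounds and wavelength window are illustrative, and PyMoosh must be installed and provide the 'SiA' substrate material referenced by the class.
```python
import numpy as np

from iohblade.problems.photonics_instances.problems.sophisticated_antireflection_design import (
    sophisticated_antireflection_design,
)

# 10-layer anti-reflective coating evaluated over 375-750 nm (illustrative values)
problem = sophisticated_antireflection_design(nb_layers=10, min_thick=30.0, max_thick=250.0,
                                              wl_min=375.0, wl_max=750.0)

rng = np.random.default_rng(0)
x = rng.uniform(problem.lb, problem.ub)  # one thickness per coating layer
print("1 - efficiency:", problem(x))     # lower is better
```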
|
class sophisticated_antireflection_design(photonic_problem):
def __init__(self, nb_layers, min_thick, max_thick, wl_min, wl_max, thick_aSi=30000, number_pts=300, pola=0, incidence=0):
pass
def setup_structure(self, x):
'''helper to create pymoosh structure object, alternating 2 materials
the substrate is amorphous silicon and the light is incident through air (n=1).
The structure is made of alternating layers of eps=2 and eps=3.
Args:
X (list): long list of thicknesses
Returns:
PyMoosh.structure: multi-layer structure object
'''
pass
def __call__(self, x):
'''cost function: (negative) efficiency of solar cell
Args:
x (list): materials (first half) & thicknesses (second half) of all
layers
wl_min, wl_max (float): spectral limits of efficiency evaluation
Returns:
float: 1 - solar-cell efficiency over [wl_min, wl_max] (lower is better)
'''
pass
| 4
| 2
| 24
| 2
| 16
| 7
| 1
| 0.42
| 1
| 2
| 0
| 0
| 3
| 12
| 3
| 6
| 75
| 7
| 48
| 35
| 33
| 20
| 29
| 24
| 25
| 1
| 1
| 0
| 3
|
328,150
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/solution.py
|
iohblade.solution.Solution
|
import numpy as np
import json
import uuid
class Solution:
"""
Represents a candidate solution (an individual) in the evolutionary algorithm.
Each individual has properties such as code, fitness, feedback, and metadata for additional information.
"""
def __init__(self, code='', name='', description='', configspace=None, generation=0, parent_ids=[], operator=None, task_prompt=''):
"""
Initializes an individual with optional attributes.
Args:
code (str): The code of the individual.
name (str): The name of the individual (typically the class name in the code).
description (str): A short description of the individual (e.g., algorithm's purpose or behavior).
configspace (Optional[ConfigSpace]): Optional configuration space for HPO.
generation (int): The generation this individual belongs to.
parent_ids (list): UUID of the parent individuals in a list.
operator (str): Optional identifier of the LLM operation that created this individual.
task_prompt (str): The task prompt used to generate this solution.
"""
self.id = str(uuid.uuid4())
self.code = code
self.name = name
self.description = description
self.configspace = configspace
self.generation = generation
self.fitness = -np.inf
self.feedback = ''
self.error = ''
self.parent_ids = parent_ids
self.metadata = {}
self.operator = operator
self.task_prompt = task_prompt
def __getstate__(self):
return self.to_dict()
def __setstate__(self, state):
self.__dict__.update(state)
if self.configspace == '':
self.configspace = None
def set_operator(self, operator):
"""
Sets the operator name that generated this individual.
Args:
operator (str): The name of the operator (for logging purposes).
"""
self.operator = operator
def add_metadata(self, key, value):
"""
Adds key-value pairs to the metadata dictionary.
Args:
key (str): The key for the metadata.
value: The value associated with the key.
"""
self.metadata[key] = value
def get_metadata(self, key):
"""
Get a metadata item from the dictionary.
Args:
key (str): The key for the metadata to obtain.
"""
return self.metadata[key] if key in self.metadata.keys() else None
def set_scores(self, fitness, feedback='', error=''):
self.fitness = fitness
self.feedback = feedback
self.error = error
return self
def get_summary(self):
"""
Returns a string summary of this solution's key attributes.
Returns:
str: A string representing the solution in a summary format.
"""
return f'{self.name}: {self.description} (Score: {self.fitness})'
def copy(self):
"""
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent.
Returns:
Individual: A new instance of Individual with the same attributes but a different ID.
"""
new_solution = Solution(code=self.code, name=self.name, description=self.description, configspace=self.configspace, generation=self.generation + 1, parent_ids=[self.id], operator=self.operator, task_prompt=self.task_prompt)
new_solution.metadata = self.metadata.copy()
return new_solution
def empty_copy(self):
"""
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent but without other fields.
Returns:
Individual: A new instance of Individual with the same attributes but a different ID.
"""
new_solution = Solution(code='', name='', description='', configspace=None, generation=self.generation + 1, parent_ids=[self.id], operator=self.operator)
return new_solution
def to_dict(self):
"""
Converts the individual to a dictionary.
Returns:
dict: A dictionary representation of the individual.
"""
try:
cs = self.configspace
cs = cs.to_serialized_dict()
except Exception:
cs = ''
return {'id': self.id, 'fitness': self.fitness, 'name': self.name, 'description': self.description, 'code': self.code, 'configspace': cs, 'generation': self.generation, 'feedback': self.feedback, 'error': self.error, 'parent_ids': self.parent_ids, 'operator': self.operator, 'metadata': self.metadata, 'task_prompt': self.task_prompt}
def from_dict(self, data):
"""
Updates the Solution instance from a dictionary.
Args:
data (dict): A dictionary representation of the individual.
Returns:
None
"""
configspace = data.get('configspace', None)
if isinstance(configspace, dict):
try:
configspace = ConfigSpace()
configspace.from_serialized_dict(data['configspace'])
except Exception as e:
print(f'Warning: Failed to deserialize configspace - {e}')
configspace = None
self.id = data.get('id')
self.fitness = data.get('fitness')
self.name = data.get('name')
self.description = data.get('description')
self.code = data.get('code')
self.configspace = configspace
self.generation = data.get('generation')
self.feedback = data.get('feedback')
self.error = data.get('error')
self.parent_ids = data.get('parent_ids', [])
self.operator = data.get('operator')
self.metadata = data.get('metadata', {})
def to_json(self):
"""
Converts the individual to a JSON string.
Returns:
str: A JSON string representation of the individual.
"""
return json.dumps(self.to_dict(), default=str, indent=4)
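Because `Solution` is the object handed between problems, operators and loggers, a brief standalone lifecycle sketch may help; everything used below is defined in the class above, and the code string and scores are dummy placeholders.
```python
from iohblade.solution import Solution

sol = Solution(code="class MyAlg: ...", name="MyAlg",
               description="Toy placeholder algorithm")
sol.set_scores(0.42, feedback="The algorithm MyAlg scored 0.420.")
sol.add_metadata("aucs", [0.40, 0.44])

print(sol.get_summary())         # "MyAlg: Toy placeholder algorithm (Score: 0.42)"

child = sol.copy()               # new id, generation + 1, parent_ids = [sol.id]
print(child.parent_ids == [sol.id], child.generation == sol.generation + 1)

print(sol.to_json())             # JSON snapshot suitable for the experiment logger
```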
|
class Solution:
'''
Represents a candidate solution (an individual) in the evolutionary algorithm.
Each individual has properties such as code, fitness, feedback, and metadata for additional information.
'''
def __init__(self, code='', name='', description='', configspace=None, generation=0, parent_ids=[], operator=None, task_prompt=''):
'''
Initializes an individual with optional attributes.
Args:
code (str): The code of the individual.
name (str): The name of the individual (typically the class name in the code).
description (str): A short description of the individual (e.g., algorithm's purpose or behavior).
configspace (Optional[ConfigSpace]): Optional configuration space for HPO.
generation (int): The generation this individual belongs to.
parent_ids (list): UUID of the parent individuals in a list.
operator (str): Optional identifier of the LLM operation that created this individual.
task_prompt (str): The task prompt used to generate this solution.
'''
pass
def __getstate__(self):
pass
def __setstate__(self, state):
pass
def set_operator(self, operator):
'''
Sets the operator name that generated this individual.
Args:
operator (str): The name of the operator (for logging purposes).
'''
pass
def add_metadata(self, key, value):
'''
Adds key-value pairs to the metadata dictionary.
Args:
key (str): The key for the metadata.
value: The value associated with the key.
'''
pass
def get_metadata(self, key):
'''
Get a metadata item from the dictionary.
Args:
key (str): The key for the metadata to obtain.
'''
pass
def set_scores(self, fitness, feedback='', error=''):
pass
def get_summary(self):
'''
Returns a string summary of this solution's key attributes.
Returns:
str: A string representing the solution in a summary format.
'''
pass
def copy(self):
'''
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent.
Returns:
Individual: A new instance of Individual with the same attributes but a different ID.
'''
pass
def empty_copy(self):
'''
Returns a copy of this solution, with a new unique ID and a reference to the current solution as its parent but without other fields.
Returns:
Individual: A new instance of Individual with the same attributes but a different ID.
'''
pass
def to_dict(self):
'''
Converts the individual to a dictionary.
Returns:
dict: A dictionary representation of the individual.
'''
pass
def from_dict(self, data):
'''
Updates the Solution instance from a dictionary.
Args:
data (dict): A dictionary representation of the individual.
Returns:
None
'''
pass
def to_json(self):
'''
Converts the individual to a JSON string.
Returns:
str: A JSON string representation of the individual.
'''
pass
| 14
| 11
| 16
| 1
| 9
| 6
| 1
| 0.71
| 0
| 3
| 0
| 0
| 10
| 12
| 10
| 10
| 172
| 22
| 91
| 37
| 71
| 65
| 61
| 26
| 50
| 3
| 0
| 2
| 14
|
328,151
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/utils.py
|
iohblade.utils.NoCodeException
|
class NoCodeException(Exception):
"""Could not extract generated code."""
pass
|
class NoCodeException(Exception):
'''Could not extract generated code.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 4
| 1
| 2
| 1
| 1
| 1
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
328,152
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/utils.py
|
iohblade.utils.OverBudgetException
|
class OverBudgetException(Exception):
"""The algorithm tried to do more evaluations than allowed."""
pass
|
class OverBudgetException(Exception):
'''The algorithm tried to do more evaluations than allowed.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 4
| 1
| 2
| 1
| 1
| 1
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
328,153
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/utils.py
|
iohblade.utils.ThresholdReachedException
|
class ThresholdReachedException(Exception):
"""The algorithm reached the lower threshold."""
pass
|
class ThresholdReachedException(Exception):
'''The algorithm reached the lower threshold.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 4
| 1
| 2
| 1
| 1
| 1
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
328,154
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/utils.py
|
iohblade.utils.TimeoutException
|
class TimeoutException(Exception):
"""Custom exception for handling timeouts."""
pass
|
class TimeoutException(Exception):
'''Custom exception for handling timeouts.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 4
| 1
| 2
| 1
| 1
| 1
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
328,155
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/utils.py
|
iohblade.utils.aoc_logger
|
import numpy as np
class aoc_logger(logger.AbstractLogger):
"""aoc_logger class implementing the logging module for ioh."""
def __init__(self, budget, lower=1e-08, upper=100000000.0, scale_log=True, stop_on_threshold=False, *args, **kwargs):
"""Initialize the logger.
Args:
budget (int): Evaluation budget for calculating aoc.
"""
super().__init__(*args, **kwargs)
self.aoc = 0
self.lower = lower
self.upper = upper
self.budget = budget
self.stop_on_threshold = stop_on_threshold
self.transform = (lambda x: np.log10(x)) if scale_log else (lambda x: x)
def __call__(self, log_info: LogInfo):
"""Subscalculate the aoc.
Args:
log_info (ioh.LogInfo): info about current values.
"""
if log_info.evaluations > self.budget:
raise OverBudgetException
if log_info.evaluations == self.budget:
return
if self.stop_on_threshold and abs(log_info.raw_y_best) < self.lower:
raise ThresholdReachedException
y_value = np.clip(log_info.raw_y_best, self.lower, self.upper)
self.aoc += (self.transform(y_value) - self.transform(self.lower)) / (self.transform(self.upper) - self.transform(self.lower))
def reset(self, func):
super().reset()
self.aoc = 0
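Mirroring how the problem classes in this package drive it, the sketch below attaches `aoc_logger` to an `ioh` problem, runs a throwaway random search, and converts the accumulated area into an AOCC score with `correct_aoc`; the BBOB function id, budget and search loop are illustrative, and `ioh` must be installed.
```python
import numpy as np
from ioh import get_problem, logger as ioh_logger

from iohblade.utils import OverBudgetException, aoc_logger, correct_aoc

budget = 1000
problem = get_problem(11, 1, 2)  # BBOB f11, instance 1, dimension 2
log = aoc_logger(budget, upper=100.0, triggers=[ioh_logger.trigger.ALWAYS])
problem.attach_logger(log)

try:
    for _ in range(budget):  # throwaway random search within the budget
        problem(np.random.uniform(-5, 5, size=2))
except OverBudgetException:
    pass  # raised by the logger if an algorithm exceeds the allowed evaluations

print("AOCC:", correct_aoc(problem, log, budget))  # 1.0 is best, 0.0 is worst
log.reset(problem)
problem.reset()
```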
|
class aoc_logger(logger.AbstractLogger):
'''aoc_logger class implementing the logging module for ioh.'''
def __init__(self, budget, lower=1e-08, upper=100000000.0, scale_log=True, stop_on_threshold=False, *args, **kwargs):
'''Initialize the logger.
Args:
budget (int): Evaluation budget for calculating aoc.
'''
pass
def __call__(self, log_info: LogInfo):
'''Incrementally calculate the aoc.
Args:
log_info (ioh.LogInfo): info about current values.
'''
pass
def reset(self, func):
pass
| 4
| 3
| 14
| 1
| 10
| 3
| 2
| 0.28
| 1
| 3
| 2
| 0
| 3
| 6
| 3
| 3
| 46
| 5
| 32
| 20
| 19
| 9
| 21
| 11
| 17
| 4
| 1
| 1
| 6
|
328,156
|
XAI-liacs/BLADE
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/XAI-liacs_BLADE/iohblade/utils.py
|
iohblade.utils.budget_logger
|
class budget_logger(logger.AbstractLogger):
"""budget_logger class implementing the logging module for ioh."""
def __init__(self, budget, *args, **kwargs):
"""Initialize the logger.
Args:
budget (int): Evaluation budget for calculating aoc.
"""
super().__init__(*args, **kwargs)
self.budget = budget
def __call__(self, log_info: LogInfo):
"""Subscalculate the aoc.
Args:
log_info (ioh.LogInfo): info about current values.
"""
if log_info.evaluations > self.budget:
raise OverBudgetException
def reset(self):
super().reset()
|
class budget_logger(logger.AbstractLogger):
'''budget_logger class implementing the logging module for ioh.'''
def __init__(self, budget, *args, **kwargs):
'''Initialize the logger.
Args:
budget (int): Evaluation budget for calculating aoc.
'''
pass
def __call__(self, log_info: LogInfo):
'''Check that the evaluation budget is not exceeded.
Args:
log_info (ioh.LogInfo): info about current values.
'''
pass
def reset(self):
pass
| 4
| 3
| 8
| 1
| 4
| 3
| 1
| 0.64
| 1
| 2
| 1
| 0
| 3
| 1
| 3
| 3
| 28
| 5
| 14
| 10
| 5
| 9
| 9
| 5
| 5
| 2
| 1
| 1
| 4
|
328,157
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/config_flow.py
|
custom_components.target_timeframes.config_flow.RollingTargetTimePeriodSubentryFlowHandler
|
from .config.rolling_target_timeframe import merge_rolling_target_timeframe_config, validate_rolling_target_timeframe_config
from typing import Any
from .const import CONFIG_DATA_UNIQUE_ID, CONFIG_KIND, CONFIG_KIND_ROLLING_TARGET_RATE, CONFIG_KIND_TARGET_RATE, CONFIG_DATA_SOURCE_NAME, CONFIG_TARGET_NAME, CONFIG_VERSION, DATA_SCHEMA_ROLLING_TARGET_TIME_PERIOD, DATA_SCHEMA_SOURCE, DATA_SCHEMA_TARGET_TIME_PERIOD, DOMAIN
from homeassistant.config_entries import ConfigFlow, ConfigEntry, ConfigSubentryFlow, SubentryFlowResult
class RollingTargetTimePeriodSubentryFlowHandler(ConfigSubentryFlow):
async def async_step_user(self, user_input: dict[str, Any] | None=None) -> SubentryFlowResult:
"""Setup a target based on the provided user input"""
config = dict(user_input) if user_input is not None else None
errors = validate_rolling_target_timeframe_config(config) if config is not None else {}
if len(errors) < 1 and user_input is not None:
config[CONFIG_KIND] = CONFIG_KIND_ROLLING_TARGET_RATE
return self.async_create_entry(title=f'{config[CONFIG_TARGET_NAME]} (rolling target)', data=config)
return self.async_show_form(step_id='user', data_schema=self.add_suggested_values_to_schema(DATA_SCHEMA_ROLLING_TARGET_TIME_PERIOD, user_input if user_input is not None else {}), errors=errors)
async def async_step_reconfigure(self, user_input: dict[str, Any] | None=None):
config = merge_rolling_target_timeframe_config(self._get_reconfigure_subentry().data, user_input)
errors = validate_rolling_target_timeframe_config(config)
if len(errors) < 1 and user_input is not None:
return self.async_update_reload_and_abort(self._get_entry(), self._get_reconfigure_subentry(), data_updates=config)
return self.async_show_form(step_id='reconfigure', data_schema=self.add_suggested_values_to_schema(DATA_SCHEMA_ROLLING_TARGET_TIME_PERIOD, config), errors=errors)
|
class RollingTargetTimePeriodSubentryFlowHandler(ConfigSubentryFlow):
async def async_step_user(self, user_input: dict[str, Any] | None=None) -> SubentryFlowResult:
'''Setup a target based on the provided user input'''
pass
async def async_step_reconfigure(self, user_input: dict[str, Any] | None=None):
pass
| 3
| 1
| 21
| 2
| 18
| 1
| 4
| 0.05
| 1
| 3
| 0
| 0
| 2
| 0
| 2
| 2
| 44
| 5
| 37
| 9
| 32
| 2
| 14
| 7
| 11
| 5
| 1
| 1
| 7
|
328,158
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/config_flow.py
|
custom_components.target_timeframes.config_flow.TargetTimePeriodSubentryFlowHandler
|
from .const import CONFIG_DATA_UNIQUE_ID, CONFIG_KIND, CONFIG_KIND_ROLLING_TARGET_RATE, CONFIG_KIND_TARGET_RATE, CONFIG_DATA_SOURCE_NAME, CONFIG_TARGET_NAME, CONFIG_VERSION, DATA_SCHEMA_ROLLING_TARGET_TIME_PERIOD, DATA_SCHEMA_SOURCE, DATA_SCHEMA_TARGET_TIME_PERIOD, DOMAIN
from homeassistant.config_entries import ConfigFlow, ConfigEntry, ConfigSubentryFlow, SubentryFlowResult
from .config.target_timeframe import merge_target_timeframe_config, validate_target_timeframe_config
from typing import Any
class TargetTimePeriodSubentryFlowHandler(ConfigSubentryFlow):
async def async_step_user(self, user_input: dict[str, Any] | None=None) -> SubentryFlowResult:
config = dict(user_input) if user_input is not None else None
errors = validate_target_timeframe_config(config) if config is not None else {}
if len(errors) < 1 and user_input is not None:
config[CONFIG_KIND] = CONFIG_KIND_TARGET_RATE
return self.async_create_entry(title=f'{config[CONFIG_TARGET_NAME]} (target)', data=config)
return self.async_show_form(step_id='user', data_schema=self.add_suggested_values_to_schema(DATA_SCHEMA_TARGET_TIME_PERIOD, user_input if user_input is not None else {}), errors=errors)
async def async_step_reconfigure(self, user_input: dict[str, Any] | None=None):
config = merge_target_timeframe_config(self._get_reconfigure_subentry().data, user_input)
errors = validate_target_timeframe_config(config)
if len(errors) < 1 and user_input is not None:
return self.async_update_reload_and_abort(self._get_entry(), self._get_reconfigure_subentry(), data_updates=config)
return self.async_show_form(step_id='reconfigure', data_schema=self.add_suggested_values_to_schema(DATA_SCHEMA_TARGET_TIME_PERIOD, config), errors=errors)
|
class TargetTimePeriodSubentryFlowHandler(ConfigSubentryFlow):
async def async_step_user(self, user_input: dict[str, Any] | None=None) -> SubentryFlowResult:
pass
async def async_step_reconfigure(self, user_input: dict[str, Any] | None=None):
pass
| 3
| 0
| 21
| 2
| 18
| 1
| 4
| 0.03
| 1
| 3
| 0
| 0
| 2
| 0
| 2
| 2
| 44
| 6
| 37
| 9
| 32
| 1
| 14
| 7
| 11
| 5
| 1
| 1
| 7
|
328,159
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/config_flow.py
|
custom_components.target_timeframes.config_flow.TargetTimeframesConfigFlow
|
from homeassistant.core import callback
from .config.data_source import validate_source_config
from .const import CONFIG_DATA_UNIQUE_ID, CONFIG_KIND, CONFIG_KIND_ROLLING_TARGET_RATE, CONFIG_KIND_TARGET_RATE, CONFIG_DATA_SOURCE_NAME, CONFIG_TARGET_NAME, CONFIG_VERSION, DATA_SCHEMA_ROLLING_TARGET_TIME_PERIOD, DATA_SCHEMA_SOURCE, DATA_SCHEMA_TARGET_TIME_PERIOD, DOMAIN
from typing import Any
from uuid import uuid4
from homeassistant.config_entries import ConfigFlow, ConfigEntry, ConfigSubentryFlow, SubentryFlowResult
class TargetTimeframesConfigFlow(ConfigFlow, domain=DOMAIN):
"""Config flow."""
VERSION = CONFIG_VERSION
async def async_step_user(self, user_input):
"""Setup based on user config"""
errors = {}
if user_input is not None:
errors = validate_source_config(user_input)
if len(errors) < 1:
user_input[CONFIG_DATA_UNIQUE_ID] = str(uuid4())
await self.async_set_unique_id(user_input[CONFIG_DATA_UNIQUE_ID])
self._abort_if_unique_id_mismatch()
return self.async_create_entry(title=f'{user_input[CONFIG_DATA_SOURCE_NAME]}', data=user_input)
return self.async_show_form(step_id='user', data_schema=DATA_SCHEMA_SOURCE, errors=errors)
async def async_step_reconfigure(self, user_input: dict[str, Any] | None=None):
config = dict()
config.update(self._get_reconfigure_entry().data)
errors = {}
if user_input is not None:
config.update(user_input)
errors = validate_source_config(config)
if len(errors) < 1:
await self.async_set_unique_id(config[CONFIG_DATA_UNIQUE_ID])
self._abort_if_unique_id_mismatch()
return self.async_update_reload_and_abort(self._get_reconfigure_entry(), data_updates=config)
return self.async_show_form(step_id='reconfigure', data_schema=self.add_suggested_values_to_schema(DATA_SCHEMA_SOURCE, config), errors=errors)
@classmethod
@callback
def async_get_supported_subentry_types(cls, config_entry: ConfigEntry) -> dict[str, type[ConfigSubentryFlow]]:
"""Return subentries supported by this integration."""
return {'target_time_period': TargetTimePeriodSubentryFlowHandler, 'rolling_target_time_period': RollingTargetTimePeriodSubentryFlowHandler}
|
class TargetTimeframesConfigFlow(ConfigFlow, domain=DOMAIN):
'''Config flow.'''
async def async_step_user(self, user_input):
'''Setup based on user config'''
pass
async def async_step_reconfigure(self, user_input: dict[str, Any] | None=None):
pass
@classmethod
@callback
def async_get_supported_subentry_types(cls, config_entry: ConfigEntry) -> dict[str, type[ConfigSubentryFlow]]:
'''Return subentries supported by this integration.'''
pass
| 6
| 3
| 19
| 3
| 15
| 1
| 2
| 0.1
| 2
| 6
| 2
| 0
| 2
| 0
| 3
| 3
| 67
| 12
| 50
| 11
| 42
| 5
| 26
| 8
| 22
| 3
| 1
| 2
| 7
|
328,160
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/entities/data_source.py
|
custom_components.target_timeframes.entities.data_source.TargetTimePeriodDataSource
|
from homeassistant.const import STATE_UNAVAILABLE, STATE_UNKNOWN
from homeassistant.exceptions import ServiceValidationError
from ..const import DOMAIN, EVENT_DATA_SOURCE
from homeassistant.core import HomeAssistant, callback
from homeassistant.components.sensor import RestoreSensor, SensorDeviceClass
from ..storage.data_source_data import async_save_cached_data_source_data
from ..utils.data_source_data import DataSourceItem, merge_data_source_data, validate_data_source_data
from ..utils.attributes import dict_to_typed_dict
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.util.dt import utcnow, now
import logging
_LOGGER = logging.getLogger(__name__)  # module-level logger used by this class (standard logging setup assumed)
class TargetTimePeriodDataSource(RestoreSensor):
"""Sensor for displaying a target time period data source"""
_unrecorded_attributes = frozenset({'data'})
def __init__(self, hass: HomeAssistant, source_id: str):
"""Init sensor."""
self._hass = hass
self._state = None
self._source_id = source_id
self._attributes = {'data_source_id': source_id}
self.entity_id = generate_entity_id('sensor.{}', self.unique_id, hass=hass)
@property
def unique_id(self):
"""The id of the sensor."""
return f'target_timeframes_{self._source_id}_data_source_last_updated'
@property
def name(self):
"""Name of the sensor."""
return f'Data source last updated ({self._source_id})'
@property
def icon(self):
"""Icon of the sensor."""
return 'mdi:clock'
@property
def device_class(self):
"""The type of sensor"""
return SensorDeviceClass.TIMESTAMP
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
return self._attributes
@property
def native_value(self):
return self._state
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
last_sensor_state = await self.async_get_last_sensor_data()
if state is not None and last_sensor_state is not None and (self._state is None):
self._state = None if state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN) else last_sensor_state.native_value
self._attributes = dict_to_typed_dict(state.attributes)
_LOGGER.debug(f'Restored state: {self._state}')
@callback
async def async_update_target_timeframe_data_source(self, data, replace_all_existing_data=False):
"""Update target timeframe data source"""
result = validate_data_source_data(data, self._source_id)
if result.success == False:
raise ServiceValidationError(translation_domain=DOMAIN, translation_key='invalid_data_source_data', translation_placeholders={'error': result.error_message})
data_source_data = result.data if replace_all_existing_data else merge_data_source_data(now(), result.data, list(map(lambda x: DataSourceItem.parse_obj(x), self._attributes['data'])) if 'data' in self._attributes else None)
await async_save_cached_data_source_data(self._hass, self._source_id, data_source_data)
data_dict = list(map(lambda x: x.dict(), data_source_data))
self._attributes['data'] = data_dict
self._state = utcnow()
self.async_write_ha_state()
self._hass.data.setdefault(DOMAIN, {})
self._hass.data[DOMAIN].setdefault(self._source_id, {})
self._hass.data[DOMAIN][self._source_id] = data_dict
self._hass.bus.async_fire(EVENT_DATA_SOURCE, {'data_source_id': self._source_id})
|
class TargetTimePeriodDataSource(RestoreSensor):
'''Sensor for displaying a target time period data source'''
def __init__(self, hass: HomeAssistant, source_id: str):
'''Init sensor.'''
pass
@property
def unique_id(self):
'''The id of the sensor.'''
pass
@property
def name(self):
'''Name of the sensor.'''
pass
@property
def icon(self):
'''Icon of the sensor.'''
pass
@property
def device_class(self):
'''The type of sensor'''
pass
@property
def extra_state_attributes(self):
'''Attributes of the sensor.'''
pass
@property
def native_value(self):
pass
async def async_added_to_hass(self):
'''Call when entity about to be added to hass.'''
pass
@callback
async def async_update_target_timeframe_data_source(self, data, replace_all_existing_data=False):
'''Update target timeframe data source'''
pass
| 17
| 9
| 9
| 1
| 7
| 1
| 2
| 0.14
| 1
| 5
| 1
| 0
| 9
| 5
| 9
| 9
| 97
| 18
| 69
| 28
| 52
| 10
| 42
| 21
| 32
| 4
| 1
| 1
| 14
|
328,161
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/entities/rolling_target_timeframe.py
|
custom_components.target_timeframes.entities.rolling_target_timeframe.TargetTimeframesRollingTargetRate
|
from homeassistant.util.dt import utcnow, now
from . import calculate_continuous_times, calculate_intermittent_times, compare_config, create_weighting, extract_config, get_rolling_applicable_time_periods, get_target_time_period_info, should_evaluate_target_timeframes
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import STATE_UNAVAILABLE, STATE_UNKNOWN
import voluptuous as vol
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.restore_state import RestoreEntity
from .repairs import check_for_errors
import math
from ..utils.attributes import dict_to_typed_dict
from homeassistant.helpers import translation
from ..config.rolling_target_timeframe import validate_rolling_target_timeframe_config
from ..const import CONFIG_ROLLING_TARGET_HOURS_LOOK_AHEAD, CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA, CONFIG_TARGET_DANGEROUS_SETTINGS, CONFIG_TARGET_DEFAULT_MINIMUM_REQUIRED_MINUTES_IN_SLOT, CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT, CONFIG_TARGET_TARGET_TIMES_EVALUATION_MODE, CONFIG_TARGET_HOURS_MODE, CONFIG_TARGET_MAX_VALUE, CONFIG_TARGET_MIN_VALUE, CONFIG_TARGET_NAME, CONFIG_TARGET_HOURS, CONFIG_TARGET_TYPE, CONFIG_TARGET_ROLLING_TARGET, CONFIG_TARGET_LATEST_VALUES, CONFIG_TARGET_OFFSET, CONFIG_TARGET_TYPE_CONTINUOUS, CONFIG_TARGET_TYPE_INTERMITTENT, CONFIG_TARGET_WEIGHTING, CONFIG_TARGET_FIND_HIGHEST_VALUES, DOMAIN, EVENT_DATA_SOURCE
from homeassistant.core import HomeAssistant, callback
import logging
_LOGGER = logging.getLogger(__name__)  # module-level logger used throughout this class (standard logging setup assumed)
class TargetTimeframesRollingTargetRate(BinarySensorEntity, RestoreEntity):
"""Sensor for calculating when a target should be turned on or off."""
def __init__(self, hass: HomeAssistant, data_source_id: str, config_entry, config_subentry, config, initial_data):
"""Init sensor."""
self._state = None
self._config_entry = config_entry
self._config_subentry = config_subentry
self._config = config
self._attributes = self._config.copy()
self._last_evaluated = None
self._data_source_id = data_source_id
self._attributes['data_source_id'] = self._data_source_id
self.update_default_attributes()
self._data_source_data = initial_data if initial_data is not None else []
self._target_timeframes = []
self._hass = hass
self.entity_id = generate_entity_id('binary_sensor.{}', self.unique_id, hass=hass)
@property
def unique_id(self):
"""The id of the sensor."""
return f'target_timeframes_{self._data_source_id}_{self._config[CONFIG_TARGET_NAME]}'
@property
def name(self):
"""Name of the sensor."""
return f'{self._config[CONFIG_TARGET_NAME]} ({self._data_source_id})'
@property
def icon(self):
"""Icon of the sensor."""
return 'mdi:camera-timer'
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
return self._attributes
@property
def is_on(self):
return self._state
async def async_update(self):
"""Determines if the target rate sensor is active."""
if not self.enabled:
return
if CONFIG_TARGET_OFFSET in self._config:
offset = self._config[CONFIG_TARGET_OFFSET]
else:
offset = None
current_local_date = now()
check_for_errors(self._hass, self._config)
current_date = utcnow()
should_evaluate = should_evaluate_target_timeframes(current_date, self._target_timeframes, self._config[CONFIG_TARGET_TARGET_TIMES_EVALUATION_MODE])
if should_evaluate:
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - {(len(self._data_source_data) if self._data_source_data is not None else None)} time periods found')
if len(self._data_source_data) > 0:
find_last_time_periods = False
if CONFIG_TARGET_LATEST_VALUES in self._config:
find_last_time_periods = self._config[CONFIG_TARGET_LATEST_VALUES]
target_hours = float(self._config[CONFIG_TARGET_HOURS])
find_highest_values = False
if CONFIG_TARGET_FIND_HIGHEST_VALUES in self._config:
find_highest_values = self._config[CONFIG_TARGET_FIND_HIGHEST_VALUES]
min_value = None
if CONFIG_TARGET_MIN_VALUE in self._config:
min_value = self._config[CONFIG_TARGET_MIN_VALUE]
max_value = None
if CONFIG_TARGET_MAX_VALUE in self._config:
max_value = self._config[CONFIG_TARGET_MAX_VALUE]
calculate_with_incomplete_data = False
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._config and CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA in self._config[CONFIG_TARGET_DANGEROUS_SETTINGS]:
calculate_with_incomplete_data = self._config[CONFIG_TARGET_DANGEROUS_SETTINGS][CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA]
minimum_slot_minutes = CONFIG_TARGET_DEFAULT_MINIMUM_REQUIRED_MINUTES_IN_SLOT
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._config and CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT in self._config[CONFIG_TARGET_DANGEROUS_SETTINGS]:
minimum_slot_minutes = self._config[CONFIG_TARGET_DANGEROUS_SETTINGS][CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT]
applicable_time_periods = get_rolling_applicable_time_periods(current_local_date, self._data_source_data, self._config[CONFIG_ROLLING_TARGET_HOURS_LOOK_AHEAD], minimum_slot_minutes, calculate_with_incomplete_data, self._config[CONFIG_TARGET_NAME])
if applicable_time_periods is not None:
number_of_slots = math.ceil(target_hours * 2)
weighting = create_weighting(self._config[CONFIG_TARGET_WEIGHTING] if CONFIG_TARGET_WEIGHTING in self._config else None, number_of_slots)
if self._config[CONFIG_TARGET_TYPE] == CONFIG_TARGET_TYPE_CONTINUOUS:
self._target_timeframes = calculate_continuous_times(applicable_time_periods, target_hours, find_highest_values, find_last_time_periods, min_value, max_value, weighting, self._config[CONFIG_TARGET_HOURS_MODE], self._config[CONFIG_TARGET_NAME])
elif self._config[CONFIG_TARGET_TYPE] == CONFIG_TARGET_TYPE_INTERMITTENT:
self._target_timeframes = calculate_intermittent_times(applicable_time_periods, target_hours, find_highest_values, find_last_time_periods, min_value, max_value, self._config[CONFIG_TARGET_HOURS_MODE], self._config[CONFIG_TARGET_NAME])
else:
_LOGGER.error(f'{self._config[CONFIG_TARGET_NAME]} - Unexpected target type: {self._config[CONFIG_TARGET_TYPE]}')
self._attributes['target_times'] = self._target_timeframes
self._attributes['target_times_last_evaluated'] = current_date
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - calculated rates: {self._target_timeframes}')
self._attributes['time_periods_incomplete'] = applicable_time_periods is None or len(applicable_time_periods) < target_hours * 2
active_result = get_target_time_period_info(current_date, self._target_timeframes, offset)
self._attributes['overall_average_value'] = active_result['overall_average_value']
self._attributes['overall_min_value'] = active_result['overall_min_value']
self._attributes['overall_max_value'] = active_result['overall_max_value']
self._attributes['current_duration_in_hours'] = active_result['current_duration_in_hours']
self._attributes['current_average_value'] = active_result['current_average_value']
self._attributes['current_min_value'] = active_result['current_min_value']
self._attributes['current_max_value'] = active_result['current_max_value']
self._attributes['next_time'] = active_result['next_time']
self._attributes['next_duration_in_hours'] = active_result['next_duration_in_hours']
self._attributes['next_average_value'] = active_result['next_average_value']
self._attributes['next_min_value'] = active_result['next_min_value']
self._attributes['next_max_value'] = active_result['next_max_value']
self._attributes['data_source_id'] = self._data_source_id
self._state = active_result['is_active']
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - calculated: {self._state}')
self._attributes = dict_to_typed_dict(self._attributes)
@callback
def _async_handle_event(self, event) -> None:
if event.data is not None and 'data_source_id' in event.data and (event.data['data_source_id'] == self._data_source_id):
self._data_source_data = self._hass.data[DOMAIN][self._data_source_id]
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state is not None and self._state is None:
self._state = None if state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN) or state.state is None else state.state.lower() == 'on'
self._attributes = dict_to_typed_dict(state.attributes, [])
self._target_timeframes = self._attributes['target_times'] if 'target_times' in self._attributes else []
if compare_config(self._config, self._attributes) == False:
self._state = False
self._attributes = self._config.copy()
self.update_default_attributes()
self._target_timeframes = None
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - Restored state: {self._state}')
self.async_on_remove(self._hass.bus.async_listen(EVENT_DATA_SOURCE, self._async_handle_event))
@callback
async def async_update_rolling_target_timeframe_config(self, target_hours=None, target_look_ahead_hours=None, target_offset=None, target_minimum_value=None, target_maximum_value=None, target_weighting=None, persist_changes=False):
"""Update sensors config"""
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - async_update_rolling_target_timeframe_config called: {self._config}')
config = dict(self._config)
if target_hours is not None:
config.update({CONFIG_TARGET_HOURS: target_hours if isinstance(target_hours, str) == False else target_hours.strip('"') if target_hours != '' else None})
if target_look_ahead_hours is not None:
config.update({CONFIG_ROLLING_TARGET_HOURS_LOOK_AHEAD: target_look_ahead_hours if isinstance(target_look_ahead_hours, str) == False else target_look_ahead_hours.strip('"') if target_look_ahead_hours != '' else None})
if target_offset is not None:
config.update({CONFIG_TARGET_OFFSET: target_offset if isinstance(target_offset, str) == False else target_offset.strip('"') if target_offset != '' else None})
if target_minimum_value is not None:
config.update({CONFIG_TARGET_MIN_VALUE: target_minimum_value if isinstance(target_minimum_value, str) == False else target_minimum_value.strip('"') if target_minimum_value != '' else None})
if target_maximum_value is not None:
config.update({CONFIG_TARGET_MAX_VALUE: target_maximum_value if isinstance(target_maximum_value, str) == False else target_maximum_value.strip('"') if target_maximum_value != '' else None})
if target_weighting is not None:
config.update({CONFIG_TARGET_WEIGHTING: target_weighting if isinstance(target_weighting, str) == False else target_weighting.strip('"') if target_weighting != '' else None})
errors = validate_rolling_target_timeframe_config(config)
keys = list(errors.keys())
if len(keys) > 0:
translations = await translation.async_get_translations(self._hass, self._hass.config.language, 'config_subentries', {DOMAIN})
raise vol.Invalid(translations[f'component.{DOMAIN}.config_subentries.rolling_target_time_period.error.{errors[keys[0]]}'])
self._config = config
self._attributes = self._config.copy()
self.update_default_attributes()
self._target_timeframes = []
await self.async_update()
self.async_write_ha_state()
if persist_changes:
updatable_keys = [CONFIG_TARGET_HOURS, CONFIG_ROLLING_TARGET_HOURS_LOOK_AHEAD, CONFIG_TARGET_OFFSET, CONFIG_TARGET_MIN_VALUE, CONFIG_TARGET_MAX_VALUE, CONFIG_TARGET_WEIGHTING]
new_config_data = {**self._config_subentry.data}
new_config_data.update(extract_config(config, updatable_keys))
self._hass.config_entries.async_update_subentry(self._config_entry, self._config_subentry, data=new_config_data)
def update_default_attributes(self):
"""Update the default attributes."""
self._attributes['data_source_id'] = self._data_source_id
is_rolling_target = True
if CONFIG_TARGET_ROLLING_TARGET in self._config:
is_rolling_target = self._config[CONFIG_TARGET_ROLLING_TARGET]
self._attributes[CONFIG_TARGET_ROLLING_TARGET] = is_rolling_target
find_last_rates = False
if CONFIG_TARGET_LATEST_VALUES in self._config:
find_last_rates = self._config[CONFIG_TARGET_LATEST_VALUES]
self._attributes[CONFIG_TARGET_LATEST_VALUES] = find_last_rates
calculate_with_incomplete_data = False
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._config and CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA in self._config[CONFIG_TARGET_DANGEROUS_SETTINGS]:
calculate_with_incomplete_data = self._config[CONFIG_TARGET_DANGEROUS_SETTINGS][CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA]
self._attributes[CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA] = calculate_with_incomplete_data
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._attributes:
del self._attributes[CONFIG_TARGET_DANGEROUS_SETTINGS]
|
class TargetTimeframesRollingTargetRate(BinarySensorEntity, RestoreEntity):
'''Sensor for calculating when a target should be turned on or off.'''
def __init__(self, hass: HomeAssistant, data_source_id: str, config_entry, config_subentry, config, initial_data):
'''Init sensor.'''
pass
@property
def unique_id(self):
'''The id of the sensor.'''
pass
@property
def name(self):
'''Name of the sensor.'''
pass
@property
def icon(self):
'''Icon of the sensor.'''
pass
@property
def extra_state_attributes(self):
'''Attributes of the sensor.'''
pass
@property
def is_on(self):
pass
async def async_update(self):
'''Determines if the target rate sensor is active.'''
pass
@callback
def _async_handle_event(self, event) -> None:
pass
async def async_added_to_hass(self):
'''Call when entity about to be added to hass.'''
pass
@callback
async def async_update_rolling_target_timeframe_config(self, target_hours=None, target_look_ahead_hours=None, target_offset=None, target_minimum_value=None, target_maximum_value=None, target_weighting=None, persist_changes=False):
'''Update sensors config'''
pass
def update_default_attributes(self):
'''Update the default attributes.'''
pass
| 19
| 10
| 24
| 4
| 18
| 2
| 5
| 0.1
| 2
| 5
| 0
| 0
| 11
| 11
| 11
| 11
| 284
| 55
| 209
| 55
| 190
| 20
| 150
| 48
| 138
| 21
| 1
| 4
| 56
|
328,162
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/entities/target_timeframe.py
|
custom_components.target_timeframes.entities.target_timeframe.TargetTimeframesTargetRate
|
import voluptuous as vol
from homeassistant.helpers import translation
from ..const import CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA, CONFIG_TARGET_DANGEROUS_SETTINGS, CONFIG_TARGET_DEFAULT_MINIMUM_REQUIRED_MINUTES_IN_SLOT, CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT, CONFIG_TARGET_TARGET_TIMES_EVALUATION_MODE, CONFIG_TARGET_TARGET_TIMES_EVALUATION_MODE_ALL_IN_PAST, CONFIG_TARGET_HOURS_MODE, CONFIG_TARGET_MAX_VALUE, CONFIG_TARGET_MIN_VALUE, CONFIG_TARGET_NAME, CONFIG_TARGET_HOURS, CONFIG_TARGET_TYPE, CONFIG_TARGET_START_TIME, CONFIG_TARGET_END_TIME, CONFIG_TARGET_ROLLING_TARGET, CONFIG_TARGET_LATEST_VALUES, CONFIG_TARGET_FIND_HIGHEST_VALUES, CONFIG_TARGET_OFFSET, CONFIG_TARGET_TYPE_CONTINUOUS, CONFIG_TARGET_TYPE_INTERMITTENT, CONFIG_TARGET_WEIGHTING, DOMAIN, EVENT_DATA_SOURCE
from homeassistant.helpers.entity import generate_entity_id
import math
from ..config.target_timeframe import validate_target_timeframe_config
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.util.dt import utcnow, now
from homeassistant.helpers.restore_state import RestoreEntity
from . import calculate_continuous_times, calculate_intermittent_times, compare_config, create_weighting, extract_config, get_fixed_applicable_time_periods, get_start_and_end_times, get_target_time_period_info, is_target_timeframe_complete_in_period, should_evaluate_target_timeframes
from .repairs import check_for_errors
from ..utils.attributes import dict_to_typed_dict
from homeassistant.const import STATE_UNAVAILABLE, STATE_UNKNOWN
from homeassistant.core import HomeAssistant, callback
import logging
_LOGGER = logging.getLogger(__name__)  # module-level logger used throughout this class (standard logging setup assumed)
class TargetTimeframesTargetRate(BinarySensorEntity, RestoreEntity):
"""Sensor for calculating when a target should be turned on or off."""
def __init__(self, hass: HomeAssistant, data_source_id: str, config_entry, config_subentry, config, initial_data):
"""Init sensor."""
self._state = None
self._config_entry = config_entry
self._config_subentry = config_subentry
self._config = config
self._attributes = self._config.copy()
self._last_evaluated = None
self._data_source_id = data_source_id
self.update_default_attributes()
self._data_source_data = initial_data if initial_data is not None else []
self._target_timeframes = []
self._hass = hass
self.entity_id = generate_entity_id('binary_sensor.{}', self.unique_id, hass=hass)
@property
def unique_id(self):
"""The id of the sensor."""
return f'target_timeframes_{self._data_source_id}_{self._config[CONFIG_TARGET_NAME]}'
@property
def name(self):
"""Name of the sensor."""
return f'{self._config[CONFIG_TARGET_NAME]} ({self._data_source_id})'
@property
def icon(self):
"""Icon of the sensor."""
return 'mdi:camera-timer'
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
return self._attributes
@property
def is_on(self):
return self._state
async def async_update(self):
"""Determines if the target rate sensor is active."""
if not self.enabled:
return
if CONFIG_TARGET_OFFSET in self._config:
offset = self._config[CONFIG_TARGET_OFFSET]
else:
offset = None
current_local_date = now()
check_for_errors(self._hass, self._config)
current_date = utcnow()
evaluation_mode = self._config[CONFIG_TARGET_TARGET_TIMES_EVALUATION_MODE] if CONFIG_TARGET_TARGET_TIMES_EVALUATION_MODE in self._config else CONFIG_TARGET_TARGET_TIMES_EVALUATION_MODE_ALL_IN_PAST
should_evaluate = should_evaluate_target_timeframes(current_date, self._target_timeframes, evaluation_mode)
if should_evaluate:
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - {(len(self._data_source_data) if self._data_source_data is not None else None)} time periods found')
if len(self._data_source_data) > 0:
start_time = None
if CONFIG_TARGET_START_TIME in self._config:
start_time = self._config[CONFIG_TARGET_START_TIME]
end_time = None
if CONFIG_TARGET_END_TIME in self._config:
end_time = self._config[CONFIG_TARGET_END_TIME]
is_rolling_target = True
if CONFIG_TARGET_ROLLING_TARGET in self._config:
is_rolling_target = self._config[CONFIG_TARGET_ROLLING_TARGET]
find_last_rates = False
if CONFIG_TARGET_LATEST_VALUES in self._config:
find_last_rates = self._config[CONFIG_TARGET_LATEST_VALUES]
target_hours = float(self._config[CONFIG_TARGET_HOURS])
find_highest_values = False
if CONFIG_TARGET_FIND_HIGHEST_VALUES in self._config:
find_highest_values = self._config[CONFIG_TARGET_FIND_HIGHEST_VALUES]
min_rate = None
if CONFIG_TARGET_MIN_VALUE in self._config:
min_rate = self._config[CONFIG_TARGET_MIN_VALUE]
max_rate = None
if CONFIG_TARGET_MAX_VALUE in self._config:
max_rate = self._config[CONFIG_TARGET_MAX_VALUE]
calculate_with_incomplete_data = False
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._config and CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA in self._config[CONFIG_TARGET_DANGEROUS_SETTINGS]:
calculate_with_incomplete_data = self._config[CONFIG_TARGET_DANGEROUS_SETTINGS][CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA]
minimum_slot_minutes = CONFIG_TARGET_DEFAULT_MINIMUM_REQUIRED_MINUTES_IN_SLOT
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._config and CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT in self._config[CONFIG_TARGET_DANGEROUS_SETTINGS]:
minimum_slot_minutes = self._config[CONFIG_TARGET_DANGEROUS_SETTINGS][CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT]
target_start, target_end = get_start_and_end_times(current_local_date, start_time, end_time, minimum_slot_minutes, self._config[CONFIG_TARGET_NAME])
applicable_time_periods = get_fixed_applicable_time_periods(target_start, target_end, self._data_source_data, calculate_with_incomplete_data, self._config[CONFIG_TARGET_NAME])
applicable_target_start, applicable_target_end = get_start_and_end_times(current_local_date, start_time, end_time, None, self._config[CONFIG_TARGET_NAME])
is_target_timeframe_complete = is_rolling_target == False and is_target_timeframe_complete_in_period(current_local_date, applicable_target_start, applicable_target_end, self._target_timeframes, self._config[CONFIG_TARGET_NAME])
if applicable_time_periods is not None and is_target_timeframe_complete == False:
number_of_slots = math.ceil(target_hours * 2)
weighting = create_weighting(self._config[CONFIG_TARGET_WEIGHTING] if CONFIG_TARGET_WEIGHTING in self._config else None, number_of_slots)
proposed_target_timeframes = None
if self._config[CONFIG_TARGET_TYPE] == CONFIG_TARGET_TYPE_CONTINUOUS:
proposed_target_timeframes = calculate_continuous_times(applicable_time_periods, target_hours, find_highest_values, find_last_rates, min_rate, max_rate, weighting, self._config[CONFIG_TARGET_HOURS_MODE], self._config[CONFIG_TARGET_NAME])
elif self._config[CONFIG_TARGET_TYPE] == CONFIG_TARGET_TYPE_INTERMITTENT:
proposed_target_timeframes = calculate_intermittent_times(applicable_time_periods, target_hours, find_highest_values, find_last_rates, min_rate, max_rate, self._config[CONFIG_TARGET_HOURS_MODE], self._config[CONFIG_TARGET_NAME])
else:
_LOGGER.error(f'{self._config[CONFIG_TARGET_NAME]} - Unexpected target type: {self._config[CONFIG_TARGET_TYPE]}')
self._target_timeframes = proposed_target_timeframes
self._attributes['target_times'] = self._target_timeframes
self._attributes['target_times_last_evaluated'] = current_date
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - calculated rates: {self._target_timeframes}')
self._attributes['time_periods_incomplete'] = applicable_time_periods is None or len(applicable_time_periods) < target_hours * 2
active_result = get_target_time_period_info(current_date, self._target_timeframes, offset)
self._attributes['overall_average_value'] = active_result['overall_average_value']
self._attributes['overall_min_value'] = active_result['overall_min_value']
self._attributes['overall_max_value'] = active_result['overall_max_value']
self._attributes['current_duration_in_hours'] = active_result['current_duration_in_hours']
self._attributes['current_average_value'] = active_result['current_average_value']
self._attributes['current_min_value'] = active_result['current_min_value']
self._attributes['current_max_value'] = active_result['current_max_value']
self._attributes['next_time'] = active_result['next_time']
self._attributes['next_duration_in_hours'] = active_result['next_duration_in_hours']
self._attributes['next_average_value'] = active_result['next_average_value']
self._attributes['next_min_value'] = active_result['next_min_value']
self._attributes['next_max_value'] = active_result['next_max_value']
self._attributes['data_source_id'] = self._data_source_id
self._state = active_result['is_active']
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - calculated: {self._state}')
self._attributes = dict_to_typed_dict(self._attributes)
@callback
def _async_handle_event(self, event) -> None:
if event.data is not None and 'data_source_id' in event.data and (event.data['data_source_id'] == self._data_source_id):
self._data_source_data = self._hass.data[DOMAIN][self._data_source_id]
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state is not None and self._state is None:
self._state = None if state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN) or state.state is None else state.state.lower() == 'on'
self._attributes = dict_to_typed_dict(state.attributes, [])
self._target_timeframes = self._attributes['target_times'] if 'target_times' in self._attributes else []
if compare_config(self._config, self._attributes) == False:
self._state = False
self._attributes = self._config.copy()
self.update_default_attributes()
self._target_timeframes = None
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - Restored state: {self._state}')
self.async_on_remove(self._hass.bus.async_listen(EVENT_DATA_SOURCE, self._async_handle_event))
@callback
async def async_update_target_timeframe_config(self, target_start_time=None, target_end_time=None, target_hours=None, target_offset=None, target_minimum_value=None, target_maximum_value=None, target_weighting=None, persist_changes=False):
"""Update sensors config"""
_LOGGER.debug(f'{self._config[CONFIG_TARGET_NAME]} - async_update_target_timeframe_config called: {self._config}')
config = dict(self._config)
if target_hours is not None:
config.update({CONFIG_TARGET_HOURS: target_hours if isinstance(target_hours, str) == False else target_hours.strip('"') if target_hours != '' else None})
if target_start_time is not None:
config.update({CONFIG_TARGET_START_TIME: target_start_time if isinstance(target_start_time, str) == False else target_start_time.strip('"') if target_start_time != '' else None})
if target_end_time is not None:
config.update({CONFIG_TARGET_END_TIME: target_end_time if isinstance(target_end_time, str) == False else target_end_time.strip('"') if target_end_time != '' else None})
if target_offset is not None:
config.update({CONFIG_TARGET_OFFSET: target_offset if isinstance(target_offset, str) == False else target_offset.strip('"') if target_offset != '' else None})
if target_minimum_value is not None:
config.update({CONFIG_TARGET_MIN_VALUE: target_minimum_value if isinstance(target_minimum_value, str) == False else target_minimum_value.strip('"') if target_minimum_value != '' else None})
if target_maximum_value is not None:
config.update({CONFIG_TARGET_MAX_VALUE: target_maximum_value if isinstance(target_maximum_value, str) == False else target_maximum_value.strip('"') if target_maximum_value != '' else None})
if target_weighting is not None:
config.update({CONFIG_TARGET_WEIGHTING: target_weighting if isinstance(target_weighting, str) == False else target_weighting.strip('"') if target_weighting != '' else None})
errors = validate_target_timeframe_config(config)
keys = list(errors.keys())
if len(keys) > 0:
translations = await translation.async_get_translations(self._hass, self._hass.config.language, 'config_subentries', {DOMAIN})
raise vol.Invalid(translations[f'component.{DOMAIN}.config_subentries.target_time_period.error.{errors[keys[0]]}'])
self._config = config
self._attributes = self._config.copy()
self.update_default_attributes()
self._target_timeframes = []
await self.async_update()
self.async_write_ha_state()
if persist_changes:
updatable_keys = [CONFIG_TARGET_HOURS, CONFIG_TARGET_START_TIME, CONFIG_TARGET_END_TIME, CONFIG_TARGET_OFFSET, CONFIG_TARGET_MIN_VALUE, CONFIG_TARGET_MAX_VALUE, CONFIG_TARGET_WEIGHTING]
new_config_data = {**self._config_subentry.data}
new_config_data.update(extract_config(config, updatable_keys))
self._hass.config_entries.async_update_subentry(self._config_entry, self._config_subentry, data=new_config_data)
def update_default_attributes(self):
"""Update the default attributes."""
self._attributes['data_source_id'] = self._data_source_id
is_rolling_target = True
if CONFIG_TARGET_ROLLING_TARGET in self._config:
is_rolling_target = self._config[CONFIG_TARGET_ROLLING_TARGET]
self._attributes[CONFIG_TARGET_ROLLING_TARGET] = is_rolling_target
find_last_rates = False
if CONFIG_TARGET_LATEST_VALUES in self._config:
find_last_rates = self._config[CONFIG_TARGET_LATEST_VALUES]
self._attributes[CONFIG_TARGET_LATEST_VALUES] = find_last_rates
calculate_with_incomplete_data = False
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._config and CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA in self._config[CONFIG_TARGET_DANGEROUS_SETTINGS]:
calculate_with_incomplete_data = self._config[CONFIG_TARGET_DANGEROUS_SETTINGS][CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA]
self._attributes[CONFIG_TARGET_CALCULATE_WITH_INCOMPLETE_DATA] = calculate_with_incomplete_data
minimum_required_minutes_in_slot = CONFIG_TARGET_DEFAULT_MINIMUM_REQUIRED_MINUTES_IN_SLOT
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._config and CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT in self._config[CONFIG_TARGET_DANGEROUS_SETTINGS]:
minimum_required_minutes_in_slot = self._config[CONFIG_TARGET_DANGEROUS_SETTINGS][CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT]
self._attributes[CONFIG_TARGET_MINIMUM_REQUIRED_MINUTES_IN_SLOT] = minimum_required_minutes_in_slot
if CONFIG_TARGET_DANGEROUS_SETTINGS in self._attributes:
del self._attributes[CONFIG_TARGET_DANGEROUS_SETTINGS]
|
class TargetTimeframesTargetRate(BinarySensorEntity, RestoreEntity):
'''Sensor for calculating when a target should be turned on or off.'''
def __init__(self, hass: HomeAssistant, data_source_id: str, config_entry, config_subentry, config, initial_data):
'''Init sensor.'''
pass
@property
def unique_id(self):
'''The id of the sensor.'''
pass
@property
def name(self):
'''Name of the sensor.'''
pass
@property
def icon(self):
'''Icon of the sensor.'''
pass
@property
def extra_state_attributes(self):
'''Attributes of the sensor.'''
pass
@property
def is_on(self):
pass
async def async_update(self):
'''Determines if the target rate sensor is active.'''
pass
@callback
def _async_handle_event(self, event) -> None:
pass
async def async_added_to_hass(self):
'''Call when entity about to be added to hass.'''
pass
@callback
async def async_update_target_timeframe_config(self, target_start_time=None, target_end_time=None, target_hours=None, target_offset=None, target_minimum_value=None, target_maximum_value=None, target_weighting=None, persist_changes=False):
'''Update sensors config'''
pass
def update_default_attributes(self):
'''Update the default attributes.'''
pass
| 19
| 10
| 28
| 5
| 21
| 2
| 6
| 0.09
| 2
| 5
| 0
| 0
| 11
| 11
| 11
| 11
| 324
| 62
| 241
| 64
| 222
| 21
| 170
| 57
| 158
| 24
| 1
| 4
| 64
|
328,163
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/utils/data_source_data.py
|
custom_components.target_timeframes.utils.data_source_data.DataSourceItem
|
from typing import Any
from datetime import datetime, timedelta
from pydantic import BaseModel
class DataSourceItem(BaseModel):
start: datetime
end: datetime
value: float
metadata: Any
|
class DataSourceItem(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 1
| 4
| 0
| 5
| 1
| 4
| 0
| 5
| 0
| 0
|
328,164
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/utils/data_source_data.py
|
custom_components.target_timeframes.utils.data_source_data.ValidateDataSourceDataResult
|
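# NOTE: DataSourceItem is defined earlier in the same data_source_data.py module (see the
# preceding record). The empty-list default for `data` is evaluated once and shared across
# calls, the usual Python mutable-default caveat.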
class ValidateDataSourceDataResult:
def __init__(self, success: bool, data_source_id: str, data: list[DataSourceItem]=[], error_message: str | None=None):
self.success = success
self.data = data
self.data_source_id = data_source_id
self.error_message = error_message
|
class ValidateDataSourceDataResult:
def __init__(self, success: bool, data_source_id: str, data: list[DataSourceItem]=[], error_message: str | None=None):
pass
| 2
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 0
| 4
| 1
| 0
| 1
| 4
| 1
| 1
| 7
| 1
| 6
| 6
| 4
| 0
| 6
| 6
| 4
| 1
| 0
| 0
| 1
|
328,165
|
BottlecapDave/HomeAssistant-TargetTimeframes
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/BottlecapDave_HomeAssistant-TargetTimeframes/custom_components/target_timeframes/storage/data_source_data.py
|
data_source_data.DataSourceData
|
from ..utils.data_source_data import DataSourceItem
from pydantic import BaseModel
class DataSourceData(BaseModel):
data: list[DataSourceItem]
|
class DataSourceData(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
328,166
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/checkpointer_manager.py
|
checkpointer_manager.CheckpointerManager
|
from langgraph.checkpoint.base import BaseCheckpointSaver
from typing import Optional
class CheckpointerManager:
"""Singleton manager for the global checkpointer."""
_instance: Optional['CheckpointerManager'] = None
_checkpointer: Optional[BaseCheckpointSaver] = None
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def set_checkpointer(self, checkpointer: BaseCheckpointSaver):
"""Set the global checkpointer."""
self._checkpointer = checkpointer
def get_checkpointer(self) -> Optional[BaseCheckpointSaver]:
"""Get the global checkpointer."""
return self._checkpointer
|
class CheckpointerManager:
'''Singleton manager for the global checkpointer.'''
def __new__(cls):
pass
def set_checkpointer(self, checkpointer: BaseCheckpointSaver):
'''Set the global checkpointer.'''
pass
def get_checkpointer(self) -> Optional[BaseCheckpointSaver]:
'''Get the global checkpointer.'''
pass
| 4
| 3
| 3
| 0
| 3
| 1
| 1
| 0.27
| 0
| 1
| 0
| 0
| 3
| 0
| 3
| 3
| 18
| 4
| 11
| 6
| 7
| 3
| 11
| 6
| 7
| 2
| 0
| 1
| 4
|
328,167
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/config/llm_config.py
|
config.llm_config.LLMConfig
|
import os
from typing import Dict, Optional
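# NOTE: logger, PROVIDER_PROFILES and COMPONENT_MODEL_MAPPING referenced below are
# module-level names defined (or imported) elsewhere in the original llm_config.py and are
# not part of this extracted class record.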
class LLMConfig:
"""Manages LLM configuration with provider profiles and overrides."""
def __init__(self):
self._provider = None
self._profile = None
self._custom_models = {}
self._timeout = None
self._load_config()
def _load_config(self):
"""Load configuration from environment variables."""
self._provider = os.getenv('KATALYST_LLM_PROVIDER', os.getenv('KATALYST_LITELLM_PROVIDER', 'openai')).lower()
self._profile = os.getenv('KATALYST_LLM_PROFILE', self._provider).lower()
if self._profile not in PROVIDER_PROFILES:
logger.warning(f"LLM profile '{self._profile}' not found, using provider '{self._provider}' profile")
self._profile = self._provider
if self._profile not in PROVIDER_PROFILES:
available = ', '.join(PROVIDER_PROFILES.keys())
raise ValueError(f"Unknown provider profile '{self._profile}'. Available: {available}")
if os.getenv('KATALYST_REASONING_MODEL'):
self._custom_models['reasoning'] = os.getenv('KATALYST_REASONING_MODEL')
if os.getenv('KATALYST_EXECUTION_MODEL'):
self._custom_models['execution'] = os.getenv('KATALYST_EXECUTION_MODEL')
if os.getenv('KATALYST_LLM_MODEL_FALLBACK'):
self._custom_models['fallback'] = os.getenv('KATALYST_LLM_MODEL_FALLBACK')
try:
self._timeout = int(os.getenv('KATALYST_LLM_TIMEOUT', os.getenv('KATALYST_LITELLM_TIMEOUT', '0')))
except ValueError:
self._timeout = 0
logger.debug(f'LLM Config loaded - Provider: {self._provider}, Profile: {self._profile}, Custom models: {self._custom_models}')
def get_model_for_component(self, component: str) -> str:
"""
Get the appropriate model for a given component.
Args:
component: Component name (e.g., 'planner', 'executor')
Returns:
Model identifier string
"""
model_type = COMPONENT_MODEL_MAPPING.get(component.lower(), COMPONENT_MODEL_MAPPING['default'])
if model_type in self._custom_models:
return self._custom_models[model_type]
profile = PROVIDER_PROFILES[self._profile]
return profile.get(model_type, profile['execution'])
def get_provider(self) -> str:
"""Get the configured provider."""
return self._provider
def get_timeout(self) -> int:
"""Get the configured timeout in seconds."""
if self._timeout > 0:
return self._timeout
profile = PROVIDER_PROFILES[self._profile]
return profile.get('default_timeout', 45)
def get_fallback_models(self) -> list[str]:
"""Get list of fallback models."""
if 'fallback' in self._custom_models:
return [self._custom_models['fallback']]
profile = PROVIDER_PROFILES[self._profile]
return [profile.get('fallback', profile['execution'])]
def get_api_base(self) -> Optional[str]:
"""Get the API base URL if configured for the provider."""
api_base = os.getenv('KATALYST_LLM_API_BASE')
if api_base:
return api_base
profile = PROVIDER_PROFILES.get(self._profile, {})
return profile.get('api_base')
def get_config_summary(self) -> Dict[str, any]:
"""Get a summary of the current configuration."""
summary = {'provider': self._provider, 'profile': self._profile, 'timeout': self.get_timeout(), 'models': {'reasoning': self.get_model_for_component('planner'), 'execution': self.get_model_for_component('executor'), 'fallback': self.get_fallback_models()[0]}, 'custom_overrides': self._custom_models}
api_base = self.get_api_base()
if api_base:
summary['api_base'] = api_base
return summary
|
class LLMConfig:
'''Manages LLM configuration with provider profiles and overrides.'''
def __init__(self):
pass
def _load_config(self):
'''Load configuration from environment variables.'''
pass
def get_model_for_component(self, component: str) -> str:
'''
Get the appropriate model for a given component.
Args:
component: Component name (e.g., 'planner', 'executor')
Returns:
Model identifier string
'''
pass
def get_provider(self) -> str:
'''Get the configured provider.'''
pass
def get_timeout(self) -> int:
'''Get the configured timeout in seconds.'''
pass
def get_fallback_models(self) -> list[str]:
'''Get list of fallback models.'''
pass
def get_api_base(self) -> Optional[str]:
'''Get the API base URL if configured for the provider.'''
pass
def get_config_summary(self) -> Dict[str, any]:
'''Get a summary of the current configuration.'''
pass
| 9
| 8
| 15
| 2
| 10
| 3
| 2
| 0.35
| 0
| 4
| 0
| 0
| 8
| 4
| 8
| 8
| 126
| 21
| 78
| 22
| 69
| 27
| 57
| 22
| 48
| 7
| 0
| 1
| 19
|
328,168
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/error_handling.py
|
error_handling.ErrorType
|
from enum import Enum
class ErrorType(Enum):
"""
Error types used across the Katalyst agent system.
These are used to create structured error messages that the LLM can understand.
"""
TOOL_ERROR = 'TOOL_ERROR'
PARSING_ERROR = 'PARSING_ERROR'
LLM_ERROR = 'LLM_ERROR'
REPLAN_REQUESTED = 'REPLAN_REQUESTED'
SANDBOX_VIOLATION = 'SANDBOX_VIOLATION'
ML_ANALYSIS_REQUIRED = 'ML_ANALYSIS_REQUIRED'
EXPLORATION_SUMMARY_MISSING = 'EXPLORATION_SUMMARY_MISSING'
EXPLORATION_FINDINGS_NOT_READ = 'EXPLORATION_FINDINGS_NOT_READ'
|
class ErrorType(Enum):
'''
Error types used across the Katalyst agent system.
These are used to create structured error messages that the LLM can understand.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1.33
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 14
| 1
| 9
| 9
| 8
| 12
| 9
| 9
| 8
| 0
| 4
| 0
| 0
|
328,169
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/exceptions.py
|
exceptions.SandboxViolationError
|
class SandboxViolationError(Exception):
"""
Raised when a tool attempts to access a path outside the project directory.
This exception enforces the security sandbox that restricts all file
operations to within the project root directory.
"""
def __init__(self, attempted_path: str, project_root: str):
self.attempted_path = attempted_path
self.project_root = project_root
super().__init__(f"Access denied: Path '{attempted_path}' is outside the project directory '{project_root}'. All file operations must remain within the project directory for security.")
|
class SandboxViolationError(Exception):
'''
Raised when a tool attempts to access a path outside the project directory.
This exception enforces the security sandbox that restricts all file
operations to within the project root directory.
'''
def __init__(self, attempted_path: str, project_root: str):
pass
| 2
| 1
| 7
| 0
| 7
| 0
| 1
| 0.63
| 1
| 2
| 0
| 0
| 1
| 2
| 1
| 11
| 15
| 2
| 8
| 4
| 6
| 5
| 5
| 4
| 3
| 1
| 3
| 0
| 1
|
328,170
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/exceptions.py
|
exceptions.UserInputRequiredException
|
from typing import List, Optional
class UserInputRequiredException(Exception):
"""
Raised when a tool needs user input during execution.
This exception is used to interrupt agent execution and return control
to the main REPL for handling user interaction.
"""
def __init__(self, question: str, suggested_responses: List[str], tool_name: str='request_user_input'):
self.question = question
self.suggested_responses = suggested_responses
self.tool_name = tool_name
super().__init__(f'User input required: {question}')
|
class UserInputRequiredException(Exception):
'''
Raised when a tool needs user input during execution.
This exception is used to interrupt agent execution and return control
to the main REPL for handling user interaction.
'''
def __init__(self, question: str, suggested_responses: List[str], tool_name: str='request_user_input'):
pass
| 2
| 1
| 10
| 0
| 10
| 0
| 1
| 0.45
| 1
| 2
| 0
| 0
| 1
| 3
| 1
| 11
| 18
| 2
| 11
| 10
| 4
| 5
| 6
| 5
| 4
| 1
| 3
| 0
| 1
|
328,171
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/app/execution_controller.py
|
katalyst.app.execution_controller.ExecutionController
|
from katalyst.katalyst_core.utils.logger import get_logger
import time
from typing import Callable
import sys
from rich.console import Console
import signal
import threading
class ExecutionController:
"""
Manages execution state and provides global Ctrl+C (SIGINT) handler functionality.
Handles:
- Single Ctrl+C: Cancels current operation
- Double Ctrl+C: Exits Katalyst completely
"""
def __init__(self):
self.logger = get_logger()
self.console = Console()
self._cancelled = threading.Event()
self._original_sigint_handler = None
self._last_interrupt_time = 0
self._interrupt_count = 0
self._double_press_window = 0.5
def is_cancelled(self) -> bool:
"""Check if execution has been cancelled."""
return self._cancelled.is_set()
def cancel(self):
"""Cancel the current execution."""
self._cancelled.set()
self.logger.info('Execution cancelled by user')
def reset(self):
"""Reset the cancellation state for a new execution."""
self._cancelled.clear()
def check_cancelled(self, context: str=''):
"""
Check if execution is cancelled and raise exception if so.
Args:
context: Optional context string for logging
Raises:
KeyboardInterrupt: If execution has been cancelled
"""
if self.is_cancelled():
msg = f"Execution cancelled{(f' during {context}' if context else '')}"
self.logger.info(msg)
raise KeyboardInterrupt(msg)
def setup_signal_handlers(self):
"""Setup signal handlers for graceful interruption."""
def signal_handler(signum, frame):
current_time = time.time()
time_since_last = current_time - self._last_interrupt_time
if time_since_last <= self._double_press_window:
self._interrupt_count += 1
if self._interrupt_count >= 2:
self.logger.info('Double Ctrl+C detected - Exiting Katalyst')
self.console.print('\n\n[bold red]Double interrupt detected. Exiting Katalyst...[/bold red]')
self.console.print('[green]Goodbye![/green]')
sys.exit(0)
else:
self._interrupt_count = 1
self.logger.info('Received interrupt signal (Ctrl+C) - Press again to exit')
self.console.print('\n[yellow]Execution cancelled. Press Ctrl+C again to exit Katalyst.[/yellow]')
self.cancel()
self._last_interrupt_time = current_time
self._original_sigint_handler = signal.signal(signal.SIGINT, signal_handler)
def restore_signal_handlers(self):
"""Restore original signal handlers."""
if self._original_sigint_handler is not None:
signal.signal(signal.SIGINT, self._original_sigint_handler)
self._original_sigint_handler = None
def wrap_execution(self, func: Callable, *args, **kwargs):
"""
Wrap a function execution with cancellation checking.
Args:
func: Function to execute
*args: Positional arguments for func
**kwargs: Keyword arguments for func
Returns:
Result of func if not cancelled
Raises:
KeyboardInterrupt: If execution is cancelled
"""
self.reset()
self.setup_signal_handlers()
try:
self.check_cancelled('initialization')
result = func(*args, **kwargs)
self.check_cancelled('completion')
return result
finally:
self.restore_signal_handlers()
|
class ExecutionController:
'''
Manages execution state and provides global Ctrl+C (SIGINT) handler functionality.
Handles:
- Single Ctrl+C: Cancels current operation
- Double Ctrl+C: Exits Katalyst completely
'''
def __init__(self):
pass
def is_cancelled(self) -> bool:
'''Check if execution has been cancelled.'''
pass
def cancel(self):
'''Cancel the current execution.'''
pass
def reset(self):
'''Reset the cancellation state for a new execution.'''
pass
def check_cancelled(self, context: str=''):
'''
Check if execution is cancelled and raise exception if so.
Args:
context: Optional context string for logging
Raises:
KeyboardInterrupt: If execution has been cancelled
'''
pass
def setup_signal_handlers(self):
'''Setup signal handlers for graceful interruption.'''
pass
def signal_handler(signum, frame):
pass
def restore_signal_handlers(self):
'''Restore original signal handlers.'''
pass
def wrap_execution(self, func: Callable, *args, **kwargs):
'''
Wrap a function execution with cancellation checking.
Args:
func: Function to execute
*args: Positional arguments for func
**kwargs: Keyword arguments for func
Returns:
Result of func if not cancelled
Raises:
KeyboardInterrupt: If execution is cancelled
'''
pass
| 10
| 8
| 13
| 2
| 8
| 4
| 2
| 0.68
| 0
| 5
| 0
| 0
| 8
| 7
| 8
| 8
| 110
| 22
| 53
| 21
| 43
| 36
| 51
| 21
| 41
| 3
| 0
| 2
| 14
|
328,172
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/app/main.py
|
katalyst.app.main.ReplInterruptHandler
|
import time
import os
class ReplInterruptHandler:
"""Handles interrupt signals for the REPL with double-press detection."""
def __init__(self, console):
self.console = console
self.last_interrupt_time = 0
self.interrupt_count = 0
self.double_press_window = 0.5
def __call__(self, signum, frame):
"""Handle SIGINT with double-press detection."""
current_time = time.time()
time_since_last = current_time - self.last_interrupt_time
if time_since_last <= self.double_press_window and self.interrupt_count >= 1:
self.console.print('\n\n[bold red]Double interrupt detected. Exiting Katalyst...[/bold red]')
self.console.print('[green]Goodbye![/green]')
os._exit(0)
else:
self.interrupt_count = 1
self.last_interrupt_time = current_time
self.console.print('\n[yellow]Press Ctrl+C again to exit Katalyst.[/yellow]')
raise KeyboardInterrupt()
|
class ReplInterruptHandler:
'''Handles interrupt signals for the REPL with double-press detection.'''
def __init__(self, console):
pass
def __call__(self, signum, frame):
'''Handle SIGINT with double-press detection.'''
pass
| 3
| 2
| 13
| 1
| 11
| 2
| 2
| 0.23
| 0
| 1
| 0
| 0
| 2
| 4
| 2
| 2
| 29
| 3
| 22
| 9
| 19
| 5
| 17
| 9
| 14
| 2
| 0
| 1
| 3
|
328,173
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/app/ui/input_handler.py
|
katalyst.app.ui.input_handler.InputHandler
|
import os
from rich.console import Console
from rich.table import Table
import sys
from typing import List, Optional, Callable, Union, Dict, Any
from rich.syntax import Syntax
from rich.prompt import Prompt, Confirm
from rich.panel import Panel
class InputHandler:
"""Unified handler for all user input operations in Katalyst."""
def __init__(self, console: Optional[Console]=None):
"""
Initialize the InputHandler.
Args:
console: Optional Rich Console instance. Creates new one if not provided.
"""
self.console = console or Console()
def prompt_text(self, prompt: str, default: Optional[str]=None, password: bool=False, show_default: bool=True) -> str:
"""
Get text input from user with consistent styling.
Args:
prompt: The prompt message to display
default: Default value if user presses Enter
password: Hide input for passwords
show_default: Whether to show the default value in prompt
Returns:
User's input as string
"""
return Prompt.ask(prompt, default=default, password=password, show_default=show_default, console=self.console)
def prompt_choice(self, prompt: str, choices: List[str], default: Optional[str]=None, show_choices: bool=True) -> str:
"""
Get a choice from a list of options.
Args:
prompt: The prompt message
choices: List of valid choices
default: Default choice if user presses Enter
show_choices: Whether to show available choices
Returns:
Selected choice as string
"""
return Prompt.ask(prompt, choices=choices, default=default, show_choices=show_choices, console=self.console)
def prompt_menu(self, title: str, options: List[Union[str, Dict[str, Any]]], prompt_text: str='Select an option', show_numbers: bool=True, allow_custom: bool=False, custom_prompt: str='Enter custom value') -> Union[str, int]:
"""
Display a menu with numbered options and get user selection.
Args:
title: Title for the menu
options: List of options (strings or dicts with 'label' and 'value')
prompt_text: Text to show when asking for selection
show_numbers: Whether to show numbers next to options
allow_custom: Allow user to enter custom value
custom_prompt: Prompt for custom value entry
Returns:
Selected option value or index (if options are strings)
"""
table = Table(show_header=False, box=None, padding=(0, 2))
option_values = []
for idx, option in enumerate(options, 1):
if isinstance(option, dict):
label = option.get('label', str(option))
value = option.get('value', option)
description = option.get('description', '')
else:
label = str(option)
value = option
description = ''
option_values.append(value)
if show_numbers:
number = f'[bold cyan]{idx}.[/bold cyan]'
table.add_row(number, label, f'[dim]{description}[/dim]' if description else '')
else:
table.add_row(label, f'[dim]{description}[/dim]' if description else '')
if allow_custom:
custom_idx = len(options) + 1
if show_numbers:
table.add_row(f'[bold cyan]{custom_idx}.[/bold cyan]', '[italic]Enter custom value[/italic]')
else:
table.add_row('[italic]Enter custom value[/italic]')
self.console.print(Panel(table, title=f'[bold]{title}[/bold]', expand=False))
if show_numbers:
valid_choices = [str(i) for i in range(1, len(options) + 1)]
if allow_custom:
valid_choices.append(str(custom_idx))
choice = self.prompt_choice(prompt_text, choices=valid_choices, show_choices=False)
choice_idx = int(choice) - 1
if allow_custom and choice_idx == len(options):
return self.prompt_text(custom_prompt)
return option_values[choice_idx]
else:
return self.prompt_text(prompt_text)
def confirm(self, prompt: str, default: bool=True, show_default: bool=True) -> bool:
"""
Get yes/no confirmation from user.
Args:
prompt: The confirmation prompt
default: Default value if user presses Enter
show_default: Whether to show the default value
Returns:
True for yes, False for no
"""
return Confirm.ask(prompt, default=default, show_default=show_default, console=self.console)
def show_file_preview(self, file_path: str, content: str, syntax: Optional[str]=None, line_numbers: bool=True, max_lines: Optional[int]=None) -> None:
"""
Display a file preview with syntax highlighting.
Args:
file_path: Path to the file being shown
content: Content to display
syntax: Language for syntax highlighting (auto-detected if None)
line_numbers: Whether to show line numbers
max_lines: Maximum number of lines to show (None for all)
"""
if syntax is None:
ext = os.path.splitext(file_path)[1].lstrip('.')
syntax = ext if ext else 'text'
lines = content.split('\n')
if max_lines and len(lines) > max_lines:
lines = lines[:max_lines]
truncated = True
else:
truncated = False
syntax_view = Syntax('\n'.join(lines), syntax, line_numbers=line_numbers, theme='monokai')
title = f'Preview: {file_path}'
if truncated:
title += f' (showing first {max_lines} lines)'
self.console.print(Panel(syntax_view, title=title, expand=False))
def prompt_file_approval(self, file_path: str, content: str, exists: bool=False, show_diff: bool=False, old_content: Optional[str]=None) -> bool:
"""
Enhanced file write approval with preview.
Args:
file_path: Path to the file
content: New content to write
exists: Whether file already exists
show_diff: Whether to show diff (requires old_content)
old_content: Existing content for diff display
Returns:
True if approved, False otherwise
"""
action = 'overwrite' if exists else 'create'
self.console.print(f'\n[bold yellow]Katalyst wants to {action} file:[/bold yellow] {file_path}')
self.show_file_preview(file_path, content, max_lines=50)
if show_diff and old_content is not None:
self.console.print('\n[bold]Changes:[/bold]')
old_lines = old_content.split('\n')
new_lines = content.split('\n')
if len(old_lines) != len(new_lines):
self.console.print(f'[dim]Line count: {len(old_lines)} → {len(new_lines)}[/dim]')
return self.confirm(f'Proceed with {action}?', default=True)
def show_status(self, message: str, status: str='info', title: Optional[str]=None) -> None:
"""
Display a status message with appropriate styling.
Args:
message: The message to display
status: Status type (info, success, warning, error)
title: Optional title for the message panel
"""
style_map = {'info': 'blue', 'success': 'green', 'warning': 'yellow', 'error': 'red'}
style = style_map.get(status, 'white')
if title:
self.console.print(Panel(message, title=title, border_style=style))
else:
self.console.print(f'[{style}]{message}[/{style}]')
def prompt_with_suggestions(self, question: str, suggestions: List[str], allow_custom: bool=True, show_descriptions: bool=False) -> str:
"""
Enhanced version of request_user_input with better display.
Args:
question: Question to ask the user
suggestions: List of suggested responses
allow_custom: Whether to allow custom answers
show_descriptions: Show descriptions for suggestions
Returns:
User's answer as string
"""
self.console.print(f'\n[bold cyan]Katalyst Question:[/bold cyan]')
self.console.print(Panel(question, expand=False))
options = []
for suggestion in suggestions:
if isinstance(suggestion, dict):
options.append(suggestion)
else:
options.append({'label': suggestion, 'value': suggestion})
return self.prompt_menu(title='Suggested Answers', options=options, prompt_text='Your answer (number or custom)', allow_custom=allow_custom, custom_prompt=f'Your answer to: {question}')
def prompt_arrow_menu(self, title: str, options: List[Union[str, Dict[str, Any]]], show_search_hint: bool=False, multi_select: bool=False, preselected_indices: Optional[List[int]]=None, quit_keys: List[str]=['escape', 'q']) -> Optional[Union[str, List[str]]]:
"""
Display an interactive menu with arrow key navigation.
Args:
title: Title for the menu
options: List of options (strings or dicts with 'label' and 'value')
show_search_hint: Whether to show search functionality hint
multi_select: Enable multi-selection mode
preselected_indices: Indices of pre-selected items (for multi-select)
quit_keys: Keys that will cancel selection
Returns:
Selected option value(s) or None if cancelled
"""
try:
from simple_term_menu import TerminalMenu
except ImportError:
self.console.print('[yellow]Arrow key navigation not available. Using numbered menu.[/yellow]')
return self.prompt_menu(title, options, show_numbers=True)
menu_entries = []
values = []
for option in options:
if isinstance(option, dict):
label = option.get('label', str(option))
value = option.get('value', option)
description = option.get('description', '')
if description:
menu_entries.append(f'{label} - {description}')
else:
menu_entries.append(label)
values.append(value)
else:
menu_entries.append(str(option))
values.append(option)
self.console.print(f'\n[bold]{title}[/bold]')
if show_search_hint and (not multi_select):
self.console.print('[dim]Use ↑↓ to navigate, Enter to select, Esc to cancel[/dim]')
elif multi_select:
self.console.print('[dim]Use ↑↓ to navigate, Space to toggle, Enter to confirm, Esc to cancel[/dim]')
else:
self.console.print('[dim]Use ↑↓ to navigate, Enter to select, Esc to cancel[/dim]')
if not (hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()):
self.console.print('[yellow]Not in interactive terminal. Using numbered menu.[/yellow]')
return self.prompt_menu(title, options, show_numbers=True)
try:
menu_cursor_style = ('fg_cyan', 'bold')
terminal_menu = TerminalMenu(menu_entries, title='', multi_select=multi_select, show_multi_select_hint=multi_select, preselected_entries=preselected_indices, quit_keys=quit_keys, menu_cursor_style=menu_cursor_style, clear_screen=False, cursor_index=0)
menu_entry_index = terminal_menu.show()
except (OSError, IOError, RuntimeError) as e:
self.console.print(f'[yellow]Arrow navigation failed ({type(e).__name__}). Using numbered menu.[/yellow]')
return self.prompt_menu(title, options, show_numbers=True)
if menu_entry_index is None:
return None
if multi_select:
if isinstance(menu_entry_index, tuple):
return [values[i] for i in menu_entry_index]
else:
return []
else:
return values[menu_entry_index]
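# --- Added editorial example (not in the original module) ---
# A minimal usage sketch for the class above; it assumes only InputHandler
# itself and the Rich console it wraps. Names such as 'demo' are illustrative.
if __name__ == '__main__':
    handler = InputHandler()
    name = handler.prompt_text('Project name', default='demo')
    template = handler.prompt_menu(
        title='Pick a template',
        options=[
            {'label': 'CLI app', 'value': 'cli', 'description': 'command-line skeleton'},
            {'label': 'Library', 'value': 'lib'},
        ],
        allow_custom=True,
    )
    if handler.confirm(f'Create {name!r} using template {template!r}?'):
        handler.show_status('Scaffolding created', status='success', title='Done')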
|
class InputHandler:
'''Unified handler for all user input operations in Katalyst.'''
def __init__(self, console: Optional[Console]=None):
'''
Initialize the InputHandler.
Args:
console: Optional Rich Console instance. Creates new one if not provided.
'''
pass
def prompt_text(self, prompt: str, default: Optional[str]=None, password: bool=False, show_default: bool=True) -> str:
'''
Get text input from user with consistent styling.
Args:
prompt: The prompt message to display
default: Default value if user presses Enter
password: Hide input for passwords
show_default: Whether to show the default value in prompt
Returns:
User's input as string
'''
pass
def prompt_choice(self, prompt: str, choices: List[str], default: Optional[str]=None, show_choices: bool=True) -> str:
'''
Get a choice from a list of options.
Args:
prompt: The prompt message
choices: List of valid choices
default: Default choice if user presses Enter
show_choices: Whether to show available choices
Returns:
Selected choice as string
'''
pass
def prompt_menu(self, title: str, options: List[Union[str, Dict[str, Any]]], prompt_text: str='Select an option', show_numbers: bool=True, allow_custom: bool=False, custom_prompt: str='Enter custom value') -> Union[str, int]:
'''
Display a menu with numbered options and get user selection.
Args:
title: Title for the menu
options: List of options (strings or dicts with 'label' and 'value')
prompt_text: Text to show when asking for selection
show_numbers: Whether to show numbers next to options
allow_custom: Allow user to enter custom value
custom_prompt: Prompt for custom value entry
Returns:
Selected option value or index (if options are strings)
'''
pass
def confirm(self, prompt: str, default: bool=True, show_default: bool=True) -> bool:
'''
Get yes/no confirmation from user.
Args:
prompt: The confirmation prompt
default: Default value if user presses Enter
show_default: Whether to show the default value
Returns:
True for yes, False for no
'''
pass
def show_file_preview(self, file_path: str, content: str, syntax: Optional[str]=None, line_numbers: bool=True, max_lines: Optional[int]=None) -> None:
'''
Display a file preview with syntax highlighting.
Args:
file_path: Path to the file being shown
content: Content to display
syntax: Language for syntax highlighting (auto-detected if None)
line_numbers: Whether to show line numbers
max_lines: Maximum number of lines to show (None for all)
'''
pass
def prompt_file_approval(self, file_path: str, content: str, exists: bool=False, show_diff: bool=False, old_content: Optional[str]=None) -> bool:
'''
Enhanced file write approval with preview.
Args:
file_path: Path to the file
content: New content to write
exists: Whether file already exists
show_diff: Whether to show diff (requires old_content)
old_content: Existing content for diff display
Returns:
True if approved, False otherwise
'''
pass
def show_status(self, message: str, status: str='info', title: Optional[str]=None) -> None:
'''
Display a status message with appropriate styling.
Args:
message: The message to display
status: Status type (info, success, warning, error)
title: Optional title for the message panel
'''
pass
def prompt_with_suggestions(self, question: str, suggestions: List[str], allow_custom: bool=True, show_descriptions: bool=False) -> str:
'''
Enhanced version of request_user_input with better display.
Args:
question: Question to ask the user
suggestions: List of suggested responses
allow_custom: Whether to allow custom answers
show_descriptions: Show descriptions for suggestions
Returns:
User's answer as string
'''
pass
def prompt_arrow_menu(self, title: str, options: List[Union[str, Dict[str, Any]]], show_search_hint: bool=False, multi_select: bool=False, preselected_indices: Optional[List[int]]=None, quit_keys: List[str]=['escape', 'q']) -> Optional[Union[str, List[str]]]:
'''
Display an interactive menu with arrow key navigation.
Args:
title: Title for the menu
options: List of options (strings or dicts with 'label' and 'value')
show_search_hint: Whether to show search functionality hint
multi_select: Enable multi-selection mode
preselected_indices: Indices of pre-selected items (for multi-select)
quit_keys: Keys that will cancel selection
Returns:
Selected option value(s) or None if cancelled
'''
pass
| 11
| 11
| 41
| 5
| 25
| 12
| 4
| 0.49
| 0
| 18
| 0
| 0
| 10
| 1
| 10
| 10
| 424
| 58
| 247
| 98
| 183
| 122
| 122
| 45
| 110
| 12
| 0
| 3
| 41
|
328,174
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/data_science_agent/kernel_manager_simple.py
|
katalyst.data_science_agent.kernel_manager_simple.SimpleKernelManager
|
from queue import Empty
from typing import Dict, Any, Optional
import time
import os
from jupyter_client import KernelManager
from katalyst.katalyst_core.utils.logger import get_logger
class SimpleKernelManager:
"""Simple kernel manager without complex state management."""
def __init__(self):
self.logger = get_logger('kernel_manager')
self.kernel_manager: Optional[KernelManager] = None
self.kernel_client = None
def ensure_kernel(self) -> None:
"""Ensure kernel is running."""
if self.kernel_manager and self.kernel_manager.is_alive():
return
if self.kernel_manager:
try:
if self.kernel_client:
self.kernel_client.stop_channels()
self.kernel_manager.shutdown_kernel(now=True)
time.sleep(0.5)
except Exception:
pass
self.kernel_manager = None
self.kernel_client = None
self.logger.info('[KERNEL] Starting new Jupyter kernel...')
self.kernel_manager = KernelManager()
self.kernel_manager.start_kernel()
self.kernel_client = self.kernel_manager.client()
self.kernel_client.start_channels()
self.kernel_client.wait_for_ready(timeout=10)
self.logger.info('[KERNEL] Kernel started successfully')
def execute_code(self, code: str, timeout: int=120) -> Dict[str, Any]:
"""Execute code in the kernel."""
self.ensure_kernel()
env_timeout = os.getenv('KATALYST_KERNEL_TIMEOUT')
if env_timeout:
try:
timeout = int(env_timeout)
except ValueError:
pass
msg_id = self.kernel_client.execute(code)
outputs = []
errors = []
data = {}
start_time = time.time()
while True:
try:
if time.time() - start_time > timeout:
self.logger.warning(f'[KERNEL] Code execution timed out after {timeout}s')
errors.append({'ename': 'TimeoutError', 'evalue': f'Code execution exceeded {timeout} seconds', 'traceback': []})
break
msg = self.kernel_client.get_iopub_msg(timeout=1)
if msg.get('parent_header', {}).get('msg_id') != msg_id:
continue
msg_type = msg.get('header', {}).get('msg_type', '')
content = msg.get('content', {})
if msg_type == 'stream':
outputs.append(content.get('text', ''))
elif msg_type == 'error':
errors.append({'ename': content.get('ename', 'Error'), 'evalue': content.get('evalue', 'Unknown error'), 'traceback': content.get('traceback', [])})
elif msg_type == 'execute_result':
if 'text/plain' in content.get('data', {}):
outputs.append(content['data']['text/plain'])
data.update(content.get('data', {}))
elif msg_type == 'display_data':
data.update(content.get('data', {}))
elif msg_type == 'status':
if content.get('execution_state') == 'idle':
break
except Empty:
continue
except Exception as e:
self.logger.error(f'[KERNEL] Error processing message: {e}')
errors.append({'ename': 'KernelError', 'evalue': str(e), 'traceback': []})
break
return {'success': len(errors) == 0, 'outputs': outputs, 'errors': errors, 'data': data}
def restart_kernel(self) -> None:
"""Restart the kernel."""
self.logger.info('[KERNEL] Restarting kernel...')
if self.kernel_manager:
try:
self.kernel_client.stop_channels()
self.kernel_manager.shutdown_kernel(now=True)
time.sleep(0.5)
except Exception:
pass
self.kernel_manager = None
self.kernel_client = None
self.ensure_kernel()
def shutdown(self) -> None:
"""Shutdown the kernel."""
if self.kernel_manager:
self.logger.info('[KERNEL] Shutting down kernel...')
try:
if self.kernel_client:
self.kernel_client.stop_channels()
if self.kernel_manager.is_alive():
self.kernel_manager.shutdown_kernel(now=True)
except Exception:
pass
finally:
self.kernel_manager = None
self.kernel_client = None
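# --- Added editorial example (not in the original module) ---
# A minimal usage sketch, assuming a local Jupyter kernel spec is installed so
# jupyter_client can start one. It exercises execute_code() and shutdown() only.
if __name__ == '__main__':
    manager = SimpleKernelManager()
    try:
        result = manager.execute_code('print(1 + 1)', timeout=30)
        if result['success']:
            print(''.join(result['outputs']))  # expected output: 2
        else:
            print('kernel error:', result['errors'])
    finally:
        manager.shutdown()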
|
class SimpleKernelManager:
'''Simple kernel manager without complex state management.'''
def __init__(self):
pass
def ensure_kernel(self) -> None:
'''Ensure kernel is running.'''
pass
def execute_code(self, code: str, timeout: int=120) -> Dict[str, Any]:
'''Execute code in the kernel.'''
pass
def restart_kernel(self) -> None:
'''Restart the kernel.'''
pass
def shutdown(self) -> None:
'''Shutdown the kernel.'''
pass
| 6
| 5
| 28
| 3
| 22
| 3
| 6
| 0.16
| 0
| 6
| 0
| 0
| 5
| 3
| 5
| 5
| 148
| 19
| 111
| 19
| 105
| 18
| 89
| 18
| 83
| 15
| 0
| 4
| 29
|
328,175
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/langchain_models.py
|
langchain_models.DummyResponse
|
from pydantic import BaseModel
class DummyResponse(BaseModel):
response: str
|
class DummyResponse(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
328,176
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/langchain_models.py
|
langchain_models.RetryChatLiteLLMRouter
|
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
from pydantic import BaseModel
from typing import Optional, Any
from langchain_litellm import ChatLiteLLMRouter
class RetryChatLiteLLMRouter(ChatLiteLLMRouter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _convert_tool_choice(self, tool_choice):
"""Convert LangChain tool_choice values to OpenAI-compatible values"""
if tool_choice == 'any':
return 'auto'
return tool_choice
def _validate_and_return_structured_response(self, response: Any, response_format: BaseModel) -> BaseModel:
"""Validate and return structured response"""
try:
if hasattr(response, 'content'):
logger.debug(f'Response: {response}')
content = response.content
else:
content = str(response)
try:
return response_format.model_validate_json(content)
except Exception:
logger.debug(f"Direct validation failed, falling back to extraction for model {getattr(self, 'model', '')}, response content: {content}")
except Exception as e:
logger.error(f'Failed to validate structured response: {e}')
logger.error(f'Response content: {content[:500]}...')
raise ValueError(f'Failed to parse structured response: {e}')
@retry(stop=stop_after_attempt(3), wait=wait_fixed(30), retry=retry_if_exception_type(lambda exc: isinstance(exc, Exception) and (not isinstance(exc, getattr(__import__('katalyst.katalyst_core.utils.exceptions', fromlist=['UserInputRequiredException']), 'UserInputRequiredException', Exception)))), reraise=True, before_sleep=lambda retry_state: logger.warning(f"[Retry] Retrying invoke due to: {(retry_state.outcome.exception() if retry_state.outcome else 'unknown error')} (attempt {retry_state.attempt_number})"))
def invoke(self, *args, **kwargs):
response = super().invoke(*args, **kwargs)
response_format = kwargs.pop('response_format', None)
if response_format:
return self._validate_and_return_structured_response(response, response_format)
return response
@retry(stop=stop_after_attempt(3), wait=wait_fixed(30), retry=retry_if_exception_type(lambda exc: isinstance(exc, Exception) and (not isinstance(exc, getattr(__import__('katalyst.katalyst_core.utils.exceptions', fromlist=['UserInputRequiredException']), 'UserInputRequiredException', Exception)))), reraise=True, before_sleep=lambda retry_state: logger.warning(f"[Retry] Retrying invoke due to: {(retry_state.outcome.exception() if retry_state.outcome else 'unknown error')} (attempt {retry_state.attempt_number})"))
async def ainvoke(self, *args, **kwargs):
response = await super().ainvoke(*args, **kwargs)
return response
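# --- Added editorial example (not in the original module) ---
# A standalone sketch of the tenacity retry policy used on invoke/ainvoke above,
# reduced to a toy function so it runs without a LiteLLM router. The attempt
# count mirrors the decorators (3 attempts); the wait is shortened to 0s here.
from tenacity import retry, stop_after_attempt, wait_fixed

_attempts = {'n': 0}

@retry(stop=stop_after_attempt(3), wait=wait_fixed(0), reraise=True)
def _flaky_call():
    _attempts['n'] += 1
    if _attempts['n'] < 3:
        raise RuntimeError(f"transient failure #{_attempts['n']}")
    return 'ok on attempt 3'

if __name__ == '__main__':
    print(_flaky_call())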
|
class RetryChatLiteLLMRouter(ChatLiteLLMRouter):
def __init__(self, *args, **kwargs):
pass
def _convert_tool_choice(self, tool_choice):
'''Convert LangChain tool_choice values to OpenAI-compatible values'''
pass
def _validate_and_return_structured_response(self, response: Any, response_format: BaseModel) -> BaseModel:
'''Validate and return structured response'''
pass
@retry(stop=stop_after_attempt(3), wait=wait_fixed(30), retry=retry_if_exception_type(lambda exc: isinstance(exc, Exception) and (not isinstance(exc, getattr(__import__('katalyst.katalyst_core.utils.exceptions', fromlist=['UserInputRequiredException']), 'UserInputRequiredException', Exception)))), reraise=True, before_sleep=lambda retry_state: logger.warning(f"[Retry] Retrying invoke due to: {(retry_state.outcome.exception() if retry_state.outcome else 'unknown error')} (attempt {retry_state.attempt_number})"))
def invoke(self, *args, **kwargs):
pass
@retry(stop=stop_after_attempt(3), wait=wait_fixed(30), retry=retry_if_exception_type(lambda exc: isinstance(exc, Exception) and (not isinstance(exc, getattr(__import__('katalyst.katalyst_core.utils.exceptions', fromlist=['UserInputRequiredException']), 'UserInputRequiredException', Exception)))), reraise=True, before_sleep=lambda retry_state: logger.warning(f"[Retry] Retrying invoke due to: {(retry_state.outcome.exception() if retry_state.outcome else 'unknown error')} (attempt {retry_state.attempt_number})"))
async def ainvoke(self, *args, **kwargs):
pass
| 8
| 2
| 8
| 0
| 7
| 1
| 2
| 0.09
| 1
| 6
| 0
| 0
| 5
| 0
| 5
| 5
| 61
| 4
| 53
| 31
| 27
| 5
| 30
| 10
| 24
| 4
| 1
| 2
| 10
|
328,177
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/models.py
|
models.EnhancedPlannerOutput
|
from pydantic import BaseModel, Field
from typing import List, Optional, Dict
class EnhancedPlannerOutput(BaseModel):
subtasks: List[TaskInfo] = Field(..., description='List of subtasks generated by the planner LLM, each as a single actionable instruction with task type classification.')
|
class EnhancedPlannerOutput(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 2
| 4
| 0
| 2
| 2
| 1
| 0
| 5
| 0
| 0
|
328,178
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/models.py
|
models.PlannerOutput
|
from typing import List, Optional, Dict
from pydantic import BaseModel, Field
class PlannerOutput(BaseModel):
subtasks: List[str] = Field(..., description='List of subtasks generated by the planner LLM, each as a single actionable instruction.')
|
class PlannerOutput(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 5
| 0
| 5
| 2
| 4
| 0
| 2
| 2
| 1
| 0
| 5
| 0
| 0
|
328,179
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/models.py
|
models.ReplannerOutput
|
from typing import List, Optional, Dict
from pydantic import BaseModel, Field
class ReplannerOutput(BaseModel):
is_complete: bool = Field(..., description='True if the overall goal is achieved, False otherwise.')
subtasks: List[str] = Field(default_factory=list, description='New list of subtasks if not complete. Empty if is_complete is True.')
|
class ReplannerOutput(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 8
| 0
| 8
| 3
| 7
| 0
| 3
| 3
| 2
| 0
| 5
| 0
| 0
|
328,180
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/models.py
|
models.RequestUserInputArgs
|
from typing import List, Optional, Dict
from pydantic import BaseModel, Field
class RequestUserInputArgs(BaseModel):
"""Arguments for the request_user_input tool."""
question_to_ask_user: str = Field(..., description='The question to ask the user')
suggested_responses: List[str] = Field(..., description='List of suggested answer options. Must be non-empty.', min_items=1)
|
class RequestUserInputArgs(BaseModel):
'''Arguments for the request_user_input tool.'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0.14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 9
| 1
| 7
| 3
| 6
| 1
| 3
| 3
| 2
| 0
| 5
| 0
| 0
|
328,181
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/models.py
|
models.TaskInfo
|
from pydantic import BaseModel, Field
class TaskInfo(BaseModel):
description: str = Field(..., description='The task to be performed')
task_type: TaskType = Field(..., description='Classification of the task')
|
class TaskInfo(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 5
| 0
| 0
|
328,182
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/models.py
|
models.TaskType
|
from enum import Enum
class TaskType(str, Enum):
TEST_CREATION = 'test_creation'
REFACTOR = 'refactor'
DOCUMENTATION = 'documentation'
DATA_EXPLORATION = 'data_exploration'
FEATURE_ENGINEERING = 'feature_engineering'
MODEL_TRAINING = 'model_training'
MODEL_EVALUATION = 'model_evaluation'
OTHER = 'other'
|
class TaskType(str, Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.33
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 14
| 2
| 9
| 9
| 8
| 3
| 9
| 9
| 8
| 0
| 4
| 0
| 0
|
328,183
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/state.py
|
state.KatalystState
|
from typing import List, Tuple, Optional, Union, Callable, Dict, Any, Set
from katalyst.app import config
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from pydantic import BaseModel, Field
class KatalystState(BaseModel):
task: str = Field(..., description='Top-level user request that kicks off the whole run.')
auto_approve: bool = Field(False, description='If True, file-writing tools skip interactive confirmation.')
project_root_cwd: str = Field(..., description='The CWD from which Katalyst was launched.')
user_input_fn: Optional[Callable[[str], str]] = Field(default=None, exclude=True, description='Function to use for user input (not persisted).')
execution_mode: str = Field(default='action', description="Current mode: 'plan' (analysis only via conversation agent) or 'action' (full execution with all agents).")
task_queue: List[str] = Field(default_factory=list, description='Remaining tasks produced by the planner.')
task_idx: int = Field(0, description='Index of the task currently being executed (0-based).')
original_plan: Optional[List[str]] = Field(default=None, description='The initial plan created by the planner.')
messages: List[BaseMessage] = Field(default_factory=list, description='Accumulated messages for the persistent agent conversation')
agent_outcome: Optional[Union[AgentAction, AgentFinish]] = Field(None, description='Output of the latest LLM call: • AgentAction → invoke tool\n• AgentFinish → task completed')
completed_tasks: List[Tuple[str, str]] = Field(default_factory=list, description='(task, summary) tuples appended after each task finishes.')
tool_execution_history: List[Dict[str, str]] = Field(default_factory=list, description='Concise history of all tool executions across all tasks. Each entry contains: task, tool_name, status (success/error), summary. Used by replanner to understand full execution context.')
error_message: Optional[str] = Field(None, description='Captured exception text with trace (fed back into LLM for self-repair).')
next_agent: Optional[str] = Field(None, description='Next agent to route to (used by router node)')
plan_feedback: Optional[str] = Field(None, description='User feedback about the generated plan to be incorporated in replanning.')
response: Optional[str] = Field(None, description='Direct response from conversation agent or other response-oriented agents.')
allowed_external_paths: Set[str] = Field(default_factory=set, description='External file paths explicitly mentioned by user that can bypass sandbox.')
needs_user_input: bool = Field(False, description='Flag indicating the agent needs user input to continue.')
user_input_required: Optional[Dict[str, Any]] = Field(None, description='Details about the user input needed (question, suggested_responses, tool_name).')
user_input_response: Optional[str] = Field(None, description="User's response to the input request.")
inner_cycles: int = Field(0, description='Count of agent↔tool cycles in the current task.')
max_inner_cycles: int = Field(default=config.MAX_INNER_CYCLES, description='Abort inner loop once this many cycles are hit.')
outer_cycles: int = Field(0, description='Count of planner→replanner cycles for the whole run.')
max_outer_cycles: int = Field(default=config.MAX_OUTER_CYCLES, description='Abort outer loop once this many cycles are hit.')
class Config:
arbitrary_types_allowed = True
| null
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 115
| 8
| 99
| 27
| 97
| 9
| 27
| 27
| 25
| 0
| 5
| 0
| 0
|
328,184
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/task_manager.py
|
task_manager.TaskManager
|
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple, Set
import json
from katalyst.app.config import KATALYST_DIR
from katalyst.katalyst_core.utils.logger import get_logger
class TaskManager:
"""Manages tasks with automatic persistence to disk (Singleton)."""
_instance = None
def __new__(cls, file_path: Optional[Path]=None):
"""Ensure only one instance exists (Singleton pattern)."""
if cls._instance is None:
cls._instance = super(TaskManager, cls).__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self, file_path: Optional[Path]=None):
"""
Initialize TaskManager (only runs once due to singleton).
Args:
file_path: Optional custom path for task storage.
Defaults to .katalyst/tasks.json
"""
if self._initialized:
return
self.logger = get_logger()
self.file_path = file_path or KATALYST_DIR / 'tasks.json'
self._tasks: List[Dict[str, Any]] = []
self._loaded = False
self._initialized = True
@property
def tasks(self) -> List[Dict[str, Any]]:
"""Get all tasks, loading from disk if needed."""
if not self._loaded:
self.load()
return self._tasks
@property
def pending(self) -> List[Dict[str, Any]]:
"""Get pending tasks."""
return [t for t in self.tasks if t.get('status') == 'pending']
@property
def in_progress(self) -> List[Dict[str, Any]]:
"""Get in-progress tasks."""
return [t for t in self.tasks if t.get('status') == 'in_progress']
@property
def completed(self) -> List[Dict[str, Any]]:
"""Get completed tasks."""
return [t for t in self.tasks if t.get('status') == 'completed']
def load(self) -> bool:
"""
Load tasks from disk.
Returns:
True if loaded successfully
"""
try:
if not self.file_path.exists():
self.logger.debug(f'[TASK_MANAGER] No existing task file at {self.file_path}')
self._tasks = []
self._loaded = True
return True
with open(self.file_path, 'r') as f:
data = json.load(f)
if not isinstance(data, dict) or 'tasks' not in data:
self.logger.warning('[TASK_MANAGER] Invalid task file format')
self._tasks = []
self._loaded = True
return False
self._tasks = data['tasks']
self._loaded = True
self.logger.info(f'[TASK_MANAGER] Loaded {len(self._tasks)} tasks from previous session')
if self.pending or self.in_progress:
self.logger.info(f'[TASK_MANAGER] Status: {len(self.pending)} pending, {len(self.in_progress)} in progress, {len(self.completed)} completed')
return True
except Exception as e:
self.logger.error(f'[TASK_MANAGER] Failed to load tasks: {e}')
self._tasks = []
self._loaded = True
return False
def save(self) -> bool:
"""
Save tasks to disk.
Returns:
True if saved successfully
"""
try:
self.file_path.parent.mkdir(exist_ok=True)
with open(self.file_path, 'w') as f:
json.dump({'version': '1.0', 'tasks': self._tasks}, f, indent=2)
self.logger.debug(f'[TASK_MANAGER] Saved {len(self._tasks)} tasks to {self.file_path}')
return True
except Exception as e:
self.logger.error(f'[TASK_MANAGER] Failed to save tasks: {e}')
return False
def clear(self) -> bool:
"""
Clear all tasks and remove storage file.
Returns:
True if cleared successfully
"""
try:
self._tasks = []
if self.file_path.exists():
self.file_path.unlink()
self.logger.debug('[TASK_MANAGER] Cleared task storage')
return True
except Exception as e:
self.logger.error(f'[TASK_MANAGER] Failed to clear tasks: {e}')
return False
def add(self, content: str, status: str='pending', priority: str='medium', task_id: Optional[str]=None) -> Dict[str, Any]:
"""
Add a new task.
Args:
content: Task description
status: Task status (pending, in_progress, completed)
priority: Task priority (low, medium, high)
task_id: Optional ID, will generate if not provided
Returns:
The created task
"""
if not task_id:
max_id = max([int(t['id']) for t in self.tasks if t.get('id', '').isdigit()] + [0])
task_id = str(max_id + 1)
task = {'id': task_id, 'content': content, 'status': status, 'priority': priority}
self._tasks.append(task)
self.save()
return task
def update(self, task_id: str, **updates) -> Optional[Dict[str, Any]]:
"""
Update a task by ID.
Args:
task_id: ID of task to update
**updates: Fields to update (content, status, priority)
Returns:
Updated task if found, None otherwise
"""
for task in self._tasks:
if task.get('id') == task_id:
task.update(updates)
self.save()
return task
return None
def get_by_id(self, task_id: str) -> Optional[Dict[str, Any]]:
"""Get a task by ID."""
for task in self._tasks:
if task.get('id') == task_id:
return task
return None
def set_tasks(self, tasks: List[Dict[str, Any]]) -> bool:
"""
Replace all tasks with a new list.
Args:
tasks: New task list
Returns:
True if saved successfully
"""
self._tasks = tasks
self._loaded = True
return self.save()
def get_summary(self) -> str:
"""
Get a human-readable summary of tasks.
Returns:
Formatted summary string
"""
if not self.tasks:
return 'No tasks found'
summary_lines = []
if self.in_progress:
summary_lines.append('In Progress:')
for t in self.in_progress:
summary_lines.append(f" - {t['content']}")
if self.pending:
summary_lines.append('Pending:')
for t in self.pending[:5]:
summary_lines.append(f" - {t['content']}")
if len(self.pending) > 5:
summary_lines.append(f' ... and {len(self.pending) - 5} more')
if self.completed:
summary_lines.append(f'Completed: {len(self.completed)} tasks')
return '\n'.join(summary_lines) if summary_lines else 'All tasks completed!'
@classmethod
def get_instance(cls) -> 'TaskManager':
"""Get the singleton instance of TaskManager."""
if cls._instance is None:
cls._instance = TaskManager()
return cls._instance
def build_task_hierarchy(self, state: 'KatalystState', include_progress: bool=True) -> List[str]:
"""
Build a hierarchical view of all tasks showing parent-child relationships.
Args:
state: The current Katalyst state
include_progress: Whether to include checkmarks for completed tasks
Returns:
List of formatted task lines
"""
lines = []
completed_task_names = {task[0] for task in state.completed_tasks} if include_progress else set()
all_tasks = []
if state.original_plan:
all_tasks.extend(state.original_plan)
for task in state.task_queue:
if task not in all_tasks:
all_tasks.append(task)
for task_name, _ in state.completed_tasks:
if task_name not in all_tasks:
all_tasks.append(task_name)
for task_idx, task in enumerate(all_tasks):
task_num = task_idx + 1
is_completed = task in completed_task_names
marker = '✓' if is_completed and include_progress else ' '
lines.append(f'{marker} {task_num}. {task}')
return lines
def get_task_progress_display(self, state: 'KatalystState') -> str:
"""
Generate a complete task progress display with header and formatting.
Args:
state: The current Katalyst state
Returns:
Formatted progress display string
"""
all_task_count = len(set(list(state.original_plan or []) + list(state.task_queue) + [task[0] for task in state.completed_tasks]))
total_tasks = all_task_count
completed_count = len(state.completed_tasks)
lines = [f"\n{'=' * 60}", f'=== Task Progress ({completed_count}/{total_tasks} completed) ===', f"{'=' * 60}"]
lines.extend(self.build_task_hierarchy(state, include_progress=True))
lines.append(f"{'=' * 60}\n")
return '\n'.join(lines)
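# --- Added editorial example (not in the original module) ---
# A minimal usage sketch, assuming the .katalyst directory is writable. It shows
# the add / update / get_summary round trip on the singleton, then cleans up.
if __name__ == '__main__':
    tm = TaskManager.get_instance()
    task = tm.add('Write unit tests', priority='high')
    tm.update(task['id'], status='in_progress')
    print(tm.get_summary())
    tm.clear()  # removes tasks.json so the sketch leaves no state behind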
|
class TaskManager:
'''Manages tasks with automatic persistence to disk (Singleton).'''
def __new__(cls, file_path: Optional[Path]=None):
'''Ensure only one instance exists (Singleton pattern).'''
pass
def __init__(self, file_path: Optional[Path]=None):
'''
Initialize TaskManager (only runs once due to singleton).
Args:
file_path: Optional custom path for task storage.
Defaults to .katalyst/tasks.json
'''
pass
@property
def tasks(self) -> List[Dict[str, Any]]:
'''Get all tasks, loading from disk if needed.'''
pass
@property
def pending(self) -> List[Dict[str, Any]]:
'''Get pending tasks.'''
pass
@property
def in_progress(self) -> List[Dict[str, Any]]:
'''Get in-progress tasks.'''
pass
@property
def completed(self) -> List[Dict[str, Any]]:
'''Get completed tasks.'''
pass
def load(self) -> bool:
'''
Load tasks from disk.
Returns:
True if loaded successfully
'''
pass
def save(self) -> bool:
'''
Save tasks to disk.
Returns:
True if saved successfully
'''
pass
def clear(self) -> bool:
'''
Clear all tasks and remove storage file.
Returns:
True if cleared successfully
'''
pass
def add(self, content: str, status: str='pending', priority: str='medium', task_id: Optional[str]=None) -> Dict[str, Any]:
'''
Add a new task.
Args:
content: Task description
status: Task status (pending, in_progress, completed)
priority: Task priority (low, medium, high)
task_id: Optional ID, will generate if not provided
Returns:
The created task
'''
pass
def update(self, task_id: str, **updates) -> Optional[Dict[str, Any]]:
'''
Update a task by ID.
Args:
task_id: ID of task to update
**updates: Fields to update (content, status, priority)
Returns:
Updated task if found, None otherwise
'''
pass
def get_by_id(self, task_id: str) -> Optional[Dict[str, Any]]:
'''Get a task by ID.'''
pass
def set_tasks(self, tasks: List[Dict[str, Any]]) -> bool:
'''
Replace all tasks with a new list.
Args:
tasks: New task list
Returns:
True if saved successfully
'''
pass
def get_summary(self) -> str:
'''
Get a human-readable summary of tasks.
Returns:
Formatted summary string
'''
pass
@classmethod
def get_instance(cls) -> 'TaskManager':
'''Get the singleton instance of TaskManager.'''
pass
def build_task_hierarchy(self, state: 'KatalystState', include_progress: bool=True) -> List[str]:
'''
Build a hierarchical view of all tasks showing parent-child relationships.
Args:
state: The current Katalyst state
include_progress: Whether to include checkmarks for completed tasks
Returns:
List of formatted task lines
'''
pass
def get_task_progress_display(self, state: 'KatalystState') -> str:
'''
Generate a complete task progress display with header and formatting.
Args:
state: The current Katalyst state
Returns:
Formatted progress display string
'''
pass
| 23
| 18
| 17
| 3
| 9
| 5
| 3
| 0.55
| 0
| 11
| 0
| 0
| 16
| 5
| 17
| 17
| 320
| 63
| 166
| 54
| 142
| 92
| 141
| 43
| 123
| 9
| 0
| 2
| 49
|
328,185
|
marutilai/Katalyst
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/marutilai_Katalyst/src/katalyst/katalyst_core/utils/tool_repetition_detector.py
|
tool_repetition_detector.ToolRepetitionDetector
|
from pydantic import BaseModel, Field, ConfigDict
from collections import deque
import hashlib
import json
from typing import Dict, Any, Tuple
class ToolRepetitionDetector(BaseModel):
"""
Detects repetitive tool calls to prevent infinite loops.
This detector tracks recent tool calls and identifies when the agent
is stuck in a loop by calling the same tool with identical inputs
multiple times.
"""
recent_calls: deque[Tuple[str, str]] = Field(default_factory=lambda: deque(maxlen=5), description='Recent tool calls stored as (tool_name, input_hash) tuples')
repetition_threshold: int = Field(default=3, description='Number of identical calls before flagging as repetition')
model_config = ConfigDict(arbitrary_types_allowed=True)
def _hash_input(self, tool_input: Dict[str, Any]) -> str:
"""
Create a hash of tool input for comparison.
Args:
tool_input: Dictionary of tool input parameters
Returns:
MD5 hash of the normalized input
"""
try:
normalized = json.dumps(tool_input, sort_keys=True)
except (TypeError, ValueError):
normalized = str(tool_input)
return hashlib.md5(normalized.encode()).hexdigest()
def check(self, tool_name: str, tool_input: Dict[str, Any]) -> bool:
"""
Check if this tool call is a repetition.
Args:
tool_name: Name of the tool being called
tool_input: Input parameters for the tool
Returns:
True if OK to proceed (not a repetition), False if repetition detected
"""
input_hash = self._hash_input(tool_input)
current_call = (tool_name, input_hash)
repetition_count = sum((1 for call in self.recent_calls if call == current_call))
self.recent_calls.append(current_call)
return repetition_count < self.repetition_threshold
def reset(self):
"""Reset the detector (e.g., when starting a new task)."""
self.recent_calls.clear()
def get_repetition_count(self, tool_name: str, tool_input: Dict[str, Any]) -> int:
"""
Get the number of times this exact call has been made recently.
Args:
tool_name: Name of the tool
tool_input: Input parameters for the tool
Returns:
Number of times this call appears in recent history
"""
input_hash = self._hash_input(tool_input)
target_call = (tool_name, input_hash)
return sum((1 for call in self.recent_calls if call == target_call))
def is_consecutive_duplicate(self, tool_name: str, tool_input: Dict[str, Any]) -> bool:
"""
Check if this is an immediate back-to-back duplicate call.
Args:
tool_name: Name of the tool
tool_input: Input parameters for the tool
Returns:
True if this is the exact same call as the previous one
"""
if not self.recent_calls:
return False
input_hash = self._hash_input(tool_input)
current_call = (tool_name, input_hash)
return self.recent_calls[-1] == current_call
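# --- Added editorial example (not in the original module) ---
# A minimal sketch of the loop guard above: with the default threshold of 3,
# the first three identical calls are allowed and the fourth is rejected.
if __name__ == '__main__':
    detector = ToolRepetitionDetector()
    args = {'path': 'README.md'}
    for attempt in range(1, 5):
        ok = detector.check('read_file', args)
        print(f"attempt {attempt}: {'proceed' if ok else 'repetition detected'}")
    detector.reset()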
|
class ToolRepetitionDetector(BaseModel):
'''
Detects repetitive tool calls to prevent infinite loops.
This detector tracks recent tool calls and identifies when the agent
is stuck in a loop by calling the same tool with identical inputs
multiple times.
'''
def _hash_input(self, tool_input: Dict[str, Any]) -> str:
'''
Create a hash of tool input for comparison.
Args:
tool_input: Dictionary of tool input parameters
Returns:
MD5 hash of the normalized input
'''
pass
def check(self, tool_name: str, tool_input: Dict[str, Any]) -> bool:
'''
Check if this tool call is a repetition.
Args:
tool_name: Name of the tool being called
tool_input: Input parameters for the tool
Returns:
True if OK to proceed (not a repetition), False if repetition detected
'''
pass
def reset(self):
'''Reset the detector (e.g., when starting a new task).'''
pass
def get_repetition_count(self, tool_name: str, tool_input: Dict[str, Any]) -> int:
'''
Get the number of times this exact call has been made recently.
Args:
tool_name: Name of the tool
tool_input: Input parameters for the tool
Returns:
Number of times this call appears in recent history
'''
pass
def is_consecutive_duplicate(self, tool_name: str, tool_input: Dict[str, Any]) -> bool:
'''
Check if this is an immediate back-to-back duplicate call.
Args:
tool_name: Name of the tool
tool_input: Input parameters for the tool
Returns:
True if this is the exact same call as the previous one
'''
pass
| 6
| 6
| 15
| 3
| 5
| 8
| 1
| 1.35
| 1
| 6
| 0
| 0
| 5
| 0
| 5
| 87
| 101
| 21
| 34
| 18
| 28
| 46
| 28
| 17
| 22
| 2
| 5
| 1
| 7
|
328,186
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/cli/cli.py
|
dingent.cli.cli.ProjectInitializer
|
from pathlib import Path
from rich import print
from cookiecutter.exceptions import RepositoryNotFound
import typer
from cookiecutter.main import cookiecutter
class ProjectInitializer:
"""Handles the logic for the 'init' command."""
def __init__(self, project_name, template, checkout):
self.project_name = project_name
self.template = template
self.checkout = checkout
self.project_path = None
def run(self):
"""Executes the entire project initialization workflow."""
try:
self._create_from_template()
self._print_final_summary()
except RepositoryNotFound:
print(f'[bold red]\n❌ Error: Repository not found at {REPO_URL}[/bold red]')
print('[bold red]\nPlease check the URL and your network connection.[/bold red]')
raise typer.Exit()
except Exception as e:
print(f'[bold red]\nAn unexpected error occurred: {e}[/bold red]')
raise typer.Exit()
def _create_from_template(self):
"""Builds the project using Cookiecutter."""
print(f'[bold green]🚀 Initializing project from Git repository: {REPO_URL}[/bold green]')
template_dir = f'templates/{self.template}'
created_path = cookiecutter(REPO_URL, directory=template_dir, checkout=self.checkout, extra_context={'project_slug': self.project_name}, output_dir='.')
self.project_path = Path(created_path)
print(f'[bold green]✅ Project created at {self.project_path}[/bold green]')
def _print_final_summary(self):
"""Prints the final success message and next steps."""
final_project_name = self.project_path.name
print('[bold green]\n🎉 Project initialized successfully![/bold green]')
print('\nNext steps:')
print(f' 1. Change into the project directory: cd {final_project_name}')
print(' 2. Start all services: dingent run')
|
class ProjectInitializer:
'''Handles the logic for the 'init' command.'''
def __init__(self, project_name, template, checkout):
pass
def run(self):
'''Executes the entire project initialization workflow.'''
pass
def _create_from_template(self):
'''Builds the project using Cookiecutter.'''
pass
def _print_final_summary(self):
'''Prints the final success message and next steps.'''
pass
| 5
| 4
| 9
| 0
| 9
| 1
| 2
| 0.11
| 0
| 2
| 0
| 0
| 4
| 4
| 4
| 4
| 43
| 4
| 35
| 13
| 30
| 4
| 29
| 12
| 24
| 3
| 0
| 1
| 6
|
328,187
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/cli/cli.py
|
dingent.cli.cli.Service
|
from pathlib import Path
import subprocess
class Service:
def __init__(self, name: str, command: list[str], cwd: Path, color: str, env: dict[str, str] | None=None, open_browser_hint: bool=False):
self.name = name
self.command = command
self.cwd = cwd
self.color = color
self.env = env or {}
self.open_browser_hint = open_browser_hint
self.process: subprocess.Popen | None = None
|
class Service:
def __init__(self, name: str, command: list[str], cwd: Path, color: str, env: dict[str, str] | None=None, open_browser_hint: bool=False):
pass
| 2
| 0
| 16
| 0
| 16
| 0
| 1
| 0
| 0
| 6
| 0
| 0
| 1
| 7
| 1
| 1
| 17
| 0
| 17
| 17
| 7
| 0
| 9
| 9
| 7
| 1
| 0
| 0
| 1
|
328,188
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/cli/cli.py
|
dingent.cli.cli.ServiceSupervisor
|
import os
import re
import typer
import webbrowser
from rich import print
import threading
import subprocess
from rich.text import Text
import time
import queue
class ServiceSupervisor:
def __init__(self, services: list[Service], auto_open_frontend: bool=True):
self.services = services
self.auto_open_frontend = auto_open_frontend
self.log_queue: queue.Queue[tuple[str, str]] = queue.Queue()
self._browser_opened = False
self._stop_event = threading.Event()
def start_all(self):
print('[bold cyan]🚀 Starting services...[/bold cyan]')
for svc in self.services:
self._start_service(svc)
t = threading.Thread(target=self._log_loop, daemon=True)
t.start()
print('[bold green]✓ All services started. Real-time logs below (Ctrl+C to exit).[/bold green]')
try:
while not self._stop_event.is_set():
for svc in self.services:
if svc.process and svc.process.poll() is not None:
print(f'\n[bold red]Service {svc.name} has exited with code {svc.process.returncode}. Shutting down other services...[/bold red]')
self.stop_all()
raise typer.Exit(1)
time.sleep(0.3)
except KeyboardInterrupt:
if not hasattr(self, '_shutting_down'):
self._shutting_down = True
print('\n[bold yellow]Received interrupt signal. Shutting down services (press Ctrl+C again to force quit)...[/bold yellow]')
try:
self.stop_all()
except KeyboardInterrupt:
print('\n[bold red]Second interrupt: Forcibly terminating all processes now.[/bold red]')
self.stop_all(force=True)
else:
print('\n[bold red]Received interrupt again, force quitting...[/bold red]')
self.stop_all(force=True)
def stop_all(self, force: bool=False):
self._stop_event.set()
for svc in reversed(self.services):
if svc.process and svc.process.poll() is None:
_terminate_process_tree(svc.process, svc.name, force=force)
print('[bold blue]🛑 All processes have been terminated.[/bold blue]')
global _TEMP_DIRS
for td in _TEMP_DIRS:
try:
td.cleanup()
except Exception:
pass
_TEMP_DIRS.clear()
def _start_service(self, svc: Service):
env = {**os.environ, **svc.env}
popen_kwargs = {'cwd': str(svc.cwd), 'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT, 'env': env, 'text': True, 'bufsize': 1, 'errors': 'replace'}
if os.name == 'posix':
popen_kwargs['start_new_session'] = True
else:
popen_kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
try:
svc.process = subprocess.Popen(svc.command, **popen_kwargs)
except FileNotFoundError:
print(f'[bold red]❌ Failed to start {svc.name}: Command not found: {svc.command[0]}[/bold red]')
raise typer.Exit(1)
threading.Thread(target=self._stream_reader, args=(svc,), daemon=True).start()
print(f"[bold green]✓ {svc.name} (PID {svc.process.pid}) started: {' '.join(svc.command)}[/bold green]")
def _stream_reader(self, svc: Service):
assert svc.process and svc.process.stdout
for line in iter(svc.process.stdout.readline, ''):
if not line:
break
self.log_queue.put((svc.name, line.rstrip('\n')))
try:
svc.process.stdout.close()
except Exception:
pass
def _log_loop(self):
port_regex = re.compile('http://localhost:(\\d+)')
while not self._stop_event.is_set():
try:
name, line = self.log_queue.get(timeout=0.2)
except queue.Empty:
continue
svc = next((s for s in self.services if s.name == name), None)
color = svc.color if svc else 'white'
text = Text.from_markup(f'[{color}][{name.upper():^8}][/]: {line}')
print(text)
if svc and svc.open_browser_hint and self.auto_open_frontend and (not self._browser_opened):
m = port_regex.search(line)
if m:
url = f'http://localhost:{m.group(1)}'
print(f'[bold blue]🌐 Opening browser: {url}[/bold blue]')
try:
webbrowser.open_new_tab(url)
self._browser_opened = True
except Exception:
print('[yellow]⚠️ Could not open browser automatically.[/yellow]')
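# --- Added editorial example (not in the original module) ---
# A minimal wiring sketch, assuming the Service class from the same CLI module
# and illustrative commands/paths. start_all() blocks until Ctrl+C, so this is
# only meant to show how the supervisor is constructed.
if __name__ == '__main__':
    from pathlib import Path
    services = [
        Service(name='backend', command=['uvicorn', 'app:app'], cwd=Path('.'), color='cyan'),
        Service(name='frontend', command=['npm', 'run', 'dev'], cwd=Path('frontend'), color='magenta', open_browser_hint=True),
    ]
    ServiceSupervisor(services, auto_open_frontend=True).start_all()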
|
class ServiceSupervisor:
def __init__(self, services: list[Service], auto_open_frontend: bool=True):
pass
def start_all(self):
pass
def stop_all(self, force: bool=False):
pass
def _start_service(self, svc: Service):
pass
def _stream_reader(self, svc: Service):
pass
def _log_loop(self):
pass
| 7
| 0
| 17
| 1
| 17
| 0
| 5
| 0
| 0
| 14
| 1
| 0
| 6
| 6
| 6
| 6
| 109
| 9
| 100
| 28
| 92
| 0
| 90
| 28
| 82
| 8
| 0
| 4
| 28
|
328,189
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/cli/context.py
|
dingent.cli.context.CliContext
|
from functools import cached_property
from dingent.core.config_manager import ConfigManager
from importlib import resources
from typing import TYPE_CHECKING, cast
from pathlib import Path
from dingent.core.utils import find_project_root
from dingent.core.log_manager import LogManager
class CliContext:
def __init__(self):
"""
The __init__ method is now empty. All properties are loaded lazily
when they are first accessed.
"""
@cached_property
def config_manager(self) -> 'ConfigManager':
"""Lazily gets the config_manager from the app_context."""
log_manager = LogManager()
config_manager = ConfigManager(self.project_root, log_manager)
return config_manager
@cached_property
def _config(self) -> 'AppSettings':
"""Lazily gets the settings from the config_manager."""
return self.config_manager.get_settings()
@property
def project_root(self) -> Path | None:
"""This property now depends on the lazy config_manager."""
project_root = find_project_root()
return project_root
@property
def backend_port(self) -> int | None:
"""This property now depends on the lazy _config property."""
return self._config.backend_port
@property
def frontend_port(self) -> int | None:
"""This property also depends on the lazy _config property."""
return self._config.frontend_port
@property
def frontend_path(self) -> Path:
"""This property does not depend on app_context and remains unchanged."""
frontend_dir = resources.files('dingent').joinpath('static', 'frontend')
return cast(Path, frontend_dir)
|
class CliContext:
def __init__(self):
'''
The __init__ method is now empty. All properties are loaded lazily
when they are first accessed.
'''
pass
@cached_property
def config_manager(self) -> 'ConfigManager':
'''Lazily gets the config_manager from the app_context.'''
pass
@cached_property
def _config(self) -> 'AppSettings':
'''Lazily gets the settings from the config_manager.'''
pass
@property
def project_root(self) -> Path | None:
'''This property now depends on the lazy config_manager.'''
pass
@property
def backend_port(self) -> int | None:
'''This property now depends on the lazy _config property.'''
pass
@property
def frontend_port(self) -> int | None:
'''This property also depends on the lazy _config property.'''
pass
@property
def frontend_path(self) -> Path:
'''This property does not depend on app_context and remains unchanged.'''
pass
| 14
| 7
| 4
| 0
| 2
| 1
| 1
| 0.42
| 0
| 4
| 2
| 0
| 7
| 0
| 7
| 7
| 40
| 6
| 24
| 18
| 10
| 10
| 18
| 12
| 10
| 1
| 0
| 0
| 7
|
328,190
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/assistant_manager.py
|
dingent.core.assistant_manager.Assistant
|
from .plugin_manager import PluginInstance, PluginManager
from langchain_mcp_adapters.tools import load_mcp_tools
from contextlib import AsyncExitStack, asynccontextmanager
from collections.abc import Callable, Iterable
from .settings import AssistantSettings
class Assistant:
"""
    Runtime Assistant.
    destinations: the list of downstream Assistant names (or IDs) that this Assistant can reach directly
    within the currently active Workflow (after workflow_manager.instantiate_workflow_assistants has built it).
    With a single active Workflow this list can simply be overwritten.
"""
def __init__(self, assistant_id: str, name: str, description: str, plugin_instances: dict[str, PluginInstance], log_method: Callable):
self.id = assistant_id
self.name = name
self.description = description
self.plugin_instances = plugin_instances
self.destinations: list[str] = []
self._log_method = log_method
@classmethod
async def create(cls, plugin_manager: PluginManager, settings: AssistantSettings, log_method: Callable) -> Assistant:
plugin_instances: dict[str, PluginInstance] = {}
enabled_plugins = [p for p in settings.plugins if p.enabled]
for pconf in enabled_plugins:
try:
inst = await plugin_manager.create_instance(pconf)
plugin_instances[pconf.plugin_id or pconf.plugin_id] = inst
except Exception as e:
log_method('error', 'Create plugin instance failed (assistant={name} plugin={pid}): {e}', context={'name': settings.name, 'pid': getattr(pconf, 'plugin_id', pconf.plugin_id), 'e': e})
continue
return cls(settings.id, settings.name, settings.description or '', plugin_instances, log_method)
@asynccontextmanager
async def load_tools_langgraph(self):
"""
        Return the tool list that langgraph expects (plain Tool objects).
"""
tools: list = []
async with AsyncExitStack() as stack:
for inst in self.plugin_instances.values():
client = await stack.enter_async_context(inst.mcp_client)
session = client.session
_tools = await load_mcp_tools(session)
tools.extend(_tools)
yield tools
@asynccontextmanager
async def load_tools(self):
"""
        Return a list of RunnableTool objects whose run(arguments) can be invoked directly.
"""
runnable: list[RunnableTool] = []
async with AsyncExitStack() as stack:
for inst in self.plugin_instances.values():
client = await stack.enter_async_context(inst.mcp_client)
tools = await client.list_tools()
for t in tools:
async def call_tool(arguments: dict, _client=client, _t=t):
return await _client.call_tool(_t.name, arguments=arguments)
runnable.append(RunnableTool(tool=t, run=call_tool))
yield runnable
async def aclose(self):
for inst in self.plugin_instances.values():
try:
await inst.aclose()
except Exception as e:
self._log_method('warning', 'Error closing plugin instance (assistant={name}): {e}', context={'name': self.name, 'e': e})
self.plugin_instances.clear()
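# --- Added editorial example (not in the original module) ---
# A minimal async sketch, assuming an already-configured PluginManager and an
# AssistantSettings instance; the print-based log function is illustrative only.
import asyncio

async def _demo(plugin_manager, settings):
    def log(level, msg, context=None):
        print(level, msg, context)
    assistant = await Assistant.create(plugin_manager, settings, log)
    try:
        async with assistant.load_tools_langgraph() as tools:
            print('loaded tools:', [t.name for t in tools])
    finally:
        await assistant.aclose()

# asyncio.run(_demo(plugin_manager, settings))  # supply real objects to run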
|
class Assistant:
'''
    Runtime Assistant.
    destinations: the list of downstream Assistant names (or IDs) that this Assistant can reach directly
    within the currently active Workflow (after workflow_manager.instantiate_workflow_assistants has built it).
    With a single active Workflow this list can simply be overwritten.
'''
def __init__(self, assistant_id: str, name: str, description: str, plugin_instances: dict[str, PluginInstance], log_method: Callable):
pass
@classmethod
async def create(cls, plugin_manager: PluginManager, settings: AssistantSettings, log_method: Callable) -> Assistant:
pass
@asynccontextmanager
async def load_tools_langgraph(self):
'''
Return the tool list expected by langgraph (plain Tool objects).
'''
pass
@asynccontextmanager
async def load_tools(self):
'''
Return a list of RunnableTool objects whose run(arguments) can be invoked directly.
'''
pass
async def call_tool(arguments: dict, _client=client, _t=t):
pass
async def aclose(self):
pass
| 10
| 3
| 11
| 0
| 10
| 1
| 2
| 0.2
| 0
| 10
| 4
| 0
| 4
| 6
| 5
| 5
| 79
| 7
| 60
| 42
| 43
| 12
| 46
| 28
| 39
| 3
| 0
| 3
| 13
|
328,191
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/assistant_manager.py
|
dingent.core.assistant_manager.AssistantManager
|
from collections.abc import Callable, Iterable
from .settings import AssistantSettings
from pydantic import BaseModel, ValidationError
from .config_manager import ConfigManager
import asyncio
from .plugin_manager import PluginInstance, PluginManager
from dingent.core.log_manager import LogManager
class AssistantManager:
"""
Runtime Assistant instance manager (decoupled from ConfigManager; it only consumes its data):
- Creates Assistant instances (including plugin instances) lazily, on demand.
- Subscribes to ConfigManager's on_change event to pick up configuration changes automatically:
* removed assistants -> close and drop their instances
* modified assistants (config hash changed) -> close and rebuild (policy configurable)
* newly added assistants -> not created eagerly (instantiated on demand)
- Provides rebuild() for a full rebuild.
- Provides refresh_settings() to refresh only the settings mapping without touching existing instances (unless their original config no longer exists).
"""
def __init__(self, config_manager: ConfigManager, plugin_manager: PluginManager, log_manager: LogManager, *, auto_recreate_on_change: bool=True, compare_plugins_only: bool=False):
"""
auto_recreate_on_change: if True, automatically rebuild an instance when its assistant config changes (based on hash comparison)
compare_plugins_only: if True, rebuild only when plugin-related config changes (ignoring description and name changes)
"""
self._config_manager = config_manager
self._plugin_manager = plugin_manager
self._log_manager = log_manager
self._assistants: dict[str, Assistant] = {}
self._settings_map: dict[str, AssistantSettings] = {}
self._settings_hash: dict[str, str] = {}
self._lock = asyncio.Lock()
self._auto_recreate = auto_recreate_on_change
self._compare_plugins_only = compare_plugins_only
self._load_settings_initial()
self._config_manager.register_on_change(self._on_config_change)
def _load_settings_initial(self):
settings_list = self._config_manager.list_assistants()
self._settings_map = {s.id: s for s in settings_list}
self._settings_hash = {s.id: self._hash_settings(s) for s in settings_list}
def _hash_settings(self, s: AssistantSettings) -> str:
"""
Compute a lightweight hash used to decide whether an assistant's config has changed.
If compare_plugins_only = True, the hash is based only on the plugin list and its fields.
"""
if self._compare_plugins_only:
payload = [(p.plugin_id, p.enabled, p.tools_default_enabled, sorted(p.tools or []), sorted(p.config.keys()) if p.config else None) for p in sorted(s.plugins, key=lambda x: x.plugin_id)]
else:
payload = s.model_dump(mode='json', exclude_none=True)
import hashlib
import json
raw = json.dumps(payload, sort_keys=True, ensure_ascii=False)
return hashlib.sha256(raw.encode('utf-8')).hexdigest()
async def _build_assistant_instance(self, settings: AssistantSettings) -> Assistant:
return await Assistant.create(self._plugin_manager, settings, self._log_manager.log_with_context)
async def _maybe_recreate_assistant(self, assistant_id: str, new_settings: AssistantSettings):
"""
Decide from the hash comparison whether the instance needs to be rebuilt.
"""
old_hash = self._settings_hash.get(assistant_id)
new_hash = self._hash_settings(new_settings)
self._settings_map[assistant_id] = new_settings
self._settings_hash[assistant_id] = new_hash
if assistant_id not in self._assistants:
return
if not self._auto_recreate:
return
if old_hash == new_hash:
return
old_instance = self._assistants.pop(assistant_id, None)
if old_instance:
try:
await old_instance.aclose()
except Exception as e:
self._log_manager.log_with_context('warning', 'Error closing old assistant {assistant_id}: {e}', context={'assistant_id': assistant_id, 'e': e})
if new_settings.enabled:
try:
new_inst = await self._build_assistant_instance(new_settings)
self._assistants[assistant_id] = new_inst
self._log_manager.log_with_context('info', "Assistant '{name}' recreated due to config change.", context={'name': new_settings.name})
except Exception as e:
self._log_manager.log_with_context('error', "Failed to recreate assistant '{name}': {e}", context={'name': new_settings.name, 'e': e})
def _on_config_change(self, old_settings, new_settings):
"""
ConfigManager on_change callback (invoked synchronously) -> wrapped into async handling here.
old_settings/new_settings are AppSettings.
Only the assistants list is of interest here.
"""
try:
loop = asyncio.get_running_loop()
loop.create_task(self._handle_config_change_async(old_settings, new_settings))
except RuntimeError:
asyncio.run(self._handle_config_change_async(old_settings, new_settings))
async def _handle_config_change_async(self, old_app, new_app):
async with self._lock:
new_map = {a.id: a for a in new_app.assistants}
old_ids = set(self._settings_map.keys())
new_ids = set(new_map.keys())
removed = old_ids - new_ids
added = new_ids - old_ids
kept = old_ids & new_ids
for rid in removed:
inst = self._assistants.pop(rid, None)
self._settings_map.pop(rid, None)
self._settings_hash.pop(rid, None)
if inst:
try:
await inst.aclose()
self._log_manager.log_with_context('info', "Assistant '{name}' closed (removed).", context={'name': inst.name})
except Exception as e:
self._log_manager.log_with_context('warning', "Error closing removed assistant '{id}': {e}", context={'id': rid, 'e': e})
for nid in added:
self._settings_map[nid] = new_map[nid]
self._settings_hash[nid] = self._hash_settings(new_map[nid])
self._log_manager.log_with_context('info', "Assistant '{name}' added.", context={'name': new_map[nid].name})
for kid in kept:
await self._maybe_recreate_assistant(kid, new_map[kid])
async def get_assistant(self, assistant_id: str) -> Assistant:
async with self._lock:
settings = self._settings_map.get(assistant_id)
if not settings or not settings.enabled:
raise ValueError(f"Assistant '{assistant_id}' not found or disabled.")
if assistant_id in self._assistants:
return self._assistants[assistant_id]
try:
inst = await self._build_assistant_instance(settings)
except ValidationError as e:
raise ValueError(f"Assistant settings invalid '{assistant_id}': {e}") from e
except Exception as e:
raise RuntimeError(f"Failed to build assistant '{assistant_id}': {e}") from e
self._assistants[assistant_id] = inst
return inst
async def get_all_assistants(self, *, only_enabled: bool=True, preload: bool=False) -> dict[str, Assistant]:
"""
Return (and optionally preload) all Assistant instances.
preload=True instantiates every configuration that matches the filter.
"""
async with self._lock:
target_ids = [aid for aid, s in self._settings_map.items() if s.enabled or not only_enabled]
if preload:
for aid in target_ids:
if aid not in self._assistants:
try:
inst = await self._build_assistant_instance(self._settings_map[aid])
self._assistants[aid] = inst
except Exception as e:
self._log_manager.log_with_context('error', "Preload assistant '{name}' failed: {e}", context={'name': self._settings_map[aid].name, 'e': e})
return {aid: self._assistants[aid] for aid in target_ids if aid in self._assistants}
async def preload(self, assistant_ids: Iterable[str] | None=None):
"""
Preload the specified (or all) assistant instances.
"""
async with self._lock:
if assistant_ids is None:
assistant_ids = list(self._settings_map.keys())
for aid in assistant_ids:
s = self._settings_map.get(aid)
if not s or not s.enabled:
continue
if aid not in self._assistants:
try:
self._assistants[aid] = await self._build_assistant_instance(s)
except Exception as e:
self._log_manager.log_with_context('error', "Preload assistant '{name}' failed: {e}", context={'name': s.name, 'e': e})
async def reload_assistant(self, assistant_id: str) -> Assistant | None:
"""
Force a reload of a single Assistant instance.
This method re-reads the assistant's latest configuration from the ConfigManager.
- If the instance is running, it is closed and rebuilt from the new configuration.
- If the instance is not running, it is loaded from the new configuration.
- If the assistant is disabled or removed in the configuration, its instance is closed and no longer served.
Args:
assistant_id: ID of the assistant to reload.
Returns:
The reloaded Assistant instance, or None if the assistant is disabled in the new configuration.
Raises:
ValueError: if the assistant_id cannot be found in the configuration.
"""
async with self._lock:
try:
new_settings = self._config_manager.get_assistant(assistant_id)
if not new_settings:
raise ValueError(f"Assistant '{assistant_id}' not found in configuration.")
except (AttributeError, NotImplementedError):
all_settings = self._config_manager.list_assistants()
new_settings = next((s for s in all_settings if s.id == assistant_id), None)
if not new_settings:
raise ValueError(f"Assistant '{assistant_id}' not found in configuration.")
old_instance = self._assistants.pop(assistant_id, None)
if old_instance:
self._log_manager.log_with_context('info', "Closing existing instance of assistant '{id}' for reload.", context={'id': assistant_id})
try:
await old_instance.aclose()
except Exception as e:
self._log_manager.log_with_context('warning', "Error closing old assistant '{id}' during reload: {e}", context={'id': assistant_id, 'e': e})
self._settings_map[assistant_id] = new_settings
self._settings_hash[assistant_id] = self._hash_settings(new_settings)
if new_settings.enabled:
self._log_manager.log_with_context('info', "Reloading assistant '{name}' (ID: {id}).", context={'name': new_settings.name, 'id': assistant_id})
try:
new_inst = await self._build_assistant_instance(new_settings)
self._assistants[assistant_id] = new_inst
return new_inst
except Exception as e:
self._log_manager.log_with_context('error', "Failed to create new instance for assistant '{name}' during reload: {e}", context={'name': new_settings.name, 'e': e})
raise RuntimeError(f"Failed to build reloaded assistant '{assistant_id}'") from e
else:
self._log_manager.log_with_context('info', "Assistant '{name}' (ID: {id}) is disabled and will not be reloaded.", context={'name': new_settings.name, 'id': assistant_id})
return None
async def rebuild(self):
"""
Full rebuild: close all loaded instances, re-read all settings, and stay lazy.
(If everything should be created immediately, call preload() after rebuild().)
"""
async with self._lock:
await self._close_all_locked()
self._load_settings_initial()
self._log_manager.log_with_context('info', 'AssistantManager rebuild completed. assistants={count}', context={'count': len(self._settings_map)})
async def refresh_settings_only(self):
"""
Refresh only the settings mapping without closing existing instances (as long as their id still exists).
Assistants that have been removed are closed.
"""
async with self._lock:
new_list = self._config_manager.list_assistants()
new_map = {a.id: a for a in new_list}
removed = set(self._settings_map.keys()) - set(new_map.keys())
for rid in removed:
inst = self._assistants.pop(rid, None)
if inst:
try:
await inst.aclose()
except Exception as e:
self._log_manager.log_with_context('warning', "Error closing removed assistant '{id}': {e}", context={'id': rid, 'e': e})
self._settings_hash.pop(rid, None)
for aid, aset in new_map.items():
self._settings_map[aid] = aset
self._settings_hash[aid] = self._hash_settings(aset)
self._log_manager.log_with_context('info', 'Assistant settings refreshed. total={total} removed={removed}', context={'total': len(self._settings_map), 'removed': len(removed)})
async def close_assistant(self, assistant_id: str) -> bool:
"""
Proactively close an instance (the configuration is kept); it is recreated on the next access.
"""
async with self._lock:
inst = self._assistants.pop(assistant_id, None)
if not inst:
return False
try:
await inst.aclose()
except Exception as e:
self._log_manager.log_with_context('warning', "Error closing assistant '{id}': {e}", context={'id': assistant_id, 'e': e})
return True
async def aclose(self):
async with self._lock:
await self._close_all_locked()
async def _close_all_locked(self):
for inst in self._assistants.values():
try:
await inst.aclose()
except Exception as e:
self._log_manager.log_with_context('warning', "Error closing assistant '{name}': {e}", context={'name': inst.name, 'e': e})
self._assistants.clear()
async def set_destinations(self, mapping: dict[str, list[str]], *, clear_others: bool=True):
"""
mapping: assistant_id -> list of destinations
clear_others: if True, clear destinations on loaded instances that do not appear in mapping
"""
async with self._lock:
for aid, dests in mapping.items():
inst = self._assistants.get(aid)
if inst:
inst.destinations = list(dests)
if clear_others:
untouched = set(self._assistants.keys()) - set(mapping.keys())
for aid in untouched:
self._assistants[aid].destinations = []
def list_assistant_settings(self, *, only_enabled: bool=False) -> list[AssistantSettings]:
"""
Return copies of the currently cached settings (no disk access).
"""
result = []
for s in self._settings_map.values():
if only_enabled and (not s.enabled):
continue
result.append(s.model_copy(deep=True))
return result
def get_assistant_settings(self, assistant_id: str) -> AssistantSettings | None:
s = self._settings_map.get(assistant_id)
return s.model_copy(deep=True) if s else None
def detach(self):
"""
Unsubscribe from the ConfigManager (when this manager's lifecycle ends).
"""
try:
self._config_manager.unregister_on_change(self._on_config_change)
except Exception:
pass
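# Hypothetical usage sketch for AssistantManager: instances are created lazily on first
# access, can be force-reloaded from the latest configuration, and are closed together
# on shutdown. The manager arguments and the "assistant-id" value are assumptions.
async def _demo_assistant_manager(config_manager: ConfigManager, plugin_manager: PluginManager, log_manager: LogManager):
    manager = AssistantManager(config_manager, plugin_manager, log_manager)
    try:
        assistant = await manager.get_assistant("assistant-id")
        print(assistant.name, assistant.destinations)
        # Re-read this assistant's settings from ConfigManager and rebuild its instance.
        await manager.reload_assistant("assistant-id")
    finally:
        manager.detach()
        await manager.aclose()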
|
class AssistantManager:
'''
Runtime Assistant instance manager (decoupled from ConfigManager; it only consumes its data):
- Creates Assistant instances (including plugin instances) lazily, on demand.
- Subscribes to ConfigManager's on_change event to pick up configuration changes automatically:
* removed assistants -> close and drop their instances
* modified assistants (config hash changed) -> close and rebuild (policy configurable)
* newly added assistants -> not created eagerly (instantiated on demand)
- Provides rebuild() for a full rebuild.
- Provides refresh_settings() to refresh only the settings mapping without touching existing instances (unless their original config no longer exists).
'''
def __init__(self, config_manager: ConfigManager, plugin_manager: PluginManager, log_manager: LogManager, *, auto_recreate_on_change: bool=True, compare_plugins_only: bool=False):
'''
auto_recreate_on_change: if True, automatically rebuild an instance when its assistant config changes (based on hash comparison)
compare_plugins_only: if True, rebuild only when plugin-related config changes (ignoring description and name changes)
'''
pass
def _load_settings_initial(self):
pass
def _hash_settings(self, s: AssistantSettings) -> str:
'''
Compute a lightweight hash used to decide whether an assistant's config has changed.
If compare_plugins_only = True, the hash is based only on the plugin list and its fields.
'''
pass
async def _build_assistant_instance(self, settings: AssistantSettings) -> Assistant:
pass
async def _maybe_recreate_assistant(self, assistant_id: str, new_settings: AssistantSettings):
'''
Decide from the hash comparison whether the instance needs to be rebuilt.
'''
pass
def _on_config_change(self, old_settings, new_settings):
'''
ConfigManager on_change callback (invoked synchronously) -> wrapped into async handling here.
old_settings/new_settings are AppSettings.
Only the assistants list is of interest here.
'''
pass
async def _handle_config_change_async(self, old_app, new_app):
pass
async def get_assistant(self, assistant_id: str) -> Assistant:
pass
async def get_all_assistants(self, *, only_enabled: bool=True, preload: bool=False) -> dict[str, Assistant]:
'''
Return (and optionally preload) all Assistant instances.
preload=True instantiates every configuration that matches the filter.
'''
pass
async def preload(self, assistant_ids: Iterable[str] | None=None):
'''
Preload the specified (or all) assistant instances.
'''
pass
async def reload_assistant(self, assistant_id: str) -> Assistant | None:
'''
Force a reload of a single Assistant instance.
This method re-reads the assistant's latest configuration from the ConfigManager.
- If the instance is running, it is closed and rebuilt from the new configuration.
- If the instance is not running, it is loaded from the new configuration.
- If the assistant is disabled or removed in the configuration, its instance is closed and no longer served.
Args:
assistant_id: ID of the assistant to reload.
Returns:
The reloaded Assistant instance, or None if the assistant is disabled in the new configuration.
Raises:
ValueError: if the assistant_id cannot be found in the configuration.
'''
pass
async def rebuild(self):
'''
Full rebuild: close all loaded instances, re-read all settings, and stay lazy.
(If everything should be created immediately, call preload() after rebuild().)
'''
pass
async def refresh_settings_only(self):
'''
Refresh only the settings mapping without closing existing instances (as long as their id still exists).
Assistants that have been removed are closed.
'''
pass
async def close_assistant(self, assistant_id: str) -> bool:
'''
Proactively close an instance (the configuration is kept); it is recreated on the next access.
'''
pass
async def aclose(self):
pass
async def _close_all_locked(self):
pass
async def set_destinations(self, mapping: dict[str, list[str]], *, clear_others: bool=True):
'''
mapping: assistant_id -> list of destinations
clear_others: if True, clear destinations on loaded instances that do not appear in mapping
'''
pass
def list_assistant_settings(self, *, only_enabled: bool=False) -> list[AssistantSettings]:
'''
Return copies of the currently cached settings (no disk access).
'''
pass
def get_assistant_settings(self, assistant_id: str) -> AssistantSettings | None:
pass
def detach(self):
'''
Unsubscribe from the ConfigManager (when this manager's lifecycle ends).
'''
pass
| 21
| 14
| 17
| 1
| 12
| 4
| 4
| 0.43
| 0
| 16
| 5
| 0
| 20
| 9
| 20
| 20
| 391
| 43
| 244
| 91
| 213
| 106
| 219
| 74
| 196
| 8
| 0
| 5
| 71
|
328,192
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/assistant_manager.py
|
dingent.core.assistant_manager.RunnableTool
|
from collections.abc import Callable, Iterable
from typing import Any
from mcp.types import Tool
from pydantic import BaseModel, ValidationError
class RunnableTool(BaseModel):
tool: Tool
run: Callable[[dict], Any]
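# Hypothetical construction sketch: in practice RunnableTool instances are produced by
# Assistant.load_tools(), which binds run to an MCP client's call_tool; the Tool fields
# and the echo coroutine below are illustrative stand-ins.
async def _echo(arguments: dict):
    return arguments

_example = RunnableTool(tool=Tool(name="echo", inputSchema={"type": "object"}), run=_echo)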
|
class RunnableTool(BaseModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 5
| 0
| 0
|
328,193
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/config_manager.py
|
dingent.core.config_manager.AssistantNotFoundError
|
class AssistantNotFoundError(ConfigError, KeyError):
pass
|
class AssistantNotFoundError(ConfigError, KeyError):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 5
| 0
| 0
|
328,194
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/config_manager.py
|
dingent.core.config_manager.ConfigError
|
class ConfigError(Exception):
pass
|
class ConfigError(Exception):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 2
| 0
| 0
| 0
| 10
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
328,195
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/config_manager.py
|
dingent.core.config_manager.ConfigManager
|
import time
from contextlib import contextmanager
import tomlkit
import yaml
from threading import RLock
from .settings import AppSettings, AssistantSettings
from typing import Any
from copy import deepcopy
from .types import AssistantCreate, AssistantUpdate, PluginUserConfig
import shutil
from pathlib import Path
from pydantic import SecretStr, ValidationError
from dingent.core.secret_manager import SecretManager
class ConfigManager:
"""
Focused-responsibility version:
- Handles I/O for the split configuration files (global / assistants / plugins)
- Provides thread-safe reads and updates
- Provides snapshot/restore, transactional batching, a migration framework, and on_change subscriptions
- Out of scope: plugin business logic (adding/removing individual plugins), workflow object management, cross-service coordination
Directory layout:
project_root/
dingent.toml
config/
assistants/{assistant_id}.yaml
plugins/{plugin_name}/{assistant_id}.yaml
workflows/ (ConfigManager does not write to it or parse its internal structure; it only keeps the fields already present in AppSettings)
"""
def __init__(self, project_root: Path, log_manager, backup_dir_name: str='.config_backups', max_backups: int=5, auto_migrate: bool=True, target_config_version: int=1):
self.project_root = Path(project_root)
self.secret_manager = SecretManager(self.project_root)
self.log_manager = log_manager
self._global_config_path = self.project_root / GLOBAL_CONFIG_FILE
self._config_root = self.project_root / 'config'
self._assistants_dir = self._config_root / ASSISTANTS_DIR_NAME
self._plugins_dir = self._config_root / PLUGINS_DIR_NAME
self._workflows_dir = self._config_root / WORKFLOWS_DIR_NAME
self._backup_root = self.project_root / backup_dir_name
self._max_backups = max_backups
self._lock = RLock()
self._on_change_callbacks: list[OnChangeCallback] = []
raw_data = self._load_all()
if auto_migrate:
raw_data = self._maybe_migrate(raw_data, target_config_version)
self._settings = self._validate(raw_data)
self.log_manager.log_with_context('info', 'ConfigManager initialized with {count} assistants.', context={'count': len(self._settings.assistants)})
def get_settings(self) -> AppSettings:
with self._lock:
return self._settings.model_copy(deep=True)
def list_assistants(self) -> list[AssistantSettings]:
with self._lock:
return [a.model_copy(deep=True) for a in self._settings.assistants]
def get_assistant(self, assistant_id: str) -> AssistantSettings | None:
with self._lock:
return next((a.model_copy(deep=True) for a in self._settings.assistants if a.id == assistant_id), None)
def upsert_assistant(self, data: AssistantCreate | AssistantUpdate | dict) -> AssistantSettings:
"""
Update if the id already exists; create otherwise.
- On update: _clean_patch filters out fields or structures consisting only of '*' (e.g. '********') so they do not overwrite existing values.
- On create: the same cleaning applies; if required fields are missing after cleaning, validation fails.
"""
if isinstance(data, AssistantCreate | AssistantUpdate):
raw_patch = data.model_dump(exclude_unset=True)
else:
raw_patch = dict(data)
patch = _process_secrets_recursively(raw_patch, path=[], secret_manager=self.secret_manager, action=SecretAction.SAVE)
with self._lock:
old_settings = self._settings
assistants_map = {a.id: a for a in old_settings.assistants}
a_id = raw_patch.get('id') or patch.get('id')
if a_id and a_id in assistants_map:
if not patch or (set(patch.keys()) == {'id'} and len(patch) == 1):
return assistants_map[a_id].model_copy(deep=True)
base = assistants_map[a_id].model_dump()
merged = deep_merge(base, patch)
new_assistant = AssistantSettings.model_validate(merged)
new_list = []
for a in old_settings.assistants:
new_list.append(new_assistant if a.id == a_id else a)
else:
if 'id' not in patch:
raise ValueError("Creating a new assistant requires an 'id'.")
create_obj = AssistantCreate.model_validate(patch)
new_assistant = AssistantSettings.model_validate(create_obj.model_dump())
new_list = list(old_settings.assistants) + [new_assistant]
new_app = old_settings.model_copy(update={'assistants': new_list})
self._replace_settings(old_settings, new_app)
return new_assistant.model_copy(deep=True)
def delete_assistant(self, assistant_id: str) -> bool:
with self._lock:
old = self._settings
new_list = [a for a in old.assistants if a.id != assistant_id]
if len(new_list) == len(old.assistants):
return False
new_app = old.model_copy(update={'assistants': new_list})
self._replace_settings(old, new_app)
self._delete_assistant_files(assistant_id)
return True
def update_global(self, new_settings: dict[str, Any]) -> AppSettings:
"""
Update global top-level fields (including default_assistant, llm, etc.) without modifying assistants in any way.
Only full updates are supported.
"""
with self._lock:
old = self._settings
base = old.model_dump(exclude_none=True)
patch = _clean_patch(new_settings) or {}
merged = deep_merge(base, patch)
merged['assistants'] = base['assistants']
try:
new_app = AppSettings.model_validate(merged)
except ValidationError as e:
raise ValueError(f'Global patch invalid: {e}') from e
self._replace_settings(old, new_app)
return new_app.model_copy(deep=True)
def update_plugins_for_assistant(self, assistant_id: str, plugin_configs: list[PluginUserConfig]) -> AssistantSettings:
"""
Only provides whole-list replacement; finer-grained add/remove logic is composed by external services.
"""
with self._lock:
old = self._settings
target = next((a for a in old.assistants if a.id == assistant_id), None)
if not target:
raise ValueError(f"Assistant '{assistant_id}' not found.")
new_plugins: list[PluginUserConfig] = []
for pc in plugin_configs:
if isinstance(pc, PluginUserConfig):
new_plugins.append(pc)
else:
new_plugins.append(PluginUserConfig.model_validate(pc))
new_assistant = target.model_copy(update={'plugins': new_plugins})
updated_assistants = []
for a in old.assistants:
updated_assistants.append(new_assistant if a.id == assistant_id else a)
new_app = old.model_copy(update={'assistants': updated_assistants})
self._replace_settings(old, new_app)
return new_assistant.model_copy(deep=True)
@contextmanager
def transaction(self):
"""
Modify settings multiple times inside the context (via self._settings = ... or the exposed API); everything is saved once on exit.
If an exception is raised midway, nothing is written to disk.
"""
with self._lock:
original = self._settings
working_copy = original.model_copy(deep=True)
self._settings = working_copy
try:
yield working_copy
self._replace_settings(original, working_copy, already_locked=True)
except Exception:
self._settings = original
raise
def export_snapshot(self) -> dict:
"""
Return the complete JSON-serializable configuration dict (including assistants/plugins).
"""
with self._lock:
return self._settings.model_dump(mode='json')
def import_snapshot(self, data: dict, overwrite: bool=True) -> AppSettings:
"""
Import from a complete configuration dict; overwrites by default (overwrite=True).
"""
with self._lock:
base = self._settings.model_dump() if not overwrite else {}
merged = deep_merge(base, data) if not overwrite else data
new_app = self._validate(merged)
old = self._settings
self._replace_settings(old, new_app)
return new_app.model_copy(deep=True)
def dry_run_merge(self, patch: dict[str, Any]) -> tuple[bool, ValidationError | None]:
"""
Test whether a patch can be merged successfully and pass validation.
"""
with self._lock:
base = self._settings.model_dump()
merged = deep_merge(base, patch)
try:
AppSettings.model_validate(merged)
return (True, None)
except ValidationError as e:
return (False, e)
def register_on_change(self, callback: OnChangeCallback) -> None:
with self._lock:
if callback not in self._on_change_callbacks:
self._on_change_callbacks.append(callback)
def unregister_on_change(self, callback: OnChangeCallback) -> None:
with self._lock:
if callback in self._on_change_callbacks:
self._on_change_callbacks.remove(callback)
def _maybe_migrate(self, raw: dict, target_version: int) -> dict:
current_version = int(raw.get('config_version') or 1)
if current_version >= target_version:
return raw
self.log_manager.log_with_context('info', 'Migrating config {from_v} -> {to_v}.', context={'from_v': current_version, 'to_v': target_version})
migrated = migration_registry.migrate(raw, current_version, target_version)
if 'config_version' not in migrated:
migrated['config_version'] = target_version
return migrated
def _validate(self, raw: dict) -> AppSettings:
try:
return AppSettings.model_validate(raw)
except ValidationError as e:
self.log_manager.log_with_context('error', 'Configuration validation failed: {error}', context={'error': str(e)})
raise
def _load_all(self) -> dict:
global_part = self._load_global()
assistants_raw = self._load_assistants()
plugin_map = self._load_plugin_instances()
assistants: list[dict] = []
for a_id, a_data in assistants_raw.items():
copy_data = deepcopy(a_data)
copy_data['plugins'] = plugin_map.get(a_id, [])
assistants.append(copy_data)
if 'workflows' not in global_part:
global_part['workflows'] = []
global_part['assistants'] = assistants
return _process_secrets_recursively(global_part, [], self.secret_manager, action=SecretAction.LOAD)
def _load_global(self) -> dict:
if not self._global_config_path.is_file():
self.log_manager.log_with_context('warning', 'Global config file missing at {path}, using defaults.', context={'path': str(self._global_config_path)})
return {}
try:
text = self._global_config_path.read_text('utf-8')
doc = tomlkit.parse(text).unwrap()
return doc
except Exception as e:
self.log_manager.log_with_context('error', 'Failed to read global config: {error}', context={'error': str(e)})
return {}
def _load_assistants(self) -> dict[str, dict]:
result: dict[str, dict] = {}
if not self._assistants_dir.is_dir():
return result
for f in self._assistants_dir.glob('*.yaml'):
try:
data = yaml.safe_load(f.read_text('utf-8')) or {}
if not isinstance(data, dict):
self.log_manager.log_with_context('warning', 'Assistant file {file} top-level not dict. Skip.', context={'file': str(f)})
continue
aid = data.get('id')
if not aid:
self.log_manager.log_with_context('warning', 'Assistant file {file} missing id. Skip.', context={'file': str(f)})
continue
data.pop('plugins', None)
result[aid] = data
except Exception as e:
self.log_manager.log_with_context('error', 'Read assistant file {file} failed: {error}', context={'file': str(f), 'error': str(e)})
return result
def _load_plugin_instances(self) -> dict[str, list[dict]]:
mapping: dict[str, list[dict]] = {}
if not self._plugins_dir.is_dir():
return mapping
for pdir in self._plugins_dir.iterdir():
if not pdir.is_dir():
continue
plugin_name = pdir.name
for cfg in pdir.glob('*.yaml'):
try:
pdata = yaml.safe_load(cfg.read_text('utf-8')) or {}
if not isinstance(pdata, dict):
continue
aid = pdata.get('assistant_id')
if not aid:
self.log_manager.log_with_context('warning', 'Plugin instance {file} missing assistant_id. Skip.', context={'file': str(cfg)})
continue
if pdata.get('plugin_name') != plugin_name:
pdata['plugin_name'] = plugin_name
if not pdata.get('name'):
pdata['name'] = plugin_name
mapping.setdefault(aid, []).append(pdata)
except Exception as e:
self.log_manager.log_with_context('error', 'Read plugin instance file {file} failed: {error}', context={'file': str(cfg), 'error': str(e)})
return mapping
def _replace_settings(self, old: AppSettings, new: AppSettings, already_locked: bool=False) -> None:
"""
Replace the in-memory configuration, persist it to files, and trigger the on_change callbacks.
"""
lock_cm = self._lock if not already_locked else None
if lock_cm:
lock_cm.acquire()
try:
if old is new:
pass
self._settings = new
self._persist()
self._emit_change(old, new)
finally:
if lock_cm:
lock_cm.release()
def _persist(self) -> None:
self._write_backup()
settings_dict = self._settings.model_dump(exclude_none=True)
persistable_data = _process_secrets_recursively(settings_dict, [], self.secret_manager, action=SecretAction.SAVE)
assistants = persistable_data.pop('assistants', [])
self._write_global(persistable_data)
self._write_assistants_and_plugins(assistants)
def _write_global(self, global_part: dict[str, Any]) -> None:
self._global_config_path.parent.mkdir(parents=True, exist_ok=True)
if self._global_config_path.is_file():
doc = tomlkit.parse(self._global_config_path.read_text('utf-8'))
else:
doc = tomlkit.document()
for k in list(doc.keys()):
if k not in global_part:
pass
doc.update(global_part)
self._global_config_path.write_text(tomlkit.dumps(doc), 'utf-8')
def _write_assistants_and_plugins(self, assistants: list[dict]) -> None:
self._assistants_dir.mkdir(parents=True, exist_ok=True)
self._plugins_dir.mkdir(parents=True, exist_ok=True)
desired_assistant_ids = set()
desired_plugin_files: set[Path] = set()
for a in assistants:
a_id = a.get('id')
if not a_id:
self.log_manager.log_with_context('warning', 'Assistant without id discarded during save.')
continue
desired_assistant_ids.add(a_id)
a_copy = dict(a)
plugin_list = a_copy.pop('plugins', [])
(self._assistants_dir / f'{a_id}.yaml').write_text(yaml.safe_dump(a_copy, allow_unicode=True, sort_keys=False), 'utf-8')
for p in plugin_list:
p_copy = dict(p)
p_copy['assistant_id'] = a_id
plugin_id = p_copy.get('plugin_id')
if not plugin_id:
self.log_manager.log_with_context('warning', 'Plugin config for assistant {aid} missing plugin_id. Discarded.', context={'aid': a_id})
continue
p_dir = self._plugins_dir / plugin_id
p_dir.mkdir(parents=True, exist_ok=True)
pf = p_dir / f'{a_id}.yaml'
desired_plugin_files.add(pf)
pf.write_text(yaml.safe_dump(p_copy, allow_unicode=True, sort_keys=False), 'utf-8')
for old_file in self._assistants_dir.glob('*.yaml'):
if old_file.stem not in desired_assistant_ids:
try:
old_file.unlink()
except Exception as e:
self.log_manager.log_with_context('error', 'Remove stale assistant file {file} failed: {error}', context={'file': str(old_file), 'error': str(e)})
for pdir in self._plugins_dir.iterdir():
if not pdir.is_dir():
continue
for cfg in pdir.glob('*.yaml'):
if cfg not in desired_plugin_files:
try:
cfg.unlink()
except Exception as e:
self.log_manager.log_with_context('error', 'Remove stale plugin instance {file} failed: {error}', context={'file': str(cfg), 'error': str(e)})
try:
if not any(pdir.iterdir()):
pdir.rmdir()
except Exception:
pass
def _delete_assistant_files(self, assistant_id: str) -> None:
f = self._assistants_dir / f'{assistant_id}.yaml'
if f.is_file():
try:
f.unlink()
except Exception as e:
self.log_manager.log_with_context('error', 'Remove assistant file {file} failed: {error}', context={'file': str(f), 'error': str(e)})
if self._plugins_dir.is_dir():
for pdir in self._plugins_dir.iterdir():
if not pdir.is_dir():
continue
inst = pdir / f'{assistant_id}.yaml'
if inst.is_file():
try:
inst.unlink()
except Exception as e:
self.log_manager.log_with_context('error', 'Remove plugin instance {file} failed: {error}', context={'file': str(inst), 'error': str(e)})
try:
if not any(pdir.iterdir()):
pdir.rmdir()
except Exception:
pass
def _write_backup(self) -> None:
try:
self._backup_root.mkdir(parents=True, exist_ok=True)
ts = time.strftime('%Y%m%d-%H%M%S')
backup_dir = self._backup_root / ts
backup_dir.mkdir()
if self._global_config_path.exists():
shutil.copy2(self._global_config_path, backup_dir / self._global_config_path.name)
for d in (self._assistants_dir, self._plugins_dir):
if d.exists():
target_sub = backup_dir / d.name
shutil.copytree(d, target_sub)
backups = sorted(self._backup_root.iterdir(), key=lambda p: p.name, reverse=True)
for old in backups[self._max_backups:]:
if old.is_dir():
shutil.rmtree(old, ignore_errors=True)
except Exception as e:
self.log_manager.log_with_context('warning', 'Write config backup failed: {error}', context={'error': str(e)})
def _emit_change(self, old: AppSettings, new: AppSettings) -> None:
if old is new:
pass
for cb in list(self._on_change_callbacks):
try:
cb(old, new)
except Exception as e:
self.log_manager.log_with_context('error', 'on_change callback error: {error}', context={'error': str(e)})
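# Hypothetical usage sketch for ConfigManager: create it for a project root, upsert an
# assistant, and batch edits in a transaction (nothing is persisted if the block raises).
# The project path, the log_manager object, and the assistant field values are assumptions.
def _demo_config_manager(log_manager) -> None:
    cm = ConfigManager(Path("./my-project"), log_manager)
    cm.upsert_assistant({"id": "a1", "name": "Research", "description": "demo assistant"})
    with cm.transaction() as working:
        # Mutate the working AppSettings copy; it is persisted once when the block exits cleanly.
        working.assistants = [a for a in working.assistants if a.enabled]
    ok, err = cm.dry_run_merge({"assistants": []})
    print(ok, err)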
|
class ConfigManager:
'''
Focused-responsibility version:
- Handles I/O for the split configuration files (global / assistants / plugins)
- Provides thread-safe reads and updates
- Provides snapshot/restore, transactional batching, a migration framework, and on_change subscriptions
- Out of scope: plugin business logic (adding/removing individual plugins), workflow object management, cross-service coordination
Directory layout:
project_root/
dingent.toml
config/
assistants/{assistant_id}.yaml
plugins/{plugin_name}/{assistant_id}.yaml
workflows/ (ConfigManager does not write to it or parse its internal structure; it only keeps the fields already present in AppSettings)
'''
def __init__(self, project_root: Path, log_manager, backup_dir_name: str='.config_backups', max_backups: int=5, auto_migrate: bool=True, target_config_version: int=1):
pass
def get_settings(self) -> AppSettings:
pass
def list_assistants(self) -> list[AssistantSettings]:
pass
def get_assistant(self, assistant_id: str) -> AssistantSettings | None:
pass
def upsert_assistant(self, data: AssistantCreate | AssistantUpdate | dict) -> AssistantSettings:
'''
Update if the id already exists; create otherwise.
- On update: _clean_patch filters out fields or structures consisting only of '*' (e.g. '********') so they do not overwrite existing values.
- On create: the same cleaning applies; if required fields are missing after cleaning, validation fails.
'''
pass
def delete_assistant(self, assistant_id: str) -> bool:
pass
def update_global(self, new_settings: dict[str, Any]) -> AppSettings:
'''
Update global top-level fields (including default_assistant, llm, etc.) without modifying assistants in any way.
Only full updates are supported.
'''
pass
def update_plugins_for_assistant(self, assistant_id: str, plugin_configs: list[PluginUserConfig]) -> AssistantSettings:
'''
Only provides whole-list replacement; finer-grained add/remove logic is composed by external services.
'''
pass
@contextmanager
def transaction(self):
'''
Modify settings multiple times inside the context (via self._settings = ... or the exposed API); everything is saved once on exit.
If an exception is raised midway, nothing is written to disk.
'''
pass
def export_snapshot(self) -> dict:
'''
Return the complete JSON-serializable configuration dict (including assistants/plugins).
'''
pass
def import_snapshot(self, data: dict, overwrite: bool=True) -> AppSettings:
'''
Import from a complete configuration dict; overwrites by default (overwrite=True).
'''
pass
def dry_run_merge(self, patch: dict[str, Any]) -> tuple[bool, ValidationError | None]:
'''
Test whether a patch can be merged successfully and pass validation.
'''
pass
def register_on_change(self, callback: OnChangeCallback) -> None:
pass
def unregister_on_change(self, callback: OnChangeCallback) -> None:
pass
def _maybe_migrate(self, raw: dict, target_version: int) -> dict:
pass
def _validate(self, raw: dict) -> AppSettings:
pass
def _load_all(self) -> dict:
pass
def _load_global(self) -> dict:
pass
def _load_assistants(self) -> dict[str, dict]:
pass
def _load_plugin_instances(self) -> dict[str, list[dict]]:
pass
def _replace_settings(self, old: AppSettings, new: AppSettings, already_locked: bool=False) -> None:
'''
Replace the in-memory configuration, persist it to files, and trigger the on_change callbacks.
'''
pass
def _persist(self) -> None:
pass
def _write_global(self, global_part: dict[str, Any]) -> None:
pass
def _write_assistants_and_plugins(self, assistants: list[dict]) -> None:
pass
def _delete_assistant_files(self, assistant_id: str) -> None:
pass
def _write_backup(self) -> None:
pass
def _emit_change(self, old: AppSettings, new: AppSettings) -> None:
pass
| 29
| 9
| 16
| 1
| 13
| 2
| 4
| 0.21
| 0
| 18
| 7
| 0
| 27
| 13
| 27
| 27
| 476
| 48
| 358
| 144
| 321
| 74
| 337
| 125
| 309
| 15
| 0
| 4
| 105
|
328,196
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/config_manager.py
|
dingent.core.config_manager.ConfigMigrationRegistry
|
from collections.abc import Callable
class ConfigMigrationRegistry:
def __init__(self):
self._migrations: dict[int, Callable[[dict], dict]] = {}
def register(self, from_version: int, func: Callable[[dict], dict]) -> None:
self._migrations[from_version] = func
def migrate(self, data: dict, current_version: int, target_version: int) -> dict:
"""
Run migrations sequentially from_version -> from_version+1 ... up to target_version.
Raise an exception if a migration function is missing along the way.
"""
version = current_version
while version < target_version:
if version not in self._migrations:
raise MigrationError(f'Missing migration function for version {version} -> {version + 1}')
data = self._migrations[version](data)
version += 1
return data
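# Hypothetical sketch of chaining migrations with the registry above; the transformations
# below are made up for illustration.
def _v1_to_v2(data: dict) -> dict:
    data.setdefault("workflows", [])
    data["config_version"] = 2
    return data

def _v2_to_v3(data: dict) -> dict:
    data["config_version"] = 3
    return data

_registry = ConfigMigrationRegistry()
_registry.register(1, _v1_to_v2)
_registry.register(2, _v2_to_v3)
_migrated = _registry.migrate({"config_version": 1}, current_version=1, target_version=3)
# _migrated["config_version"] == 3; a missing intermediate step would raise MigrationError instead.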
|
class ConfigMigrationRegistry:
def __init__(self):
pass
def register(self, from_version: int, func: Callable[[dict], dict]) -> None:
pass
def migrate(self, data: dict, current_version: int, target_version: int) -> dict:
'''
Run migrations sequentially from_version -> from_version+1 ... up to target_version.
Raise an exception if a migration function is missing along the way.
'''
pass
| 4
| 1
| 5
| 0
| 4
| 1
| 2
| 0.31
| 0
| 4
| 1
| 0
| 3
| 1
| 3
| 3
| 19
| 2
| 13
| 6
| 9
| 4
| 13
| 6
| 9
| 3
| 0
| 2
| 5
|
328,197
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/config_manager.py
|
dingent.core.config_manager.ConfigUpdateError
|
class ConfigUpdateError(ConfigError):
pass
|
class ConfigUpdateError(ConfigError):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 4
| 0
| 0
|
328,198
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/config_manager.py
|
dingent.core.config_manager.MigrationError
|
class MigrationError(Exception):
pass
|
class MigrationError(Exception):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
328,199
|
saya-ashen/Dingent
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/saya-ashen_Dingent/src/dingent/core/config_manager.py
|
dingent.core.config_manager.SecretAction
|
from enum import Enum, auto
class SecretAction(Enum):
SAVE = auto()
LOAD = auto()
|
class SecretAction(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 2
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|