Each record pairs a class extracted from a public repository with its full source, a body-stripped skeleton, and per-class static code metrics. Schema (for string columns, min/max are character lengths):

| Column | Dtype | Min | Max |
|---|---|---|---|
| `id` | int64 | 0 | 328k |
| `repository_name` | string | 7 | 58 |
| `file_path` | string | 9 | 302 |
| `class_name` | string | 5 | 256 |
| `human_written_code` | string | 16 | 2.16M |
| `class_skeleton` | string | 18 | 1.49M |
| `total_program_units` | int64 | 1 | 1.76k |
| `total_doc_str` | int64 | 0 | 771 |
| `AvgCountLine` | float64 | 0 | 7.89k |
| `AvgCountLineBlank` | float64 | 0 | 297 |
| `AvgCountLineCode` | float64 | 0 | 7.89k |
| `AvgCountLineComment` | float64 | 0 | 7.89k |
| `AvgCyclomatic` | float64 | 0 | 130 |
| `CommentToCodeRatio` | float64 | 0 | 168 |
| `CountClassBase` | float64 | 0 | 40 |
| `CountClassCoupled` | float64 | 0 | 583 |
| `CountClassCoupledModified` | float64 | 0 | 575 |
| `CountClassDerived` | float64 | 0 | 5.35k |
| `CountDeclInstanceMethod` | float64 | 0 | 529 |
| `CountDeclInstanceVariable` | float64 | 0 | 296 |
| `CountDeclMethod` | float64 | 0 | 599 |
| `CountDeclMethodAll` | float64 | 0 | 1.12k |
| `CountLine` | float64 | 1 | 40.4k |
| `CountLineBlank` | float64 | 0 | 8.16k |
| `CountLineCode` | float64 | 1 | 25.7k |
| `CountLineCodeDecl` | float64 | 1 | 8.15k |
| `CountLineCodeExe` | float64 | 0 | 24.2k |
| `CountLineComment` | float64 | 0 | 16.5k |
| `CountStmt` | float64 | 1 | 9.71k |
| `CountStmtDecl` | float64 | 1 | 8.15k |
| `CountStmtExe` | float64 | 0 | 9.69k |
| `MaxCyclomatic` | float64 | 0 | 759 |
| `MaxInheritanceTree` | float64 | 0 | 16 |
| `MaxNesting` | float64 | 0 | 34 |
| `SumCyclomatic` | float64 | 0 | 2.9k |
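Assuming the dataset is published on the Hugging Face Hub, a record can be loaded and inspected along these columns. The repository id below is a placeholder, not the dataset's actual path:

```python
from datasets import load_dataset

# Hypothetical Hub id; substitute the real one.
ds = load_dataset("your-org/class-level-code-metrics", split="train")

sample = ds[0]
print(sample["repository_name"], sample["class_name"])
print(sample["human_written_code"][:200])  # first 200 characters of the class source

# Static metrics are flat numeric columns on each record.
print({k: sample[k] for k in ("CountLineCode", "SumCyclomatic", "CommentToCodeRatio")})
```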
---

id: 327,900
repository_name: `Euro-BioImaging/EuBI-Bridge`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/scale.py`
class_name: `Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.scale.Downscaler`

human_written_code:
```python
import dask.array as da
import dataclasses
import warnings  # used by update(); missing from the original extract

# simple_downscale, mean_downscale and DownscaleManager are defined elsewhere in scale.py.


@dataclasses.dataclass
class Downscaler:
    array: da.Array
    scale_factor: (list, tuple)
    n_layers: int
    scale: (list, tuple) = None
    output_chunks: (list, tuple) = None
    backend: str = 'numpy'
    downscale_method: str = 'simple'

    def __post_init__(self):
        self.param_names = ['array', 'scale_factor', 'n_layers', 'scale',
                            'output_chunks', 'backend', 'downscale_method']
        self.update()

    def get_method(self):
        if self.downscale_method == 'simple':
            method = simple_downscale
        elif self.downscale_method == 'mean':
            method = mean_downscale
        elif self.downscale_method == 'median':
            method = mean_downscale  # 'median' falls back to the mean implementation
        else:
            raise NotImplementedError(
                "Currently, only 'simple', 'mean' and 'median' methods are implemented.")
        return method

    def run(self):
        self.method = self.get_method()
        assert isinstance(self.array, da.Array)
        self.dm = DownscaleManager(self.array.shape, self.scale_factor,
                                   self.n_layers, self.scale)
        if self.output_chunks is None:
            self.output_chunks = [self.array.chunksize] * self.n_layers
        downscaled = []
        for idx, (scale_factor, chunks) in enumerate(
                zip(self.dm.scale_factors, self.output_chunks)):
            if idx == 0:
                downscaled.append(self.array)
            else:
                factor = tuple(int(x) for x in scale_factor)
                res1 = self.method(self.array, scale_factor=factor)
                downscaled.append(res1)
        self.downscaled_arrays = {str(i): arr for i, arr in enumerate(downscaled)}
        return self

    def update(self, **kwargs):
        for key, value in kwargs.items():
            if key in self.param_names:
                self.__setattr__(key, value)
            else:
                warnings.warn(f"The given parameter name '{key}' is not valid, ignoring it..")
        self.run()
        return self
```
class_skeleton:

```python
@dataclasses.dataclass
class Downscaler:

    def __post_init__(self):
        pass

    def get_method(self):
        pass

    def run(self):
        pass

    def update(self, **kwargs):
        pass
```
metrics: total_program_units=6, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=7, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=4, CountDeclMethod=4, CountDeclMethodAll=4, CountLine=54, CountLineBlank=5, CountLineCode=49, CountLineCodeDecl=19, CountLineCodeExe=44, CountLineComment=0, CountStmt=40, CountStmtDecl=19, CountStmtExe=35, MaxCyclomatic=4, MaxInheritanceTree=0, MaxNesting=2, SumCyclomatic=12
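The `class_skeleton` field keeps signatures and docstrings but replaces method bodies with `pass`. A minimal sketch of how such a skeleton can be derived with Python's `ast` module, as an illustration of the idea rather than the curation pipeline's actual implementation:

```python
import ast


def make_skeleton(source: str) -> str:
    """Replace each function body with `pass`, keeping signatures and docstrings."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            kept = []
            if ast.get_docstring(node) is not None:
                kept.append(node.body[0])  # preserve the docstring expression
            node.body = kept + [ast.Pass()]
    return ast.unparse(tree)  # ast.unparse requires Python >= 3.9


example = '''
class Downscaler:
    def run(self):
        """Compute the pyramid."""
        return self
'''
print(make_skeleton(example))
```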
---

id: 327,901
repository_name: `Euro-BioImaging/EuBI-Bridge`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/base/writers.py`
class_name: `Euro-BioImaging_EuBI-Bridge.eubi_bridge.base.writers.CompressorConfig`

human_written_code:
```python
from dataclasses import dataclass


@dataclass
class CompressorConfig:
    name: str = 'blosc'
    params: dict = None

    def __post_init__(self):
        self.params = self.params or {}
```
class_skeleton:

```python
@dataclass
class CompressorConfig:

    def __post_init__(self):
        pass
```
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=6, CountLineBlank=1, CountLineCode=5, CountLineCodeDecl=4, CountLineCodeExe=3, CountLineComment=0, CountStmt=5, CountStmtDecl=4, CountStmtExe=3, MaxCyclomatic=1, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=1
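`CompressorConfig` is a minimal instance of a common dataclass idiom: a mutable default (`dict`) is declared as `None` and materialized in `__post_init__`, so instances never share state. A standalone usage sketch mirroring the class above:

```python
from dataclasses import dataclass


@dataclass
class CompressorConfig:
    name: str = 'blosc'
    params: dict = None

    def __post_init__(self):
        # A class-level `params={}` default would be shared across instances
        # (and is rejected by dataclasses anyway); normalizing None to a
        # fresh dict here avoids aliasing.
        self.params = self.params or {}


a = CompressorConfig()
b = CompressorConfig(name='zstd', params={'level': 3})
a.params['clevel'] = 5
assert b.params == {'level': 3}  # each instance owns its own dict
```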
---

id: 327,902
repository_name: `Euro-BioImaging/EuBI-Bridge`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/ebridge.py`
class_name: `Euro-BioImaging_EuBI-Bridge.eubi_bridge.ebridge.EuBIBridge`

human_written_code:
```python
import tempfile
from pathlib import Path
import dask
import time
from eubi_bridge.utils.metadata_utils import print_printable, get_printables
from multiprocessing.pool import ThreadPool
from typing import Union
import copy
from eubi_bridge.utils.convenience import take_filepaths, is_zarr_group
import os
import zarr
from distributed import LocalCluster, Client
from eubi_bridge.ebridge_base import BridgeBase, downscale
import pprint
import shutil
from dask_jobqueue import SLURMCluster
import psutil

# Note: `logger`, `verify_filepaths_for_cluster` and `soft_start_jvm` are referenced
# below but defined elsewhere in the module; they are not part of this class extract.


class EuBIBridge:
    """
    EuBIBridge is a conversion tool for bioimage datasets, allowing for both unary and
    aggregative conversion of image data collections to OME-Zarr format.

    Attributes:
        config_gr (zarr.Group): Configuration settings stored in a Zarr group.
        config (dict): Dictionary representation of configuration settings for cluster,
            conversion, and downscaling.
        dask_config (dict): Dictionary representation of configuration settings for
            dask.distributed.
        root_defaults (dict): Installation defaults of configuration settings for cluster,
            conversion, and downscaling.
        root_dask_defaults (dict): Installation defaults of configuration settings for
            dask.distributed.
    """

    def __init__(self, configpath=f"{os.path.expanduser('~')}/.eubi_bridge"):
        """
        Initializes the EuBIBridge class and loads or sets up default configuration.

        Args:
            configpath (str, optional): Path to store configuration settings.
                Defaults to the home directory.
        """
        root_dask_defaults = {
            'distributed.adaptive.interval': '1s', 'distributed.adaptive.maximum': '.inf',
            'distributed.adaptive.minimum': 0, 'distributed.adaptive.target-duration': '5s',
            'distributed.adaptive.wait-count': 3,
            'distributed.admin.event-loop': 'tornado',
            'distributed.admin.large-graph-warning-threshold': '10MB',
            'distributed.admin.log-format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            'distributed.admin.log-length': 10000, 'distributed.admin.low-level-log-length': 1000,
            'distributed.admin.max-error-length': 10000, 'distributed.admin.pdb-on-err': False,
            'distributed.admin.system-monitor.disk': True,
            'distributed.admin.system-monitor.gil.enabled': True,
            'distributed.admin.system-monitor.gil.interval': '1ms',
            'distributed.admin.system-monitor.host-cpu': False,
            'distributed.admin.system-monitor.interval': '500ms',
            'distributed.admin.system-monitor.log-length': 7200,
            'distributed.admin.tick.cycle': '1s', 'distributed.admin.tick.interval': '20ms',
            'distributed.admin.tick.limit': '3s',
            'distributed.client.heartbeat': '5s', 'distributed.client.preload': [],
            'distributed.client.preload-argv': [],
            'distributed.client.scheduler-info-interval': '2s',
            'distributed.client.security-loader': None,
            'distributed.comm.compression': False, 'distributed.comm.default-scheme': 'tcp',
            'distributed.comm.offload': '10MiB', 'distributed.comm.require-encryption': None,
            'distributed.comm.retry.count': 0, 'distributed.comm.retry.delay.max': '20s',
            'distributed.comm.retry.delay.min': '1s', 'distributed.comm.shard': '64MiB',
            'distributed.comm.socket-backlog': 2048,
            'distributed.comm.timeouts.connect': '30s', 'distributed.comm.timeouts.tcp': '30s',
            'distributed.comm.tls.ca-file': None, 'distributed.comm.tls.ciphers': None,
            'distributed.comm.tls.client.cert': None, 'distributed.comm.tls.client.key': None,
            'distributed.comm.tls.max-version': None, 'distributed.comm.tls.min-version': 1.2,
            'distributed.comm.tls.scheduler.cert': None, 'distributed.comm.tls.scheduler.key': None,
            'distributed.comm.tls.worker.cert': None, 'distributed.comm.tls.worker.key': None,
            'distributed.comm.ucx.create-cuda-context': None, 'distributed.comm.ucx.cuda-copy': None,
            'distributed.comm.ucx.environment': {}, 'distributed.comm.ucx.infiniband': None,
            'distributed.comm.ucx.nvlink': None, 'distributed.comm.ucx.rdmacm': None,
            'distributed.comm.ucx.tcp': None,
            'distributed.comm.websockets.shard': '8MiB', 'distributed.comm.zstd.level': 3,
            'distributed.comm.zstd.threads': 0,
            'distributed.dashboard.export-tool': False, 'distributed.dashboard.graph-max-items': 5000,
            'distributed.dashboard.link': '{scheme}://{host}:{port}/status',
            'distributed.dashboard.prometheus.namespace': 'dask',
            'distributed.deploy.cluster-repr-interval': '500ms',
            'distributed.deploy.lost-worker-timeout': '15s',
            'distributed.diagnostics.computations.ignore-files': [
                'runpy\\.py', 'pytest', 'py\\.test', 'pytest-script\\.py', '_pytest',
                'pycharm', 'vscode_pytest', 'get_output_via_markers\\.py'],
            'distributed.diagnostics.computations.ignore-modules': [
                'asyncio', 'functools', 'threading', 'datashader', 'dask', 'debugpy',
                'distributed', 'coiled', 'cudf', 'cuml', 'matplotlib', 'pluggy', 'prefect',
                'rechunker', 'xarray', 'xgboost', 'xdist', '__channelexec__', 'execnet'],
            'distributed.diagnostics.computations.max-history': 100,
            'distributed.diagnostics.computations.nframes': 0,
            'distributed.diagnostics.cudf': False,
            'distributed.diagnostics.erred-tasks.max-history': 100,
            'distributed.diagnostics.nvml': True,
            'distributed.nanny.environ': {},
            'distributed.nanny.pre-spawn-environ.MALLOC_TRIM_THRESHOLD_': 65536,
            'distributed.nanny.pre-spawn-environ.MKL_NUM_THREADS': 1,
            'distributed.nanny.pre-spawn-environ.OMP_NUM_THREADS': 1,
            'distributed.nanny.pre-spawn-environ.OPENBLAS_NUM_THREADS': 1,
            'distributed.nanny.preload': [], 'distributed.nanny.preload-argv': [],
            'distributed.p2p.comm.buffer': '1 GiB', 'distributed.p2p.comm.concurrency': 10,
            'distributed.p2p.comm.message-bytes-limit': '2 MiB',
            'distributed.p2p.comm.retry.count': 10, 'distributed.p2p.comm.retry.delay.max': '30s',
            'distributed.p2p.comm.retry.delay.min': '1s',
            'distributed.p2p.storage.buffer': '100 MiB', 'distributed.p2p.storage.disk': True,
            'distributed.p2p.threads': None,
            'distributed.rmm.pool-size': None,
            'distributed.scheduler.active-memory-manager.interval': '2s',
            'distributed.scheduler.active-memory-manager.measure': 'optimistic',
            'distributed.scheduler.active-memory-manager.policies': [
                {'class': 'distributed.active_memory_manager.ReduceReplicas'}],
            'distributed.scheduler.active-memory-manager.start': True,
            'distributed.scheduler.allowed-failures': 3,
            'distributed.scheduler.allowed-imports': ['dask', 'distributed'],
            'distributed.scheduler.bandwidth': '100000000',
            'distributed.scheduler.blocked-handlers': [],
            'distributed.scheduler.contact-address': None,
            'distributed.scheduler.dashboard.bokeh-application.allow_websocket_origin': ['*'],
            'distributed.scheduler.dashboard.bokeh-application.check_unused_sessions_milliseconds': 500,
            'distributed.scheduler.dashboard.bokeh-application.keep_alive_milliseconds': 500,
            'distributed.scheduler.dashboard.status.task-stream-length': 1000,
            'distributed.scheduler.dashboard.tasks.task-stream-length': 100000,
            'distributed.scheduler.dashboard.tls.ca-file': None,
            'distributed.scheduler.dashboard.tls.cert': None,
            'distributed.scheduler.dashboard.tls.key': None,
            'distributed.scheduler.default-data-size': '1kiB',
            'distributed.scheduler.default-task-durations.rechunk-split': '1us',
            'distributed.scheduler.default-task-durations.split-shuffle': '1us',
            'distributed.scheduler.default-task-durations.split-stage': '1us',
            'distributed.scheduler.default-task-durations.split-taskshuffle': '1us',
            'distributed.scheduler.events-cleanup-delay': '1h',
            'distributed.scheduler.http.routes': [
                'distributed.http.scheduler.prometheus', 'distributed.http.scheduler.info',
                'distributed.http.scheduler.json', 'distributed.http.health',
                'distributed.http.proxy', 'distributed.http.statics'],
            'distributed.scheduler.idle-timeout': None,
            'distributed.scheduler.locks.lease-timeout': '30s',
            'distributed.scheduler.locks.lease-validation-interval': '10s',
            'distributed.scheduler.no-workers-timeout': None,
            'distributed.scheduler.preload': [], 'distributed.scheduler.preload-argv': [],
            'distributed.scheduler.rootish-taskgroup': 5,
            'distributed.scheduler.rootish-taskgroup-dependencies': 5,
            'distributed.scheduler.unknown-task-duration': '500ms',
            'distributed.scheduler.validate': False,
            'distributed.scheduler.work-stealing': True,
            'distributed.scheduler.work-stealing-interval': '1s',
            'distributed.scheduler.worker-saturation': 1.1,
            'distributed.scheduler.worker-ttl': '5 minutes',
            'distributed.version': 2,
            'distributed.worker.blocked-handlers': [],
            'distributed.worker.connections.incoming': 10,
            'distributed.worker.connections.outgoing': 50,
            'distributed.worker.daemon': True,
            'distributed.worker.http.routes': [
                'distributed.http.worker.prometheus', 'distributed.http.health',
                'distributed.http.statics'],
            'distributed.worker.lifetime.duration': None,
            'distributed.worker.lifetime.restart': False,
            'distributed.worker.lifetime.stagger': '0 seconds',
            'distributed.worker.memory.max-spill': False,
            'distributed.worker.memory.monitor-interval': '100ms',
            'distributed.worker.memory.pause': 0.8,
            'distributed.worker.memory.rebalance.measure': 'optimistic',
            'distributed.worker.memory.rebalance.recipient-max': 0.6,
            'distributed.worker.memory.rebalance.sender-min': 0.3,
            'distributed.worker.memory.rebalance.sender-recipient-gap': 0.1,
            'distributed.worker.memory.recent-to-old-time': '30s',
            'distributed.worker.memory.spill': 0.7,
            'distributed.worker.memory.spill-compression': 'auto',
            'distributed.worker.memory.target': 0.6,
            'distributed.worker.memory.terminate': 0.95,
            'distributed.worker.memory.transfer': 0.1,
            'distributed.worker.multiprocessing-method': 'spawn',
            'distributed.worker.preload': [], 'distributed.worker.preload-argv': [],
            'distributed.worker.profile.cycle': '1000ms',
            'distributed.worker.profile.enabled': True,
            'distributed.worker.profile.interval': '10ms',
            'distributed.worker.profile.low-level': False,
            'distributed.worker.resources': {},
            'distributed.worker.transfer.message-bytes-limit': '50MB',
            'distributed.worker.use-file-locking': True,
            'distributed.worker.validate': False}
        defaults = dict(
            cluster=dict(n_jobs=4, threads_per_worker=1, memory_limit='auto', temp_dir='auto',
                         no_worker_restart=False, verbose=False, no_distributed=False,
                         on_slurm=False),
            readers=dict(as_mosaic=False, view_index=0, phase_index=0, illumination_index=0,
                         scene_index=0, rotation_index=0, mosaic_tile_index=0, sample_index=0,
                         use_bioformats_readers=False),
            conversion=dict(zarr_format=2, auto_chunk=False, target_chunk_mb=1, time_chunk=1,
                            channel_chunk=1, z_chunk=96, y_chunk=96, x_chunk=96,
                            time_shard_coef=1, channel_shard_coef=1, z_shard_coef=3,
                            y_shard_coef=3, x_shard_coef=3, time_range=None, channel_range=None,
                            z_range=None, y_range=None, x_range=None, dimension_order='tczyx',
                            compressor='blosc', compressor_params={}, overwrite=False,
                            use_tensorstore=False, use_gpu=False, rechunk_method='tasks',
                            trim_memory=False, metadata_reader='bfio', save_omexml=True,
                            squeeze=False, dtype=None),
            downscale=dict(time_scale_factor=1, channel_scale_factor=1, z_scale_factor=2,
                           y_scale_factor=2, x_scale_factor=2, n_layers=None,
                           min_dimension_size=64, downscale_method='simple'))
        self.root_defaults = defaults
        self.root_dask_defaults = root_dask_defaults
        config_gr = zarr.open_group(configpath, mode='a')
        config = config_gr.attrs
        for key in defaults.keys():
            if key not in config.keys():
                config[key] = {}
            for subkey in defaults[key].keys():
                if subkey not in config[key].keys():
                    config[key][subkey] = defaults[key][subkey]
            config_gr.attrs[key] = config[key]
        self.config = dict(config_gr.attrs)
        if 'dask_config' not in config_gr.keys():
            config_gr.create_group('dask_config')
        dask_config = config_gr['dask_config'].attrs
        for key in root_dask_defaults.keys():
            if key not in dask_config.keys():
                dask_config[key] = root_dask_defaults[key]
        config_gr['dask_config'].attrs.update(dict(dask_config))
        self.dask_config = dict(config_gr['dask_config'].attrs)
        self.config_gr = config_gr
        self._dask_temp_dir = None
        self.client = None

    def _optimize_dask_config(self):
        """Optimize Dask configuration for maximum conversion speed.

        This configuration is tuned for high-performance data processing with Dask,
        focusing on maximizing throughput while maintaining system stability. The
        settings are optimized for I/O- and CPU-bound workloads.
        """
        total_memory = psutil.virtual_memory().total
        total_cores = psutil.cpu_count(logical=False) or psutil.cpu_count() or 4
        memory_target = float(os.getenv('DASK_MEMORY_TARGET', '0.8'))
        memory_spill = float(os.getenv('DASK_MEMORY_SPILL', '0.9'))
        memory_pause = float(os.getenv('DASK_MEMORY_PAUSE', '0.95'))
        dask.config.set({
            'optimization.fuse.active': True, 'optimization.fuse.ave-width': 10,
            'optimization.fuse.subgraphs': True, 'optimization.fuse.rename-keys': True,
            'optimization.culling.active': True, 'optimization.rewrite.fuse': True,
            'distributed.worker.memory.target': memory_target,
            'distributed.worker.memory.spill': memory_spill,
            'distributed.worker.memory.pause': memory_pause,
            'distributed.worker.memory.terminate': 0.98,
            'distributed.worker.memory.monitor-interval':
                '50ms' if total_memory < 32 * 1024 ** 3 else '100ms',
            'distributed.worker.memory.recent-to-old-time': '3s',
            'distributed.comm.compression': 'auto',
            'distributed.comm.retry.count': 2,
            'distributed.comm.timeouts.connect': '60s',
            'distributed.comm.timeouts.tcp': '120s',
            'distributed.comm.shard': '64MiB' if total_memory > 64 * 1024 ** 3 else '32MiB',
            'distributed.comm.offload': '2GiB' if total_memory > 128 * 1024 ** 3 else '1GiB',
            'distributed.scheduler.work-stealing': True,
            'distributed.scheduler.work-stealing-interval': '5ms' if total_cores > 8 else '10ms',
            'distributed.scheduler.bandwidth': 5000000000.0,
            'distributed.scheduler.default-task-durations': {
                'rechunk-split': '1ms', 'rechunk-merge': '1ms', 'from-delayed': '1ms'},
            'distributed.worker.profile.enabled': False,
            'distributed.worker.threads': min(4, max(2, total_cores // 4)),
            'distributed.worker.memory.rebalance.measure': 'optimistic',
            'distributed.worker.memory.rebalance.recipient-max': 0.8,
            'distributed.worker.memory.rebalance.sender-min': 0.3,
            'distributed.client.heartbeat': '10s',
            'distributed.client.scheduler-info-interval': '5s',
            'distributed.comm.zstd.level': 1,
            'distributed.worker.use-file-locking': False})

    def _get_optimal_worker_config(self, n_jobs=None, threads_per_worker=None,
                                   memory_limit=None, **kwargs):
        """Calculate optimal worker configuration for conversion speed."""
        import psutil
        import math
        total_cores = psutil.cpu_count(logical=False) or psutil.cpu_count() or 4
        total_memory = psutil.virtual_memory().total
        if threads_per_worker is None:
            threads_per_worker = min(8, max(2, total_cores // 2 or 1))
        if n_jobs is None:
            n_workers = max(1, min((total_cores - 2) // max(1, threads_per_worker), 32))
        else:
            n_workers = max(1, n_jobs)
        if memory_limit is None:
            reserved_memory = total_memory * 0.1
            memory_per_worker = (total_memory - reserved_memory) / n_workers
            memory_limit = f'{memory_per_worker / 1024 ** 3:.1f}GB'
        if isinstance(memory_limit, str) and 'GB' in memory_limit:
            gb = float(memory_limit.replace('GB', ''))
            memory_limit = f'{max(2.0, gb)}GB'
        return {'n_workers': n_workers, 'threads_per_worker': threads_per_worker,
                'memory_limit': memory_limit}

    def reset_config(self):
        """Resets the cluster, conversion and downscale parameters to the installation defaults."""
        self.config_gr.attrs.update(self.root_defaults)
        self.config = dict(self.config_gr.attrs)

    def reset_dask_config(self):
        """Resets the dask configuration parameters to the installation defaults."""
        self.config_gr['dask_config'].attrs.update(self.root_dask_defaults)
        self.dask_config = dict(self.config_gr['dask_config'].attrs)

    def show_config(self):
        """Displays the current cluster, conversion, and downscale parameters."""
        pprint.pprint(self.config)

    def show_dask_config(self):
        """Displays the current dask.distributed parameters."""
        pprint.pprint(self.dask_config)

    def show_root_defaults(self):
        """Displays the installation defaults for cluster, conversion, and downscale parameters."""
        pprint.pprint(self.root_defaults)

    def show_root_dask_defaults(self):
        """Displays the installation defaults for dask.distributed."""
        pprint.pprint(self.root_dask_defaults)

    def _collect_params(self, param_type, **kwargs):
        """
        Gathers parameters from the configuration, allowing for overrides.

        Args:
            param_type (str): The type of parameters to collect
                (e.g., 'cluster', 'conversion', 'downscale').
            **kwargs: Parameter values that may override defaults.

        Returns:
            dict: Collected parameters.
        """
        params = {}
        for key in self.config[param_type].keys():
            if key in kwargs.keys():
                params[key] = kwargs[key]
            else:
                params[key] = self.config[param_type][key]
        return params

    def configure_cluster(self, memory_limit: str = 'default', n_jobs: int = 'default',
                          no_worker_restart: bool = 'default', on_slurm: bool = 'default',
                          temp_dir: str = 'default', threads_per_worker: int = 'default',
                          no_distributed: bool = 'default', verbose: bool = 'default'):
        """
        Updates cluster configuration settings. To update the current default value for
        a parameter, provide that parameter with a value other than 'default'.

        Args:
            memory_limit (str, optional): Memory limit per worker.
            n_jobs (int, optional): Number of parallel jobs.
            no_worker_restart (bool, optional): Whether to prevent worker restarts.
            on_slurm (bool, optional): Whether running on a SLURM cluster.
            temp_dir (str, optional): Temporary directory for Dask workers.
            threads_per_worker (int, optional): Number of threads per worker.
            no_distributed (bool, optional): Whether to run via local multithreading
                instead of a distributed cluster.
            verbose (bool, optional): Enables detailed logging.

        Returns:
            None
        """
        params = {'memory_limit': memory_limit, 'n_jobs': n_jobs,
                  'no_worker_restart': no_worker_restart, 'on_slurm': on_slurm,
                  'temp_dir': temp_dir, 'threads_per_worker': threads_per_worker,
                  'no_distributed': no_distributed, 'verbose': verbose}
        for key in params:
            if key in self.config['cluster'].keys():
                if params[key] != 'default':
                    self.config['cluster'][key] = params[key]
        self.config_gr.attrs['cluster'] = self.config['cluster']

    def configure_readers(self, as_mosaic: bool = 'default', view_index: int = 'default',
                          phase_index: int = 'default', illumination_index: int = 'default',
                          scene_index: int = 'default', rotation_index: int = 'default',
                          mosaic_tile_index: int = 'default', sample_index: int = 'default',
                          use_bioformats_readers: bool = 'default'):
        """
        Updates reader configuration settings. To update the current default value for
        a parameter, provide that parameter with a value other than 'default'.

        Returns:
            None
        """
        params = {'as_mosaic': as_mosaic, 'view_index': view_index, 'phase_index': phase_index,
                  'illumination_index': illumination_index, 'scene_index': scene_index,
                  'rotation_index': rotation_index, 'mosaic_tile_index': mosaic_tile_index,
                  'sample_index': sample_index, 'use_bioformats_readers': use_bioformats_readers}
        for key in params:
            if key in self.config['readers'].keys():
                if params[key] != 'default':
                    self.config['readers'][key] = params[key]
        self.config_gr.attrs['readers'] = self.config['readers']

    def configure_conversion(self, zarr_format: int = 'default', auto_chunk: bool = 'default',
                             target_chunk_mb: float = 'default', compressor: str = 'default',
                             compressor_params: dict = 'default', time_chunk: int = 'default',
                             channel_chunk: int = 'default', z_chunk: int = 'default',
                             y_chunk: int = 'default', x_chunk: int = 'default',
                             time_shard_coef: int = 'default', channel_shard_coef: int = 'default',
                             z_shard_coef: int = 'default', y_shard_coef: int = 'default',
                             x_shard_coef: int = 'default', time_range: int = 'default',
                             channel_range: int = 'default', z_range: int = 'default',
                             y_range: int = 'default', x_range: int = 'default',
                             dimension_order: str = 'default', overwrite: bool = 'default',
                             rechunk_method: str = 'default', trim_memory: bool = 'default',
                             use_tensorstore: bool = 'default', use_gpu: bool = 'default',
                             metadata_reader: str = 'default', save_omexml: bool = 'default',
                             squeeze: bool = 'default', dtype: str = 'default'):
        """
        Updates conversion configuration settings. To update the current default value
        for a parameter, provide that parameter with a value other than 'default'.
        Configurable parameters include:

        - compressor (str, optional): Compression algorithm.
        - compressor_params (dict, optional): Parameters for the compressor.
        - overwrite (bool, optional): Whether to overwrite existing data.
        - rechunk_method (str, optional): Method used for rechunking.
        - trim_memory (bool, optional): Whether to trim memory usage.
        - use_tensorstore (bool, optional): Whether to use TensorStore for writing.
        - save_omexml (bool, optional): Whether to create a METADATA.ome.xml file.

        Returns:
            None
        """
        params = {'zarr_format': zarr_format, 'auto_chunk': auto_chunk,
                  'target_chunk_mb': target_chunk_mb, 'compressor': compressor,
                  'compressor_params': compressor_params, 'time_chunk': time_chunk,
                  'channel_chunk': channel_chunk, 'z_chunk': z_chunk, 'y_chunk': y_chunk,
                  'x_chunk': x_chunk, 'time_shard_coef': time_shard_coef,
                  'channel_shard_coef': channel_shard_coef, 'z_shard_coef': z_shard_coef,
                  'y_shard_coef': y_shard_coef, 'x_shard_coef': x_shard_coef,
                  'time_range': time_range, 'channel_range': channel_range, 'z_range': z_range,
                  'y_range': y_range, 'x_range': x_range, 'dimension_order': dimension_order,
                  'overwrite': overwrite, 'rechunk_method': rechunk_method,
                  'trim_memory': trim_memory, 'use_tensorstore': use_tensorstore,
                  'use_gpu': use_gpu, 'metadata_reader': metadata_reader,
                  'save_omexml': save_omexml, 'squeeze': squeeze, 'dtype': dtype}
        for key in params:
            if key in self.config['conversion'].keys():
                if params[key] != 'default':
                    self.config['conversion'][key] = params[key]
        self.config_gr.attrs['conversion'] = self.config['conversion']

    def configure_downscale(self, downscale_method: str = 'default', n_layers: int = 'default',
                            min_dimension_size: int = 'default',
                            time_scale_factor: int = 'default',
                            channel_scale_factor: int = 'default',
                            z_scale_factor: int = 'default', y_scale_factor: int = 'default',
                            x_scale_factor: int = 'default'):
        """
        Updates downscaling configuration settings. To update the current default value
        for a parameter, provide that parameter with a value other than 'default'.

        Args:
            downscale_method (str, optional): Downscaling algorithm.
            n_layers (int, optional): Number of downscaling layers.
            min_dimension_size (int, optional): Minimum dimension size for downscaling.
            time_scale_factor, channel_scale_factor, z_scale_factor, y_scale_factor,
                x_scale_factor (int, optional): Scaling factors for each dimension.

        Returns:
            None
        """
        params = {'downscale_method': downscale_method, 'n_layers': n_layers,
                  'min_dimension_size': min_dimension_size,
                  'time_scale_factor': time_scale_factor,
                  'channel_scale_factor': channel_scale_factor,
                  'z_scale_factor': z_scale_factor, 'y_scale_factor': y_scale_factor,
                  'x_scale_factor': x_scale_factor}
        for key in params:
            if key in self.config['downscale'].keys():
                if params[key] != 'default':
                    self.config['downscale'][key] = params[key]
        self.config_gr.attrs['downscale'] = self.config['downscale']

    def _set_dask_temp_dir(self, temp_dir='auto'):
        if self._dask_temp_dir is not None:
            self._dask_temp_dir.cleanup()
        if temp_dir in ('auto', None):
            temp_dir = tempfile.TemporaryDirectory()
        else:
            os.makedirs(temp_dir, exist_ok=True)
            temp_dir = tempfile.TemporaryDirectory(dir=temp_dir)
        self._dask_temp_dir = temp_dir
        return self

    def _start_cluster(self, n_jobs: int = 4, threads_per_worker: int = 1,
                       memory_limit: str = 'auto', temp_dir='auto', verbose=False,
                       on_slurm=False, no_distributed=False, config_kwargs={}, **kwargs):
        config_dict = copy.deepcopy(self.dask_config)
        config_dict.update(**config_kwargs)
        scheduler_options = {'allowed_failures': 100, 'idle_timeout': '1h', 'worker_ttl': '1d'}
        self._set_dask_temp_dir(temp_dir)
        dask.config.set(config_dict)
        if no_distributed:
            config_dict.update(scheduler='threads', pool=ThreadPool(n_jobs))
            dask.config.set(config_dict)
            logger.info('Process running locally via multithreading.')
        else:
            self._optimize_dask_config()
            if memory_limit == 'auto':
                reserve_fraction = kwargs.get('reserve_memory_fraction', 0.1)
                min_per_worker = kwargs.get('min_memory_per_worker', 1 * 1024 ** 3)
                total_mem = psutil.virtual_memory().total
                reserved_mem = total_mem * reserve_fraction
                available_mem = max(0, total_mem - reserved_mem)
                mem_per_worker = max(available_mem / n_jobs, min_per_worker)
                mem_gb = mem_per_worker / (1 * 1024 ** 3)
                memory_limit = f'{mem_gb} GB'
                logger.info(f'{memory_limit} memory has been allocated per worker.')
            if on_slurm:
                logger.info('Process running on Slurm.')
                cluster = SLURMCluster(cores=threads_per_worker, processes=1, nanny=False,
                                       scheduler_options=scheduler_options, n_workers=n_jobs,
                                       memory=memory_limit,
                                       local_directory=f'{self._dask_temp_dir.name}')
            else:
                logger.info('Process running on local cluster.')
                cluster = LocalCluster(n_workers=n_jobs, threads_per_worker=threads_per_worker,
                                       nanny=False, scheduler_kwargs=scheduler_options,
                                       memory_limit=memory_limit,
                                       local_directory=f'{self._dask_temp_dir.name}')
            cluster.scale(n_jobs)
            self.client = Client(cluster)
            if verbose:
                logger.info(self.client.cluster)
        return self

    def to_zarr(self, input_path: Union[Path, str], output_path: Union[Path, str],
                includes=None, excludes=None, time_tag: Union[str, tuple] = None,
                channel_tag: Union[str, tuple] = None, z_tag: Union[str, tuple] = None,
                y_tag: Union[str, tuple] = None, x_tag: Union[str, tuple] = None,
                concatenation_axes: Union[int, tuple, str] = None, **kwargs):
        """
        Converts image data to OME-Zarr format and optionally applies downscaling.

        Args:
            input_path (Union[Path, str]): Path to input file or directory.
            output_path (Union[Path, str]): Directory, in which the output OME-Zarrs
                will be written.
            includes (str, optional): Filename patterns to filter for.
            excludes (str, optional): Filename patterns to filter against.
            time_tag (Union[str, tuple], optional): Time dimension tag.
            channel_tag (Union[str, tuple], optional): Channel dimension tag.
            z_tag (Union[str, tuple], optional): Z dimension tag.
            y_tag (Union[str, tuple], optional): Y dimension tag.
            x_tag (Union[str, tuple], optional): X dimension tag.
            concatenation_axes (Union[int, tuple, str], optional): Axes, along which the
                images will be concatenated.
            **kwargs: Additional configuration overrides.

        Raises:
            Exception: If no files are found in the input path.

        Prints:
            Process logs including conversion and downscaling time.

        Returns:
            None
        """
        t0 = time.time()
        self.cluster_params = self._collect_params('cluster', **kwargs)
        self.readers_params = self._collect_params('readers', **kwargs)
        self.conversion_params = self._collect_params('conversion', **kwargs)
        self.downscale_params = self._collect_params('downscale', **kwargs)
        if self.conversion_params['use_gpu'] and self.conversion_params['use_tensorstore']:
            raise ValueError('Tensorstore is not supported for GPU arrays.')
        logger.info('Base conversion initiated.')
        paths = take_filepaths(input_path, includes=includes, excludes=excludes)
        filepaths = sorted(list(paths))
        verified_for_cluster = verify_filepaths_for_cluster(filepaths)
        if not verified_for_cluster or self.readers_params['use_bioformats_readers']:
            self.cluster_params['no_distributed'] = True
            soft_start_jvm()
        cluster_is_true = not self.cluster_params['no_distributed']
        if cluster_is_true:
            chunks_yx = None
        else:
            chunks_yx = (self.conversion_params['y_chunk'], self.conversion_params['x_chunk'])
        self._start_cluster(**self.cluster_params)
        series = self.readers_params['scene_index']
        base = BridgeBase(input_path, excludes=excludes, includes=includes, series=series,
                          zarr_format=self.conversion_params['zarr_format'],
                          verbose=self.cluster_params['verbose'])
        base.read_dataset(verified_for_cluster=cluster_is_true, chunks_yx=chunks_yx,
                          readers_params=self.readers_params)
        base.digest(time_tag=time_tag, channel_tag=channel_tag, z_tag=z_tag, y_tag=y_tag,
                    x_tag=x_tag, axes_of_concatenation=concatenation_axes, **kwargs)
        logger.info('Metadata was extracted')
        verbose = base._verbose
        if 'region_shape' in kwargs.keys():
            self.conversion_params['region_shape'] = kwargs.get('region_shape')
        if verbose:
            print('Cluster params:')
            pprint.pprint(self.cluster_params)
            print('Readers params:')
            pprint.pprint(self.readers_params)
            print('Conversion params:')
            pprint.pprint(self.conversion_params)
            print('Downscale params:')
            pprint.pprint(self.downscale_params)
        temp_dir = base._dask_temp_dir
        self.conversion_params['temp_dir'] = temp_dir
        self.downscale_params['temp_dir'] = temp_dir
        if self.client is not None:
            base.client = self.client
            base.set_dask_temp_dir(self._dask_temp_dir)
        self.base_results = base.write_arrays(output_path, compute=True, verbose=verbose,
                                              **self.conversion_params)
        logger.info('Base conversion finished.')
        t1 = time.time()
        logger.info(f'Elapsed for base conversion: {(t1 - t0) / 60} min.')
        n_layers = self.downscale_params['n_layers']
        if n_layers in (None, 'default', 'auto') or n_layers > 1:
            logger.info('Downscaling initiated.')
            _ = downscale(self.base_results, **self.downscale_params,
                          auto_chunk=kwargs.get('auto_chunk',
                                                self.conversion_params['auto_chunk']),
                          target_chunk_mb=kwargs.get('target_chunk_mb',
                                                     self.conversion_params['target_chunk_mb']),
                          zarr_format=self.conversion_params['zarr_format'],
                          rechunk_method=self.conversion_params['rechunk_method'],
                          use_tensorstore=self.conversion_params['use_tensorstore'],
                          compressor=self.conversion_params['compressor'],
                          compressor_params=self.conversion_params['compressor_params'],
                          verbose=verbose)
            logger.info('Downscaling finished.')
        if self.client is not None:
            self.client.shutdown()
            self.client.close()
        if isinstance(self._dask_temp_dir, tempfile.TemporaryDirectory):
            shutil.rmtree(self._dask_temp_dir.name)
        else:
            shutil.rmtree(self._dask_temp_dir)
        t1 = time.time()
        logger.info(f'Elapsed for conversion + downscaling: {(t1 - t0) / 60} min.')

    def show_pixel_meta(self, input_path: Union[Path, str], includes=None, excludes=None,
                        series: int = None, **kwargs):
        """
        Print pixel-level metadata for all datasets in the 'input_path'.

        Args:
            input_path (Union[Path, str]): Path to input file or directory.
            includes (str, optional): Filename patterns to filter for.
            excludes (str, optional): Filename patterns to filter against.
            series (int, optional): Series index to process.
            **kwargs: Additional configuration overrides.

        Raises:
            Exception: If no files are found in the input path.

        Returns:
            None
        """
        self.cluster_params = self._collect_params('cluster', **kwargs)
        self.readers_params = self._collect_params('readers', **kwargs)
        self.conversion_params = self._collect_params('conversion', **kwargs)
        paths = take_filepaths(input_path, includes=includes, excludes=excludes)
        filepaths = sorted(list(paths))
        verified_for_cluster = verify_filepaths_for_cluster(filepaths)
        if not verified_for_cluster:
            self.cluster_params['no_distributed'] = True
        self._start_cluster(**self.cluster_params)
        series = self.readers_params['scene_index']
        base = BridgeBase(input_path, excludes=excludes, includes=includes, series=series)
        base.read_dataset(verified_for_cluster, readers_params=self.readers_params)
        base.digest()
        temp_dir = base._dask_temp_dir
        self.conversion_params['temp_dir'] = temp_dir
        if self.client is not None:
            base.client = self.client
            base.set_dask_temp_dir(self._dask_temp_dir)
        printables = {path: get_printables(manager.axes, manager.shapedict,
                                           manager.scaledict, manager.unitdict)
                      for path, manager in base.batchdata.managers.items()}
        for path, printable in printables.items():
            print('---------')
            print('')
            print(f"Metadata for '{path}':")
            print_printable(printable)
        if self.client is not None:
            self.client.shutdown()
            self.client.close()
        if isinstance(self._dask_temp_dir, tempfile.TemporaryDirectory):
            shutil.rmtree(self._dask_temp_dir.name)
        else:
            shutil.rmtree(self._dask_temp_dir)

    def update_pixel_meta(self, input_path: Union[Path, str], includes=None, excludes=None,
                          time_scale: (int, float) = None, z_scale: (int, float) = None,
                          y_scale: (int, float) = None, x_scale: (int, float) = None,
                          time_unit: str = None, z_unit: str = None, y_unit: str = None,
                          x_unit: str = None, **kwargs):
        """
        Updates pixel metadata for image files located at the specified input path.

        Args:
            input_path (Union[Path, str]): Path to input file or directory.
            includes (optional): Filename patterns to include.
            excludes (optional): Filename patterns to exclude.
            time_scale, z_scale, y_scale, x_scale ((int, float), optional): Scaling
                factors for the respective dimensions.
            time_unit, z_unit, y_unit, x_unit (str, optional): Units for the respective
                dimensions.
            **kwargs: Additional parameters for cluster and conversion configuration.

        Returns:
            None
        """
        self.cluster_params = self._collect_params('cluster', **kwargs)
        self.readers_params = self._collect_params('readers', **kwargs)
        self.conversion_params = self._collect_params('conversion', **kwargs)
        paths = take_filepaths(input_path, includes=includes, excludes=excludes)
        filepaths = sorted(list(paths))
        verified_for_cluster = verify_filepaths_for_cluster(filepaths)
        if not verified_for_cluster:
            self.cluster_params['no_distributed'] = True
        cluster_is_true = not self.cluster_params['no_distributed']
        self._start_cluster(**self.cluster_params)
        series = self.readers_params['scene_index']
        base = BridgeBase(input_path, excludes=excludes, includes=includes, series=series)
        base.read_dataset(verified_for_cluster, readers_params=self.readers_params)
        pixel_meta_kwargs_ = dict(time_scale=time_scale, z_scale=z_scale, y_scale=y_scale,
                                  x_scale=x_scale, time_unit=time_unit, z_unit=z_unit,
                                  y_unit=y_unit, x_unit=x_unit)
        pixel_meta_kwargs = {key: val for key, val in pixel_meta_kwargs_.items()
                             if val is not None}
        base.digest(**pixel_meta_kwargs)
        logger.info('Metadata was extracted')
        if self.client is not None:
            base.client = self.client
            base.set_dask_temp_dir(self._dask_temp_dir)
        for path, manager in base.batchdata.managers.items():
            if is_zarr_group(manager.path):
                manager.sync_pyramid(self.conversion_params['save_omexml'])
            else:
                logger.info(f'Cannot update metadata for non-zarr path: {path}')
        if self.client is not None:
            self.client.shutdown()
            self.client.close()
        if isinstance(self._dask_temp_dir, tempfile.TemporaryDirectory):
            shutil.rmtree(self._dask_temp_dir.name)
        else:
            shutil.rmtree(self._dask_temp_dir)
```
class_skeleton:

```python
class EuBIBridge:
    '''
    EuBIBridge is a conversion tool for bioimage datasets, allowing for both unary and
    aggregative conversion of image data collections to OME-Zarr format.

    Attributes:
        config_gr (zarr.Group): Configuration settings stored in a Zarr group.
        config (dict): Dictionary representation of configuration settings for cluster,
            conversion, and downscaling.
        dask_config (dict): Dictionary representation of configuration settings for
            dask.distributed.
        root_defaults (dict): Installation defaults of configuration settings for cluster,
            conversion, and downscaling.
        root_dask_defaults (dict): Installation defaults of configuration settings for
            dask.distributed.
    '''

    def __init__(self, configpath=f"{os.path.expanduser('~')}/.eubi_bridge"):
        '''
        Initializes the EuBIBridge class and loads or sets up default configuration.

        Args:
            configpath (str, optional): Path to store configuration settings.
                Defaults to the home directory.
        '''
        pass

    def _optimize_dask_config(self):
        '''Optimize Dask configuration for maximum conversion speed.

        This configuration is tuned for high-performance data processing with Dask,
        focusing on maximizing throughput while maintaining system stability. The
        settings are optimized for I/O- and CPU-bound workloads.
        '''
        pass

    def _get_optimal_worker_config(self, n_jobs=None, threads_per_worker=None,
                                   memory_limit=None, **kwargs):
        '''Calculate optimal worker configuration for conversion speed.'''
        pass

    def reset_config(self):
        '''Resets the cluster, conversion and downscale parameters to the installation defaults.'''
        pass

    def reset_dask_config(self):
        '''Resets the dask configuration parameters to the installation defaults.'''
        pass

    def show_config(self):
        '''Displays the current cluster, conversion, and downscale parameters.'''
        pass

    def show_dask_config(self):
        '''Displays the current dask.distributed parameters.'''
        pass

    def show_root_defaults(self):
        '''Displays the installation defaults for cluster, conversion, and downscale parameters.'''
        pass

    def show_root_dask_defaults(self):
        '''Displays the installation defaults for dask.distributed.'''
        pass

    def _collect_params(self, param_type, **kwargs):
        '''
        Gathers parameters from the configuration, allowing for overrides.

        Args:
            param_type (str): The type of parameters to collect
                (e.g., 'cluster', 'conversion', 'downscale').
            **kwargs: Parameter values that may override defaults.

        Returns:
            dict: Collected parameters.
        '''
        pass

    def configure_cluster(self, memory_limit: str = 'default', n_jobs: int = 'default',
                          no_worker_restart: bool = 'default', on_slurm: bool = 'default',
                          temp_dir: str = 'default', threads_per_worker: int = 'default',
                          no_distributed: bool = 'default', verbose: bool = 'default'):
        '''
        Updates cluster configuration settings. To update the current default value for
        a parameter, provide that parameter with a value other than 'default'.

        Args:
            memory_limit (str, optional): Memory limit per worker.
            n_jobs (int, optional): Number of parallel jobs.
            no_worker_restart (bool, optional): Whether to prevent worker restarts.
            on_slurm (bool, optional): Whether running on a SLURM cluster.
            temp_dir (str, optional): Temporary directory for Dask workers.
            threads_per_worker (int, optional): Number of threads per worker.
            verbose (bool, optional): Enables detailed logging.

        Returns:
            None
        '''
        pass

    def configure_readers(self, as_mosaic: bool = 'default', view_index: int = 'default',
                          phase_index: int = 'default', illumination_index: int = 'default',
                          scene_index: int = 'default', rotation_index: int = 'default',
                          mosaic_tile_index: int = 'default', sample_index: int = 'default',
                          use_bioformats_readers: bool = 'default'):
        '''
        Updates reader configuration settings. To update the current default value for
        a parameter, provide that parameter with a value other than 'default'.

        Returns:
            None
        '''
        pass

    def configure_conversion(self, zarr_format: int = 'default', auto_chunk: bool = 'default',
                             target_chunk_mb: float = 'default', compressor: str = 'default',
                             compressor_params: dict = 'default', time_chunk: int = 'default',
                             channel_chunk: int = 'default', z_chunk: int = 'default',
                             y_chunk: int = 'default', x_chunk: int = 'default',
                             time_shard_coef: int = 'default', channel_shard_coef: int = 'default',
                             z_shard_coef: int = 'default', y_shard_coef: int = 'default',
                             x_shard_coef: int = 'default', time_range: int = 'default',
                             channel_range: int = 'default', z_range: int = 'default',
                             y_range: int = 'default', x_range: int = 'default',
                             dimension_order: str = 'default', overwrite: bool = 'default',
                             rechunk_method: str = 'default', trim_memory: bool = 'default',
                             use_tensorstore: bool = 'default', use_gpu: bool = 'default',
                             metadata_reader: str = 'default', save_omexml: bool = 'default',
                             squeeze: bool = 'default', dtype: str = 'default'):
        '''
        Updates conversion configuration settings. To update the current default value
        for a parameter, provide that parameter with a value other than 'default'.
        Configurable parameters include:

        - compressor (str, optional): Compression algorithm.
        - compressor_params (dict, optional): Parameters for the compressor.
        - overwrite (bool, optional): Whether to overwrite existing data.
        - rechunk_method (str, optional): Method used for rechunking.
        - trim_memory (bool, optional): Whether to trim memory usage.
        - use_tensorstore (bool, optional): Whether to use TensorStore for writing.
        - save_omexml (bool, optional): Whether to create a METADATA.ome.xml file.

        Returns:
            None
        '''
        pass

    def configure_downscale(self, downscale_method: str = 'default', n_layers: int = 'default',
                            min_dimension_size: int = 'default',
                            time_scale_factor: int = 'default',
                            channel_scale_factor: int = 'default',
                            z_scale_factor: int = 'default', y_scale_factor: int = 'default',
                            x_scale_factor: int = 'default'):
        '''
        Updates downscaling configuration settings. To update the current default value
        for a parameter, provide that parameter with a value other than 'default'.

        Args:
            downscale_method (str, optional): Downscaling algorithm.
            n_layers (int, optional): Number of downscaling layers.
            min_dimension_size (int, optional): Minimum dimension size for downscaling.
            time_scale_factor, channel_scale_factor, z_scale_factor, y_scale_factor,
                x_scale_factor (int, optional): Scaling factors for each dimension.

        Returns:
            None
        '''
        pass

    def _set_dask_temp_dir(self, temp_dir='auto'):
        pass

    def _start_cluster(self, n_jobs: int = 4, threads_per_worker: int = 1,
                       memory_limit: str = 'auto', temp_dir='auto', verbose=False,
                       on_slurm=False, no_distributed=False, config_kwargs={}, **kwargs):
        pass

    def to_zarr(self, input_path: Union[Path, str], output_path: Union[Path, str],
                includes=None, excludes=None, time_tag: Union[str, tuple] = None,
                channel_tag: Union[str, tuple] = None, z_tag: Union[str, tuple] = None,
                y_tag: Union[str, tuple] = None, x_tag: Union[str, tuple] = None,
                concatenation_axes: Union[int, tuple, str] = None, **kwargs):
        '''
        Converts image data to OME-Zarr format and optionally applies downscaling.

        Args:
            input_path (Union[Path, str]): Path to input file or directory.
            output_path (Union[Path, str]): Directory, in which the output OME-Zarrs
                will be written.
            includes (str, optional): Filename patterns to filter for.
            excludes (str, optional): Filename patterns to filter against.
            time_tag (Union[str, tuple], optional): Time dimension tag.
            channel_tag (Union[str, tuple], optional): Channel dimension tag.
            z_tag (Union[str, tuple], optional): Z dimension tag.
            y_tag (Union[str, tuple], optional): Y dimension tag.
            x_tag (Union[str, tuple], optional): X dimension tag.
            concatenation_axes (Union[int, tuple, str], optional): Axes, along which the
                images will be concatenated.
            **kwargs: Additional configuration overrides.

        Raises:
            Exception: If no files are found in the input path.

        Prints:
            Process logs including conversion and downscaling time.

        Returns:
            None
        '''
        pass

    def show_pixel_meta(self, input_path: Union[Path, str], includes=None, excludes=None,
                        series: int = None, **kwargs):
        '''
        Print pixel-level metadata for all datasets in the 'input_path'.

        Args:
            input_path (Union[Path, str]): Path to input file or directory.
            includes (str, optional): Filename patterns to filter for.
            excludes (str, optional): Filename patterns to filter against.
            series (int, optional): Series index to process.
            **kwargs: Additional configuration overrides.

        Raises:
            Exception: If no files are found in the input path.

        Returns:
            None
        '''
        pass

    def update_pixel_meta(self, input_path: Union[Path, str], includes=None, excludes=None,
                          time_scale: (int, float) = None, z_scale: (int, float) = None,
                          y_scale: (int, float) = None, x_scale: (int, float) = None,
                          time_unit: str = None, z_unit: str = None, y_unit: str = None,
                          x_unit: str = None, **kwargs):
        '''
        Updates pixel metadata for image files located at the specified input path.

        Args:
            input_path (Union[Path, str]): Path to input file or directory.
            includes (optional): Filename patterns to include.
            excludes (optional): Filename patterns to exclude.
            time_scale, z_scale, y_scale, x_scale ((int, float), optional): Scaling
                factors for the respective dimensions.
            time_unit, z_unit, y_unit, x_unit (str, optional): Units for the respective
                dimensions.
            **kwargs: Additional parameters for cluster and conversion configuration.

        Returns:
            None
        '''
        pass
```
metrics: total_program_units=20, total_doc_str=18, AvgCountLine=55, AvgCountLineBlank=6, AvgCountLineCode=38, AvgCountLineComment=12, AvgCyclomatic=4, CommentToCodeRatio=0.48, CountClassBase=0, CountClassCoupled=12, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=19, CountDeclInstanceVariable=12, CountDeclMethod=19, CountDeclMethodAll=19, CountLine=1188, CountLineBlank=131, CountLineCode=728, CountLineCodeDecl=204, CountLineCodeExe=603, CountLineComment=349, CountStmt=263, CountStmtDecl=101, CountStmtExe=241, MaxCyclomatic=10, MaxInheritanceTree=0, MaxNesting=4, SumCyclomatic=74
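Because the metric columns sit flat on each record, the dataset can be sliced by complexity with ordinary dataframe filters. A sketch assuming the records are loaded into pandas via `datasets` (the Hub id is a placeholder and the thresholds are illustrative):

```python
from datasets import load_dataset

ds = load_dataset("your-org/class-level-code-metrics", split="train")  # placeholder id
df = ds.to_pandas()

# Large, documented classes: many executable lines and a substantial
# comment-to-code ratio, like the EuBIBridge record above.
big_documented = df[(df["CountLineCodeExe"] > 500) & (df["CommentToCodeRatio"] > 0.4)]

# Simple data holders: a single declared method and trivial cyclomatic
# complexity, like the CompressorConfig record.
holders = df[(df["CountDeclMethod"] == 1) & (df["SumCyclomatic"] <= 1)]

print(len(big_documented), len(holders))
```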
---

id: 327,903
repository_name: `Euro-BioImaging/EuBI-Bridge`
file_path: `/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/ebridge_base.py`
class_name: `Euro-BioImaging_EuBI-Bridge.eubi_bridge.ebridge_base.BridgeBase`

human_written_code:
```python
from eubi_bridge.fileset_io import BatchFile
import os
import tempfile
import dask
from eubi_bridge.base.data_manager import BatchManager
from eubi_bridge.base.readers import read_single_image_asarray
from eubi_bridge.utils.convenience import take_filepaths
from pathlib import Path
from typing import Tuple, Union
import dask.array as da
from eubi_bridge.base.writers import store_arrays


class BridgeBase:

    def __init__(self, input_path: Union[str, Path], includes=None, excludes=None,
                 metadata_path=None, series=None, client=None, zarr_format=2, verbose=False):
        """
        Initialize the BridgeBase class.

        This class is the main entry point for converting and processing image data.

        Args:
            input_path (Union[str, Path]): Path to the input file or directory.
            includes (optional): Patterns of filenames to include.
            excludes (optional): Patterns of filenames to exclude.
            metadata_path (optional): Path to metadata file if any.
            series (optional): Series index or name to process.
            client (optional): Dask client for parallel processing.
            zarr_format (int, optional): Zarr format version. Defaults to 2.
            verbose (bool, optional): Enable verbose logging. Defaults to False.
        """
        if not input_path.startswith(os.sep):
            input_path = os.path.abspath(input_path)
        self._input_path = input_path
        self._includes = includes
        self._excludes = excludes
        self._metadata_path = metadata_path
        self._series = series
        self._dask_temp_dir = None
        self._zarr_format = zarr_format
        self._verbose = verbose
        self.vmeta = None
        self._cluster_params = None
        self.client = client
        self.fileset = None
        self.pixel_metadata = None
        self.fileset = None  # assigned twice in the source
        if self._series is not None:
            assert isinstance(self._series, (int, str)), (
                'The series parameter must be either an integer or string. Selection of '
                'multiple series from the same image is currently not supported.')

    def set_dask_temp_dir(self, temp_dir: Union[str, Path] = 'auto'):
        """
        Set the temporary directory to store dask intermediate results.

        If the argument is 'auto' or None, the function will create a temporary directory
        in the system's default temporary directory and store the path in
        self._dask_temp_dir. If the argument is a string or Path, the function will
        create a temporary directory at the given path and store the path in
        self._dask_temp_dir.

        Parameters
        ----------
        temp_dir : Union[str, Path], optional
            The name of the temporary directory to store the intermediate results.
            Defaults to 'auto'.
        """
        if isinstance(temp_dir, tempfile.TemporaryDirectory):
            self._dask_temp_dir = temp_dir
            return
        if temp_dir in ('auto', None):
            temp_dir = tempfile.TemporaryDirectory(delete=False)  # `delete` needs Python >= 3.12
        elif isinstance(temp_dir, (str, Path)):
            os.makedirs(temp_dir, exist_ok=True)
            temp_dir = tempfile.TemporaryDirectory(dir=temp_dir, delete=False)
        else:
            raise TypeError(f'Invalid temp_dir argument: {temp_dir}')
        self._dask_temp_dir = temp_dir

    def read_dataset(self, verified_for_cluster, chunks_yx=None, readers_params={}):
        """
        - If the input path is a directory, can read single or multiple files from it.
        - If the input path is a file, can read a single image from it.
        - If the input path is a file with multiple series, can currently only read one
          series from it. Reading multiple series is currently not supported.
        - If the input path is a csv file with filepaths and conversion parameters, can
          read the filepaths and conversion parameters from it.
        """
        input_path = self._input_path
        includes = self._includes
        excludes = self._excludes
        metadata_path = self._metadata_path
        series = self._series
        zarr_format = self._zarr_format
        verbose = self._verbose
        _input_is_csv = False
        if input_path.endswith('.csv'):
            _input_is_csv = True
            self.filepaths = take_filepaths(input_path, includes, excludes)
        if os.path.isfile(input_path) or input_path.endswith('.zarr'):
            dirname = os.path.dirname(input_path)
            basename = os.path.basename(input_path)
            input_path = f'{dirname}/*{basename}'
            self._input_path = input_path
        if not _input_is_csv:
            self.filepaths = take_filepaths(input_path, includes, excludes)
        if series is None or series == 0:
            readers_params.pop('scene_index', None)  # original used a bare try/except
        futures = [read_single_image_asarray(path, chunks_yx=chunks_yx,
                                             verified_for_cluster=verified_for_cluster,
                                             zarr_format=zarr_format, verbose=verbose,
                                             **readers_params)
                   for path in self.filepaths]
        self.arrays = dask.compute(*futures)
        if metadata_path is None:
            self.metadata_path = self.filepaths[0]
        else:
            self.metadata_path = metadata_path

    def digest(self, time_tag: Union[str, tuple] = None, channel_tag: Union[str, tuple] = None,
               z_tag: Union[str, tuple] = None, y_tag: Union[str, tuple] = None,
               x_tag: Union[str, tuple] = None,
               axes_of_concatenation: Union[int, tuple, str] = None,
               metadata_reader: str = 'bfio', **kwargs):
        """
        Digest the input data. This optionally involves concatenating multiple images
        along specified axes.

        Parameters
        ----------
        time_tag : Union[str, tuple], optional
            The tag for the time axis. Defaults to None.
        channel_tag : Union[str, tuple], optional
            The tag for the channel axis. Defaults to None.
        z_tag : Union[str, tuple], optional
            The tag for the z axis. Defaults to None.
        y_tag : Union[str, tuple], optional
            The tag for the y axis. Defaults to None.
        x_tag : Union[str, tuple], optional
            The tag for the x axis. Defaults to None.
        axes_of_concatenation : Union[int, tuple, str], optional
            The axes to concatenate. Defaults to None.

        Examples
        --------
        >>> # For a set of files with patterns in the file path in the following format:
        >>> # ["timepoint0_channel0_slice0.tif", ..., "timepoint12_channel2_slice25.tif"]
        >>> # Concatenate only along specific axes:
        >>> bridge.digest(
        ...     time_tag='timepoint',
        ...     channel_tag='channel',
        ...     z_tag='slice_',
        ...     axes_of_concatenation='tz'  # concatenate only time and z dimensions
        ... )
        """
        axes = 'tczyx'
        tags = (time_tag, channel_tag, z_tag, y_tag, x_tag)
        self.batchfile = BatchFile(self.filepaths, arrays=self.arrays,
                                   axis_tag0=time_tag, axis_tag1=channel_tag,
                                   axis_tag2=z_tag, axis_tag3=y_tag, axis_tag4=x_tag)
        axdict = dict(zip(axes, tags))
        if axes_of_concatenation is None:
            axes_of_concatenation = []
        axlist = [axes.index(x) for x in axes_of_concatenation if x in axes]
        self.batchfile._construct_managers(axes=axlist, series=self._series,
                                           metadata_reader=metadata_reader, **kwargs)
        self.batchfile._construct_channel_managers(series=self._series,
                                                   metadata_reader=metadata_reader, **kwargs)
        self.batchfile._complete_process(axlist)
        self.digested_arrays, self.digested_arrays_sample_paths, self.managers = \
            self.batchfile.get_output_dicts(self._input_path)
        self._compute_pixel_metadata(**kwargs)
        return self

    def _compute_pixel_metadata(self, **kwargs):
        """Compute and update pixel metadata for the digested arrays.

        Args:
            **kwargs: Additional metadata including units and scales.
        """
        assert self.digested_arrays is not None
        assert self.digested_arrays_sample_paths is not None
        assert self.managers is not None
        unit_mapping = {'time_unit': 't', 'channel_unit': 'c', 'z_unit': 'z',
                        'y_unit': 'y', 'x_unit': 'x'}
        scale_mapping = {'time_scale': 't', 'channel_scale': 'c', 'z_scale': 'z',
                         'y_scale': 'y', 'x_scale': 'x'}
        update_unitdict = {unit_mapping[k]: v for k, v in kwargs.items() if k in unit_mapping}
        update_scaledict = {scale_mapping[k]: v for k, v in kwargs.items() if k in scale_mapping}
        for name, arr in self.digested_arrays.items():
            path = self.digested_arrays_sample_paths[name]
            manager = self.managers[name]
            manager.set_arraydata(arr)
            manager.update_meta(new_unitdict=update_unitdict, new_scaledict=update_scaledict)
        self.batchdata = BatchManager(self.managers)
        self.batchdata.fill_default_meta()

    def squeeze_dataset(self):
        self.batchdata.squeeze()

    def transpose_dataset(self, dimension_order: Union[str, tuple, list]):
        """
        Transpose the dataset according to the given dimension order.

        Parameters
        ----------
        dimension_order : Union[str, tuple, list]
            The order of the dimensions in the transposed array.
        """
        self.batchdata.transpose(newaxes=dimension_order)

    def crop_dataset(self, **kwargs):
        self.batchdata.crop(**kwargs)

    def to_cupy(self):
        self.batchdata.to_cupy()

    def _prepare_array_metadata(self, batch_manager, sample_path_mapping, autochunk=True):
        """Prepare metadata dictionaries for array storage.

        Args:
            batch_manager: BatchManager instance containing array data.
            sample_path_mapping: Dictionary mapping array names to their file paths.

        Returns:
            Tuple containing dictionaries for arrays, scales, axes, units, chunks and channels.
        """
        array_data = {}
        dimension_scales = {}
        dimension_axes = {}
        dimension_units = {}
        chunk_configs = {}
        channel_meta = {}
        for array_name, file_path in sample_path_mapping.items():
            manager = batch_manager.managers[array_name]
            array_data[array_name] = {'0': manager.array}
            dimension_scales[array_name] = {'0': manager.scales}
            dimension_axes[array_name] = {'0': manager.axes}
            dimension_units[array_name] = {'0': manager.units}
            chunk_configs[array_name] = {'0': manager.chunks}
            channel_meta[array_name] = {'0': manager.channels}
        return (array_data, dimension_scales, dimension_axes, dimension_units,
                chunk_configs, channel_meta)

    def _create_output_path_mapping(self, output_dir, nested_data, sample_paths):
        """Create flattened path mappings for the given data dictionary.

        Args:
            output_dir: Base output directory.
            nested_data: Nested dictionary of data to be flattened.
            sample_paths: Dictionary mapping array names to their file paths.

        Returns:
            Dictionary with output file paths as keys.
        """
        return {os.path.join(output_dir,
                             f'{array_name}.zarr' if not array_name.endswith('zarr') else array_name,
                             str(level)): value
                for array_name, subdict in nested_data.items()
                for level, value in subdict.items()}

    def _process_chunking_configurations(self, chunk_sizes, shard_coefficients,
                                         axis_mappings, chunk_mappings):
        """Process chunk and shard configurations for each array.

        Args:
            chunk_sizes: Dictionary of chunk sizes per dimension.
            shard_coefficients: Dictionary of shard coefficients per dimension.
            axis_mappings: Dictionary mapping output paths to their dimension axes.
            chunk_mappings: Dictionary mapping output paths to their chunk configurations.

        Returns:
            Tuple of (updated_chunk_sizes, updated_shard_coefficients).
        """
        processed_chunk_sizes = {}
        processed_shard_coeffs = {}
        for output_path, chunk_config in chunk_mappings.items():
            axes = axis_mappings[output_path]
            final_chunk_sizes = []
            final_shard_coeffs = []
            for axis in axes:
                chunk_size = chunk_sizes[axis] or chunk_config[axes.index(axis)]
                final_chunk_sizes.append(chunk_size)
                final_shard_coeffs.append(shard_coefficients[axis])
            processed_chunk_sizes[output_path] = final_chunk_sizes
            processed_shard_coeffs[output_path] = final_shard_coeffs
        return (processed_chunk_sizes, processed_shard_coeffs)

    def write_arrays(self, output_dir, compute=True, use_tensorstore=False,
                     rechunk_method='tasks', **kwargs):
        """Write processed arrays to storage.

        Args:
            output_dir: Base output directory.
            compute: Whether to compute the result immediately.
            use_tensorstore: Whether to use tensorstore for storage.
            rechunk_method: Method to use for rechunking ('auto', 'rechunker', etc.).
            **kwargs: Additional arguments for array storage.

        Returns:
            Results of the storage operation.
        """
        output_dir = os.path.abspath(output_dir)
        storage_options = kwargs.copy()
        if storage_options.get('use_gpu', False):
            self.to_cupy()
        if storage_options.get('squeeze', False):
            self.squeeze_dataset()
        if storage_options.get('dimension_order'):
            self.transpose_dataset(storage_options['dimension_order'])
        storage_options.update({'zarr_format': self._zarr_format, 'verbose': self._verbose})
        self.crop_dataset(**storage_options)
        batch_manager = self.batchdata
        sample_path_mapping = self.digested_arrays_sample_paths
        assert batch_manager is not None, "The 'batchdata' must be computed before writing arrays"
        (array_data, dimension_scales, dimension_axes, dimension_units,
         chunk_configs, channel_meta) = self._prepare_array_metadata(batch_manager,
                                                                     sample_path_mapping)
        path_mappings = {
            'arrays': self._create_output_path_mapping(output_dir, array_data, sample_path_mapping),
            'scales': self._create_output_path_mapping(output_dir, dimension_scales, sample_path_mapping),
            'axes': self._create_output_path_mapping(output_dir, dimension_axes, sample_path_mapping),
            'units': self._create_output_path_mapping(output_dir, dimension_units, sample_path_mapping),
            'chunks': self._create_output_path_mapping(output_dir, chunk_configs, sample_path_mapping),
            'channels': self._create_output_path_mapping(output_dir, channel_meta, sample_path_mapping)}
        chunk_sizes = {'t': storage_options.get('time_chunk'),
                       'c': storage_options.get('channel_chunk'),
                       'z': storage_options.get('z_chunk'),
                       'y': storage_options.get('y_chunk'),
                       'x': storage_options.get('x_chunk')}
        shard_coefficients = {'t': storage_options.get('time_shard_coef', 1),
                              'c': storage_options.get('channel_shard_coef', 1),
                              'z': storage_options.get('z_shard_coef', 3),
                              'y': storage_options.get('y_shard_coef', 3),
                              'x': storage_options.get('x_shard_coef', 3)}
        processed_chunk_sizes, processed_shard_coeffs = self._process_chunking_configurations(
            chunk_sizes, shard_coefficients, path_mappings['axes'], path_mappings['chunks'])
        storage_results = store_arrays(path_mappings['arrays'], output_dir,
                                       axes=path_mappings['axes'],
                                       scales=path_mappings['scales'],
                                       units=path_mappings['units'],
                                       output_chunks=processed_chunk_sizes,
                                       output_shard_coefficients=processed_shard_coeffs,
                                       use_tensorstore=use_tensorstore, compute=compute,
                                       rechunk_method=rechunk_method,
                                       channel_meta=path_mappings['channels'] or None,
                                       **storage_options)
        self.flatarrays = path_mappings['arrays']
        if storage_options.get('save_omexml', False):
            manager_paths = {os.path.join(output_dir,
                                          f'{name}.zarr' if not name.endswith('zarr') else name):
                             batch_manager.managers[name]
                             for name, file_path in sample_path_mapping.items()}
            for output_path, manager in manager_paths.items():
                if manager.omemeta is None:
                    manager.create_omemeta()
                manager.save_omexml(output_path)
        return storage_results
```
class BridgeBase: def __init__(self, input_path: Union[str, Path], includes=None, excludes=None, metadata_path=None, series=None, client=None, zarr_format=2, verbose=False): ''' Initialize the BridgeBase class. This class is the main entry point for converting and processing image data. Args: input_path (Union[str, Path]): Path to the input file or directory. includes (optional): Patterns of filenames to include. excludes (optional): Patterns of filenames to exclude. metadata_path (optional): Path to metadata file if any. series (optional): Series index or name to process. client (optional): Dask client for parallel processing. zarr_format (int, optional): Zarr format version. Defaults to 2. verbose (bool, optional): Enable verbose logging. Defaults to False. ''' pass def set_dask_temp_dir(self, temp_dir: Union[str, Path]='auto'): ''' Set the temporary directory to store dask intermediate results. If the argument is 'auto' or None, the function will create a temporary directory in the system's default temporary directory and store the path in self._dask_temp_dir. If the argument is a string or Path, the function will create a temporary directory at the given path and store the path in self._dask_temp_dir. Parameters ---------- temp_dir : Union[str, Path], optional The name of the temporary directory to store the intermediate results. Defaults to 'auto'. ''' pass def read_dataset(self, verified_for_cluster, chunks_yx=None, readers_params={}): ''' - If the input path is a directory, can read single or multiple files from it. - If the input path is a file, can read a single image from it. - If the input path is a file with multiple series, can currently only read one series from it. Reading multiple series is currently not supported. - If the input path is a csv file with filepaths and conversion parameters, can read the filepaths and conversion parameters from it. :return: ''' pass def digest(self, time_tag: Union[str, tuple]=None, channel_tag: Union[str, tuple]=None, z_tag: Union[str, tuple]=None, y_tag: Union[str, tuple]=None, x_tag: Union[str, tuple]=None, axes_of_concatenation: Union[int, tuple, str]=None, metadata_reader: str='bfio', **kwargs): ''' Digest the input data. This optionally involves concatenating multiple images along specified axes. Parameters ---------- time_tag : Union[str, tuple], optional The tag for the time axis. Defaults to None. channel_tag : Union[str, tuple], optional The tag for the channel axis. Defaults to None. z_tag : Union[str, tuple], optional The tag for the z axis. Defaults to None. y_tag : Union[str, tuple], optional The tag for the y axis. Defaults to None. x_tag : Union[str, tuple], optional The tag for the x axis. Defaults to None. axes_of_concatenation : Union[int, tuple, str], optional The axes to concatenate. Defaults to None. Examples -------- >>> # For a set of files with patterns in the file path in the following format: >>> # ["timepoint0_channel0_slice0.tif",...,"timepoint12_channel2_slice25.tif"] ] >>> # Concatenate only along specific axes: >>> bridge.digest( ... time_tag='timepoint', ... channel_tag='channel', ... z_tag='slice_', ... axes_of_concatenation='tz' # Concatenate only time and z dimensions ... ) ''' pass def _compute_pixel_metadata(self, **kwargs): '''Compute and update pixel metadata for the digested arrays. 
Args: series: Series identifier metadata_reader: Reader to use for metadata (default: 'bfio') **kwargs: Additional metadata including units and scales ''' pass def squeeze_dataset(self): pass def transpose_dataset(self, dimension_order=Union[str, tuple, list]): ''' Transpose the dataset according to the given dimension order. Parameters ---------- dimension_order : Union[str, tuple, list] The order of the dimensions in the transposed array. ''' pass def crop_dataset(self, **kwargs): pass def to_cupy(self): pass def _prepare_array_metadata(self, batch_manager, sample_path_mapping, autochunk=True): '''Prepare metadata dictionaries for array storage. Args: batch_manager: BatchManager instance containing array data sample_path_mapping: Dictionary mapping array names to their file paths Returns: Tuple containing dictionaries for arrays, scales, axes, units, and chunks ''' pass def _create_output_path_mapping(self, output_dir, nested_data, sample_paths): '''Create flattened path mappings for the given data dictionary. Args: output_dir: Base output directory nested_data: Nested dictionary of data to be flattened sample_paths: Dictionary mapping array names to their file paths Returns: Dictionary with output file paths as keys ''' pass def _process_chunking_configurations(self, chunk_sizes, shard_coefficients, axis_mappings, chunk_mappings): '''Process chunk and shard configurations for each array. Args: chunk_sizes: Dictionary of chunk sizes per dimension shard_coefficients: Dictionary of shard coefficients per dimension axis_mappings: Dictionary mapping output paths to their dimension axes chunk_mappings: Dictionary mapping output paths to their chunk configurations Returns: Tuple of (updated_chunk_sizes, updated_shard_coefficients) ''' pass def write_arrays(self, output_dir, compute=True, use_tensorstore=False, rechunk_method='tasks', **kwargs): '''Write processed arrays to storage. Args: output_dir: Base output directory compute: Whether to compute the result immediately use_tensorstore: Whether to use tensorstore for storage rechunk_method: Method to use for rechunking ('auto', 'rechunker', etc.) **kwargs: Additional arguments for array storage Returns: Results of the storage operation ''' pass
14
10
37
4
22
10
3
0.45
0
11
2
0
13
22
13
13
494
73
293
134
237
133
156
83
142
8
0
3
37
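Usage note: the BridgeBase record above documents a three-step workflow (read_dataset, then digest, then write_arrays). Below is a minimal sketch of that flow, assuming the eubi_bridge package is installed, that ./raw is a hypothetical directory of files named like timepoint0_channel0_slice0.tif (the pattern from the digest() docstring), and that the output directory is likewise hypothetical. The exact import path of BridgeBase is not shown in this record.

# BridgeBase import assumed available in scope (its module path is not shown in this record).
bridge = BridgeBase(
    input_path="./raw",   # hypothetical directory of TIFFs
    zarr_format=2,
    verbose=True,
)
bridge.read_dataset(verified_for_cluster=False)  # collect file paths and lazily read arrays
bridge.digest(
    time_tag="timepoint",       # tags taken from the digest() docstring example
    channel_tag="channel",
    z_tag="slice_",
    axes_of_concatenation="tz", # concatenate only time and z
)
bridge.write_arrays("./converted", compute=True)  # write OME-Zarr groups under ./converted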
327,904
Euro-BioImaging/EuBI-Bridge
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/fileset_io.py
Euro-BioImaging_EuBI-Bridge.eubi_bridge.fileset_io.BatchFile
from dask import delayed from natsort import natsorted import os import dask.array as da from eubi_bridge.base.data_manager import ArrayManager, ChannelIterator from typing import Dict, Iterable, List, Union import copy class BatchFile: def __init__(self, filepaths: Iterable[str], shapes: Iterable[tuple | list]=None, axis_tag0: Union[str, tuple]=None, axis_tag1: Union[str, tuple]=None, axis_tag2: Union[str, tuple]=None, axis_tag3: Union[str, tuple]=None, axis_tag4: Union[str, tuple]=None, arrays: Iterable[da.Array]=None): self.fileset = FileSet(filepaths, shapes=shapes, axis_tag0=axis_tag0, axis_tag1=axis_tag1, axis_tag2=axis_tag2, axis_tag3=axis_tag3, axis_tag4=axis_tag4, arrays=arrays) self.managers = None self.channel_managers = None def split_channel_groups(self): fileset = self.fileset sub_filesets = {} axis_tags = copy.deepcopy(fileset.axis_tags) if all([item is None for item in axis_tags]): groups = copy.deepcopy(fileset.path_dict) for key, value in groups.items(): groups[key] = [value] elif fileset.axis_tags[1] is None: groups = copy.deepcopy(fileset.group) else: axis_tags[1] = None groups = fileset._split_by(fileset.axis_tags[1]) return groups def _construct_managers(self, axes: Iterable[int]=[], series: int=None, metadata_reader: str='bfio', **kwargs): for axis in axes: self.fileset.concatenate_along(axis) arrays_ = self.fileset.get_concatenated_arrays() self.sample_paths = natsorted(arrays_.keys()) managers = {path: delayed(ArrayManager)(path, series=series, metadata_reader=metadata_reader, **kwargs) for path in self.sample_paths} self.managers = build_managers_dict(**managers).compute() return self.managers def _fuse_channels(self): channelsdict = {key: self.channel_managers[key].channels for key in natsorted(self.channel_managers.keys())} channelslist = [] for key in natsorted(channelsdict.keys()): channelslist.extend(channelsdict[key]) for path, manager in self.managers.items(): manager._channels = channelslist self.managers[path] = manager def _construct_channel_managers(self, series: int=None, metadata_reader: str='bfio', **kwargs): grs = self.split_channel_groups() self.channel_sample_paths = natsorted([grs[grname][0] for grname in grs]) managers = {path: delayed(ArrayManager)(path, series=series, metadata_reader=metadata_reader, **kwargs) for path in self.channel_sample_paths} self.channel_managers = build_managers_dict(**managers).compute() for path, manager in self.channel_managers.items(): manager._ensure_correct_channels() manager.fix_bad_channels() return self.channel_managers def _complete_process(self, axes: Iterable[int]=[]): if self.managers is None: raise ValueError('Managers have not been constructed in advance.') if self.channel_managers is None: raise ValueError('Channel managers have not been constructed in advance.') if 1 in axes: self._fuse_channels() self.channels_per_output = {manager.path: manager.channels for manager in self.managers.values()} def _update_nonunique_channel_colors(self, channels): colors = [channel['color'] for channel in channels] if len(set(colors)) < len(colors): chn = ChannelIterator(num_channels=len(colors)) for channel, _channel in zip(channels, chn._channels): channel['color'] = _channel['color'] return channels def get_output_dicts(self, root_path, path_separator: str='-'): fileset = self.fileset root_path_ = os.path.normpath(root_path).split(os.sep) root_path_top = [] for item in root_path_: if '*' in item: break root_path_top.append(item) if os.name == 'nt': drive, _ = os.path.splitdrive(root_path) root_path = 
os.path.join(drive + os.sep, *root_path_top) else: root_path = os.path.join(os.sep, *root_path_top) arrays_ = fileset.get_concatenated_arrays() arrays, channels, sample_paths, managers = ({}, {}, {}, {}) for key, vals in arrays_.items(): updated_key, arr = vals new_key = os.path.relpath(updated_key, root_path) new_key = os.path.splitext(new_key)[0] new_key = new_key.replace(os.sep, path_separator) arrays[new_key] = arrays_[key][1] sample_paths[new_key] = key self.channels_per_output[key] = self._update_nonunique_channel_colors(self.channels_per_output[key]) channels[new_key] = self.channels_per_output[key] managers[new_key] = self.managers[key] managers[new_key]._channels = self.channels_per_output[key] return (arrays, sample_paths, managers)
class BatchFile: def __init__(self, filepaths: Iterable[str], shapes: Iterable[tuple | list]=None, axis_tag0: Union[str, tuple]=None, axis_tag1: Union[str, tuple]=None, axis_tag2: Union[str, tuple]=None, axis_tag3: Union[str, tuple]=None, axis_tag4: Union[str, tuple]=None, arrays: Iterable[da.Array]=None): pass def split_channel_groups(self): pass def _construct_managers(self, axes: Iterable[int]=[], series: int=None, metadata_reader: str='bfio', **kwargs): pass def _fuse_channels(self): pass def _construct_channel_managers(self, series: int=None, metadata_reader: str='bfio', **kwargs): pass def _complete_process(self, axes: Iterable[int]=[]): pass def _update_nonunique_channel_colors(self, channels): pass def get_output_dicts(self, root_path, path_separator: str='-'): pass
9
0
20
2
18
1
3
0.06
0
10
3
0
8
6
8
8
174
25
142
67
108
8
83
42
74
5
0
2
24
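Usage note: BatchFile groups files by axis tags embedded in their names (axis_tag0 for time, axis_tag1 for channels, and so on), matching each tag followed by digits. A self-contained sketch of that naming convention using only the standard library follows; the file names and the helper function are hypothetical illustrations, not the library's own get_matches/get_numerics helpers.

import re

filepaths = [
    "img_timepoint0_channel0.tif",
    "img_timepoint0_channel1.tif",
    "img_timepoint1_channel0.tif",
]

def values_for_tag(paths, tag):
    # Mirror the tag-plus-digits convention the fileset grouping code relies on.
    return [re.search(rf"{re.escape(tag)}(\d+)", p).group(1) for p in paths]

print(values_for_tag(filepaths, "channel"))    # ['0', '1', '0']
print(values_for_tag(filepaths, "timepoint"))  # ['0', '0', '1']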
327,905
Euro-BioImaging/EuBI-Bridge
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/fileset_io.py
Euro-BioImaging_EuBI-Bridge.eubi_bridge.fileset_io.FileSet
from natsort import natsorted import copy import numpy as np import dask.array as da from typing import Dict, Iterable, List, Union class FileSet: """ A class to manage file paths and their shapes for multi-dimensional data. This class handles file paths and their corresponding array data, supporting operations like concatenation along specified axes. It's designed to work with up to 5 dimensions (t, c, z, y, x). """ AXIS_DICT = {0: 't', 1: 'c', 2: 'z', 3: 'y', 4: 'x'} def __init__(self, filepaths: Iterable[str], shapes: Iterable[tuple | list]=None, axis_tag0: Union[str, tuple]=None, axis_tag1: Union[str, tuple]=None, axis_tag2: Union[str, tuple]=None, axis_tag3: Union[str, tuple]=None, axis_tag4: Union[str, tuple]=None, arrays: Iterable[da.Array]=None): """ Initialize the FileSet class. Args: filepaths: The file paths of the arrays. shapes: The shapes of the arrays. Required if arrays is not provided. axis_tag0-4: Tags for each axis (t, c, z, y, x). arrays: The arrays. If provided, shapes can be None. """ if shapes is None and arrays is None: raise ValueError('Either shapes or arrays must be provided.') if arrays is not None: self.array_dict = dict(zip(filepaths, arrays)) shapes = [arr.shape for arr in arrays] else: self.array_dict = None self.shape_dict = dict(zip(filepaths, shapes)) self.axis_tags = [axis_tag0, axis_tag1, axis_tag2, axis_tag3, axis_tag4] self.dimension_tags = [] self.specified_axes = [] for axis, tag in enumerate(self.axis_tags): if tag is not None: self.dimension_tags.append(tag) self.specified_axes.append(axis) self.group = {'': list(filepaths)} self.slice_dict = {path: tuple((slice(0, size) for size in shape)) for path, shape in self.shape_dict.items()} self.path_dict = dict(zip(filepaths, filepaths)) def get_numerics_per_dimension_tag(self, dimension_tag: str) -> List[str]: """ Extract numeric values from filepaths for a given dimension tag. Args: dimension_tag (str): The dimension tag to extract numerics for (e.g., 't' for time). Returns: list: List of numeric strings extracted from the filepaths. Example: >>> f = FileSet(['file_t0001_channel1.ome.tif', 'file_t0002_channel2.ome.tif']) >>> f.get_numerics_per_dimension_tag('t') ['0001', '0002'] """ filepaths = list(self.group.values())[0] matches = get_matches(f'{dimension_tag}\\d+', filepaths) spans = [match.string[match.start():match.end()] for match in matches] numerics = [get_numerics(span)[0] for span in spans] return numerics def _csplit_by(self, tup: tuple) -> dict: """ Split the filepaths in the group by the given dimension tags. Args: tup (tuple): A tuple of dimension tags to split by. Returns: dict: The split group as a dictionary. """ group = copy.deepcopy(self.group) for key, filepaths in group.items(): alpha_dict = {key: [] for key in tup} for tag in tup: matches = get_matches(f'{tag}', filepaths) spans = [match.string[match.start():match.end()] for match in matches] matched_paths = [match.string for match in matches] alpha = copy.deepcopy(spans) alpha_categories = np.unique(alpha).tolist() assert len(alpha_categories) == 1, f'Number of categories is not 1: {alpha_categories}' alpha_tag = alpha_categories[0] alpha_dict[alpha_tag] = matched_paths group = alpha_dict return group def _split_by(self, *args): """ Split the filepaths in the group by the given dimension tags. Args: *args (str): The dimension tags to split by. Returns: dict: The split group as a dictionary. 
""" group = copy.deepcopy(self.group) for dim in args: if dim not in self.dimension_tags: raise ValueError(f"The dimension '{dim}' is not among the given dimension_tags.") if isinstance(dim, (tuple, list)): group = self._csplit_by(dim) else: numeric_dict = {} for key, filepaths in group.items(): matches = get_matches(f'{dim}\\d+', filepaths) spans = [match.string[match.start():match.end()] for match in matches] spans = [span.replace(dim, '') for span in spans] numerics = [get_numerics(span)[0] for span in spans] numeric_categories = np.unique(numerics).tolist() for idx, num in enumerate(numerics): for i, category in enumerate(numeric_categories): if num == category: if key != '': tag_key = ''.join([key, '-', dim, num]) else: tag_key = ''.join([dim, num]) if not tag_key in numeric_dict: numeric_dict[tag_key] = [] numeric_dict[tag_key].append(filepaths[idx]) group = numeric_dict return group def concatenate_along(self, axis: int) -> dict: """ Concatenate arrays along the specified axis. Args: axis: The axis along which to concatenate the arrays. Returns: dict: The grouped file paths after concatenation. Raises: ValueError: If the axis is not among the given dimension tags. """ dimension_tag = self.axis_tags[axis] if dimension_tag not in self.dimension_tags: raise ValueError(f"The dimension '{dimension_tag}' is not among the given dimension_tags.") to_split = [tag for tag in self.dimension_tags if tag != dimension_tag] group = self._split_by(*to_split) for key, paths in group.items(): sorted_paths = natsorted(paths) group_slices = [self.slice_dict[path] for path in sorted_paths] group_shapes = [self.shape_dict[path] for path in sorted_paths] group_reduced_paths = [self.path_dict[path] for path in sorted_paths] new_slices = accumulate_slices_along_axis(group_shapes, axis, group_slices) new_shape = concatenate_shapes_along_axis(group_shapes, axis) new_reduced_path = reduce_paths_flexible(group_reduced_paths, dimension_tag, replace_with=f'_{self.AXIS_DICT[axis]}set') new_reduced_paths = [new_reduced_path] * len(group_reduced_paths) if self.array_dict is not None: group_arrays = [self.array_dict[path] for path in sorted_paths] new_array = da.concatenate(group_arrays, axis=axis) for path, slc, reduced_path in zip(sorted_paths, new_slices, new_reduced_paths): self.slice_dict[path] = slc self.shape_dict[path] = new_shape self.path_dict[path] = reduced_path if self.array_dict is not None: self.array_dict[path] = new_array return group def get_concatenated_arrays(self) -> Dict[str, tuple]: """ Get a dictionary of concatenated arrays with their metadata. Returns: dict: A dictionary where keys are input paths and values are tuples of (updated_path, array_data). """ unique_paths = [] unique_input_paths = [] unique_ids = [] for key in natsorted(self.path_dict): path = self.path_dict[key] if path not in unique_paths: unique_input_paths.append(key) unique_paths.append(path) unique_ids.append(key) unique_arrays = [self.array_dict[path] for path in unique_ids] return {key: (path, arr) for key, path, arr in zip(unique_input_paths, unique_paths, unique_arrays)}
class FileSet: ''' A class to manage file paths and their shapes for multi-dimensional data. This class handles file paths and their corresponding array data, supporting operations like concatenation along specified axes. It's designed to work with up to 5 dimensions (t, c, z, y, x). ''' def __init__(self, filepaths: Iterable[str], shapes: Iterable[tuple | list]=None, axis_tag0: Union[str, tuple]=None, axis_tag1: Union[str, tuple]=None, axis_tag2: Union[str, tuple]=None, axis_tag3: Union[str, tuple]=None, axis_tag4: Union[str, tuple]=None, arrays: Iterable[da.Array]=None): ''' Initialize the FileSet class. Args: filepaths: The file paths of the arrays. shapes: The shapes of the arrays. Required if arrays is not provided. axis_tag0-4: Tags for each axis (t, c, z, y, x). arrays: The arrays. If provided, shapes can be None. ''' pass def get_numerics_per_dimension_tag(self, dimension_tag: str) -> List[str]: ''' Extract numeric values from filepaths for a given dimension tag. Args: dimension_tag (str): The dimension tag to extract numerics for (e.g., 't' for time). Returns: list: List of numeric strings extracted from the filepaths. Example: >>> f = FileSet(['file_t0001_channel1.ome.tif', 'file_t0002_channel2.ome.tif']) >>> f.get_numerics_per_dimension_tag('t') ['0001', '0002'] ''' pass def _csplit_by(self, tup: tuple) -> dict: ''' Split the filepaths in the group by the given dimension tags. Args: tup (tuple): A tuple of dimension tags to split by. Returns: dict: The split group as a dictionary. ''' pass def _split_by(self, *args): ''' Split the filepaths in the group by the given dimension tags. Args: *args (str): The dimension tags to split by. Returns: dict: The split group as a dictionary. ''' pass def concatenate_along(self, axis: int) -> dict: ''' Concatenate arrays along the specified axis. Args: axis: The axis along which to concatenate the arrays. Returns: dict: The grouped file paths after concatenation. Raises: ValueError: If the axis is not among the given dimension tags. ''' pass def get_concatenated_arrays(self) -> Dict[str, tuple]: ''' Get a dictionary of concatenated arrays with their metadata. Returns: dict: A dictionary where keys are input paths and values are tuples of (updated_path, array_data). ''' pass
7
7
37
5
21
12
5
0.6
0
9
0
0
6
8
6
6
249
39
132
73
115
79
103
63
96
10
0
7
28
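Usage note: a minimal sketch of FileSet's bookkeeping, assuming eubi_bridge is installed. It constructs the set from paths plus shapes (no arrays), then concatenates along the time axis; the module-level helpers it calls (slice accumulation, path reduction) are part of eubi_bridge.fileset_io, so the printed values are expectations rather than guarantees.

from eubi_bridge.fileset_io import FileSet

paths = ["sample_t0.tif", "sample_t1.tif"]
shapes = [(1, 1, 5, 64, 64), (1, 1, 5, 64, 64)]  # tczyx per file

fs = FileSet(paths, shapes=shapes, axis_tag0="t")  # axis 0 (time) is tagged "t"
fs.concatenate_along(0)  # merge along time; updates shape_dict, slice_dict, path_dict

print(fs.shape_dict[paths[0]])  # expected (2, 1, 5, 64, 64) after concatenation
print(fs.path_dict[paths[0]])   # reduced path with the time tag collapsed to "_tset"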
327,906
Euro-BioImaging/EuBI-Bridge
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/ngff/multiscales.py
Euro-BioImaging_EuBI-Bridge.eubi_bridge.ngff.multiscales.NGFFMetadataHandler
import zarr import copy import numpy as np from typing import Optional, Dict, List, Any, Tuple, Union, Iterable, ClassVar from pathlib import Path class NGFFMetadataHandler: """Class for handling NGFF metadata in zarr groups.""" SUPPORTED_VERSIONS: ClassVar[List[str]] = ['0.4', '0.5'] def __init__(self) -> None: """Initialize an empty metadata handler.""" self.zarr_group: Optional[zarr.Group] = None self.metadata: Optional[Dict[str, Any]] = None self._pending_changes: bool = False self.version: Optional[str] = None self.zarr_format: Optional[int] = None def __enter__(self) -> 'NGFFMetadataHandler': return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: if self._pending_changes: self.save_changes() @property def multiscales(self) -> Dict[str, Any]: """Get the multiscales metadata.""" if not self.metadata or 'multiscales' not in self.metadata: raise RuntimeError('No multiscales metadata available') return self.metadata['multiscales'][0] @property def omero(self) -> Dict[str, Any]: """Get the multiscales metadata.""" if not self.metadata or 'omero' not in self.metadata: raise RuntimeError('No omero metadata available') return self.metadata['omero'] def _validate_version_and_format(self, version: str, zarr_format: int) -> None: """Validate version and zarr format compatibility.""" if version not in self.SUPPORTED_VERSIONS: raise ValueError(f'Unsupported version {version}. Supported versions: {self.SUPPORTED_VERSIONS}') if zarr_format not in (2, 3): raise ValueError(f'Unsupported Zarr format: {zarr_format}') if version == '0.5' and zarr_format != 3: raise ValueError('NGFF version 0.5 requires Zarr format 3') def _validate_axis_inputs(self, axis_order: str, units: Optional[List[str]]) -> None: """Validate axis order and units inputs.""" if not all((ax in 'tczyx' for ax in axis_order)): raise ValueError('Invalid axis order. 
Must contain only t,c,z,y,x') if units is not None: if not len(axis_order) - len(units) in [0, 1]: raise ValueError('Number of units must match number of axes except channel') elif len(axis_order) - len(units) == 1: if 'c' not in axis_order: raise ValueError('Only channel axis can be kept without a unit.') def _get_dataset(self, path: str) -> Optional[Dict[str, Any]]: """Helper method to find dataset by path.""" path = str(path) for dataset in self.multiscales['datasets']: if dataset['path'] == path: return dataset return None def _update_coordinate_transformation(self, dataset: Dict[str, Any], transform_type: str, values: List[float]) -> None: """Update or add a coordinate transformation.""" for transform in dataset['coordinateTransformations']: if transform['type'] == transform_type: transform[transform_type] = values break else: if transform_type == 'scale': dataset['coordinateTransformations'].insert(0, {'type': transform_type, transform_type: values}) else: dataset['coordinateTransformations'].append({'type': transform_type, transform_type: values}) def get_metadata_state(self) -> Dict[str, Any]: """Get a copy of current metadata state.""" if self.metadata is None: raise RuntimeError('No metadata loaded or created') return copy.deepcopy(self.metadata) def get_summary(self) -> Dict[str, Any]: """Get a summary of the metadata.""" if not self.metadata: raise RuntimeError('No metadata available') return {'version': self.version, 'zarr_format': self.zarr_format, 'axes': self._axis_names, 'units': self._units, 'n_datasets': len(self.multiscales['datasets']), 'name': self.multiscales['name']} def create_new(self, version: str='0.5', name: str='Series 0') -> 'NGFFMetadataHandler': """Create a new metadata handler with empty metadata of specified version.""" self._validate_version_and_format(version, 3 if version == '0.5' else 2) multiscale_metadata = {'name': name, 'axes': [], 'datasets': [], 'metadata': {}} if version == '0.5': self.metadata = {'version': version, 'multiscales': [multiscale_metadata], 'omero': {'channels': [], 'rdefs': {'defaultT': 0, 'model': 'greyscale', 'defaultZ': 0}}, '_creator': {'name': 'NGFFMetadataHandler', 'version': '1.0'}} else: multiscale_metadata['version'] = version self.metadata = {'_creator': {'name': 'NGFFMetadataHandler', 'version': '1.0'}, 'multiscales': [multiscale_metadata], 'omero': {'channels': [], 'rdefs': {'defaultT': 0, 'model': 'greyscale', 'defaultZ': 0}}} self.version = version self.zarr_format = 3 if version == '0.5' else 2 self._pending_changes = True return self def connect_to_group(self, store: Union[zarr.Group, str, Path], mode: str='a') -> None: """Connect to a zarr group for reading/writing metadata.""" if not isinstance(store, (zarr.Group, str, Path)): raise ValueError('Store must be a zarr group or path') if isinstance(store, zarr.Group): self.zarr_group = store elif is_zarr_group(store): self.zarr_group = zarr.open_group(store, mode=mode) else: zarr_version = self.zarr_format if self.zarr_format else 2 self.zarr_group = zarr.open_group(store, mode=mode, zarr_version=zarr_version) store_format = self.zarr_group.info._zarr_format self.zarr_format = store_format self.version = '0.5' if store_format == 3 else '0.4' self._validate_version_and_format(self.version, store_format) def read_metadata(self): """Read metadata from connected zarr group.""" if self.zarr_group is None: raise RuntimeError('No zarr group connected. 
Call connect_to_group first.') if 'ome' in self.zarr_group.attrs: self.metadata = self.zarr_group.attrs['ome'] self.version = self.metadata['version'] elif 'multiscales' in self.zarr_group.attrs: self.metadata = {'multiscales': self.zarr_group.attrs['multiscales']} self.version = self.metadata['multiscales'][0]['version'] else: raise ValueError('No valid metadata found in zarr group') if 'omero' in self.zarr_group.attrs: self.metadata['omero'] = self.zarr_group.attrs['omero'] self.zarr_format = 3 if self.version == '0.5' else 2 self._pending_changes = False return self def save_changes(self) -> None: """Save current metadata to connected zarr group.""" if not self._pending_changes: return if self.zarr_group is None: raise RuntimeError('No zarr group connected. Call connect_to_group first.') if self.metadata.get('version', '') == '0.5': self.zarr_group.attrs['ome'] = self.metadata else: self.zarr_group.attrs['multiscales'] = self.metadata['multiscales'] if 'omero' in self.metadata: self.zarr_group.attrs['omero'] = self.metadata['omero'] if '_creator' in self.metadata: self.zarr_group.attrs['_creator'] = self.metadata['_creator'] self._pending_changes = False def update_all_datasets(self, scale: Optional[List[float]]=None, translation: Optional[List[float]]=None) -> None: """Update all datasets with new scale and/or translation values.""" for dataset in self.multiscales['datasets']: if scale is not None: self._update_coordinate_transformation(dataset, 'scale', scale) if translation is not None: self._update_coordinate_transformation(dataset, 'translation', translation) self._pending_changes = True def autocompute_omerometa(self, n_channels: int, dtype) -> None: """Add multiple channels to the OMERO metadata.""" omero_meta = generate_channel_metadata(n_channels, dtype) self.metadata['omero'] = omero_meta['omero'] self._pending_changes = True def add_channel(self, color: str='808080', label: str=None, dtype=None) -> None: """Add a channel to the OMERO metadata.""" assert dtype is not None, f'dtype cannot be None' min = 0 if np.issubdtype(dtype, np.integer): max = int(np.iinfo(dtype).max) elif np.issubdtype(dtype, np.floating): max = float(np.finfo(dtype).max) else: raise ValueError(f'Unsupported dtype {dtype}') if 'omero' not in self.metadata: self.metadata['omero'] = {'channels': [], 'rdefs': {'defaultT': 0, 'model': 'greyscale', 'defaultZ': 0}} channel = {'color': color, 'coefficient': 1, 'active': True, 'label': label or f"Channel {len(self.metadata['omero']['channels'])}", 'window': {'min': min, 'max': max, 'start': min, 'end': max}, 'family': 'linear', 'inverted': False} self.metadata['omero']['channels'].append(channel) self._pending_changes = True def get_channels(self) -> List[Dict[str, Any]]: """ Get a list of all channels with their labels and colors. Returns: List[Dict[str, Any]]: A list of dictionaries, where each dictionary contains 'label' and 'color' keys for a channel. 
""" if 'omero' not in self.metadata or 'channels' not in self.metadata['omero']: return [] return [{'label': channel.get('label', f'Channel {i}'), 'color': channel.get('color', '808080')} for i, channel in enumerate(self.metadata['omero']['channels'])] def parse_axes(self, axis_order: str, units: Optional[List[str]]=None) -> None: """Update axes information with new axis order and units.""" if self.metadata is None: raise RuntimeError('No metadata loaded or created.') self._validate_axis_inputs(axis_order, units) if units is None: units = [None] * len(axis_order) if len(axis_order) - len(units) == 1: if 'c' in axis_order: idx = axis_order.index('c') units.insert(idx, None) new_axes = [] for ax_name, unit in zip(axis_order, units): axis_data = {'name': ax_name, 'type': {'t': 'time', 'c': 'channel', 'z': 'space', 'y': 'space', 'x': 'space'}.get(ax_name, 'custom')} if unit is not None: axis_data['unit'] = unit new_axes.append(axis_data) self.metadata['multiscales'][0]['axes'] = new_axes self._pending_changes = True def add_dataset(self, path: Union[str, int], scale: Iterable[Union[int, float]], translation: Optional[Iterable[Union[int, float]]]=None, overwrite: bool=False) -> None: """Add a dataset with scale and optional translation.""" path = str(path) scale = list(map(float, scale)) if translation is not None: translation = list(map(float, translation)) dataset_data = {'path': path, 'coordinateTransformations': [{'type': 'scale', 'scale': scale}]} if translation is not None: dataset_data['coordinateTransformations'].append({'type': 'translation', 'translation': translation}) existing_paths = self.get_resolution_paths() if path in existing_paths: if not overwrite: raise ValueError(f"Dataset path '{path}' already exists") idx = existing_paths.index(path) self.metadata['multiscales'][0]['datasets'][idx] = dataset_data else: self.metadata['multiscales'][0]['datasets'].append(dataset_data) self.metadata['multiscales'][0]['datasets'].sort(key=lambda x: int(x['path']) if x['path'].isdigit() else float('inf')) self._pending_changes = True def update_scale(self, path: Union[str, int], scale: Iterable[Union[int, float]]) -> None: """Update scale for a specific dataset.""" dataset = self._get_dataset(str(path)) if dataset: self._update_coordinate_transformation(dataset, 'scale', list(map(float, scale))) self._pending_changes = True def update_translation(self, path: Union[str, int], translation: Iterable[Union[int, float]]) -> None: """Update translation for a specific dataset.""" dataset = self._get_dataset(str(path)) if dataset: self._update_coordinate_transformation(dataset, 'translation', list(map(float, translation))) self._pending_changes = True def get_resolution_paths(self) -> List[str]: """Get paths to all resolution levels.""" return [ds['path'] for ds in self.multiscales['datasets']] @property def _axis_names(self) -> List[str]: """Get list of axis names.""" return [ax['name'] for ax in self.multiscales['axes']] @property def axis_order(self) -> str: """Get axis names as str.""" return ''.join(self._axis_names) @property def _units(self) -> Dict[str, Optional[str]]: """Get dictionary of axis units.""" return {ax['name']: ax.get('unit') for ax in self.multiscales['axes']} @property def unit_dict(self): return self._units @property def unit_list(self): return [self._units[ax] for ax in self._axis_names] @property def ndim(self) -> int: return len(self.axis_order) @property def resolution_paths(self) -> List[str]: return [item['path'] for item in self.multiscales['datasets']] @property def 
nlayers(self) -> int: return len(self.resolution_paths) @property def channels(self): return self.get_channels() def validate_metadata(self) -> bool: """Validate current metadata structure.""" if not self.metadata: return False try: if self.version == '0.5': if not all((key in self.metadata for key in {'version', 'multiscales'})): return False else: if 'multiscales' not in self.metadata: return False if 'version' not in self.metadata['multiscales'][0]: return False required_keys = {'name', 'axes', 'datasets'} return all((key in self.multiscales for key in required_keys)) except (KeyError, IndexError, TypeError): return False def get_scaledict(self, pth: Union[str, int]): idx = self.resolution_paths.index(pth) scale = self.multiscales['datasets'][idx]['coordinateTransformations'][0]['scale'] return dict(zip(self.axis_order, scale)) def get_base_scaledict(self): basepath = self.resolution_paths[0] return self.get_scaledict(basepath) def get_scale(self, pth: Union[str, int]): scaledict = self.get_scaledict(pth) return [scaledict[ax] for ax in self.axis_order] def get_base_scale(self): basepath = self.resolution_paths[0] return self.get_scale(basepath) def set_scale(self, pth: Union[str, int]='auto', scale: Union[tuple, list, dict]='auto'): if isinstance(scale, tuple): scale = list(scale) ch_index = self.axis_order.index('c') scale[ch_index] = 1 elif hasattr(scale, 'tolist'): scale = scale.tolist() elif isinstance(scale, dict): assert all([ax in self.axis_order for ax in scale]) fullscale = self.get_scale(pth) scaledict = dict(zip(self.axis_order, fullscale)) scaledict.update(**scale) scale = [scaledict[ax] for ax in self.axis_order] if pth == 'auto': pth = self.resolution_paths[0] if scale == 'auto': pth = self.scales[pth] idx = self.resolution_paths.index(pth) self.multiscales['datasets'][idx]['coordinateTransformations'][0]['scale'] = scale self._pending_changes = True return def update_scales(self, reference_scale: Union[tuple, list], scale_factors: dict): for pth, factor in scale_factors.items(): new_scale = np.multiply(factor, reference_scale) self.set_scale(pth, new_scale) return self def update_unitlist(self, unitlist=None): if isinstance(unitlist, tuple): unitlist = list(unitlist) assert isinstance(unitlist, list) self.parse_axes(self.axis_order, unitlist) return self @property def scales(self): scales = {} for pth in self.resolution_paths: scl = self.get_scale(pth) scales[pth] = scl return scales @property def scaledict(self): scales = {} for pth in self.resolution_paths: scl = self.get_scaledict(pth) scales[pth] = scl return scales def retag(self, new_tag: str): self.multiscales['name'] = new_tag self._pending_changes = True return self
class NGFFMetadataHandler: '''Class for handling NGFF metadata in zarr groups.''' def __init__(self) -> None: '''Initialize an empty metadata handler.''' pass def __enter__(self) -> 'NGFFMetadataHandler': pass def __exit__(self, exc_type, exc_val, exc_tb) -> None: pass @property def multiscales(self) -> Dict[str, Any]: '''Get the multiscales metadata.''' pass @property def omero(self) -> Dict[str, Any]: '''Get the multiscales metadata.''' pass def _validate_version_and_format(self, version: str, zarr_format: int) -> None: '''Validate version and zarr format compatibility.''' pass def _validate_axis_inputs(self, axis_order: str, units: Optional[List[str]]) -> None: '''Validate axis order and units inputs.''' pass def _get_dataset(self, path: str) -> Optional[Dict[str, Any]]: '''Helper method to find dataset by path.''' pass def _update_coordinate_transformation(self, dataset: Dict[str, Any], transform_type: str, values: List[float]) -> None: '''Update or add a coordinate transformation.''' pass def get_metadata_state(self) -> Dict[str, Any]: '''Get a copy of current metadata state.''' pass def get_summary(self) -> Dict[str, Any]: '''Get a summary of the metadata.''' pass def create_new(self, version: str='0.5', name: str='Series 0') -> 'NGFFMetadataHandler': '''Create a new metadata handler with empty metadata of specified version.''' pass def connect_to_group(self, store: Union[zarr.Group, str, Path], mode: str='a') -> None: '''Connect to a zarr group for reading/writing metadata.''' pass def read_metadata(self): '''Read metadata from connected zarr group.''' pass def save_changes(self) -> None: '''Save current metadata to connected zarr group.''' pass def update_all_datasets(self, scale: Optional[List[float]]=None, translation: Optional[List[float]]=None) -> None: '''Update all datasets with new scale and/or translation values.''' pass def autocompute_omerometa(self, n_channels: int, dtype) -> None: '''Add multiple channels to the OMERO metadata.''' pass def add_channel(self, color: str='808080', label: str=None, dtype=None) -> None: '''Add a channel to the OMERO metadata.''' pass def get_channels(self) -> List[Dict[str, Any]]: ''' Get a list of all channels with their labels and colors. Returns: List[Dict[str, Any]]: A list of dictionaries, where each dictionary contains 'label' and 'color' keys for a channel. 
''' pass def parse_axes(self, axis_order: str, units: Optional[List[str]]=None) -> None: '''Update axes information with new axis order and units.''' pass def add_dataset(self, path: Union[str, int], scale: Iterable[Union[int, float]], translation: Optional[Iterable[Union[int, float]]]=None, overwrite: bool=False) -> None: '''Add a dataset with scale and optional translation.''' pass def update_scale(self, path: Union[str, int], scale: Iterable[Union[int, float]]) -> None: '''Update scale for a specific dataset.''' pass def update_translation(self, path: Union[str, int], translation: Iterable[Union[int, float]]) -> None: '''Update translation for a specific dataset.''' pass def get_resolution_paths(self) -> List[str]: '''Get paths to all resolution levels.''' pass @property def _axis_names(self) -> List[str]: '''Get list of axis names.''' pass @property def axis_order(self) -> str: '''Get axis names as str.''' pass @property def _units(self) -> Dict[str, Optional[str]]: '''Get dictionary of axis units.''' pass @property def unit_dict(self): pass @property def unit_list(self): pass @property def ndim(self) -> int: pass @property def resolution_paths(self) -> List[str]: pass @property def nlayers(self) -> int: pass @property def channels(self): pass def validate_metadata(self) -> bool: '''Validate current metadata structure.''' pass def get_scaledict(self, pth: Union[str, int]): pass def get_base_scaledict(self): pass def get_scaledict(self, pth: Union[str, int]): pass def get_base_scaledict(self): pass def set_scale(self, pth: Union[str, int]='auto', scale: Union[tuple, list, dict]='auto'): pass def update_scales(self, reference_scale: Union[tuple, list], scale_factors: dict): pass def update_unitlist(self, unitlist=None): pass @property def scales(self): pass @property def scaledict(self): pass def retag(self, new_tag: str): pass
58
27
11
1
9
1
3
0.13
0
19
0
0
44
5
44
44
530
71
415
133
323
52
276
86
231
7
0
3
114
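Usage note: NGFFMetadataHandler can build multiscales metadata entirely in memory before any zarr group is attached. A short sketch, assuming eubi_bridge and numpy are installed:

import numpy as np
from eubi_bridge.ngff.multiscales import NGFFMetadataHandler

handler = NGFFMetadataHandler().create_new(version="0.4", name="Series 0")
# Five axes; the channel axis is the one axis allowed to have no unit.
handler.parse_axes("tczyx", units=["second", "micrometer", "micrometer", "micrometer"])
handler.add_dataset(path=0, scale=[1.0, 1.0, 0.5, 0.1, 0.1])  # level-0 pixel sizes
handler.add_channel(color="FF0000", label="DAPI", dtype=np.uint16)
print(handler.get_summary())  # version, axes, units, n_datasets, name

Calling connect_to_group(path) on a zarr store and then save_changes() would persist these attributes to disk.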
327,907
Euro-BioImaging/EuBI-Bridge
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/ngff/multiscales.py
Euro-BioImaging_EuBI-Bridge.eubi_bridge.ngff.multiscales.Pyramid
import numpy as np import dask.array as da from eubi_bridge.base.scale import Downscaler import zarr from pathlib import Path class Pyramid: def __init__(self, gr: (zarr.Group, zarr.storage.StoreLike, Path, str)=None): self.meta = None self.gr = None if gr is not None: self.from_ngff(gr) def __repr__(self): return f'NGFF with {self.nlayers} layers.' def from_ngff(self, gr): self.meta = NGFFMetadataHandler() self.meta.connect_to_group(gr) self.meta.read_metadata() self.gr = self.meta.zarr_group return self def to_ngff(self, store: (zarr.Group, zarr.storage.StoreLike, Path, str), version: str='0.5'): newmeta = NGFFMetadataHandler() if is_zarr_group(store): self.gr = zarr.open_group(store, mode='a') newmeta.connect_to_group(self.gr) else: self.meta.create_new(version=version, name='Series 0') newmeta.save_changes() self.meta = newmeta return self @property def axes(self): return self.meta.axis_order @property def nlayers(self): return self.meta.nlayers @property def layers(self): return {path: self.gr[path] for path in self.gr.array_keys()} def get_dask_data(self): return {str(path): da.from_zarr(self.layers[path]) for path in self.gr.array_keys()} @property def dask_arrays(self): return self.get_dask_data() @property def base_array(self): return self.dask_arrays['0'] def update_scales(self, **kwargs): """ Automatically updates all pixel values for all layers based on provided pixel values for specific axes corresponding to the top resolution layer. :param kwargs: :return: """ hard = kwargs.get('hard', False) new_scaledict = self.meta.get_base_scaledict() for ax in self.meta.axis_order: if ax in kwargs: new_scaledict[ax] = kwargs.get(ax) new_scale = [new_scaledict[ax] for ax in self.meta.axis_order] shapes = [self.layers[key].shape for key in self.meta.resolution_paths] scale_factors = np.divide(shapes[0], shapes) scale_factordict = {pth: scale for pth, scale in zip(self.meta.resolution_paths, scale_factors.tolist())} self.meta.update_scales(reference_scale=new_scale, scale_factors=scale_factordict) if hard: self.meta.save_changes() return def update_units(self, **kwargs): """ Automatically updates all pixel units based on provided unit strings for each axis. :param kwargs: :return: """ hard = kwargs.get('hard', False) new_unitdict = self.meta.unit_dict for ax in self.meta.axis_order: if ax in kwargs: new_unitdict[ax] = kwargs.get(ax) new_unitlist = [new_unitdict[ax] for ax in self.meta.axis_order] self.meta.update_unitlist(unitlist=new_unitlist) @property def tag(self): return self.multimeta[0]['name'] def retag(self, new_tag: str, hard=False): self.meta.retag(new_tag) if hard: self.meta.save_changes() return self def update_downscaler(self, scale_factor=None, n_layers=1, downscale_method='simple', backend='numpy', **kwargs): min_dimension_size = kwargs.get('min_dimension_size', 64) darr = self.base_array shape = darr.shape if n_layers in (None, 'default', 'auto'): n_layers = calculate_n_layers(shape, scale_factor, min_dimension_size) if scale_factor is None: scale_factor = tuple([defaults.scale_factor_map[key] for key in self.axes]) scale = self.meta.scales['0'] scale_factor = tuple(np.minimum(darr.shape, scale_factor)) self.downscaler = Downscaler(array=darr, scale_factor=scale_factor, n_layers=n_layers, scale=scale, downscale_method=downscale_method, backend=backend) return self
class Pyramid: def __init__(self, gr: (zarr.Group, zarr.storage.StoreLike, Path, str)=None): pass def __repr__(self): pass def from_ngff(self, gr): pass def to_ngff(self, store: (zarr.Group, zarr.storage.StoreLike, Path, str), version: str='0.5'): pass @property def axes(self): pass @property def nlayers(self): pass @property def layers(self): pass def get_dask_data(self): pass @property def dask_arrays(self): pass @property def base_array(self): pass def update_scales(self, **kwargs): ''' Automatically updates all pixel values for all layers based on provided pixel values for specific axes corresponding to the top resolution layer. :param kwargs: :return: ''' pass def update_units(self, **kwargs): ''' Automatically updates all pixel units based on provided unit strings for each axis. :param kwargs: :return: ''' pass @property def tag(self): pass def retag(self, new_tag: str, hard=False): pass def update_downscaler(self, scale_factor=None, n_layers=1, downscale_method='simple', backend='numpy', **kwargs): pass
22
2
8
0
7
1
2
0.29
0
6
2
0
15
3
15
15
162
18
112
59
72
32
76
35
60
4
0
2
25
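Usage note: Pyramid wraps an existing OME-Zarr group and exposes its resolution levels as dask arrays. A hedged sketch, assuming eubi_bridge is installed and that an NGFF image already exists at the hypothetical path image.zarr:

from eubi_bridge.ngff.multiscales import Pyramid

pyr = Pyramid("image.zarr")   # reads the multiscales metadata from the group
print(pyr.axes, pyr.nlayers)  # e.g. 'tczyx' and the number of resolution levels
base = pyr.base_array         # dask array of resolution level '0'

# Rescale z/y/x pixel sizes of the top layer; lower layers are derived from
# the shape ratios. hard=True writes the change back to the store.
pyr.update_scales(z=2.0, y=0.25, x=0.25, hard=True)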
327,908
Euro-BioImaging/EuBI-Bridge
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/utils/dask_client_plugins.py
Euro-BioImaging_EuBI-Bridge.eubi_bridge.utils.dask_client_plugins.ForcefulShutdownPlugin
from distributed.diagnostics.plugin import SchedulerPlugin class ForcefulShutdownPlugin(SchedulerPlugin): def start(self, scheduler): for idx, worker in list(scheduler.workers.items()): print(f'Worker shutting down: {idx}') scheduler.remove_worker(worker.address, stimulus_id='shutdown')
class ForcefulShutdownPlugin(SchedulerPlugin): def start(self, scheduler): pass
2
0
4
0
4
0
2
0
1
0
0
0
1
0
1
1
5
0
5
3
3
0
5
3
3
2
1
1
2
327,909
Euro-BioImaging/EuBI-Bridge
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/utils/dask_client_plugins.py
Euro-BioImaging_EuBI-Bridge.eubi_bridge.utils.dask_client_plugins.ShutdownOnWorkerRemovalPlugin
import asyncio from distributed.diagnostics.plugin import SchedulerPlugin class ShutdownOnWorkerRemovalPlugin(SchedulerPlugin): async def remove_worker(self, scheduler, worker, **kwargs): for remaining in list(scheduler.workers.values()): await scheduler.remove_worker(remaining.address, stimulus_id='shutdown') print(f'Worker {worker} has been removed. Shutting down process.') await asyncio.sleep(5) await scheduler.close()
class ShutdownOnWorkerRemovalPlugin(SchedulerPlugin): async def remove_worker(self, scheduler, worker, **kwargs): pass
2
0
6
0
6
0
2
0
1
0
0
0
1
0
1
1
7
0
7
2
5
0
7
2
5
2
1
1
2
327,910
Euro-BioImaging/EuBI-Bridge
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/Euro-BioImaging_EuBI-Bridge/eubi_bridge/utils/dask_client_plugins.py
Euro-BioImaging_EuBI-Bridge.eubi_bridge.utils.dask_client_plugins.ShutdownPlugin
import asyncio from distributed.diagnostics.plugin import SchedulerPlugin class ShutdownPlugin(SchedulerPlugin): def __init__(self, output_folder, timeout=60, verbose=False): self.output_folder = output_folder self.timeout = timeout self.verbose = verbose def start(self, scheduler): asyncio.create_task(shutdown_callback(scheduler, self.output_folder, self.timeout, self.verbose))
class ShutdownPlugin(SchedulerPlugin): def __init__(self, output_folder, timeout=60, verbose=False): pass def start(self, scheduler): pass
3
0
3
0
3
0
1
0
1
0
0
0
2
3
2
2
8
1
7
6
4
0
7
6
4
1
1
0
2
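Usage note: all three plugins above are SchedulerPlugin subclasses and are attached through the dask client. A minimal sketch, assuming a local cluster and a hypothetical /tmp/out output folder; on recent distributed releases, register_plugin supersedes register_scheduler_plugin.

from distributed import Client
from eubi_bridge.utils.dask_client_plugins import ShutdownPlugin

client = Client()  # local cluster
plugin = ShutdownPlugin(output_folder="/tmp/out", timeout=60, verbose=True)
client.register_scheduler_plugin(plugin)  # the scheduler then invokes plugin.start(...)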
327,911
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/config/base.py
strawchemy.config.base.StrawchemyConfig
from typing import TYPE_CHECKING, Optional from strawchemy.strawberry.repository import StrawchemySyncRepository from strawchemy.sqlalchemy.inspector import SQLAlchemyGraphQLInspector from strawchemy.strawberry import default_session_getter from dataclasses import dataclass, field @dataclass class StrawchemyConfig: """Global configuration for Strawchemy. Attributes: dialect: The SQLAlchemy dialect being used. session_getter: Function to retrieve SQLAlchemy session from strawberry `Info` object. auto_snake_case: Automatically convert snake cased names to camel case. repository_type: Repository class to use for auto resolvers. filter_overrides: Override default filters with custom filters. execution_options: SQLAlchemy execution options for repository operations. pagination_default_limit: Default pagination limit when `pagination=True`. pagination: Enable/disable pagination on list resolvers. default_id_field_name: Name for primary key fields arguments on primary key resolvers. deterministic_ordering: Force deterministic ordering for list resolvers. inspector: The SQLAlchemyGraphQLInspector instance. """ dialect: SupportedDialect session_getter: AnySessionGetter = default_session_getter 'Function to retrieve SQLAlchemy session from strawberry `Info` object.' auto_snake_case: bool = True 'Automatically convert snake cased names to camel case' repository_type: AnyRepository = StrawchemySyncRepository 'Repository class to use for auto resolvers.' filter_overrides: Optional[FilterMap] = None 'Override default filters with custom filters.' execution_options: Optional[dict[str, Any]] = None 'SQLAlchemy execution options for repository operations.' pagination_default_limit: int = 100 'Default pagination limit when `pagination=True`.' pagination: bool = False 'Enable/disable pagination on list resolvers.' default_id_field_name: str = 'id' 'Name for primary key fields arguments on primary key resolvers.' deterministic_ordering: bool = True 'Force deterministic ordering for list resolvers.' inspector: SQLAlchemyGraphQLInspector = field(init=False) def __post_init__(self) -> None: """Initializes the SQLAlchemyGraphQLInspector after the dataclass is created.""" self.inspector = SQLAlchemyGraphQLInspector(self.dialect, filter_overrides=self.filter_overrides)
@dataclass class StrawchemyConfig: '''Global configuration for Strawchemy. Attributes: dialect: The SQLAlchemy dialect being used. session_getter: Function to retrieve SQLAlchemy session from strawberry `Info` object. auto_snake_case: Automatically convert snake cased names to camel case. repository_type: Repository class to use for auto resolvers. filter_overrides: Override default filters with custom filters. execution_options: SQLAlchemy execution options for repository operations. pagination_default_limit: Default pagination limit when `pagination=True`. pagination: Enable/disable pagination on list resolvers. default_id_field_name: Name for primary key fields arguments on primary key resolvers. deterministic_ordering: Force deterministic ordering for list resolvers. inspector: The SQLAlchemyGraphQLInspector instance. ''' def __post_init__(self) -> None: '''Initializes the SQLAlchemyGraphQLInspector after the dataclass is created.''' pass
3
2
3
0
2
1
1
1.71
0
1
1
0
1
0
1
1
42
4
14
12
12
24
14
12
12
1
0
0
1
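Usage note: StrawchemyConfig is a plain dataclass, so overriding a subset of the documented defaults is enough; __post_init__ then builds the inspector automatically. A sketch, assuming strawchemy is installed:

from strawchemy.config.base import StrawchemyConfig

config = StrawchemyConfig(
    dialect="postgresql",         # required positional field
    pagination=True,              # enable pagination on list resolvers
    pagination_default_limit=50,  # override the default of 100
)
print(type(config.inspector).__name__)  # SQLAlchemyGraphQLInspector, built in __post_init__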
327,912
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/config/databases.py
strawchemy.config.databases.DatabaseFeatures
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Protocol from strawchemy.exceptions import StrawchemyError @dataclass(frozen=True) class DatabaseFeatures(Protocol): """Defines a protocol for database-specific features. Attributes: dialect: The SQL dialect supported by these features. supports_lateral: Whether the database supports LATERAL joins. supports_distinct_on: Whether the database supports DISTINCT ON. supports_json: Whether the database supports JSON operations. supports_null_ordering: Whether the database supports NULLS FIRST/LAST ordering. aggregation_functions: A set of supported aggregation function names. """ dialect: SupportedDialect supports_lateral: bool = False supports_distinct_on: bool = False supports_json: bool = True supports_null_ordering: bool = False aggregation_functions: set[AggregationFunction] = field(default_factory=lambda: {'min', 'max', 'sum', 'avg', 'count', 'stddev_samp', 'stddev_pop', 'var_samp', 'var_pop'}) @classmethod def new(cls, dialect: SupportedDialect) -> DatabaseFeatures: """Factory method to create a DatabaseFeatures instance for the given dialect. Args: dialect: The SQL dialect. Returns: A DatabaseFeatures instance for the specified dialect. Raises: StrawchemyError: If the dialect is unsupported. """ if dialect == 'postgresql': return PostgresFeatures() if dialect == 'mysql': return MySQLFeatures() if dialect == 'sqlite': return SQLiteFeatures() msg = 'Unsupported dialect' raise StrawchemyError(msg)
@dataclass(frozen=True) class DatabaseFeatures(Protocol): '''Defines a protocol for database-specific features. Attributes: dialect: The SQL dialect supported by these features. supports_lateral: Whether the database supports LATERAL joins. supports_distinct_on: Whether the database supports DISTINCT ON. supports_json: Whether the database supports JSON operations. supports_null_ordering: Whether the database supports NULLS FIRST/LAST ordering. aggregation_functions: A set of supported aggregation function names. ''' @classmethod def new(cls, dialect: SupportedDialect) -> DatabaseFeatures: '''Factory method to create a DatabaseFeatures instance for the given dialect. Args: dialect: The SQL dialect. Returns: A DatabaseFeatures instance for the specified dialect. Raises: StrawchemyError: If the dialect is unsupported. ''' pass
4
2
20
3
9
8
4
0.59
1
4
4
3
0
0
1
25
52
6
29
9
26
17
16
8
14
4
5
1
4
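For orientation, a minimal usage sketch of the `DatabaseFeatures.new` factory recorded above. It assumes the strawchemy package from these records is importable; the dialect strings come straight from the factory body, and 'oracle' is used only to show the error path:

from strawchemy.config.databases import DatabaseFeatures

# Dispatch on the dialect string, exactly as the factory body does.
features = DatabaseFeatures.new('postgresql')
assert features.supports_lateral and features.supports_distinct_on

# Any dialect outside the three handled literals raises StrawchemyError.
try:
    DatabaseFeatures.new('oracle')
except Exception as error:  # StrawchemyError in the real package
    print(error)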
327,913
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/config/databases.py
strawchemy.config.databases.MySQLFeatures
from dataclasses import dataclass, field


@dataclass(frozen=True)
class MySQLFeatures(DatabaseFeatures):
    """Database features specific to MySQL."""

    dialect: SupportedDialect = 'mysql'
@dataclass(frozen=True)
class MySQLFeatures(DatabaseFeatures):
    '''Database features specific to MySQL.'''
    pass
2
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
25
4
1
2
2
1
1
2
2
1
0
6
0
0
327,914
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/config/databases.py
strawchemy.config.databases.PostgresFeatures
from dataclasses import dataclass, field


@dataclass(frozen=True)
class PostgresFeatures(DatabaseFeatures):
    """Database features specific to PostgreSQL."""

    dialect: SupportedDialect = 'postgresql'
    supports_distinct_on: bool = True
    supports_lateral: bool = True
    supports_null_ordering: bool = True
@dataclass(frozen=True)
class PostgresFeatures(DatabaseFeatures):
    '''Database features specific to PostgreSQL.'''
    pass
2
1
0
0
0
0
0
0.2
1
0
0
0
0
0
0
25
7
1
5
5
4
1
5
5
4
0
6
0
0
327,915
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/config/databases.py
strawchemy.config.databases.SQLiteFeatures
from dataclasses import dataclass, field


@dataclass(frozen=True)
class SQLiteFeatures(DatabaseFeatures):
    """Database features specific to SQLite."""

    dialect: SupportedDialect = 'sqlite'
    aggregation_functions: set[AggregationFunction] = field(default_factory=lambda: {'min', 'max', 'sum', 'avg', 'count'})
@dataclass(frozen=True)
class SQLiteFeatures(DatabaseFeatures):
    '''Database features specific to SQLite.'''
    pass
2
1
0
0
0
0
0
0.2
1
0
0
0
0
0
0
25
7
1
5
3
4
1
3
3
2
0
6
0
0
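Taken together, the three dialect records above differ only in the defaults they override on DatabaseFeatures. A short, hedged comparison sketch (again assuming the package is importable):

from strawchemy.config.databases import MySQLFeatures, PostgresFeatures, SQLiteFeatures

for features in (PostgresFeatures(), MySQLFeatures(), SQLiteFeatures()):
    # SQLite narrows aggregation_functions to the five basic aggregates;
    # PostgreSQL and MySQL inherit the full set from DatabaseFeatures.
    print(features.dialect, features.supports_lateral, sorted(features.aggregation_functions))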
327,916
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/pydantic.py
strawchemy.dto.backend.pydantic.MappedPydanticDTO
from strawchemy.dto.base import DTOBackend, DTOBase, DTOFieldDefinition, MappedDTO, ModelFieldT, ModelT


class MappedPydanticDTO(_PydanticDTOBase, MappedDTO[ModelT]): ...
class MappedPydanticDTO(_PydanticDTOBase, MappedDTO[ModelT]): pass
1
0
0
0
0
0
0
0
2
0
0
1
0
0
0
85
1
0
1
1
1
0
2
1
1
0
6
0
0
327,917
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/pydantic.py
strawchemy.dto.backend.pydantic.PydanticDTO
from strawchemy.dto.base import DTOBackend, DTOBase, DTOFieldDefinition, MappedDTO, ModelFieldT, ModelT


class PydanticDTO(_PydanticDTOBase, DTOBase[ModelT]): ...
class PydanticDTO(_PydanticDTOBase, DTOBase[ModelT]): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
84
1
0
1
1
1
0
2
1
1
0
6
0
0
327,918
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/pydantic.py
strawchemy.dto.backend.pydantic.PydanticDTOBackend
from pydantic import BaseModel, BeforeValidator, ConfigDict, create_model
from strawchemy.utils import get_annotations
from strawchemy.dto.base import DTOBackend, DTOBase, DTOFieldDefinition, MappedDTO, ModelFieldT, ModelT
from inspect import getmodule
from strawchemy.dto.types import DTOMissing
from pydantic.fields import Field, FieldInfo
from typing import TYPE_CHECKING, Annotated, Any, Optional, TypeVar
from typing_extensions import override


class PydanticDTOBackend(DTOBackend[PydanticDTOT]):
    """Implements DTO factory using pydantic."""

    def __init__(self, dto_base: type[PydanticDTOT]) -> None:
        self.dto_base = dto_base

    def _construct_field_info(self, field_def: DTOFieldDefinition[ModelT, ModelFieldT]) -> FieldInfo:
        """Build a `FieldInfo instance reflecting the given field_def."""
        kwargs: dict[str, Any] = {}
        if field_def.required:
            kwargs['default'] = ...
        elif field_def.default_factory is not DTOMissing:
            kwargs['default_factory'] = field_def.default_factory
        elif field_def.default is not DTOMissing:
            kwargs['default'] = field_def.default
        if field_def.purpose_config.alias:
            kwargs['alias'] = field_def.model_field_name
        return Field(**kwargs)

    @override
    def update_forward_refs(self, dto: type[PydanticDTOT], namespace: dict[str, type[PydanticDTOT]]) -> Optional[bool]:
        dto.model_rebuild(_types_namespace=namespace, raise_errors=False)

    @override
    def build(self, name: str, model: type[ModelT], field_definitions: Iterable[DTOFieldDefinition[ModelT, ModelFieldT]], base: Optional[type[Any]]=None, config_dict: Optional[ConfigDict]=None, docstring: bool=True, **kwargs: Any) -> type[PydanticDTOT]:
        fields: dict[str, tuple[Any, FieldInfo]] = {}
        base_annotations = get_annotations(base) if base else {}
        for field_def in field_definitions:
            field_type = field_def.type_
            validator: Optional[BeforeValidator] = None
            if field_def.purpose_config.validator:
                validator = BeforeValidator(field_def.purpose_config.validator)
            if validator:
                field_type = Annotated[field_type, validator]
            fields[field_def.name] = (field_type, self._construct_field_info(field_def))
        for f_name in base_annotations:
            field_info: FieldInfo = Field()
            attribute = getattr(base, f_name, DTOMissing)
            if attribute is not DTOMissing:
                field_info = attribute if isinstance(attribute, FieldInfo) else Field(default=attribute)
            field_type = fields[f_name][0] if f_name in fields else base_annotations[f_name]
            fields[f_name] = (field_type, field_info)
        module = __name__
        if (model_module := getmodule(self.dto_base)):
            module = model_module.__name__
        dto = create_model(name, __base__=(self.dto_base,), __config__=None, __module__=module, __validators__=None, __doc__=f'Pydantic generated DTO for {model.__name__} model' if docstring else None, __cls_kwargs__=None, **fields)
        if config_dict:
            cls_body = {'model_config': config_dict} if config_dict else {}
            return type(dto.__name__, (dto,), cls_body)
        return dto
class PydanticDTOBackend(DTOBackend[PydanticDTOT]):
    '''Implements DTO factory using pydantic.'''

    def __init__(self, dto_base: type[PydanticDTOT]) -> None:
        pass

    def _construct_field_info(self, field_def: DTOFieldDefinition[ModelT, ModelFieldT]) -> FieldInfo:
        '''Build a `FieldInfo instance reflecting the given field_def.'''
        pass

    @override
    def update_forward_refs(self, dto: type[PydanticDTOT], namespace: dict[str, type[PydanticDTOT]]) -> Optional[bool]:
        pass

    @override
    def build(self, name: str, model: type[ModelT], field_definitions: Iterable[DTOFieldDefinition[ModelT, ModelFieldT]], base: Optional[type[Any]]=None, config_dict: Optional[ConfigDict]=None, docstring: bool=True, **kwargs: Any) -> type[PydanticDTOT]:
        pass
7
2
17
1
15
1
5
0.05
1
12
2
0
4
1
4
31
74
9
62
30
46
3
40
18
35
13
6
2
20
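The build() method recorded above is essentially a wrapper around pydantic's create_model. A self-contained sketch of that same pattern using only real pydantic APIs; the model name and fields here are hypothetical, not part of the library:

from pydantic import Field, create_model

# Mirror what PydanticDTOBackend.build does: map field names to
# (type, FieldInfo) pairs, then hand them to create_model.
fields = {
    'id': (int, Field(default=...)),                    # required, like field_def.required
    'tags': (list[str], Field(default_factory=list)),   # the default_factory branch
}
UserDTO = create_model('UserDTO', **fields)
print(UserDTO(id=1))  # tags defaults to []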
327,919
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/pydantic.py
strawchemy.dto.backend.pydantic._PydanticDTOBase
from pydantic import BaseModel, BeforeValidator, ConfigDict, create_model


class _PydanticDTOBase(BaseModel):
    model_config = ConfigDict(from_attributes=True, arbitrary_types_allowed=True, populate_by_name=True)
class _PydanticDTOBase(BaseModel): pass
1
0
0
0
0
0
0
0
1
0
0
2
0
0
0
82
2
0
2
2
1
0
2
2
1
0
5
0
0
327,920
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/strawberry.py
strawchemy.dto.backend.strawberry.FieldInfo
from strawchemy.dto.types import DTOMissing
from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union, get_origin
from strawberry.types.field import StrawberryField
from dataclasses import dataclass


@dataclass
class FieldInfo:
    name: str
    type: Any
    field: Union[StrawberryField, type[DTOMissing]] = DTOMissing
@dataclass class FieldInfo: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0
4
2
3
0
4
2
3
0
0
0
0
327,921
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/strawberry.py
strawchemy.dto.backend.strawberry.MappedStrawberryDTO
from strawchemy.dto.base import DTOBackend, DTOBase, MappedDTO, ModelFieldT, ModelT


class MappedStrawberryDTO(MappedDTO[ModelT]): ...
class MappedStrawberryDTO(MappedDTO[ModelT]): pass
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
3
1
0
1
1
1
0
2
1
1
0
3
0
0
327,922
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/strawberry.py
strawchemy.dto.backend.strawberry.StrawberrryDTOBackend
from __future__ import annotations

import strawberry
from inspect import getmodule
from types import new_class
from strawchemy.dto.base import DTOBackend, DTOBase, MappedDTO, ModelFieldT, ModelT
from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union, get_origin
from strawchemy.utils import get_annotations
from strawberry.types.field import StrawberryField
from strawchemy.dto.types import DTOMissing
from typing_extensions import override


class StrawberrryDTOBackend(DTOBackend[AnnotatedDTOT]):

    def __init__(self, dto_base: type[AnnotatedDTOT]) -> None:
        self.dto_base = dto_base
        base_cls = origin if (origin := get_origin(dto_base)) else dto_base
        self._base_annotations = {name: value for name, value in get_annotations(base_cls).items() if not self._is_private_attribute(name)}

    def _construct_field_info(self, field_def: DTOFieldDefinition[ModelT, ModelFieldT]) -> FieldInfo:
        strawberry_field: Optional[StrawberryField] = None
        if field_def.default_factory is not DTOMissing:
            if isinstance(field_def.default_factory(), (list, tuple)):
                strawberry_field = strawberry.field(default_factory=list)
            else:
                strawberry_field = strawberry.field(default=strawberry.UNSET)
        if field_def.default is not DTOMissing:
            strawberry_field = strawberry.field(default=field_def.default)
        if strawberry_field:
            return FieldInfo(field_def.name, field_def.type_, strawberry_field)
        return FieldInfo(field_def.name, field_def.type_)

    @classmethod
    def _is_private_attribute(cls, name: str) -> bool:
        return name.startswith(('__strawchemy', '__dto'))

    @override
    def copy(self, dto: type[AnnotatedDTOT], name: str) -> type[AnnotatedDTOT]:
        annotations = get_annotations(dto)
        attributes = {name: getattr(dto, name) for name in annotations if hasattr(dto, name)}
        attributes |= {name: value for name, value in dto.__dict__.items() if isinstance(value, StrawberryField) or self._is_private_attribute(name)}

        def _exec_body(namespace: dict[str, Any]) -> dict[str, Any]:
            namespace['__module__'] = dto.__module__
            namespace['__annotations__'] = annotations
            namespace.update(attributes)
            return namespace

        return new_class(name, (self.dto_base,), exec_body=_exec_body)

    @override
    def build(self, name: str, model: type[Any], field_definitions: Iterable[DTOFieldDefinition[Any, ModelFieldT]], base: Optional[type[Any]]=None, **kwargs: Any) -> type[AnnotatedDTOT]:
        fields: list[FieldInfo] = []
        dto_field_definitions: dict[str, DTOFieldDefinition[Any, ModelFieldT]] = {}
        for field in field_definitions:
            dto_field_definitions[field.name] = field
            fields.append(self._construct_field_info(field))
        module = __name__
        if (model_module := getmodule(self.dto_base)):
            module = model_module.__name__
        bases = (base, self.dto_base) if base else (self.dto_base,)
        annotations = self._base_annotations | {field.name: field.type for field in fields}
        attributes = {field.name: field.field for field in fields if field.field is not DTOMissing}
        base_attributes = {name: getattr(self.dto_base, name) for name in self._base_annotations if hasattr(self.dto_base, name)}
        doc = f'DTO generated to be decorated by strawberry for {model.__name__} model'
        if base:
            annotations |= get_annotations(base)
            attributes |= {name: value for name, value in base.__dict__.items() if isinstance(value, StrawberryField)}
            doc = base.__doc__ or doc

        def _exec_body(namespace: dict[str, Any]) -> dict[str, Any]:
            namespace['__module__'] = module
            namespace['__doc__'] = doc
            namespace['__annotations__'] = annotations
            namespace['__dto_field_definitions__'] = dto_field_definitions
            namespace['__dto_model__'] = model
            namespace.update(base_attributes | attributes)
            return namespace

        return new_class(name, bases=bases, exec_body=_exec_body)
class StrawberrryDTOBackend(DTOBackend[AnnotatedDTOT]):
    def __init__(self, dto_base: type[AnnotatedDTOT]) -> None:
        pass

    def _construct_field_info(self, field_def: DTOFieldDefinition[ModelT, ModelFieldT]) -> FieldInfo:
        pass

    @classmethod
    def _is_private_attribute(cls, name: str) -> bool:
        pass

    @override
    def copy(self, dto: type[AnnotatedDTOT], name: str) -> type[AnnotatedDTOT]:
        pass

    def _exec_body(namespace: dict[str, Any]) -> dict[str, Any]:
        pass

    @override
    def build(self, name: str, model: type[Any], field_definitions: Iterable[DTOFieldDefinition[Any, ModelFieldT]], base: Optional[type[Any]]=None, **kwargs: Any) -> type[AnnotatedDTOT]:
        pass

    def _exec_body(namespace: dict[str, Any]) -> dict[str, Any]:
        pass
11
0
13
1
12
0
2
0
1
10
3
0
4
2
5
32
86
12
74
34
56
0
55
23
47
5
6
2
16
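Both copy() and build() in the record above rely on the stdlib types.new_class with an exec_body callback. A self-contained sketch of that pattern; the class and attribute names here are hypothetical:

from types import new_class

def _exec_body(namespace):
    # exec_body receives the namespace of the class under construction
    # and mutates it in place, just like the backend above does.
    namespace['__annotations__'] = {'name': str}
    namespace['greeting'] = 'hello'
    return namespace

Dynamic = new_class('Dynamic', (object,), exec_body=_exec_body)
print(Dynamic.greeting, Dynamic.__annotations__)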
327,923
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/backend/strawberry.py
strawchemy.dto.backend.strawberry.StrawberryDTO
from strawchemy.dto.base import DTOBackend, DTOBase, MappedDTO, ModelFieldT, ModelT


class StrawberryDTO(DTOBase[ModelT]): ...
class StrawberryDTO(DTOBase[ModelT]): pass
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
2
1
0
1
1
1
0
2
1
1
0
2
0
0
327,924
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.DTOBackend
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable
import contextlib
from types import new_class


class DTOBackend(Protocol, Generic[DTOBaseT]):
    dto_base: type[DTOBaseT]

    def build(self, name: str, model: type[Any], field_definitions: Iterable[DTOFieldDefinition[Any, Any]], base: Optional[type[Any]]=None, **kwargs: Any) -> type[DTOBaseT]:
        """Build a Data transfer object (DTO) from an SQAlchemy model.

        This inner factory is invoked by the public factory() method

        Args:
            name: Current DTO name
            model: SQLAlchemy model from which to generate the DTO
            field_definitions: Iterable of dto field generated for this model
            dto_config: DTO config
            base: Base class from which the DTO must inherit
            kwargs: Keyword arguments passed to needed to build the DTO

        Returns:
            A DTO generated after the given model.
        """
        raise NotImplementedError

    def update_forward_refs(self, dto: type[DTOBaseT], namespace: dict[str, type[DTOBaseT]]) -> Optional[bool]:
        """Update forward refs for the given DTO.

        Args:
            dto: DTO with forward references
            namespace: Dict that include

        Raises:
            NotImplementedError: _description_
        """
        with contextlib.suppress(NameError):
            dto.__annotations__ = get_type_hints(dto, localns={**TYPING_NS, **namespace}, include_extras=True)

    def copy(self, dto: type[DTOBaseT], name: str) -> type[DTOBaseT]:
        return new_class(name, (dto,))
class DTOBackend(Protocol, Generic[DTOBaseT]):
    def build(self, name: str, model: type[Any], field_definitions: Iterable[DTOFieldDefinition[Any, Any]], base: Optional[type[Any]]=None, **kwargs: Any) -> type[DTOBaseT]:
        '''Build a Data transfer object (DTO) from an SQAlchemy model.

        This inner factory is invoked by the public factory() method

        Args:
            name: Current DTO name
            model: SQLAlchemy model from which to generate the DTO
            field_definitions: Iterable of dto field generated for this model
            dto_config: DTO config
            base: Base class from which the DTO must inherit
            kwargs: Keyword arguments passed to needed to build the DTO

        Returns:
            A DTO generated after the given model.
        '''
        pass

    def update_forward_refs(self, dto: type[DTOBaseT], namespace: dict[str, type[DTOBaseT]]) -> Optional[bool]:
        '''Update forward refs for the given DTO.

        Args:
            dto: DTO with forward references
            namespace: Dict that include

        Raises:
            NotImplementedError: _description_
        '''
        pass

    def copy(self, dto: type[DTOBaseT], name: str) -> type[DTOBaseT]:
        pass
4
2
13
2
5
6
1
1.19
2
9
1
3
3
0
3
27
44
9
16
11
5
19
9
4
5
1
5
1
3
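The default update_forward_refs above resolves string annotations through typing.get_type_hints with an explicit local namespace. A self-contained sketch of that mechanism; the class name is hypothetical:

from typing import get_type_hints

class Node:
    next: 'Node'  # forward reference stored as a string

# Passing localns makes the resolution namespace explicit, as the
# protocol does with {**TYPING_NS, **namespace}.
hints = get_type_hints(Node, localns={'Node': Node}, include_extras=True)
print(hints)  # the string 'Node' resolves to the class object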
327,925
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.DTOBase
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable

from .types import DTOAuto, DTOConfig, DTOFieldConfig, DTOMissing, DTOSkip, DTOUnset, ExcludeFields, IncludeFields, Purpose, PurposeConfig


class DTOBase(Generic[ModelT]):
    """Base class to define DTO mapping classes."""

    if TYPE_CHECKING:
        __dto_model__: type[ModelT]
        __dto_config__: ClassVar[DTOConfig]
        __dto_field_definitions__: ClassVar[dict[str, DTOFieldDefinition[Any, Any]]]
        __dto_tags__: set[str]
class DTOBase(Generic[ModelT]):
    '''Base class to define DTO mapping classes.'''
    pass
1
1
0
0
0
0
0
0.2
1
0
0
4
0
0
0
2
7
1
5
1
4
1
5
1
4
0
1
1
0
327,926
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.DTOFactory
from strawchemy.graph import Node
from __future__ import annotations
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable
import warnings
from strawchemy.dto.exceptions import DTOError, EmptyDTOError
from .utils import config
from dataclasses import dataclass, field
from .types import DTOAuto, DTOConfig, DTOFieldConfig, DTOMissing, DTOSkip, DTOUnset, ExcludeFields, IncludeFields, Purpose, PurposeConfig
from typing_extensions import Self, TypeAlias, override
from contextlib import suppress
from strawchemy.utils import is_type_hint_optional, non_optional_type_hint
from collections import defaultdict


class DTOFactory(Generic[ModelT, ModelFieldT, DTOBaseT]):
    """Base class for implementing DTO factory.

    Provide methods to inspect SQLAlchemy models and iterating over fields to convert.
    """

    def __init__(self, inspector: ModelInspector[ModelT, ModelFieldT], backend: DTOBackend[DTOBaseT], handle_cycles: bool=True, type_map: Optional[dict[Any, Any]]=None) -> None:
        """Initialize internal state to keep track of generated DTOs."""
        self.dtos: dict[str, type[DTOBaseT]] = {}
        self.handle_cycles: bool = handle_cycles
        self.inspector = inspector
        self.backend = backend
        self.type_map = type_map or {}
        self._dto_cache: dict[Hashable, type[DTOBaseT]] = {}
        self._unresolved_refs: defaultdict[str, list[type[DTOBaseT]]] = defaultdict(list)
        self._scoped_dto_names: dict[Hashable, str] = {}

    def should_exclude_field(self, field: DTOFieldDefinition[Any, ModelFieldT], dto_config: DTOConfig, node: Node[Relation[Any, DTOBaseT], None], has_override: bool) -> bool:
        """Whether the model field should be excluded from the dto or not."""
        explictly_excluded = node.is_root and field.model_field_name in dto_config.exclude
        explicitly_included = node.is_root and field.model_field_name in dto_config.include
        if dto_config.purpose is Purpose.WRITE and (not explicitly_included):
            explictly_excluded = explictly_excluded or not field.init
        if dto_config.include == 'all' and (not explictly_excluded):
            explicitly_included = True
        excluded = dto_config.purpose not in field.allowed_purposes
        if node.is_root:
            excluded = excluded or (explictly_excluded or not explicitly_included)
        else:
            excluded = excluded or explictly_excluded
        return not has_override and excluded

    def _resolve_basic_type(self, field: DTOFieldDefinition[ModelT, ModelFieldT], dto_config: DTOConfig) -> Any:
        type_hint = self.type_map.get(field.type_hint, field.type_)
        overriden_by_type_map = field.type_hint in dto_config.type_overrides or field.type_hint in self.type_map
        if overriden_by_type_map or field.has_type_override:
            return type_hint
        if not field.has_type_override and field.complete and is_type_hint_optional(type_hint):
            type_hint = non_optional_type_hint(type_hint)
        return type_hint

    def _resolve_relation_type(self, field: DTOFieldDefinition[ModelT, ModelFieldT], dto_config: DTOConfig, node: Node[Relation[ModelT, DTOBaseT], None], **factory_kwargs: Any) -> Any:
        type_hint = self.type_map.get(field.type_hint, field.type_)
        relation_model = self.inspector.relation_model(field.model_field)
        dto_name = self._scoped_dto_names.get(self._scoped_cache_key(relation_model, dto_config), self.dto_name(relation_model.__name__, dto_config, node))
        relation_child = Relation(relation_model, name=dto_name)
        parent = node.find_parent(lambda parent: parent.value == relation_child)
        if relation_model is node.value.model:
            dto = Self
            field.self_reference = True
        elif parent is not None:
            dto = ForwardRef(parent.value.name)
            if self.handle_cycles:
                node.value.forward_refs.append(Reference(parent.value.name, parent))
            field.related_dto = dto
        else:
            child = node.insert_child(relation_child)
            dto = self.factory(model=relation_model, dto_config=dto_config, base=None, name=dto_name, parent_field_def=field, current_node=child, **factory_kwargs)
            field.related_dto = dto
        if field.uselist:
            dto = list[dto]
        if is_type_hint_optional(type_hint) and (not field.complete) or field.partial:
            return Optional[dto]
        return dto

    def _resolve_type(self, field: DTOFieldDefinition[ModelT, ModelFieldT], dto_config: DTOConfig, node: Node[Relation[ModelT, DTOBaseT], None], **factory_kwargs: Any) -> Any:
        """Recursively resolve the type hint to a valid pydantic type."""
        if not field.is_relation:
            return self._resolve_basic_type(field, dto_config)
        return self._resolve_relation_type(field, dto_config, node, **factory_kwargs)

    def _node_or_root(self, model: type[Any], name: str, node: Optional[Node[Relation[Any, DTOBaseT], None]]=None) -> Node[Relation[Any, DTOBaseT], None]:
        return Node(Relation(model=model, name=name)) if node is None else node

    def _base_cache_key(self, dto_config: DTOConfig) -> Hashable:
        return frozenset([(dto_config.purpose, dto_config.partial, dto_config.alias_generator), tuple(dto_config.type_overrides.items())])

    def _root_cache_key(self, dto_config: DTOConfig) -> Hashable:
        root_key = [frozenset(dto_config.include if dto_config.include != 'all' else ()), frozenset(dto_config.exclude), frozenset(dto_config.aliases.items()), frozenset(dto_config.annotation_overrides.items())]
        return frozenset((key for key in root_key if key))

    def _scoped_cache_key(self, model: type[Any], dto_config: DTOConfig) -> Hashable:
        return frozenset([(model, self._base_cache_key(dto_config), frozenset())])

    def _cache_key(self, model: type[Any], dto_config: DTOConfig, node: Node[Relation[Any, DTOBaseT], None], **factory_kwargs: Any) -> Hashable:
        base_key = self._base_cache_key(dto_config)
        node_key = frozenset()
        if node.is_root and dto_config.scope != 'global':
            node_key = self._root_cache_key(dto_config)
        return (model, base_key, node_key)

    def _factory(self, name: str, model: type[ModelT], dto_config: DTOConfig, node: Node[Relation[Any, DTOBaseT], None], base: Optional[type[Any]]=None, parent_field_def: Optional[DTOFieldDefinition[ModelT, ModelFieldT]]=None, raise_if_no_fields: bool=False, backend_kwargs: Optional[dict[str, Any]]=None, **kwargs: Any) -> type[DTOBaseT]:
        self_ref_fields: list[DTOFieldDefinition[ModelT, ModelFieldT]] = []
        field_definitions_dict: dict[str, DTOFieldDefinition[ModelT, ModelFieldT]] = {}

        def _gen() -> Iterable[DTOFieldDefinition[ModelT, ModelFieldT]]:
            iterable = self.iter_field_definitions(name=name, model=model, dto_config=dto_config, base=base, node=node, raise_if_no_fields=raise_if_no_fields, **kwargs)
            for field_def in iterable:
                yield field_def
                field_definitions_dict[field_def.name] = field_def
                if field_def.self_reference:
                    self_ref_fields.append(field_def)

        dto = self.backend.build(name=name, model=model, field_definitions=_gen(), base=base, **backend_kwargs or {})
        dto.__dto_field_definitions__ = field_definitions_dict
        for field_def in self_ref_fields:
            field_def.related_dto = dto
        return dto

    def type_hint_namespace(self) -> dict[str, Any]:
        return TYPING_NS | self.dtos

    def dto_name(self, base_name: str, dto_config: DTOConfig, node: Optional[Node[Relation[Any, DTOBaseT], None]]=None) -> str:
        return f'{base_name}{dto_config.purpose.value.capitalize()}DTO'

    def root_dto_name(self, model: type[ModelT], dto_config: DTOConfig, node: Optional[Node[Relation[Any, DTOBaseT], None]]=None) -> str:
        return self.dto_name(model.__name__, dto_config, node)

    def iter_field_definitions(self, name: str, model: type[ModelT], dto_config: DTOConfig, base: Optional[type[DTOBase[ModelT]]], node: Node[Relation[ModelT, DTOBaseT], None], raise_if_no_fields: bool=False, **factory_kwargs: Any) -> Generator[DTOFieldDefinition[ModelT, ModelFieldT], None, None]:
        no_fields = True
        annotations: dict[str, Any] = dto_config.annotation_overrides
        if base:
            with suppress(NameError):
                base.__annotations__ = self.inspector.get_type_hints(base)
            annotations = base.__annotations__ | dto_config.annotation_overrides
        for model_field_name, field_def in self.inspector.field_definitions(model, dto_config):
            has_override = model_field_name in annotations
            has_auto_override = has_override and annotations[model_field_name] is DTOAuto
            if has_override and annotations[model_field_name] is not DTOAuto:
                no_fields = False
                field_def.type_ = annotations[model_field_name]
            if self.should_exclude_field(field_def, dto_config, node, has_override):
                continue
            if not has_override or has_auto_override:
                no_fields = False
                field_def.type_ = self._resolve_type(field_def, dto_config, node, **factory_kwargs)
                if field_def.type_ is DTOSkip:
                    continue
            yield field_def
            no_fields = False
        if no_fields:
            msg = f'{name} DTO generated from {model.__qualname__} have no fields'
            if raise_if_no_fields:
                raise EmptyDTOError(msg)
            warnings.warn(msg, stacklevel=2)

    def factory(self, model: type[ModelT], dto_config: DTOConfig, base: Optional[type[Any]]=None, name: Optional[str]=None, parent_field_def: Optional[DTOFieldDefinition[ModelT, ModelFieldT]]=None, current_node: Optional[Node[Relation[Any, DTOBaseT], None]]=None, raise_if_no_fields: bool=False, tags: Optional[set[str]]=None, backend_kwargs: Optional[dict[str, Any]]=None, **kwargs: Any) -> type[DTOBaseT]:
        """Build a Data transfer object (DTO) from an SQAlchemy model."""
        dto_config = dto_config.with_base_annotations(base) if base else dto_config
        if not name:
            name = base.__name__ if base else self.root_dto_name(model, dto_config, current_node)
        node = self._node_or_root(model, name, current_node)
        scoped_cache_key = self._scoped_cache_key(model, dto_config) if not dto_config.exclude_from_scope else DTOUnset
        cache_key = self._cache_key(model, dto_config, node, **kwargs)
        if dto_config.scope == 'global':
            self._scoped_dto_names[self._scoped_cache_key(model, dto_config)] = name
        if (dto := self._dto_cache.get(cache_key)) or (dto := self._dto_cache.get(scoped_cache_key)):
            return self.backend.copy(dto, name) if node.is_root else dto
        dto = self._factory(name, model, dto_config, node, base, parent_field_def, raise_if_no_fields, backend_kwargs, **kwargs)
        dto.__dto_config__ = dto_config
        dto.__dto_model__ = model
        dto.__dto_tags__ = tags or set()
        self.dtos[name] = dto
        if node.is_root and base is not None:
            self.dtos[base.__name__] = dto
        node.value.dto = dto
        if self.handle_cycles and node.value.dto:
            for incomplete_dto in self._unresolved_refs.pop(name, []):
                self.backend.update_forward_refs(incomplete_dto, self.type_hint_namespace())
            self.backend.update_forward_refs(dto, self.type_hint_namespace())
            for ref in node.value.forward_refs:
                self._unresolved_refs[ref.name].append(dto)
        self._dto_cache[cache_key] = dto
        if dto_config.scope is not None:
            self._dto_cache[self._scoped_cache_key(model, dto_config)] = dto
        return dto

    def decorator(self, model: type[ModelT], purpose: Purpose, include: Optional[IncludeFields]=None, exclude: Optional[ExcludeFields]=None, partial: Optional[bool]=None, type_map: Optional[Mapping[Any, Any]]=None, aliases: Optional[Mapping[str, str]]=None, alias_generator: Optional[Callable[[str], str]]=None, **kwargs: Any) -> Callable[[type[Any]], type[DTOBaseT]]:
        def wrapper(class_: type[Any]) -> type[DTOBaseT]:
            return self.factory(model=model, dto_config=config(purpose=purpose, include=include, exclude=exclude, partial=partial, type_map=type_map, aliases=aliases, alias_generator=alias_generator), base=class_, name=class_.__name__, **kwargs)
        return wrapper
class DTOFactory(Generic[ModelT, ModelFieldT, DTOBaseT]):
    '''Base class for implementing DTO factory.

    Provide methods to inspect SQLAlchemy models and iterating over fields to convert.
    '''

    def __init__(self, inspector: ModelInspector[ModelT, ModelFieldT], backend: DTOBackend[DTOBaseT], handle_cycles: bool=True, type_map: Optional[dict[Any, Any]]=None) -> None:
        '''Initialize internal state to keep track of generated DTOs.'''
        pass

    def should_exclude_field(self, field: DTOFieldDefinition[Any, ModelFieldT], dto_config: DTOConfig, node: Node[Relation[Any, DTOBaseT], None], has_override: bool) -> bool:
        '''Whether the model field should be excluded from the dto or not.'''
        pass

    def _resolve_basic_type(self, field: DTOFieldDefinition[ModelT, ModelFieldT], dto_config: DTOConfig) -> Any:
        pass

    def _resolve_relation_type(self, field: DTOFieldDefinition[ModelT, ModelFieldT], dto_config: DTOConfig, node: Node[Relation[ModelT, DTOBaseT], None], **factory_kwargs: Any) -> Any:
        pass

    def _resolve_type(self, field: DTOFieldDefinition[ModelT, ModelFieldT], dto_config: DTOConfig, node: Node[Relation[ModelT, DTOBaseT], None], **factory_kwargs: Any) -> Any:
        '''Recursively resolve the type hint to a valid pydantic type.'''
        pass

    def _node_or_root(self, model: type[Any], name: str, node: Optional[Node[Relation[Any, DTOBaseT], None]]=None) -> Node[Relation[Any, DTOBaseT], None]:
        pass

    def _base_cache_key(self, dto_config: DTOConfig) -> Hashable:
        pass

    def _root_cache_key(self, dto_config: DTOConfig) -> Hashable:
        pass

    def _scoped_cache_key(self, model: type[Any], dto_config: DTOConfig) -> Hashable:
        pass

    def _cache_key(self, model: type[Any], dto_config: DTOConfig, node: Node[Relation[Any, DTOBaseT], None], **factory_kwargs: Any) -> Hashable:
        pass

    def _factory(self, name: str, model: type[ModelT], dto_config: DTOConfig, node: Node[Relation[Any, DTOBaseT], None], base: Optional[type[Any]]=None, parent_field_def: Optional[DTOFieldDefinition[ModelT, ModelFieldT]]=None, raise_if_no_fields: bool=False, backend_kwargs: Optional[dict[str, Any]]=None, **kwargs: Any) -> type[DTOBaseT]:
        pass

    def _gen() -> Iterable[DTOFieldDefinition[ModelT, ModelFieldT]]:
        pass

    def type_hint_namespace(self) -> dict[str, Any]:
        pass

    def dto_name(self, base_name: str, dto_config: DTOConfig, node: Optional[Node[Relation[Any, DTOBaseT], None]]=None) -> str:
        pass

    def root_dto_name(self, model: type[ModelT], dto_config: DTOConfig, node: Optional[Node[Relation[Any, DTOBaseT], None]]=None) -> str:
        pass

    def iter_field_definitions(self, name: str, model: type[ModelT], dto_config: DTOConfig, base: Optional[type[DTOBase[ModelT]]], node: Node[Relation[ModelT, DTOBaseT], None], raise_if_no_fields: bool=False, **factory_kwargs: Any) -> Generator[DTOFieldDefinition[ModelT, ModelFieldT], None, None]:
        pass

    def factory(self, model: type[ModelT], dto_config: DTOConfig, base: Optional[type[Any]]=None, name: Optional[str]=None, parent_field_def: Optional[DTOFieldDefinition[ModelT, ModelFieldT]]=None, current_node: Optional[Node[Relation[Any, DTOBaseT], None]]=None, raise_if_no_fields: bool=False, tags: Optional[set[str]]=None, backend_kwargs: Optional[dict[str, Any]]=None, **kwargs: Any) -> type[DTOBaseT]:
        '''Build a Data transfer object (DTO) from an SQAlchemy model.'''
        pass

    def decorator(self, model: type[ModelT], purpose: Purpose, include: Optional[IncludeFields]=None, exclude: Optional[ExcludeFields]=None, partial: Optional[bool]=None, type_map: Optional[Mapping[Any, Any]]=None, aliases: Optional[Mapping[str, str]]=None, alias_generator: Optional[Callable[[str], str]]=None, **kwargs: Any) -> Callable[[type[Any]], type[DTOBaseT]]:
        pass

    def wrapper(class_: type[Any]) -> type[DTOBaseT]:
        pass
20
5
22
2
20
1
3
0.04
1
28
12
2
13
7
13
15
322
40
270
130
179
12
136
54
120
10
1
3
49
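DTOFactory above memoizes generated DTOs behind hashable cache keys built from frozensets. A minimal, self-contained sketch of that caching idea; the names here are hypothetical, not the factory's real API:

# Memoize expensive class generation on a hashable config key,
# mirroring DTOFactory._cache_key / _dto_cache above.
_cache: dict = {}

def make_key(model: type, purpose: str, exclude: set[str]):
    return (model, frozenset([purpose]), frozenset(exclude))

def build_dto(model: type, purpose: str, exclude: set[str]) -> type:
    key = make_key(model, purpose, exclude)
    if key in _cache:
        return _cache[key]
    dto = type(f'{model.__name__}{purpose.capitalize()}DTO', (), {})
    _cache[key] = dto
    return dto

class User: ...
assert build_dto(User, 'read', {'password'}) is build_dto(User, 'read', {'password'})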
327,927
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.DTOFieldDefinition
from .types import DTOAuto, DTOConfig, DTOFieldConfig, DTOMissing, DTOSkip, DTOUnset, ExcludeFields, IncludeFields, Purpose, PurposeConfig
from typing_extensions import Self, TypeAlias, override
from strawchemy.dto.exceptions import DTOError, EmptyDTOError
from .utils import config
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable
from dataclasses import dataclass, field


@dataclass
class DTOFieldDefinition(Generic[ModelT, ModelFieldT]):
    dto_config: DTOConfig
    model: type[ModelT]
    model_field_name: str
    _name: str = field(init=False)
    type_hint: Any
    is_relation: bool = False
    config: DTOFieldConfig = field(default_factory=DTOFieldConfig)
    _model_field: Union[ModelFieldT, type[DTOMissing]] = DTOMissing
    related_model: Optional[type[ModelT]] = None
    related_dto: Optional[Union[type[DTOBase[ModelT]], ForwardRef]] = None
    self_reference: bool = False
    uselist: bool = False
    init: bool = True
    type_hint_override: Any = DTOMissing
    partial: Optional[bool] = None
    alias: Optional[str] = None
    default: Any = DTOMissing
    default_factory: Union[Callable[..., Any], type[DTOMissing]] = DTOMissing
    _type: Any = DTOMissing

    def __post_init__(self) -> None:
        self._name = self.model_field_name
        if self.purpose_config.partial is not None:
            self.partial = self.purpose_config.partial
        if self.purpose_config.alias is not None:
            self._name = self.purpose_config.alias
            self.alias = self.purpose_config.alias
        if self.purpose_config.type_override is not DTOMissing:
            self.type_hint_override = self.purpose_config.type_override
        if self.dto_config.partial is not None:
            self.partial = self.dto_config.partial
        if (alias := self.dto_config.alias(self.model_field_name)) is not None:
            self._name = alias
            self.alias = alias
        if (type_override_ := self.dto_config.type_overrides.get(self.type_hint, DTOMissing)) is not DTOMissing:
            self.type_hint_override = type_override_
        if self.partial:
            self.default = self.dto_config.partial_default

    @property
    def model_field(self) -> ModelFieldT:
        if self._model_field is DTOMissing:
            msg = 'Field does not have a model_field set'
            raise DTOError(msg)
        return self._model_field

    @model_field.setter
    def model_field(self, value: ModelFieldT) -> None:
        self._model_field = value

    @property
    def has_model_field(self) -> bool:
        return self._model_field is not DTOMissing

    @property
    def model_identity(self) -> Union[type[ModelT], ModelFieldT]:
        try:
            return self.model_field
        except DTOError:
            return self.model

    @property
    def purpose_config(self) -> PurposeConfig:
        return self.config.purpose_config(self.dto_config)

    @property
    def name(self) -> str:
        return self._name

    @property
    def type_(self) -> Any:
        if self._type is not DTOMissing:
            return self._type
        type_hint = self.type_hint_override if self.has_type_override else self.type_hint
        return Optional[type_hint] if self.partial else type_hint

    @type_.setter
    def type_(self, value: Any) -> None:
        self._type = value

    @property
    def has_type_override(self) -> bool:
        return self.type_hint_override is not DTOMissing

    @property
    def allowed_purposes(self) -> set[Purpose]:
        return self.config.purposes

    @property
    def complete(self) -> bool:
        return self.dto_config.purpose is Purpose.COMPLETE and Purpose.COMPLETE in self.allowed_purposes

    @property
    def required(self) -> bool:
        required_by_purpose = self.dto_config.purpose is Purpose.READ or (self.dto_config.purpose is Purpose.COMPLETE and Purpose.COMPLETE in self.allowed_purposes)
        return required_by_purpose and (not self.partial)

    @override
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.name}, {self.type_})'
@dataclass
class DTOFieldDefinition(Generic[ModelT, ModelFieldT]):
    def __post_init__(self) -> None:
        pass

    @property
    def model_field(self) -> ModelFieldT:
        pass

    @model_field.setter
    def model_field(self, value: ModelFieldT) -> None:
        pass

    @property
    def has_model_field(self) -> bool:
        pass

    @property
    def model_identity(self) -> Union[type[ModelT], ModelFieldT]:
        pass

    @property
    def purpose_config(self) -> PurposeConfig:
        pass

    @property
    def name(self) -> str:
        pass

    @property
    def type_(self) -> Any:
        pass

    @type_.setter
    def type_(self, value: Any) -> None:
        pass

    @property
    def has_type_override(self) -> bool:
        pass

    @property
    def allowed_purposes(self) -> set[Purpose]:
        pass

    @property
    def complete(self) -> bool:
        pass

    @property
    def required(self) -> bool:
        pass

    @override
    def __repr__(self) -> str:
        pass
29
0
4
0
4
0
2
0.02
1
9
4
1
14
0
14
16
112
21
89
48
61
2
74
33
59
8
1
1
26
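The type_ property in the record above wraps the hint in Optional whenever the field is partial. A tiny self-contained sketch of that rule; the function name is hypothetical:

from typing import Any, Optional

def effective_type(type_hint: Any, partial: bool) -> Any:
    # Same rule as DTOFieldDefinition.type_: partial fields become Optional.
    return Optional[type_hint] if partial else type_hint

assert effective_type(int, partial=False) is int
print(effective_type(int, partial=True))  # typing.Optional[int]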
327,928
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.MappedDTO
from typing_extensions import Self, TypeAlias, override
import dataclasses
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable
from dataclasses import dataclass, field
from .types import DTOAuto, DTOConfig, DTOFieldConfig, DTOMissing, DTOSkip, DTOUnset, ExcludeFields, IncludeFields, Purpose, PurposeConfig


class MappedDTO(DTOBase[ModelT]):
    """Base class to define DTO mapping classes."""

    def to_mapped(self, visitor: Optional[VisitorProtocol[ModelT]]=None, override: Optional[dict[str, Any]]=None, level: int=0) -> ModelT:
        """Create an instance of `self.__d_model__`.

        Fill the bound SQLAlchemy model recursively with values from this dataclass.
        """
        model_kwargs: dict[str, Any] = {}
        override = override or {}
        dc_fields: dict[str, dataclasses.Field[Any]] = {}
        if dataclasses.is_dataclass(self.__dto_model__):
            dc_fields = {f.name: f for f in dataclasses.fields(self.__dto_model__)}
        for name, field_def in self.__dto_field_definitions__.items():
            if (value := override.get(name, DTOMissing)) and value is not DTOMissing:
                model_kwargs[name] = value
                continue
            if (field := dc_fields.get(name)) and (not field.init):
                continue
            if TYPE_CHECKING:
                value: Union[Union[Union[Union[ModelT, ToMappedProtocol[Any]], list[ModelT]], list[ToMappedProtocol[Any]]], type[DTOMissing]]
            value = getattr(self, name)
            if isinstance(value, (list, tuple)):
                value = [dto.to_mapped(visitor, level=level + 1) if isinstance(dto, ToMappedProtocol) else cast('ModelT', dto) for dto in value]
            if isinstance(value, ToMappedProtocol):
                value = value.to_mapped(visitor, level=level + 1)
            if visitor is not None:
                value = visitor.field_value(self, field_def, value, level + 1)
            if value is DTOUnset or value is self.__dto_config__.unset_sentinel:
                continue
            model_kwargs[field_def.model_field_name] = value
        model_kwargs |= override
        try:
            return visitor.model(self, self.__dto_model__, model_kwargs, override, level + 1) if visitor else self.__dto_model__(**model_kwargs)
        except TypeError as error:
            original_message = error.args[0] if isinstance(error.args[0], str) else repr(error)
            msg = f'{original_message} (model: {self.__dto_model__.__name__})'
            raise TypeError(msg) from error
class MappedDTO(DTOBase[ModelT]):
    '''Base class to define DTO mapping classes.'''

    def to_mapped(self, visitor: Optional[VisitorProtocol[ModelT]]=None, override: Optional[dict[str, Any]]=None, level: int=0) -> ModelT:
        '''Create an instance of `self.__d_model__`.

        Fill the bound SQLAlchemy model recursively with values from this dataclass.
        '''
        pass
2
2
59
8
48
3
14
0.08
1
13
4
2
1
0
1
3
62
9
49
15
42
4
32
7
30
14
2
2
14
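to_mapped in the record above walks the DTO recursively, descending into nested DTOs and lists before instantiating the mapped model. A minimal, self-contained sketch of the same recursion over plain dataclasses; the types here are hypothetical, not the library's API:

from dataclasses import dataclass, fields, is_dataclass

def to_plain(obj):
    # Recurse into nested dataclasses and sequences, like MappedDTO.to_mapped.
    if is_dataclass(obj):
        return {f.name: to_plain(getattr(obj, f.name)) for f in fields(obj)}
    if isinstance(obj, (list, tuple)):
        return [to_plain(item) for item in obj]
    return obj

@dataclass
class AddressDTO:
    city: str

@dataclass
class UserDTO:
    name: str
    addresses: list[AddressDTO]

print(to_plain(UserDTO('ada', [AddressDTO('london')])))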
327,929
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.ModelInspector
from strawchemy.graph import Node
from strawchemy.utils import is_type_hint_optional, non_optional_type_hint
from .types import DTOAuto, DTOConfig, DTOFieldConfig, DTOMissing, DTOSkip, DTOUnset, ExcludeFields, IncludeFields, Purpose, PurposeConfig
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable


class ModelInspector(Protocol, Generic[ModelT, ModelFieldT]):

    def field_definitions(self, model: type[Any], dto_config: DTOConfig) -> Iterable[tuple[str, DTOFieldDefinition[ModelT, ModelFieldT]]]: ...

    def id_field_definitions(self, model: type[Any], dto_config: DTOConfig) -> list[tuple[str, DTOFieldDefinition[ModelT, ModelFieldT]]]: ...

    def field_definition(self, model_field: ModelFieldT, dto_config: DTOConfig) -> DTOFieldDefinition[ModelT, ModelFieldT]: ...

    def get_type_hints(self, type_: type[Any], include_extras: bool=True) -> dict[str, Any]: ...

    def relation_model(self, model_field: ModelFieldT) -> type[Any]: ...

    def model_field_type(self, field_definition: DTOFieldDefinition[ModelT, ModelFieldT]) -> Any:
        type_hint = field_definition.type_hint_override if field_definition.has_type_override else field_definition.type_hint
        if get_origin(type_hint) is Annotated:
            return get_args(type_hint)[0]
        return non_optional_type_hint(type_hint)

    def relation_cycle(self, field: DTOFieldDefinition[Any, ModelFieldT], node: Node[Relation[ModelT, Any], None]) -> bool: ...

    def has_default(self, model_field: ModelFieldT) -> bool: ...

    def required(self, model_field: ModelFieldT) -> bool: ...

    def is_foreign_key(self, model_field: ModelFieldT) -> bool: ...

    def is_primary_key(self, model_field: ModelFieldT) -> bool: ...

    def reverse_relation_required(self, model_field: ModelFieldT) -> bool: ...
class ModelInspector(Protocol, Generic[ModelT, ModelFieldT]):
    def field_definitions(self, model: type[Any], dto_config: DTOConfig) -> Iterable[tuple[str, DTOFieldDefinition[ModelT, ModelFieldT]]]:
        pass

    def id_field_definitions(self, model: type[Any], dto_config: DTOConfig) -> list[tuple[str, DTOFieldDefinition[ModelT, ModelFieldT]]]:
        pass

    def field_definition(self, model_field: ModelFieldT, dto_config: DTOConfig) -> DTOFieldDefinition[ModelT, ModelFieldT]:
        pass

    def get_type_hints(self, type_: type[Any], include_extras: bool=True) -> dict[str, Any]:
        pass

    def relation_model(self, model_field: ModelFieldT) -> type[Any]:
        pass

    def model_field_type(self, field_definition: DTOFieldDefinition[ModelT, ModelFieldT]) -> Any:
        pass

    def relation_cycle(self, field: DTOFieldDefinition[Any, ModelFieldT], node: Node[Relation[ModelT, Any], None]) -> bool:
        pass

    def has_default(self, model_field: ModelFieldT) -> bool:
        pass

    def required(self, model_field: ModelFieldT) -> bool:
        pass

    def is_foreign_key(self, model_field: ModelFieldT) -> bool:
        pass

    def is_primary_key(self, model_field: ModelFieldT) -> bool:
        pass

    def reverse_relation_required(self, model_field: ModelFieldT) -> bool:
        pass
13
0
2
0
2
0
1
0
2
12
4
1
12
0
12
36
38
11
27
22
17
0
28
14
15
3
5
1
14
327,930
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.Reference
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable
from typing_extensions import Self, TypeAlias, override
from strawchemy.graph import Node


@dataclass
class Reference(Generic[T, DTOBaseT]):
    name: str
    node: Node[Relation[T, DTOBaseT], None]

    @override
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.name})'
@dataclass
class Reference(Generic[T, DTOBaseT]):
    @override
    def __repr__(self) -> str:
        pass
4
0
2
0
2
0
1
0
1
1
0
0
1
0
1
3
7
1
6
3
3
0
5
2
3
1
1
0
1
327,931
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.Relation
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable
from typing_extensions import Self, TypeAlias, override


@dataclass
class Relation(Generic[T, DTOBaseT]):
    model: type[T] = field(compare=True)
    name: str = field(compare=False)
    dto: Optional[type[DTOBaseT]] = field(default=None, compare=False)
    forward_refs: list[Reference[T, DTOBaseT]] = field(default_factory=list, compare=False)

    @override
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.model.__name__})'
@dataclass
class Relation(Generic[T, DTOBaseT]):
    @override
    def __repr__(self) -> str:
        pass
4
0
2
0
2
0
1
0
1
1
0
0
1
0
1
3
9
1
8
7
5
0
7
6
5
1
1
0
1
327,932
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.ToMappedProtocol
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable


@runtime_checkable
class ToMappedProtocol(Protocol, Generic[ModelT]):

    def to_mapped(self, visitor: Optional[VisitorProtocol[ModelT]]=None, override: Optional[dict[str, Any]]=None, level: int=0) -> Any: ...
@runtime_checkable
class ToMappedProtocol(Protocol, Generic[ModelT]):
    def to_mapped(self, visitor: Optional[VisitorProtocol[ModelT]]=None, override: Optional[dict[str, Any]]=None, level: int=0) -> Any:
        pass
3
0
6
0
6
0
1
0
2
5
1
5
1
0
1
25
7
0
7
7
1
0
3
2
1
1
5
0
1
327,933
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/base.py
strawchemy.dto.base.VisitorProtocol
from typing import TYPE_CHECKING, Annotated, ClassVar, ForwardRef, Generic, Optional, Protocol, TypeVar, Union, cast, get_args, get_origin, get_type_hints, runtime_checkable


class VisitorProtocol(Protocol, Generic[ModelT]):

    def field_value(self, parent: ToMappedProtocol[ModelT], field: DTOFieldDefinition[Any, Any], value: Any, level: int) -> Any: ...

    def model(self, parent: ToMappedProtocol[ModelT], model_cls: type[ModelT], params: dict[str, Any], override: dict[str, Any], level: int) -> ModelT: ...
class VisitorProtocol(Protocol, Generic[ModelT]):
    def field_value(self, parent: ToMappedProtocol[ModelT], field: DTOFieldDefinition[Any, Any], value: Any, level: int) -> Any:
        pass

    def model(self, parent: ToMappedProtocol[ModelT], model_cls: type[ModelT], params: dict[str, Any], override: dict[str, Any], level: int) -> ModelT:
        pass
3
0
6
0
6
0
1
0
2
6
2
1
2
0
2
26
13
1
12
12
2
0
5
3
2
1
5
0
2
327,934
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/exceptions.py
strawchemy.dto.exceptions.DTOError
class DTOError(Exception): ...
class DTOError(Exception): pass
1
0
0
0
0
0
0
0
1
0
0
2
0
0
0
10
1
0
1
1
1
0
2
1
1
0
3
0
0
327,935
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/exceptions.py
strawchemy.dto.exceptions.EmptyDTOError
class EmptyDTOError(DTOError): ...
class EmptyDTOError(DTOError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
10
1
0
1
1
1
0
2
1
1
0
4
0
0
327,936
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/exceptions.py
strawchemy.dto.exceptions.ModelInspectorError
class ModelInspectorError(DTOError): ...
class ModelInspectorError(DTOError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
10
1
0
1
1
1
0
2
1
1
0
4
0
0
327,937
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/inspectors/sqlalchemy.py
strawchemy.dto.inspectors.sqlalchemy.SQLAlchemyInspector
from typing_extensions import TypeIs, override
from sqlalchemy.orm import NO_VALUE, ColumnProperty, DeclarativeBase, Mapped, MappedAsDataclass, MappedSQLExpression, Mapper, QueryableAttribute, RelationshipDirection, RelationshipProperty, registry
from inspect import getmodule, signature
from strawchemy.dto.types import DTOConfig, DTOFieldConfig, DTOMissing, DTOUnset, Purpose
import builtins
from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union, cast, get_args, get_origin, get_type_hints
from strawchemy.dto.base import TYPING_NS, DTOFieldDefinition, ModelInspector, Relation
from dataclasses import MISSING as DATACLASS_MISSING
from sqlalchemy import Column, PrimaryKeyConstraint, Sequence, SQLColumnExpression, Table, UniqueConstraint, event, inspect, orm, sql
from sqlalchemy.dialects import postgresql
from strawchemy.utils import is_type_hint_optional
from strawchemy.constants import GEO_INSTALLED
from strawchemy.dto.exceptions import ModelInspectorError
from dataclasses import Field, fields
import contextlib
from strawchemy.dto.constants import DTO_INFO_KEY


class SQLAlchemyInspector(ModelInspector[DeclarativeBase, QueryableAttribute[Any]]):

    def __init__(self, registries: Optional[list[registry]]=None) -> None:
        """Initialize internal state to keep track of generated DTOs."""
        self._mapped_classes_map: dict[str, type[DeclarativeBase]] = {}
        self._registries: list[registry] = registries or []
        self._model_modules: set[ModuleType] = set()
        self._model_type_hints: dict[type[DeclarativeBase], dict[str, Any]] = {}
        event.listens_for(Mapper, 'after_mapper_constructed')(self._add_registry_listener)

    def _update_mapped_classes(self, mapper: Mapper[Any]) -> None:
        if mapper.registry not in self._registries:
            self._registries.append(mapper.registry)
        self._mapped_classes_map |= self._mapped_classes_from_registry(mapper.registry)

    def _add_registry_listener(self, mapper: Mapper[Any], class_: type[Any]) -> None:
        self._update_mapped_classes(mapper)

    def _mapped_classes_from_registry(self, registry: registry) -> dict[str, type[Any]]:
        return {m.class_.__name__: m.class_ for m in list(registry.mappers)}

    def _localns(self, type_: type[Any]) -> dict[str, Any]:
        """Build namespace for resolving forward refs of the given type.

        Args:
            type_: The type for which to build the namespace

        Returns:
            A dict suitable to pass to `get_type_hints` to resolve forward refs of the given model
        """
        localns: dict[str, Any] = {}
        localns.update(TYPING_NS)
        localns.update(_SQLA_NS)
        localns.update(self._mapped_classes)
        model_module = getmodule(type_)
        if model_module is not None:
            self._model_modules.add(model_module)
        for module in self._model_modules:
            localns.update(vars(module))
        return localns

    @classmethod
    def _dataclass_fields(cls, model: type[MappedAsDataclass]) -> dict[str, Field[Any]]:
        return {f.name: f for f in fields(model)}

    @property
    def _mapped_classes(self) -> dict[str, type[DeclarativeBase]]:
        """Get mapped classes across all added registries.

        Returns:
            A mapping of class name -> SQLAlchemy mapped class.
        """
        if not self._mapped_classes_map:
            for registry in self._registries:
                self._mapped_classes_map.update(self._mapped_classes_from_registry(registry))
        return self._mapped_classes_map

    def _uselist(self, elem: MapperProperty[Any]) -> bool:
        return bool(elem.uselist) if self._is_relationship(elem) else False

    def _is_init(self, model: type[DeclarativeBase], name: str) -> bool:
        if issubclass(model, MappedAsDataclass):
            field = self._dataclass_fields(model).get(name)
            return field.init if field is not None else False
        return True

    @classmethod
    def _is_relationship(cls, elem: Union[Union[MapperProperty[Any], Column[Any]], RelationshipProperty[Any]]) -> TypeIs[RelationshipProperty[Any]]:
        return isinstance(elem, RelationshipProperty)

    @classmethod
    def _is_column(cls, elem: Any) -> TypeIs[Union[ColumnProperty[Any], Column[Any]]]:
        return isinstance(elem, (ColumnProperty, Column))

    @classmethod
    def _column_or_relationship(cls, attribute: MapperProperty[Any]) -> Union[Union[Column[Any], RelationshipProperty[Any]], SQLColumnExpression[Any]]:
        try:
            return attribute.parent.mapper.columns[attribute.key]
        except KeyError:
            return attribute.parent.mapper.relationships[attribute.key]

    @classmethod
    def _defaults(cls, attribute: MapperProperty[Any]) -> tuple[Union[Any, type[DTOMissing]], Union[Callable[..., Any], type[DTOMissing]]]:
        default, default_factory = (DTOMissing, DTOMissing)
        model = attribute.parent.class_
        element = cls._column_or_relationship(attribute)
        if issubclass(model, MappedAsDataclass) and (field := cls._dataclass_fields(model).get(attribute.key)) and (field.default_factory is not DATACLASS_MISSING):
            default_factory = field.default_factory
        default_factory = getattr(element, 'default_factory', DTOMissing) if default_factory is DTOMissing else default_factory
        default = getattr(element, 'default', DTOMissing) if default is DTOMissing else default
        if isinstance(element, Column):
            if default is not DTOMissing and default is not None:
                if default.is_scalar:
                    default = default.arg
                elif default.is_callable:
                    default_callable = default.arg.__func__ if isinstance(default.arg, staticmethod) else default.arg
                    if hasattr(builtins, default_callable.__name__) or 'context' not in signature(default_callable).parameters:

                        def default_factory():
                            return default.arg({})
                elif isinstance(default, Sequence):
                    default = DTOUnset
                else:
                    msg = 'Unexpected default type'
                    raise ValueError(msg)
            elif default is None and (not element.nullable):
                default = DTOMissing
        elif isinstance(element, RelationshipProperty) and default is DTOMissing and element.uselist:
            default_factory = list
        elif default is DTOMissing:
            default = None
        if default_factory is not DTOMissing:
            return (DTOMissing, default_factory)
        return (default, default_factory)

    def _field_config(self, elem: MapperProperty[Any]) -> DTOFieldConfig:
        config = cast('DTOFieldConfig', elem.class_attribute.info.get(DTO_INFO_KEY, DTOFieldConfig()))
        if isinstance(elem, MappedSQLExpression):
            config.purposes = {Purpose.READ}
        return config

    @classmethod
    def _resolve_model_type_hint(cls, type_: type[Any]) -> Any:
        type_hint = type_
        if get_origin(type_hint) is Mapped:
            type_hint, = get_args(type_hint)
        return type_hint

    def _relationship_required(self, prop: RelationshipProperty[Any]) -> bool:
        if prop.direction is RelationshipDirection.MANYTOONE:
            return any((not column.nullable for column in prop.local_columns))
        return False

    def _field_definitions_from_columns(self, model: type[DeclarativeBase], columns: Iterable[Column[Any]], dto_config: DTOConfig) -> list[tuple[str, DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]]:
        mapper = inspect(model)
        type_hints = self.get_type_hints(model)
        return [(column.key, self.field_definition(mapper.attrs[column.key].class_attribute, dto_config, type_hint=type_hints.get(column.key, DTOMissing))) for column in columns]

    @classmethod
    def pk_attributes(cls, mapper: Mapper[Any]) -> list[QueryableAttribute[Any]]:
        return [mapper.attrs[column.key].class_attribute for column in mapper.primary_key]

    @classmethod
    def loaded_attributes(cls, model: DeclarativeBase) -> set[str]:
        return {name for name, attr in inspect(model).attrs.items() if attr.loaded_value is not NO_VALUE}

    @override
    def get_type_hints(self, type_: Any, include_extras: bool=True) -> dict[str, Any]:
        if (type_hints := self._model_type_hints.get(type_)):
            return type_hints
        if issubclass(type_, DeclarativeBase):
            self._update_mapped_classes(inspect(type_))
        type_hints = get_type_hints(type_, localns=self._localns(type_), include_extras=include_extras)
        self._model_type_hints[type_] = type_hints
        return type_hints

    @override
    def field_definition(self, model_field: QueryableAttribute[T], dto_config: DTOConfig, type_hint: Any=DTOMissing) -> DTOFieldDefinition[DeclarativeBase, QueryableAttribute[T]]:
        mapper = model_field.parent.mapper
        relation_model = None
        prop = mapper.attrs[model_field.key]
        elem = prop if isinstance(prop, MappedSQLExpression) else mapper.attrs[model_field.key]
        config = self._field_config(elem)
        if dto_config.exclude_defaults:
            default, default_factory = (DTOMissing, DTOMissing)
        else:
            default, default_factory = self._defaults(elem)
        uselist = self._uselist(elem)
        is_relation = self._is_relationship(elem)
        with contextlib.suppress(ModelInspectorError):
            relation_model = self.relation_model(prop.class_attribute)
        if type_hint is DTOMissing:
            if isinstance(prop, RelationshipProperty):
                type_hint = prop.argument
            elif isinstance(prop, Column):
                type_hint = prop.type.python_type
            elif isinstance(prop, ColumnProperty) and len(prop.columns) == 1:
                type_hint = prop.columns[0].type.python_type
            else:
                type_hint = self.get_type_hints(mapper.class_).get(model_field.key, DTOMissing)
        type_hint = self._resolve_model_type_hint(type_hint)
        if GEO_INSTALLED and (column_prop := mapper.columns.get(model_field.key)) is not None:
            from geoalchemy2 import Geometry
            if isinstance(column_prop.type, Geometry) and column_prop.type.geometry_type is not None and (column_prop.type.geometry_type in _shapely_geometry_map):
                geo_type_hint = _shapely_geometry_map[column_prop.type.geometry_type]
                type_hint = Optional[geo_type_hint] if is_type_hint_optional(type_hint) else geo_type_hint
        return DTOFieldDefinition(type_hint=type_hint, model=mapper.class_, model_field_name=model_field.key, uselist=uselist, config=config, dto_config=dto_config, init=self._is_init(mapper.class_, model_field.key), is_relation=is_relation, default=default, default_factory=default_factory, related_model=relation_model, _model_field=model_field)

    @override
    def field_definitions(self, model: type[DeclarativeBase], dto_config: DTOConfig) -> Generator[tuple[str, DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]]:
        mapper = inspect(model)
        type_hints = self.get_type_hints(model)
        for prop in mapper.attrs:
            mapper_attr = mapper.attrs[prop.key]
            type_hint = type_hints.get(prop.key, DTOMissing)
            yield (prop.key, self.field_definition(mapper_attr.class_attribute, dto_config, type_hint=type_hint))

    @override
    def id_field_definitions(self, model: type[DeclarativeBase], dto_config: DTOConfig) -> list[tuple[str, DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]]:
        mapper = inspect(model)
        return self._field_definitions_from_columns(model, mapper.primary_key, dto_config)

    @override
    def relation_model(self, model_field: QueryableAttribute[Any]) -> type[DeclarativeBase]:
        if self._is_relationship(model_field.property):
            return model_field.property.entity.mapper.class_
        msg = f'{model_field} is not a relationship'
        raise ModelInspectorError(msg)

    @override
    def model_field_type(self, field_definition: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]) -> Any:
        try:
            return field_definition.model_field.type.python_type
        except NotImplementedError:
            return super().model_field_type(field_definition)

    @override
    def relation_cycle(self, field: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]], node: Node[Relation[DeclarativeBase, Any], None]) -> bool:
        if not self._is_relationship(field.model_field.property):
            return False
        parent_relationships: set[RelationshipProperty[Any]] = set()
        for parent in node.iter_parents():
            for relationship in parent.value.model.__mapper__.relationships:
                parent_relationships.add(relationship)
        return any((relationship in parent_relationships for relationship in field.model_field.property._reverse_property))

    @override
    def has_default(self, model_field: QueryableAttribute[Any]) -> bool:
        return any((default is not DTOMissing for default in self._defaults(model_field.property)))

    @override
    def required(self, model_field: QueryableAttribute[Any]) -> bool:
        if self._is_column(model_field.property):
            return any((not column.nullable for column, _ in model_field.property.columns_to_assign))
        if self._is_relationship(model_field.property):
            return self._relationship_required(model_field.property)
        return False

    @override
    def is_foreign_key(self, model_field: QueryableAttribute[Any]) -> bool:
        return self._is_column(model_field.property) and any((column.foreign_keys for column in model_field.property.columns))

    @override
    def is_primary_key(self, model_field: QueryableAttribute[Any]) -> bool:
        return self._is_column(model_field.property) and any((column.primary_key for column in model_field.property.columns))

    @override
    def reverse_relation_required(self, model_field: QueryableAttribute[Any]) -> bool:
        if not self._is_relationship(model_field.property):
            return False
        return any((self._relationship_required(relationship) for relationship in model_field.property._reverse_property))

    @classmethod
    def unique_constraints(cls, model: type[DeclarativeBase]) -> list[ColumnCollectionConstraint]:
        if not isinstance(model.__table__, Table):
            return []
        return [constraint for constraint in model.__table__.constraints if isinstance(constraint, (PrimaryKeyConstraint, UniqueConstraint, postgresql.ExcludeConstraint))]
class SQLAlchemyInspector(ModelInspector[DeclarativeBase, QueryableAttribute[Any]]): def __init__(self, registries: Optional[list[registry]]=None) -> None: '''Initialize internal state to keep track of generated DTOs.''' pass def _update_mapped_classes(self, mapper: Mapper[Any]) -> None: pass def _add_registry_listener(self, mapper: Mapper[Any], class_: type[Any]) -> None: pass def _mapped_classes_from_registry(self, registry: registry) -> dict[str, type[Any]]: pass def _localns(self, type_: type[Any]) -> dict[str, Any]: '''Build namespace for resolving forward refs of the given type. Args: type_: The type for which to build the namespace Returns: A dict suitable to pass to `get_type_hints` to resolve forward refs of the given model ''' pass @classmethod def _dataclass_fields(cls, model: type[MappedAsDataclass]) -> dict[str, Field[Any]]: pass @property def _mapped_classes_from_registry(self, registry: registry) -> dict[str, type[Any]]: '''Get mapped classes across all added registries. Returns: A mapping of class name -> SQLAlchemy mapped class. ''' pass def _uselist(self, elem: MapperProperty[Any]) -> bool: pass def _is_init(self, model: type[DeclarativeBase], name: str) -> bool: pass @classmethod def _is_relationship(cls, elem: Union[Union[MapperProperty[Any], Column[Any]], RelationshipProperty[Any]]) -> TypeIs[RelationshipProperty[Any]]: pass @classmethod def _is_column(cls, elem: Any) -> TypeIs[Union[ColumnProperty[Any], Column[Any]]]: pass @classmethod def _column_or_relationship(cls, attribute: MapperProperty[Any]) -> Union[Union[Column[Any], RelationshipProperty[Any]], SQLColumnExpression[Any]]: pass @classmethod def _defaults(cls, attribute: MapperProperty[Any]) -> tuple[Union[Any, type[DTOMissing]], Union[Callable[..., Any], type[DTOMissing]]]: pass def default_factory(): pass def _field_config(self, elem: MapperProperty[Any]) -> DTOFieldConfig: pass @classmethod def _resolve_model_type_hint(cls, type_: type[Any]) -> Any: pass def _relationship_required(self, prop: RelationshipProperty[Any]) -> bool: pass def _field_definitions_from_columns(self, model: type[DeclarativeBase], columns: Iterable[Column[Any]], dto_config: DTOConfig) -> list[tuple[str, DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]]: pass @classmethod def pk_attributes(cls, mapper: Mapper[Any]) -> list[QueryableAttribute[Any]]: pass @classmethod def loaded_attributes(cls, model: DeclarativeBase) -> set[str]: pass @override def get_type_hints(self, type_: Any, include_extras: bool=True) -> dict[str, Any]: pass @override def field_definition(self, model_field: QueryableAttribute[T], dto_config: DTOConfig, type_hint: Any=DTOMissing) -> DTOFieldDefinition[DeclarativeBase, QueryableAttribute[T]]: pass @override def field_definitions(self, model: type[DeclarativeBase], dto_config: DTOConfig) -> Generator[tuple[str, DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]]: pass @override def id_field_definitions(self, model: type[DeclarativeBase], dto_config: DTOConfig) -> list[tuple[str, DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]]]: pass @override def relation_model(self, model_field: QueryableAttribute[Any]) -> type[DeclarativeBase]: pass @override def model_field_type(self, field_definition: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]) -> Any: pass @override def relation_cycle(self, field: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]], node: Node[Relation[DeclarativeBase, Any], None]) -> bool: pass @override def has_default(self, model_field: 
QueryableAttribute[Any]) -> bool: pass @override def required(self, model_field: QueryableAttribute[Any]) -> bool: pass @override def is_foreign_key(self, model_field: QueryableAttribute[Any]) -> bool: pass @override def is_primary_key(self, model_field: QueryableAttribute[Any]) -> bool: pass @override def reverse_relation_required(self, model_field: QueryableAttribute[Any]) -> bool: pass @classmethod def unique_constraints(cls, model: type[DeclarativeBase]) -> list[ColumnCollectionConstraint]: pass
56
3
9
0
8
1
2
0.08
1
26
9
1
23
4
32
68
338
45
274
116
200
22
177
71
143
15
6
4
79
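The inspector record above centers on three public entry points: `field_definitions`, `id_field_definitions`, and `unique_constraints`. A hedged sketch of how they compose, where the inspector's import path and the `Author` model are assumptions for illustration (only `strawchemy.dto.types` is confirmed by the records below), using only signatures visible in the blob:

```python
# Hedged sketch: enumerate DTO field definitions for a mapped model.
# ASSUMPTION: the inspector's module path; this record does not show it.
from strawchemy.sqlalchemy.inspector import SQLAlchemyInspector  # assumed path
from strawchemy.dto.types import DTOConfig, Purpose

from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase): ...


class Author(Base):  # hypothetical model, not part of the record
    __tablename__ = "author"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str] = mapped_column()


inspector = SQLAlchemyInspector()        # __init__(registries=None) per the skeleton
config = DTOConfig(purpose=Purpose.READ)

# field_definitions yields (field name, DTOFieldDefinition) pairs.
for name, field_def in inspector.field_definitions(Author, config):
    print(name, field_def.type_hint, field_def.is_relation)

# Primary-key columns only, via id_field_definitions.
pk_fields = inspector.id_field_definitions(Author, config)

# Table-level uniqueness constraints (PK, UNIQUE, EXCLUDE), per unique_constraints.
constraints = SQLAlchemyInspector.unique_constraints(Author)
```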
327,938
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.DTOAuto
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, final, get_type_hints @final class DTOAuto: ...
@final class DTOAuto: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
1
1
0
2
1
1
0
0
0
0
327,939
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.DTOConfig
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, final, get_type_hints from dataclasses import dataclass, field from strawchemy.utils import get_annotations import dataclasses @dataclass class DTOConfig: """Control the generated DTO. This class holds configuration settings that influence how a Data Transfer Object (DTO) is generated by the DTO factory. It allows customization of field inclusion/exclusion, optionality, type hints, and field aliasing based on the intended purpose (read, write, etc.) of the DTO. Attributes: purpose: Configure the DTO for "read", "write", or "complete" operations. Determines which fields from the source model are included based on their `DTOFieldConfig`. include: Explicitly include fields from the source model in the generated DTO. Can be a list or set of field names, or the literal "all" to include all fields not explicitly excluded. Defaults to an empty set. exclude: Explicitly exclude fields from the source model. Can be a list or set of field names. Defaults to an empty set. Setting this implicitly sets `include` to "all". partial: If True, makes all fields in the generated DTO optional. Defaults to None. partial_default: The default value assigned to fields when `partial` is True and the field is not provided. Defaults to None. unset_sentinel: A sentinel object used to represent fields that are not set, particularly useful when distinguishing between a field explicitly set to `None` and a field that was not provided at all. Defaults to `DTO_UNSET`. type_overrides: A mapping to override the type annotations for specific fields in the generated DTO. Keys can be field names or types, values are the overriding types. Defaults to an empty dict. annotation_overrides: A dictionary to directly set or override the type annotations for specific fields by name. Defaults to an empty dict. aliases: A mapping of source model field names to their desired names (aliases) in the generated DTO. Defaults to an empty dict. Mutually exclusive with `alias_generator`. alias_generator: A callable that accepts a field name and returns its alias for the generated DTO. Defaults to None. Mutually exclusive with `aliases`. Raises: ValueError: If both `aliases` and `alias_generator` are provided, or if `exclude` is set while `include` is also set to a specific list/set (i.e., not "all" or empty). """ purpose: Purpose 'Configure the DTO for "read" or "write" operations.' include: IncludeFields = field(default_factory=set) 'Explicitly include fields from the generated DTO.' exclude: ExcludeFields = field(default_factory=set) 'Explicitly exclude fields from the generated DTO. Implies `include="all"`.' partial: Optional[bool] = None 'Make all field optional.' 
partial_default: Any = None unset_sentinel: Any = DTOUnset type_overrides: Mapping[Any, Any] = field(default_factory=dict) annotation_overrides: dict[str, Any] = field(default_factory=dict) aliases: Mapping[str, str] = field(default_factory=dict) exclude_defaults: bool = False alias_generator: Optional[Callable[[str], str]] = None scope: Optional[DTOScope] = None exclude_from_scope: bool = False tags: set[str] = field(default_factory=set) def __post_init__(self) -> None: if self.aliases and self.alias_generator is not None: msg = 'You must set `aliases` or `alias_generator`, not both' raise ValueError(msg) if self.include and self.include != 'all' and self.exclude: msg = "When using `exclude` you must set `include='all' or leave it unset`" raise ValueError(msg) if self.exclude: self.include = 'all' def copy_with(self, purpose: Union[Purpose, type[DTOUnset]]=DTOUnset, include: Optional[IncludeFields]=None, exclude: Optional[ExcludeFields]=None, partial: Union[bool, None, type[DTOUnset]]=DTOUnset, unset_sentinel: Union[Any, type[DTOUnset]]=DTOUnset, type_overrides: Union[Mapping[Any, Any], type[DTOUnset]]=DTOUnset, annotation_overrides: Union[dict[str, Any], type[DTOUnset]]=DTOUnset, aliases: Union[Mapping[str, str], type[DTOUnset]]=DTOUnset, exclude_defaults: Union[bool, type[DTOUnset]]=DTOUnset, alias_generator: Union[Callable[[str], str], type[DTOUnset]]=DTOUnset, partial_default: Union[Any, type[DTOUnset]]=DTOUnset, scope: Union[DTOScope, type[DTOUnset]]=DTOUnset, exclude_from_scope: Union[bool, type[DTOUnset]]=DTOUnset, tags: Union[set[str], type[DTOUnset]]=DTOUnset) -> DTOConfig: """Create a copy of the DTOConfig with the specified changes.""" if include is None and exclude is None: include, exclude = (self.include, self.exclude) else: include = include or [] exclude = exclude or [] return DTOConfig(include=include, exclude=exclude, purpose=self.purpose if purpose is DTOUnset else purpose, partial=self.partial if partial is DTOUnset else partial, unset_sentinel=self.unset_sentinel if unset_sentinel is DTOUnset else unset_sentinel, type_overrides=self.type_overrides if type_overrides is DTOUnset else type_overrides, annotation_overrides=self.annotation_overrides if annotation_overrides is DTOUnset else annotation_overrides, aliases=self.aliases if aliases is DTOUnset else aliases, exclude_defaults=self.exclude_defaults if exclude_defaults is DTOUnset else exclude_defaults, alias_generator=self.alias_generator if alias_generator is DTOUnset else alias_generator, partial_default=self.partial_default if partial_default is DTOUnset else partial_default, scope=self.scope if scope is DTOUnset else scope, exclude_from_scope=self.exclude_from_scope if exclude_from_scope is DTOUnset else exclude_from_scope, tags=self.tags if tags is DTOUnset else tags) def with_base_annotations(self, base: type[Any]) -> DTOConfig: """Merge type annotations from a base class into this DTOConfig. Args: base: The base class to extract type annotations from Returns: A new DTOConfig instance with: - Type annotations from the base class merged into annotation_overrides - Updated include set to include all fields if exclude is specified or include was "all" The method handles two cases: 1. When include is "all" or exclude is specified: All fields from the base class are included 2. 
When specific fields are included: Only those fields are added to the include set """ include: set[str] = set(self.include) if self.include != 'all' else set() include_all = self.include == 'all' or self.exclude annotation_overrides: dict[str, Any] = self.annotation_overrides try: base_annotations = get_type_hints(base, include_extras=True) except NameError: base_annotations = get_annotations(base) for name, annotation in base_annotations.items(): if not include_all: include.add(name) annotation_overrides[name] = annotation return dataclasses.replace(self, include='all' if include_all else include, annotation_overrides=annotation_overrides) def alias(self, name: str) -> Optional[str]: if self.aliases: return self.aliases.get(name) if self.alias_generator is not None: return self.alias_generator(name) return None
@dataclass class DTOConfig: '''Control the generated DTO. This class holds configuration settings that influence how a Data Transfer Object (DTO) is generated by the DTO factory. It allows customization of field inclusion/exclusion, optionality, type hints, and field aliasing based on the intended purpose (read, write, etc.) of the DTO. Attributes: purpose: Configure the DTO for "read", "write", or "complete" operations. Determines which fields from the source model are included based on their `DTOFieldConfig`. include: Explicitly include fields from the source model in the generated DTO. Can be a list or set of field names, or the literal "all" to include all fields not explicitly excluded. Defaults to an empty set. exclude: Explicitly exclude fields from the source model. Can be a list or set of field names. Defaults to an empty set. Setting this implicitly sets `include` to "all". partial: If True, makes all fields in the generated DTO optional. Defaults to None. partial_default: The default value assigned to fields when `partial` is True and the field is not provided. Defaults to None. unset_sentinel: A sentinel object used to represent fields that are not set, particularly useful when distinguishing between a field explicitly set to `None` and a field that was not provided at all. Defaults to `DTO_UNSET`. type_overrides: A mapping to override the type annotations for specific fields in the generated DTO. Keys can be field names or types, values are the overriding types. Defaults to an empty dict. annotation_overrides: A dictionary to directly set or override the type annotations for specific fields by name. Defaults to an empty dict. aliases: A mapping of source model field names to their desired names (aliases) in the generated DTO. Defaults to an empty dict. Mutually exclusive with `alias_generator`. alias_generator: A callable that accepts a field name and returns its alias for the generated DTO. Defaults to None. Mutually exclusive with `aliases`. Raises: ValueError: If both `aliases` and `alias_generator` are provided, or if `exclude` is set while `include` is also set to a specific list/set (i.e., not "all" or empty). ''' def __post_init__(self) -> None: pass def copy_with(self, purpose: Union[Purpose, type[DTOUnset]]=DTOUnset, include: Optional[IncludeFields]=None, exclude: Optional[ExcludeFields]=None, partial: Union[bool, None, type[DTOUnset]]=DTOUnset, unset_sentinel: Union[Any, type[DTOUnset]]=DTOUnset, type_overrides: Union[Mapping[Any, Any], type[DTOUnset]]=DTOUnset, annotation_overrides: Union[dict[str, Any], type[DTOUnset]]=DTOUnset, aliases: Union[Mapping[str, str], type[DTOUnset]]=DTOUnset, exclude_defaults: Union[bool, type[DTOUnset]]=DTOUnset, alias_generator: Union[Callable[[str], str], type[DTOUnset]]=DTOUnset, partial_default: Union[Any, type[DTOUnset]]=DTOUnset, scope: Union[DTOScope, type[DTOUnset]]=DTOUnset, exclude_from_scope: Union[bool, type[DTOUnset]]=DTOUnset, tags: Union[set[str], type[DTOUnset]]=DTOUnset) -> DTOConfig: '''Create a copy of the DTOConfig with the specified changes.''' pass def with_base_annotations(self, base: type[Any]) -> DTOConfig: '''Merge type annotations from a base class into this DTOConfig. Args: base: The base class to extract type annotations from Returns: A new DTOConfig instance with: - Type annotations from the base class merged into annotation_overrides - Updated include set to include all fields if exclude is specified or include was "all" The method handles two cases: 1. 
When include is "all" or exclude is specified: All fields from the base class are included 2. When specific fields are included: Only those fields are added to the include set ''' pass def alias(self, name: str) -> Optional[str]: pass
6
3
20
1
16
3
6
0.72
0
11
1
0
4
0
4
4
142
11
76
34
58
55
46
21
41
11
0
2
24
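Two rules encoded in the `DTOConfig` record above are easy to miss in the flattened blob: `__post_init__` rejects combining `aliases` with `alias_generator`, and a non-empty `exclude` silently flips `include` to `"all"`. A minimal sketch, assuming the module is importable as `strawchemy.dto.types` (the module path given in this record):

```python
from strawchemy.dto.types import DTOConfig, Purpose

# A non-empty `exclude` implies include="all" (set in __post_init__).
cfg = DTOConfig(purpose=Purpose.READ, exclude={"password"})
assert cfg.include == "all"

# `aliases` and `alias_generator` are mutually exclusive.
try:
    DTOConfig(purpose=Purpose.WRITE, aliases={"id": "pk"}, alias_generator=str.upper)
except ValueError as exc:
    print(exc)  # You must set `aliases` or `alias_generator`, not both

# copy_with keeps every option not explicitly overridden, using the
# DTOUnset sentinel (see the later record) to detect "not passed".
partial_cfg = cfg.copy_with(partial=True)
assert partial_cfg.partial is True and partial_cfg.exclude == cfg.exclude
```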
327,940
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.DTOFieldConfig
from dataclasses import dataclass, field @dataclass class DTOFieldConfig: """For configuring DTO behavior on SQLAlchemy model fields.""" purposes: set[Purpose] = field(default_factory=lambda: {Purpose.READ, Purpose.WRITE}) default_config: PurposeConfig = field(default_factory=PurposeConfig) configs: dict[Purpose, PurposeConfig] = field(default_factory=dict) def purpose_config(self, dto_config: DTOConfig) -> PurposeConfig: return self.configs.get(dto_config.purpose, self.default_config)
@dataclass class DTOFieldConfig: '''For configuring DTO behavior on SQLAlchemy model fields.''' def purpose_config(self, dto_config: DTOConfig) -> PurposeConfig: pass
3
1
2
0
2
0
1
0.17
0
3
3
0
1
0
1
1
9
2
6
5
4
1
6
5
4
1
0
0
1
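The only behavior worth illustrating in the `DTOFieldConfig` record above is the fallback in `purpose_config`: a per-purpose entry in `configs` wins, otherwise `default_config` is returned. A small sketch under the same import assumption:

```python
from strawchemy.dto.types import DTOConfig, DTOFieldConfig, Purpose, PurposeConfig

field_cfg = DTOFieldConfig(configs={Purpose.READ: PurposeConfig(alias="reader")})

read = field_cfg.purpose_config(DTOConfig(purpose=Purpose.READ))
write = field_cfg.purpose_config(DTOConfig(purpose=Purpose.WRITE))

assert read.alias == "reader"             # per-purpose override found
assert write is field_cfg.default_config  # fallback when no entry exists
```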
327,941
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.DTOMissing
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, final, get_type_hints @final class DTOMissing: """A sentinel type to detect if a parameter is supplied or not when constructing pydantic FieldInfo. """
@final class DTOMissing: '''A sentinel type to detect if a parameter is supplied or not when constructing pydantic FieldInfo. ''' pass
2
1
0
0
0
0
0
3
0
0
0
0
0
0
0
0
5
1
1
1
0
3
1
1
0
0
0
0
0
327,942
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.DTOSkip
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, final, get_type_hints @final class DTOSkip: ...
@final class DTOSkip: pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
1
1
0
2
1
1
0
0
0
0
327,943
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.DTOUnset
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, final, get_type_hints from typing_extensions import TypeAlias, override @final class DTOUnset: @override def __str__(self) -> str: return '' @override def __repr__(self) -> str: return 'DTOUnset' def __bool__(self) -> bool: return False
@final class DTOUnset: @override def __str__(self) -> str: pass @override def __repr__(self) -> str: pass def __bool__(self) -> bool: pass
7
0
2
0
2
0
1
0
0
2
0
0
3
0
3
3
11
2
9
6
3
0
7
4
3
1
0
0
3
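`DTOUnset` above is the falsy, empty-printing sentinel that `DTOConfig.copy_with` uses to tell "argument not passed" apart from an explicit `None`. A quick sketch of the pattern; the `pick` helper is hypothetical:

```python
from strawchemy.dto.types import DTOUnset

# Falsy, empty __str__, named __repr__, per the dunders in the record.
sentinel = DTOUnset()
assert not sentinel and str(sentinel) == "" and repr(sentinel) == "DTOUnset"

# Mirroring copy_with: the class itself marks "not provided", so an
# explicit None still counts as a real value.
def pick(value=DTOUnset, fallback="default"):  # hypothetical helper
    return fallback if value is DTOUnset else value

assert pick() == "default"
assert pick(None) is None  # explicit None is preserved
```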
327,944
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.Purpose
from enum import Enum class Purpose(str, Enum): """For identifying the purpose of a DTO to the factory. The factory will exclude fields marked as private or read-only on the domain model depending on the purpose of the DTO. Example: ```python ReadDTO = dto.factory("AuthorReadDTO", Author, purpose=dto.Purpose.READ) ``` """ READ = 'read' 'To mark a DTO that is to be used to serialize data returned to\n clients.' WRITE = 'write' 'To mark a DTO that is to deserialize and validate data provided by\n clients.' COMPLETE = 'complete' 'To mark a DTO that is to deserialize and validate data provided by\n clients. Fields marked as TO_COMPLETE must not be null.'
class Purpose(str, Enum): '''For identifying the purpose of a DTO to the factory. The factory will exclude fields marked as private or read-only on the domain model depending on the purpose of the DTO. Example: ```python ReadDTO = dto.factory("AuthorReadDTO", Author, purpose=dto.Purpose.READ) ``` ''' pass
1
1
0
0
0
0
0
3.5
2
0
0
0
0
0
0
115
21
3
4
4
3
14
4
4
3
0
4
0
0
327,945
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/dto/types.py
strawchemy.dto.types.PurposeConfig
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, final, get_type_hints from dataclasses import dataclass, field @dataclass class PurposeConfig: """Mark the field as read-only, or private.""" type_override: Optional[Any] = DTOMissing validator: Optional[Callable[[Any], Any]] = None 'Single argument callables that are defined on the DTO as validators for the field.' alias: Optional[str] = None 'Customize name of generated DTO field.' partial: Optional[bool] = None
@dataclass class PurposeConfig: '''Mark the field as read-only, or private.''' pass
2
1
0
0
0
0
0
0.6
0
0
0
0
0
0
0
0
9
1
5
5
4
3
5
5
4
0
0
0
0
327,946
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/exceptions.py
strawchemy.exceptions.StrawchemyError
class StrawchemyError(Exception): ...
class StrawchemyError(Exception): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
10
1
0
1
1
1
0
2
1
1
0
3
0
0
327,947
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/graph.py
strawchemy.graph.GraphError
class GraphError(Exception): ...
class GraphError(Exception): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
10
1
0
1
1
1
0
2
1
1
0
3
0
0
327,948
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/graph.py
strawchemy.graph.GraphMetadata
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union, overload @dataclass class GraphMetadata(Generic[T]): metadata: T count: int = 0
@dataclass class GraphMetadata(Generic[T]): pass
2
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
3
0
3
2
2
0
3
2
2
0
1
0
0
327,949
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/graph.py
strawchemy.graph.Node
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union, overload from collections import deque import dataclasses import sys from typing_extensions import Self, TypeAlias, override @dataclass class Node(Generic[NodeValueT, NodeMetadataT]): """Very minimalist implementation of a direct graph.""" value: NodeValueT node_metadata: Optional[NodeMetadata[NodeMetadataT]] = None parent: Optional[Node[NodeValueT, NodeMetadataT]] = None insert_order: int = field(default_factory=int) children: list[Node[NodeValueT, NodeMetadataT]] = field(default_factory=list) graph_metadata: GraphMetadata[Any] = field(default_factory=lambda: GraphMetadata(metadata={})) _root: Node[NodeValueT, NodeMetadataT] = field(init=False) _level: int = field(init=False, default=0) def __post_init__(self) -> None: """Initialize node level and root after instance creation. This method is automatically called after the dataclass is initialized. It sets the node's level based on its parent's level (if it has a parent) and establishes the root node reference. """ if self.parent: self._level = self.parent.level + 1 self._root = self.parent.root else: self._root = self def _iter(self, search_mode: IterationMode) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: """Internal iterator that yields nodes based on the specified search mode. Args: search_mode: The iteration strategy to use ('depth_first' or 'breadth_first') Yields: Nodes in the order specified by search_mode """ generator = self.iter_depth_first if search_mode == 'depth_first' else self.iter_breadth_first yield from generator() def _new(self, value: NodeValueT, metadata: Optional[NodeMetadata[Any]]=None, parent: Optional[Node[NodeValueT, NodeMetadataT]]=None) -> Self: """Create a new node instance with the same graph metadata. Args: value: The value for the new node metadata: Optional metadata for the new node parent: Optional parent node reference Returns: A new node instance with incremented insert order """ node = self.__class__(value, parent=parent, graph_metadata=self.graph_metadata, insert_order=self.graph_metadata.count + 1) if metadata is not None: node.node_metadata = metadata return node @classmethod def _node_hash_identity(cls, node: Node[NodeValueT, NodeMetadataT]) -> Hashable: return tuple((parent.value for parent in node.path_from_root())) def _hash_identity(self) -> Hashable: return (self._node_hash_identity(self.root), self._node_hash_identity(self)) def _hash(self) -> int: return hash(self._hash_identity()) % 2 ** sys.hash_info.width @property def metadata(self) -> NodeMetadata[NodeMetadataT]: if self.node_metadata is None: msg = 'Node has no metadata' raise GraphError(msg) return self.node_metadata @property def root(self) -> Node[NodeValueT, NodeMetadataT]: """Get the root node of the graph. Returns: The root node of the graph. If this node is the root, returns self. """ return self._root @property def is_root(self) -> bool: """Return True if this node does not have a parent.""" return self.parent is None @property def level(self) -> int: """Get the level of this node in the graph. The root node is at level 0, its children at level 1, etc. 
Returns: The level of this node in the graph hierarchy """ return self._level @classmethod def match_nodes(cls, left: Node[NodeValueT, NodeMetadataT], right: Node[NodeValueT, NodeMetadataT], match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]) -> bool: """Compare two nodes based on a matching condition. Args: left: First node to compare right: Second node to compare match_on: Matching condition. Can be: - A callable taking two nodes and returning a boolean - 'node_identity': Compare node references (is) - 'value_equality': Compare node values (==) - 'value_identity': Compare node value references (is) Returns: True if nodes match according to the condition, False otherwise """ if callable(match_on): return match_on(left, right) if match_on == 'node_identity': return left is right if match_on == 'value_equality': return left.value == right.value return left.value is right.value def insert_child(self, value: NodeValueT, metadata: Optional[NodeMetadata[Any]]=None) -> Self: """Add a new child with the given value to this node. Args: value: The value with which to create the node metadata: node metadata key: node key Returns: The newly created child """ node = self._new(value=value, metadata=metadata, parent=self) return self._update_new_child(node) def insert_node(self, child: NodeT) -> NodeT: """Insert an existing node as a child of this node. Creates a copy of the given node with updated graph metadata, parent reference, and insert order, then adds it to this node's children. Args: child: The node to insert as a child Returns: A copy of the inserted node with updated metadata """ copy = dataclasses.replace(child, graph_metadata=self.graph_metadata, parent=self, insert_order=self.graph_metadata.count + 1) return self._update_new_child(copy) def _update_new_child(self, child: NodeT) -> NodeT: self.children.append(child) self.graph_metadata.count += 1 return child def upsert_child(self, value: NodeValueT, metadata: Optional[NodeMetadata[Any]]=None, match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]='node_identity') -> tuple[Node[NodeValueT, NodeMetadataT], bool]: """Insert a new child node if no matching child exists, otherwise return the existing one. Args: value: The value for the new node metadata: Optional metadata for the new node match_on: The condition used to match existing children. Can be either a predefined matching strategy ('value_identity', 'value_equality', 'node_identity') or a custom function that takes two nodes and returns a boolean. Returns: A tuple containing: - The matched or newly created child node - A boolean indicating whether a new node was created (True) or an existing one was found (False) """ new_node = self._new(value=value, metadata=metadata) if (child := next((child for child in self.children if self.match_nodes(child, new_node, match_on)), None)): return (child, False) return (self.insert_child(value, metadata), True) def iter_parents(self) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: """Iterate over node parents until reaching root node. Yields: Parent nodes """ if self.parent: yield self.parent yield from self.parent.iter_parents() def iter_depth_first(self) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: """Iterate over children all in this subtree. 
Yields: Children nodes """ for child in self.children: yield child yield from child.iter_depth_first() def iter_breadth_first(self) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: """Iterate over all nodes in this subtree using breadth-first traversal. In breadth-first traversal, all nodes at the current level are visited before moving to nodes at the next level. Yields: Nodes in breadth-first order """ queue: deque[Node[NodeValueT, NodeMetadataT]] = deque(self.children) while queue: child = queue.popleft() yield child queue.extend(child.children) def leaves(self, iteration_mode: IterationMode='depth_first') -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: """Iterate over all leaf nodes in the subtree. A leaf node is a node that has no children. Args: iteration_mode: The traversal strategy to use ('depth_first' or 'breadth_first') Yields: Leaf nodes in the order specified by iteration_mode """ for child in self._iter(iteration_mode): if not child.children: yield child def path_from_root(self) -> list[Node[NodeValueT, NodeMetadataT]]: """Get the path from the root node to this node. Returns: A list of nodes representing the path from root to this node, including both the root and this node """ return [*reversed(list(self.iter_parents())), self] @classmethod def common_path(cls, left: list[Node[NodeValueT, NodeMetadataT]], right: list[Node[NodeValueT, NodeMetadataT]], match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]='node_identity') -> list[Node[NodeValueT, NodeMetadataT]]: """Find the common path between two lists of nodes. Compares nodes at the same positions in both lists using the specified matching condition. Stops at the first non-matching pair of nodes. Args: left: First list of nodes right: Second list of nodes match_on: The condition used to match nodes. Can be either a predefined matching strategy ('value_identity', 'value_equality', 'node_identity') or a custom function that takes two nodes and returns a boolean. Returns: A list of nodes that form the common path between the two input lists """ common: list[Node[NodeValueT, NodeMetadataT]] = [] longest, shortest = (left, right) if len(left) > len(right) else (right, left) if len(shortest) == 0: return longest for i, longest_value in enumerate(longest): if i >= len(shortest): break if cls.match_nodes(longest_value, shortest[i], match_on): common.append(longest_value) return common def copy(self) -> Self: """Create a deep copy of this node and its subtree. Creates a new node with the same value, metadata, and graph metadata, then recursively copies all children. Returns: A new node that is a deep copy of this node and its subtree """ node = dataclasses.replace(self, children=[]) for child in self.children: node.insert_node(child.copy()) return node @overload def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True]) -> Node[NodeValueT, NodeMetadataT]: ... @overload def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[False]) -> Optional[Node[NodeValueT, NodeMetadataT]]: ... @overload def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: bool=False) -> Optional[Node[NodeValueT, NodeMetadataT]]: ... def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: bool=False) -> Optional[Node[NodeValueT, NodeMetadataT]]: """Find the first parent node that satisfies a given condition. 
Args: func: A function that takes a node and returns True if it matches the search criteria strict: If True, raises GraphError when no matching parent is found Returns: The first parent node for which func returns True, or None if no match is found and strict is False Raises: GraphError: If strict is True and no matching parent is found """ for parent in self.iter_parents(): if func(parent): return parent if strict: msg = 'Parent not found' raise GraphError(msg) return None @overload def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True], iteration_mode: IterationMode='depth_first') -> Node[NodeValueT, NodeMetadataT]: ... @overload def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[False], iteration_mode: IterationMode='depth_first') -> Optional[Node[NodeValueT, NodeMetadataT]]: ... @overload def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: bool=False, iteration_mode: IterationMode='depth_first') -> Optional[Node[NodeValueT, NodeMetadataT]]: ... def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: bool=False, iteration_mode: IterationMode='depth_first') -> Optional[Node[NodeValueT, NodeMetadataT]]: """Find the first child node that satisfies a given condition. Args: func: A function that takes a node and returns True if it matches the search criteria strict: If True, raises GraphError when no matching child is found iteration_mode: The traversal strategy to use ('depth_first' or 'breadth_first') Returns: The first child node for which func returns True, or None if no match is found and strict is False Raises: GraphError: If strict is True and no matching child is found """ for child in self._iter(iteration_mode): if func(child): return child if strict: msg = 'Child not found' raise GraphError(msg) return None def merge_same_children(self, match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]) -> Self: """Create a new node by merging children that match according to the given condition. This method creates a copy of the current node and merges its children that match according to the match_on parameter. For each child, if there are existing children that match it, they are merged together using the merge_trees function. Args: match_on: The condition used to determine if two nodes match. Can be either a predefined matching strategy ('value_identity', 'value_equality', 'node_identity') or a custom function that takes two nodes and returns a boolean. Returns: A new node with merged children. 
""" node = dataclasses.replace(self, children=[]) for child in self.children: child_copy = child.copy() existing_children = [child for child in node.children if child.match_nodes(child, child_copy, match_on)] for existing_child in existing_children: child_copy = merge_trees(existing_child, child_copy, match_on) node.insert_node(child_copy) return node def __gt__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: return self.insert_order > other.insert_order def __lt__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: return self.insert_order < other.insert_order def __le__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: return self.insert_order <= other.insert_order def __ge__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: return self.insert_order >= other.insert_order @override def __hash__(self) -> int: return self._hash() @override def __eq__(self, other: object) -> bool: return hash(self) == hash(other) @override def __ne__(self, other: object) -> bool: return hash(self) != hash(other)
@dataclass class Node(Generic[NodeValueT, NodeMetadataT]): '''Very minimalist implementation of a direct graph.''' def __post_init__(self) -> None: '''Initialize node level and root after instance creation. This method is automatically called after the dataclass is initialized. It sets the node's level based on its parent's level (if it has a parent) and establishes the root node reference. ''' pass def _iter(self, search_mode: IterationMode) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: '''Internal iterator that yields nodes based on the specified search mode. Args: search_mode: The iteration strategy to use ('depth_first' or 'breadth_first') Yields: Nodes in the order specified by search_mode ''' pass def _new(self, value: NodeValueT, metadata: Optional[NodeMetadata[Any]]=None, parent: Optional[Node[NodeValueT, NodeMetadataT]]=None) -> Self: '''Create a new node instance with the same graph metadata. Args: value: The value for the new node metadata: Optional metadata for the new node parent: Optional parent node reference Returns: A new node instance with incremented insert order ''' pass @classmethod def _node_hash_identity(cls, node: Node[NodeValueT, NodeMetadataT]) -> Hashable: pass def _hash_identity(self) -> Hashable: pass def _hash_identity(self) -> Hashable: pass @property def metadata(self) -> NodeMetadata[NodeMetadataT]: pass @property def root(self) -> Node[NodeValueT, NodeMetadataT]: '''Get the root node of the graph. Returns: The root node of the graph. If this node is the root, returns self. ''' pass @property def is_root(self) -> bool: '''Return True if this node does not have a parent.''' pass @property def level(self) -> int: '''Get the level of this node in the graph. The root node is at level 0, its children at level 1, etc. Returns: The level of this node in the graph hierarchy ''' pass @classmethod def match_nodes(cls, left: Node[NodeValueT, NodeMetadataT], right: Node[NodeValueT, NodeMetadataT], match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]) -> bool: '''Compare two nodes based on a matching condition. Args: left: First node to compare right: Second node to compare match_on: Matching condition. Can be: - A callable taking two nodes and returning a boolean - 'node_identity': Compare node references (is) - 'value_equality': Compare node values (==) - 'value_identity': Compare node value references (is) Returns: True if nodes match according to the condition, False otherwise ''' pass def insert_child(self, value: NodeValueT, metadata: Optional[NodeMetadata[Any]]=None) -> Self: '''Add a new child with the given value to this node. Args: value: The value with which to create the node metadata: node metadata key: node key Returns: The newly created child ''' pass def insert_node(self, child: NodeT) -> NodeT: '''Insert an existing node as a child of this node. Creates a copy of the given node with updated graph metadata, parent reference, and insert order, then adds it to this node's children. 
Args: child: The node to insert as a child Returns: A copy of the inserted node with updated metadata ''' pass def _update_new_child(self, child: NodeT) -> NodeT: pass def upsert_child(self, value: NodeValueT, metadata: Optional[NodeMetadata[Any]]=None, match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]='node_identity') -> tuple[Node[NodeValueT, NodeMetadataT], bool]: '''Insert a new child node if no matching child exists, otherwise return the existing one. Args: value: The value for the new node metadata: Optional metadata for the new node match_on: The condition used to match existing children. Can be either a predefined matching strategy ('value_identity', 'value_equality', 'node_identity') or a custom function that takes two nodes and returns a boolean. Returns: A tuple containing: - The matched or newly created child node - A boolean indicating whether a new node was created (True) or an existing one was found (False) ''' pass def iter_parents(self) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: '''Iterate over node parents until reaching root node. Yields: Parent nodes ''' pass def iter_depth_first(self) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: '''Iterate over children all in this subtree. Yields: Children nodes ''' pass def iter_breadth_first(self) -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: '''Iterate over all nodes in this subtree using breadth-first traversal. In breadth-first traversal, all nodes at the current level are visited before moving to nodes at the next level. Yields: Nodes in breadth-first order ''' pass def leaves(self, iteration_mode: IterationMode='depth_first') -> Generator[Node[NodeValueT, NodeMetadataT], None, None]: '''Iterate over all leaf nodes in the subtree. A leaf node is a node that has no children. Args: iteration_mode: The traversal strategy to use ('depth_first' or 'breadth_first') Yields: Leaf nodes in the order specified by iteration_mode ''' pass def path_from_root(self) -> list[Node[NodeValueT, NodeMetadataT]]: '''Get the path from the root node to this node. Returns: A list of nodes representing the path from root to this node, including both the root and this node ''' pass @classmethod def common_path(cls, left: list[Node[NodeValueT, NodeMetadataT]], right: list[Node[NodeValueT, NodeMetadataT]], match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]='node_identity') -> list[Node[NodeValueT, NodeMetadataT]]: '''Find the common path between two lists of nodes. Compares nodes at the same positions in both lists using the specified matching condition. Stops at the first non-matching pair of nodes. Args: left: First list of nodes right: Second list of nodes match_on: The condition used to match nodes. Can be either a predefined matching strategy ('value_identity', 'value_equality', 'node_identity') or a custom function that takes two nodes and returns a boolean. Returns: A list of nodes that form the common path between the two input lists ''' pass def copy(self) -> Self: '''Create a deep copy of this node and its subtree. Creates a new node with the same value, metadata, and graph metadata, then recursively copies all children. 
Returns: A new node that is a deep copy of this node and its subtree ''' pass @overload def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True]) -> Node[NodeValueT, NodeMetadataT]: pass @overload def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True]) -> Node[NodeValueT, NodeMetadataT]: pass @overload def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True]) -> Node[NodeValueT, NodeMetadataT]: pass def find_parent(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True]) -> Node[NodeValueT, NodeMetadataT]: '''Find the first parent node that satisfies a given condition. Args: func: A function that takes a node and returns True if it matches the search criteria strict: If True, raises GraphError when no matching parent is found Returns: The first parent node for which func returns True, or None if no match is found and strict is False Raises: GraphError: If strict is True and no matching parent is found ''' pass @overload def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True], iteration_mode: IterationMode='depth_first') -> Node[NodeValueT, NodeMetadataT]: pass @overload def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True], iteration_mode: IterationMode='depth_first') -> Node[NodeValueT, NodeMetadataT]: pass @overload def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True], iteration_mode: IterationMode='depth_first') -> Node[NodeValueT, NodeMetadataT]: pass def find_child(self, func: Callable[[Node[NodeValueT, NodeMetadataT]], bool], strict: Literal[True], iteration_mode: IterationMode='depth_first') -> Node[NodeValueT, NodeMetadataT]: '''Find the first child node that satisfies a given condition. Args: func: A function that takes a node and returns True if it matches the search criteria strict: If True, raises GraphError when no matching child is found iteration_mode: The traversal strategy to use ('depth_first' or 'breadth_first') Returns: The first child node for which func returns True, or None if no match is found and strict is False Raises: GraphError: If strict is True and no matching child is found ''' pass def merge_same_children(self, match_on: Union[MatchOn, Callable[[Node[NodeValueT, NodeMetadataT], Node[NodeValueT, NodeMetadataT]], bool]]) -> Self: '''Create a new node by merging children that match according to the given condition. This method creates a copy of the current node and merges its children that match according to the match_on parameter. For each child, if there are existing children that match it, they are merged together using the merge_trees function. Args: match_on: The condition used to determine if two nodes match. Can be either a predefined matching strategy ('value_identity', 'value_equality', 'node_identity') or a custom function that takes two nodes and returns a boolean. Returns: A new node with merged children. ''' pass def __gt__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: pass def __lt__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: pass def __le__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: pass def __ge__(self, other: Node[NodeValueT, NodeMetadataT]) -> bool: pass @override def __hash__(self) -> int: pass @override def __eq__(self, other: object) -> bool: pass @override def __ne__(self, other: object) -> bool: pass
56
21
10
1
5
4
2
0.67
1
14
3
1
35
0
38
38
453
79
224
144
118
150
149
70
110
6
1
2
65
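The `Node` record above packs several traversal primitives into one dataclass; a usage sketch pins down the ordering semantics. Assuming the module is importable as `strawchemy.graph` (per the record's file path):

```python
from strawchemy.graph import Node

root = Node("root")
a = root.insert_child("a")
b = root.insert_child("b")
a1 = a.insert_child("a1")

# Depth-first finishes a subtree before moving to siblings;
# breadth-first goes level by level.
assert [n.value for n in root.iter_depth_first()] == ["a", "a1", "b"]
assert [n.value for n in root.iter_breadth_first()] == ["a", "b", "a1"]

# leaves() yields childless nodes; path_from_root includes both endpoints.
assert [n.value for n in root.leaves()] == ["a1", "b"]
assert [n.value for n in a1.path_from_root()] == ["root", "a", "a1"]

# upsert_child with value equality reuses the matching child.
node, created = root.upsert_child("a", match_on="value_equality")
assert node is a and created is False
```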
327,950
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/graph.py
strawchemy.graph.NodeMetadata
from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union, overload from dataclasses import dataclass, field @dataclass class NodeMetadata(Generic[T]): data: T
@dataclass class NodeMetadata(Generic[T]): pass
2
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
2
0
2
1
1
0
2
1
1
0
1
0
0
327,951
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/mapper.py
strawchemy.mapper.Strawchemy
import dataclasses from .strawberry._registry import StrawberryRegistry from strawchemy.strawberry.factories.enum import EnumDTOBackend, UpsertConflictFieldsEnumDTOBackend from .strawberry.factories.inputs import AggregateFilterDTOFactory, BooleanFilterDTOFactory from strawchemy.strawberry.factories.aggregations import EnumDTOFactory from .strawberry._field import StrawchemyCreateMutationField, StrawchemyDeleteMutationField, StrawchemyField, StrawchemyUpdateMutationField, StrawchemyUpsertMutationField from .dto.backend.strawberry import StrawberrryDTOBackend from strawberry.schema.config import StrawberryConfig from strawberry.annotation import StrawberryAnnotation from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union, overload from functools import cached_property, partial from .types import DefaultOffsetPagination from .strawberry.factories.types import DistinctOnFieldsDTOFactory, InputFactory, OrderByDTOFactory, RootAggregateTypeDTOFactory, TypeDTOFactory, UpsertConflictFieldsDTOFactory from .strawberry.dto import BooleanFilterDTO, EnumDTO, MappedStrawberryGraphQLDTO, OrderByDTO, OrderByEnum from .config.base import StrawchemyConfig class Strawchemy: """Main entry point for integrating SQLAlchemy models with Strawberry GraphQL. This class provides a cohesive interface to generate Strawberry GraphQL types, inputs, filters, and fields based on SQLAlchemy models. It manages configuration, type registration, and various factories for DTO generation. Attributes: config (StrawchemyConfig): The configuration object for Strawchemy. registry (StrawberryRegistry): The registry for Strawberry types. filter: Factory for creating boolean filter input types. aggregate_filter: Factory for creating aggregate filter input types. distinct_on: Decorator for creating distinct_on enum types. input: Factory for creating general input types. create_input: Factory for creating input types for create mutations. pk_update_input: Factory for creating input types for update-by-PK mutations. filter_update_input: Factory for creating input types for update-by-filter mutations. order: Factory for creating order_by input types. type: Factory for creating Strawberry output types. aggregate: Factory for creating aggregation root types. upsert_update_fields: Factory for creating enum DTOs for upsert update fields. upsert_conflict_fields: Factory for creating enum DTOs for upsert conflict fields. pydantic (PydanticMapper): A mapper for generating Pydantic models. """ def __init__(self, config: Union[StrawchemyConfig, SupportedDialect], strawberry_config: Optional[StrawberryConfig]=None) -> None: """Initializes the Strawchemy instance. Sets up the configuration, registry, and various DTO factories required for type and field generation. Args: config: A StrawchemyConfig instance or a supported dialect string (e.g., "postgresql", "mysql") to initialize a default config. strawberry_config: A StrawberryConfig instance to initialize the registry. If not provided, a default StrawberryConfig will be used. 
""" self.config = StrawchemyConfig(config) if isinstance(config, str) else config self.registry = StrawberryRegistry(strawberry_config or StrawberryConfig()) strawberry_backend = StrawberrryDTOBackend(MappedStrawberryGraphQLDTO) enum_backend = EnumDTOBackend(self.config.auto_snake_case) upsert_conflict_fields_enum_backend = UpsertConflictFieldsEnumDTOBackend(self.config.inspector, self.config.auto_snake_case) self._aggregate_filter_factory = AggregateFilterDTOFactory(self) self._order_by_factory = OrderByDTOFactory(self) self._distinct_on_enum_factory = DistinctOnFieldsDTOFactory(self.config.inspector) self._type_factory = TypeDTOFactory(self, strawberry_backend, order_by_factory=self._order_by_factory) self._input_factory = InputFactory(self, strawberry_backend) self._aggregation_factory = RootAggregateTypeDTOFactory(self, strawberry_backend, type_factory=self._type_factory) self._enum_factory = EnumDTOFactory(self.config.inspector, enum_backend) self._filter_factory = BooleanFilterDTOFactory(self, aggregate_filter_factory=self._aggregate_filter_factory) self._upsert_conflict_factory = UpsertConflictFieldsDTOFactory(self.config.inspector, upsert_conflict_fields_enum_backend) self.filter = self._filter_factory.input self.aggregate_filter = partial(self._aggregate_filter_factory.input, mode='aggregate_filter') self.distinct_on = self._distinct_on_enum_factory.decorator self.input = self._input_factory.input self.create_input = partial(self._input_factory.input, mode='create_input') self.pk_update_input = partial(self._input_factory.input, mode='update_by_pk_input') self.filter_update_input = partial(self._input_factory.input, mode='update_by_filter_input') self.order = partial(self._order_by_factory.input, mode='order_by') self.type = self._type_factory.type self.aggregate = partial(self._aggregation_factory.type, mode='aggregate_type') self.upsert_update_fields = self._enum_factory.input self.upsert_conflict_fields = self._upsert_conflict_factory.input self.registry.register_enum(OrderByEnum, 'OrderByEnum') def _annotation_namespace(self) -> dict[str, Any]: """Provides the namespace for Strawberry annotations. Combines the registry's 'object' namespace with internal Strawchemy types. Returns: A dictionary representing the annotation namespace. """ return self.registry.namespace('object') | _TYPES_NS @cached_property def pydantic(self) -> PydanticMapper: """Provides access to a PydanticMapper instance. This mapper is used for generating Pydantic models corresponding to the SQLAlchemy models and Strawberry types. Returns: An instance of PydanticMapper. 
""" from .validation.pydantic import PydanticMapper return PydanticMapper(self) @overload def field(self, resolver: _RESOLVER_TYPE[Any], *, filter_input: Optional[type[BooleanFilterDTO]]=None, order_by: Optional[type[OrderByDTO]]=None, distinct_on: Optional[type[EnumDTO]]=None, pagination: Optional[Union[bool, DefaultOffsetPagination]]=None, arguments: Optional[list[StrawberryArgument]]=None, id_field_name: Optional[str]=None, root_aggregations: bool=False, filter_statement: Optional[FilterStatementCallable]=None, execution_options: Optional[dict[str, Any]]=None, query_hook: Optional[Union[QueryHook[Any], Sequence[QueryHook[Any]]]]=None, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, root_field: bool=True) -> StrawchemyField: ... @overload def field(self, *, filter_input: Optional[type[BooleanFilterDTO]]=None, order_by: Optional[type[OrderByDTO]]=None, distinct_on: Optional[type[EnumDTO]]=None, pagination: Optional[Union[bool, DefaultOffsetPagination]]=None, arguments: Optional[list[StrawberryArgument]]=None, id_field_name: Optional[str]=None, root_aggregations: bool=False, filter_statement: Optional[FilterStatementCallable]=None, execution_options: Optional[dict[str, Any]]=None, query_hook: Optional[Union[QueryHookCallable[Any], Sequence[QueryHookCallable[Any]]]]=None, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, root_field: bool=True) -> Any: ... def field(self, resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, filter_input: Optional[type[BooleanFilterDTO]]=None, order_by: Optional[type[OrderByDTO]]=None, distinct_on: Optional[type[EnumDTO]]=None, pagination: Optional[Union[bool, DefaultOffsetPagination]]=None, arguments: Optional[list[StrawberryArgument]]=None, id_field_name: Optional[str]=None, root_aggregations: bool=False, filter_statement: Optional[FilterStatementCallable]=None, execution_options: Optional[dict[str, Any]]=None, query_hook: Optional[Union[QueryHookCallable[Any], Sequence[QueryHookCallable[Any]]]]=None, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, root_field: bool=True) -> Any: """Creates a Strawberry GraphQL field with enhanced SQLAlchemy capabilities. 
This method extends the standard Strawberry field creation by integrating SQLAlchemy-specific features like automatic filtering, ordering, pagination, and aggregations based on SQLAlchemy models. Args: resolver: The resolver function for the field. If not provided, Strawchemy will attempt to generate one based on the model. filter_input: The input type for filtering results. order_by: The input type for ordering results. distinct_on: The enum type for 'distinct on' clauses (PostgreSQL). pagination: Enables pagination for the field. Can be True for default offset pagination or a DefaultOffsetPagination instance for customization. arguments: A list of additional StrawberryArgument instances for the field. id_field_name: The name of the ID field, used for certain operations. root_aggregations: If True, enables root-level aggregations for the field. filter_statement: A callable to generate a filter statement for the query. execution_options: SQLAlchemy execution options for the query. query_hook: A callable or sequence of callables to modify the SQLAlchemy query. repository_type: A custom repository class for data fetching logic. name: The name of the GraphQL field. description: The description of the GraphQL field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field. default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL type of the field. If not provided, it's inferred. extensions: A list of Strawberry FieldExtensions. root_field: Indicates if this is a root-level field. Returns: A StrawchemyField instance, which is a specialized StrawberryField. 
""" namespace = self._annotation_namespace() type_annotation = StrawberryAnnotation.from_annotation(graphql_type, namespace) if graphql_type else None repository_type_ = repository_type if repository_type is not None else self.config.repository_type execution_options_ = execution_options if execution_options is not None else self.config.execution_options pagination = DefaultOffsetPagination(limit=self.config.pagination_default_limit) if pagination is True else pagination if pagination is None: pagination = self.config.pagination id_field_name = id_field_name or self.config.default_id_field_name field = StrawchemyField(config=self.config, repository_type=repository_type_, root_field=root_field, filter_statement=filter_statement, execution_options=execution_options_, filter_type=filter_input, order_by=order_by, pagination=pagination, id_field_name=id_field_name, distinct_on=distinct_on, root_aggregations=root_aggregations, query_hook=query_hook, python_name=None, graphql_name=name, type_annotation=type_annotation, is_subscription=False, permission_classes=permission_classes or [], deprecation_reason=deprecation_reason, default=default, default_factory=default_factory, metadata=metadata, directives=directives, extensions=extensions or [], registry_namespace=namespace, description=description, arguments=arguments) return field(resolver) if resolver else field def create(self, input_type: type[MappedGraphQLDTO[T]], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any: """Creates a Strawberry GraphQL mutation field for creating new model instances. This method generates a mutation field that handles the creation of SQLAlchemy model instances based on the provided input type. It integrates with Strawchemy's repository system for data persistence and allows for custom validation. Args: input_type: The Strawberry input type representing the data for creating a new model instance. This should be a `MappedGraphQLDTO`. resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be the corresponding output type of the model. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before creation. 
Returns: A `StrawchemyCreateMutationField` instance, which is a specialized StrawberryField configured for create mutations. """ namespace = self._annotation_namespace() type_annotation = StrawberryAnnotation.from_annotation(graphql_type, namespace) if graphql_type else None repository_type_ = repository_type if repository_type is not None else self.config.repository_type field = StrawchemyCreateMutationField(input_type, config=self.config, repository_type=repository_type_, python_name=None, graphql_name=name, type_annotation=type_annotation, is_subscription=False, permission_classes=permission_classes or [], deprecation_reason=deprecation_reason, default=default, default_factory=default_factory, metadata=metadata, directives=directives, extensions=extensions or [], registry_namespace=namespace, description=description, validation=validation) return field(resolver) if resolver else field def upsert(self, input_type: type[MappedGraphQLDTO[T]], update_fields: type[EnumDTO], conflict_fields: type[EnumDTO], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any: """Creates a Strawberry GraphQL mutation field for upserting model instances. This method generates a mutation field that handles the "upsert" (update or insert) of SQLAlchemy model instances. It uses the provided input type, update fields enum, and conflict fields enum to determine the behavior on conflict. It integrates with Strawchemy's repository system and allows for custom validation. Args: input_type: The Strawberry input type representing the data for the upsert operation. This should be a `MappedGraphQLDTO`. update_fields: An `EnumDTO` specifying which fields to update if a conflict occurs and an update is performed. conflict_fields: An `EnumDTO` specifying the fields to use for conflict detection (e.g., primary key or unique constraints). resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be the corresponding output type of the model. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before the upsert operation. Returns: A `StrawchemyUpsertMutationField` instance, which is a specialized StrawberryField configured for upsert mutations. 
""" namespace = self._annotation_namespace() type_annotation = StrawberryAnnotation.from_annotation(graphql_type, namespace) if graphql_type else None repository_type_ = repository_type if repository_type is not None else self.config.repository_type field = StrawchemyUpsertMutationField(input_type, update_fields_enum=update_fields, conflict_fields_enum=conflict_fields, config=self.config, repository_type=repository_type_, python_name=None, graphql_name=name, type_annotation=type_annotation, is_subscription=False, permission_classes=permission_classes or [], deprecation_reason=deprecation_reason, default=default, default_factory=default_factory, metadata=metadata, directives=directives, extensions=extensions or [], registry_namespace=namespace, description=description, validation=validation) return field(resolver) if resolver else field def update(self, input_type: type[MappedGraphQLDTO[T]], filter_input: type[BooleanFilterDTO], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any: """Creates a Strawberry GraphQL mutation field for updating model instances. This method generates a mutation field that handles updating existing SQLAlchemy model instances based on filter criteria. It uses the provided input type for the update data and a filter input type to specify which records to update. It integrates with Strawchemy's repository system and allows for custom validation. Args: input_type: The Strawberry input type representing the data to update on the model instances. This should be a `MappedGraphQLDTO`. filter_input: The Strawberry input type used to filter which model instances should be updated. This should be a `BooleanFilterDTO`. resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be a list of the corresponding output type of the model or a success/failure indicator. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before the update operation. Returns: A `StrawchemyUpdateMutationField` instance, which is a specialized StrawberryField configured for update mutations. 
""" namespace = self._annotation_namespace() type_annotation = StrawberryAnnotation.from_annotation(graphql_type, namespace) if graphql_type else None repository_type_ = repository_type if repository_type is not None else self.config.repository_type field = StrawchemyUpdateMutationField(config=self.config, input_type=input_type, filter_type=filter_input, repository_type=repository_type_, python_name=None, graphql_name=name, type_annotation=type_annotation, is_subscription=False, permission_classes=permission_classes or [], deprecation_reason=deprecation_reason, default=default, default_factory=default_factory, metadata=metadata, directives=directives, extensions=extensions or [], registry_namespace=namespace, description=description, validation=validation) return field(resolver) if resolver else field def update_by_ids(self, input_type: type[MappedGraphQLDTO[T]], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any: """Creates a Strawberry GraphQL mutation field for updating model instances by IDs. This method generates a mutation field that handles updating existing SQLAlchemy model instances based on their primary key(s). The input type should typically include the ID(s) of the record(s) to update and the data to apply. It integrates with Strawchemy's repository system and allows for custom validation. Args: input_type: The Strawberry input type representing the data for updating model instances. This should be a `MappedGraphQLDTO`, usually generated by `pk_update_input`, which includes primary key fields. resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be the corresponding output type of the model or a list thereof. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before the update operation. Returns: A `StrawchemyUpdateMutationField` instance, specialized for updates by ID. 
""" namespace = self._annotation_namespace() type_annotation = StrawberryAnnotation.from_annotation(graphql_type, namespace) if graphql_type else None repository_type_ = repository_type if repository_type is not None else self.config.repository_type field = StrawchemyUpdateMutationField(config=self.config, input_type=input_type, repository_type=repository_type_, python_name=None, graphql_name=name, type_annotation=type_annotation, is_subscription=False, permission_classes=permission_classes or [], deprecation_reason=deprecation_reason, default=default, default_factory=default_factory, metadata=metadata, directives=directives, extensions=extensions or [], registry_namespace=namespace, description=description, validation=validation) return field(resolver) if resolver else field def delete(self, filter_input: Optional[type[BooleanFilterDTO]]=None, resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None) -> Any: """Creates a Strawberry GraphQL mutation field for deleting model instances. This method generates a mutation field that handles the deletion of SQLAlchemy model instances. Deletion can be based on filter criteria provided via `filter_input` or by ID if the `filter_input` is structured to accept primary key(s). It integrates with Strawchemy's repository system for data persistence. Args: filter_input: The Strawberry input type used to filter which model instances should be deleted. This should be a `BooleanFilterDTO`. If deleting by ID, this DTO should contain the ID field(s). If None, the mutation might be configured to delete a single record based on an ID passed directly (implementation dependent). resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, often to indicate success/failure or the number of records deleted. extensions: A list of Strawberry FieldExtensions. Returns: A `StrawchemyDeleteMutationField` instance, which is a specialized StrawberryField configured for delete mutations. 
""" namespace = self._annotation_namespace() type_annotation = StrawberryAnnotation.from_annotation(graphql_type, namespace) if graphql_type else None repository_type_ = repository_type if repository_type is not None else self.config.repository_type field = StrawchemyDeleteMutationField(filter_input, config=self.config, repository_type=repository_type_, python_name=None, graphql_name=name, type_annotation=type_annotation, is_subscription=False, permission_classes=permission_classes or [], deprecation_reason=deprecation_reason, default=default, default_factory=default_factory, metadata=metadata, directives=directives, extensions=extensions or [], registry_namespace=namespace, description=description) return field(resolver) if resolver else field
class Strawchemy: '''Main entry point for integrating SQLAlchemy models with Strawberry GraphQL. This class provides a cohesive interface to generate Strawberry GraphQL types, inputs, filters, and fields based on SQLAlchemy models. It manages configuration, type registration, and various factories for DTO generation. Attributes: config (StrawchemyConfig): The configuration object for Strawchemy. registry (StrawberryRegistry): The registry for Strawberry types. filter: Factory for creating boolean filter input types. aggregate_filter: Factory for creating aggregate filter input types. distinct_on: Decorator for creating distinct_on enum types. input: Factory for creating general input types. create_input: Factory for creating input types for create mutations. pk_update_input: Factory for creating input types for update-by-PK mutations. filter_update_input: Factory for creating input types for update-by-filter mutations. order: Factory for creating order_by input types. type: Factory for creating Strawberry output types. aggregate: Factory for creating aggregation root types. upsert_update_fields: Factory for creating enum DTOs for upsert update fields. upsert_conflict_fields: Factory for creating enum DTOs for upsert conflict fields. pydantic (PydanticMapper): A mapper for generating Pydantic models. ''' def __init__(self, config: Union[StrawchemyConfig, SupportedDialect], strawberry_config: Optional[StrawberryConfig]=None) -> None: '''Initializes the Strawchemy instance. Sets up the configuration, registry, and various DTO factories required for type and field generation. Args: config: A StrawchemyConfig instance or a supported dialect string (e.g., "postgresql", "mysql") to initialize a default config. strawberry_config: A StrawberryConfig instance to initialize the registry. If not provided, a default StrawberryConfig will be used. ''' pass def _annotation_namespace(self) -> dict[str, Any]: '''Provides the namespace for Strawberry annotations. Combines the registry's 'object' namespace with internal Strawchemy types. Returns: A dictionary representing the annotation namespace. ''' pass @cached_property def pydantic(self) -> PydanticMapper: '''Provides access to a PydanticMapper instance. This mapper is used for generating Pydantic models corresponding to the SQLAlchemy models and Strawberry types. Returns: An instance of PydanticMapper. 
''' pass @overload def field(self, resolver: _RESOLVER_TYPE[Any], *, filter_input: Optional[type[BooleanFilterDTO]]=None, order_by: Optional[type[OrderByDTO]]=None, distinct_on: Optional[type[EnumDTO]]=None, pagination: Optional[Union[bool, DefaultOffsetPagination]]=None, arguments: Optional[list[StrawberryArgument]]=None, id_field_name: Optional[str]=None, root_aggregations: bool=False, filter_statement: Optional[FilterStatementCallable]=None, execution_options: Optional[dict[str, Any]]=None, query_hook: Optional[Union[QueryHook[Any], Sequence[QueryHook[Any]]]]=None, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, root_field: bool=True) -> StrawchemyField:
pass
@overload def field(self, *, filter_input: Optional[type[BooleanFilterDTO]]=None, order_by: Optional[type[OrderByDTO]]=None, distinct_on: Optional[type[EnumDTO]]=None, pagination: Optional[Union[bool, DefaultOffsetPagination]]=None, arguments: Optional[list[StrawberryArgument]]=None, id_field_name: Optional[str]=None, root_aggregations: bool=False, filter_statement: Optional[FilterStatementCallable]=None, execution_options: Optional[dict[str, Any]]=None, query_hook: Optional[Union[QueryHookCallable[Any], Sequence[QueryHookCallable[Any]]]]=None, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, root_field: bool=True) -> Any:
pass
def field(self, resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, filter_input: Optional[type[BooleanFilterDTO]]=None, order_by: Optional[type[OrderByDTO]]=None, distinct_on: Optional[type[EnumDTO]]=None, pagination: Optional[Union[bool, DefaultOffsetPagination]]=None, arguments: Optional[list[StrawberryArgument]]=None, id_field_name: Optional[str]=None, root_aggregations: bool=False, filter_statement: Optional[FilterStatementCallable]=None, execution_options: Optional[dict[str, Any]]=None, query_hook: Optional[Union[QueryHookCallable[Any], Sequence[QueryHookCallable[Any]]]]=None, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, root_field: bool=True) -> Any:
'''Creates a Strawberry GraphQL field with enhanced SQLAlchemy capabilities. This method extends the standard Strawberry field creation by integrating SQLAlchemy-specific features like automatic filtering, ordering, pagination, and aggregations based on SQLAlchemy models. Args: resolver: The resolver function for the field. If not provided, Strawchemy will attempt to generate one based on the model. filter_input: The input type for filtering results. order_by: The input type for ordering results. distinct_on: The enum type for 'distinct on' clauses (PostgreSQL). pagination: Enables pagination for the field. Can be True for default offset pagination or a DefaultOffsetPagination instance for customization. arguments: A list of additional StrawberryArgument instances for the field. id_field_name: The name of the ID field, used for certain operations. root_aggregations: If True, enables root-level aggregations for the field. filter_statement: A callable to generate a filter statement for the query. execution_options: SQLAlchemy execution options for the query. query_hook: A callable or sequence of callables to modify the SQLAlchemy query. repository_type: A custom repository class for data fetching logic. name: The name of the GraphQL field. description: The description of the GraphQL field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field. default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL type of the field. If not provided, it's inferred. extensions: A list of Strawberry FieldExtensions. root_field: Indicates if this is a root-level field. Returns: A StrawchemyField instance, which is a specialized StrawberryField. '''
pass
def create(self, input_type: type[MappedGraphQLDTO[T]], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any:
'''Creates a Strawberry GraphQL mutation field for creating new model instances. This method generates a mutation field that handles the creation of SQLAlchemy model instances based on the provided input type. It integrates with Strawchemy's repository system for data persistence and allows for custom validation. Args: input_type: The Strawberry input type representing the data for creating a new model instance. This should be a `MappedGraphQLDTO`. resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be the corresponding output type of the model. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before creation. Returns: A `StrawchemyCreateMutationField` instance, which is a specialized StrawberryField configured for create mutations. '''
pass
def upsert(self, input_type: type[MappedGraphQLDTO[T]], update_fields: type[EnumDTO], conflict_fields: type[EnumDTO], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any:
'''Creates a Strawberry GraphQL mutation field for upserting model instances. This method generates a mutation field that handles the "upsert" (update or insert) of SQLAlchemy model instances. It uses the provided input type, update fields enum, and conflict fields enum to determine the behavior on conflict. It integrates with Strawchemy's repository system and allows for custom validation. Args: input_type: The Strawberry input type representing the data for the upsert operation. This should be a `MappedGraphQLDTO`. update_fields: An `EnumDTO` specifying which fields to update if a conflict occurs and an update is performed. conflict_fields: An `EnumDTO` specifying the fields to use for conflict detection (e.g., primary key or unique constraints). resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be the corresponding output type of the model. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before the upsert operation. Returns: A `StrawchemyUpsertMutationField` instance, which is a specialized StrawberryField configured for upsert mutations. '''
pass
def update(self, input_type: type[MappedGraphQLDTO[T]], filter_input: type[BooleanFilterDTO], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any:
'''Creates a Strawberry GraphQL mutation field for updating model instances. This method generates a mutation field that handles updating existing SQLAlchemy model instances based on filter criteria. It uses the provided input type for the update data and a filter input type to specify which records to update. It integrates with Strawchemy's repository system and allows for custom validation. Args: input_type: The Strawberry input type representing the data to update on the model instances. This should be a `MappedGraphQLDTO`. filter_input: The Strawberry input type used to filter which model instances should be updated. This should be a `BooleanFilterDTO`. resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be a list of the corresponding output type of the model or a success/failure indicator. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before the update operation. Returns: A `StrawchemyUpdateMutationField` instance, which is a specialized StrawberryField configured for update mutations. '''
pass
def update_by_ids(self, input_type: type[MappedGraphQLDTO[T]], resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None, validation: Optional[ValidationProtocol[T]]=None) -> Any:
'''Creates a Strawberry GraphQL mutation field for updating model instances by IDs. This method generates a mutation field that handles updating existing SQLAlchemy model instances based on their primary key(s). The input type should typically include the ID(s) of the record(s) to update and the data to apply. It integrates with Strawchemy's repository system and allows for custom validation. Args: input_type: The Strawberry input type representing the data for updating model instances. This should be a `MappedGraphQLDTO`, usually generated by `pk_update_input`, which includes primary key fields. resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, typically to be the corresponding output type of the model or a list thereof. extensions: A list of Strawberry FieldExtensions. validation: An optional validation protocol instance to validate the input data before the update operation. Returns: A `StrawchemyUpdateMutationField` instance, specialized for updates by ID. '''
pass
def delete(self, filter_input: Optional[type[BooleanFilterDTO]]=None, resolver: Optional[_RESOLVER_TYPE[Any]]=None, *, repository_type: Optional[AnyRepository]=None, name: Optional[str]=None, description: Optional[str]=None, permission_classes: Optional[list[type[BasePermission]]]=None, deprecation_reason: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Union[Callable[..., object], object]=dataclasses.MISSING, metadata: Optional[Mapping[Any, Any]]=None, directives: Sequence[object]=(), graphql_type: Optional[Any]=None, extensions: Optional[list[FieldExtension]]=None) -> Any:
'''Creates a Strawberry GraphQL mutation field for deleting model instances. This method generates a mutation field that handles the deletion of SQLAlchemy model instances. Deletion can be based on filter criteria provided via `filter_input` or by ID if the `filter_input` is structured to accept primary key(s). It integrates with Strawchemy's repository system for data persistence. Args: filter_input: The Strawberry input type used to filter which model instances should be deleted. This should be a `BooleanFilterDTO`. If deleting by ID, this DTO should contain the ID field(s). If None, the mutation might be configured to delete a single record based on an ID passed directly (implementation dependent). resolver: An optional custom resolver function for the mutation. If not provided, Strawchemy will use a default resolver. repository_type: An optional custom repository class for data fetching and persistence logic. Defaults to the repository configured in `StrawchemyConfig`. name: The name of the GraphQL mutation field. description: The description of the GraphQL mutation field. permission_classes: A list of permission classes for the field. deprecation_reason: The reason for deprecating the field. default: The default value for the field (typically not used for mutations). default_factory: A factory function to generate the default value. metadata: Additional metadata for the field. directives: A sequence of directives for the field. graphql_type: The GraphQL return type of the mutation. If not provided, it's inferred, often to indicate success/failure or the number of records deleted. extensions: A list of Strawberry FieldExtensions. Returns: A `StrawchemyDeleteMutationField` instance, which is a specialized StrawberryField configured for delete mutations. '''
pass
15
10
56
3
33
20
3
0.64
0
32
21
0
11
23
11
11
657
48
371
231
193
238
79
64
66
7
0
1
33
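A minimal usage sketch may help orient readers of the Strawchemy record above. Only the attribute names exposed by the class itself (type, filter, order, field, create_input, create) come from this file; the User model, the include='all' parameter, and the `from strawchemy import Strawchemy` import path are assumptions for illustration, not confirmed by this record.

import strawberry
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
from strawchemy import Strawchemy  # assumed public import path

class Base(DeclarativeBase):
    pass

class User(Base):  # hypothetical model
    __tablename__ = 'user'
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]

strawchemy = Strawchemy('postgresql')  # a dialect string initializes a default StrawchemyConfig

@strawchemy.type(User, include='all')  # output type; include= is assumed
class UserType:
    pass

@strawchemy.filter(User, include='all')  # boolean filter input
class UserFilter:
    pass

@strawchemy.order(User, include='all')  # order_by input
class UserOrderBy:
    pass

@strawchemy.create_input(User, include='all')  # input for create mutations
class UserCreate:
    pass

@strawberry.type
class Query:
    # field() wires filtering, ordering and offset pagination into the resolver
    users: list[UserType] = strawchemy.field(filter_input=UserFilter, order_by=UserOrderBy, pagination=True)

@strawberry.type
class Mutation:
    create_user: UserType = strawchemy.create(UserCreate)

schema = strawberry.Schema(query=Query, mutation=Mutation)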
327,952
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_executor.py
strawchemy.sqlalchemy._executor.AsyncQueryExecutor
from dataclasses import dataclass from .typing import AnyAsyncSession, AnySyncSession, DeclarativeT from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, Union @dataclass class AsyncQueryExecutor(QueryExecutor[DeclarativeT]): """Extends QueryExecutor to provide asynchronous query execution. This class inherits the query building capabilities of `QueryExecutor` and adapts them for an asynchronous environment. It uses an `AnyAsyncSession` to execute the SQLAlchemy statements and retrieve results. The primary methods `execute`, `list`, and `get_one_or_none` are implemented as asynchronous methods, suitable for use with `async/await`. """ async def execute(self, session: AnyAsyncSession) -> Result[tuple[DeclarativeT, Any]]: """Executes the SQLAlchemy statement. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnyAsyncSession to use. Returns: The result of the execution. """ return await session.execute(self.statement()) async def list(self, session: AnyAsyncSession) -> QueryResult[DeclarativeT]: """Executes the statement and returns a QueryResult object containing all results. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnyAsyncSession to use. Returns: A QueryResult object containing all results. """ return self._to_query_result(await self.execute(session), 'all') async def get_one_or_none(self, session: AnyAsyncSession) -> QueryResult[DeclarativeT]: """Executes the statement and returns a QueryResult object containing at most one result. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnyAsyncSession to use. Returns: A QueryResult object containing at most one result. """ return self._to_query_result(await self.execute(session), 'one_or_none')
@dataclass class AsyncQueryExecutor(QueryExecutor[DeclarativeT]): '''Extends QueryExecutor to provide asynchronous query execution. This class inherits the query building capabilities of `QueryExecutor` and adapts them for an asynchronous environment. It uses an `AnyAsyncSession` to execute the SQLAlchemy statements and retrieve results. The primary methods `execute`, `list`, and `get_one_or_none` are implemented as asynchronous methods, suitable for use with `async/await`. ''' async def execute(self, session: AnyAsyncSession) -> Result[tuple[DeclarativeT, Any]]: '''Executes the SQLAlchemy statement. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnyAsyncSession to use. Returns: The result of the execution. ''' pass async def list(self, session: AnyAsyncSession) -> QueryResult[DeclarativeT]: '''Executes the statement and returns a QueryResult object containing all results. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnyAsyncSession to use. Returns: A QueryResult object containing all results. ''' pass async def get_one_or_none(self, session: AnyAsyncSession) -> QueryResult[DeclarativeT]: '''Executes the statement and returns a QueryResult object containing at most one result. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnyAsyncSession to use. Returns: A QueryResult object containing at most one result. ''' pass
5
4
13
3
2
8
1
4.43
1
3
1
0
3
0
3
5
52
14
7
4
3
31
7
4
3
1
2
0
3
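As a rough driver sketch for the AsyncQueryExecutor record above: the executor is assumed to be already constructed (building one requires the base_statement and scope fields of the QueryExecutor base, which appears later in this file), and the engine URL plus fetch_* helper names are hypothetical. Only list(), get_one_or_none(), QueryResult iteration and one_or_none() are taken from the source.

from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

engine = create_async_engine('postgresql+asyncpg://user:pass@localhost/db')  # hypothetical DSN
session_factory = async_sessionmaker(engine, expire_on_commit=False)

async def fetch_all(executor):
    # list() awaits execute() internally and wraps every row in a QueryResult
    async with session_factory() as session:
        result = await executor.list(session)
        return [node.model for node in result]  # iterating a QueryResult yields NodeResult objects

async def fetch_one(executor):
    # get_one_or_none() fetches at most one row; one_or_none() unwraps it or returns None
    async with session_factory() as session:
        result = await executor.get_one_or_none(session)
        node = result.one_or_none()
        return node.model if node is not None else None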
327,953
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_executor.py
strawchemy.sqlalchemy._executor.NodeResult
from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, Union import dataclasses from strawchemy.dto import ModelT from dataclasses import dataclass from typing_extensions import Self @dataclass class NodeResult(Generic[ModelT]): """Represents a single node result from a query. Attributes: model: The SQLAlchemy model instance. computed_values: A dictionary of computed values for this node. node_key: A callable to generate a key for a query node type. """ model: ModelT computed_values: dict[str, Any] node_key: Callable[[QueryNodeType], str] def value(self, key: QueryNodeType) -> Any: """Retrieves the value for a given query node type. If the key represents a computed or transformed value, it's fetched from `computed_values`. Otherwise, it's retrieved as an attribute from the `model`. Args: key: The query node type representing the desired value. Returns: The value corresponding to the key. """ if key.value.is_computed or key.metadata.data.is_transform: return self.computed_values[self.node_key(key)] return getattr(self.model, key.value.model_field_name) def copy_with(self, model: Any) -> Self: """Creates a copy of this NodeResult with a new model. Args: model: The new model instance to use. Returns: A new NodeResult instance with the updated model. """ return dataclasses.replace(self, model=model)
@dataclass class NodeResult(Generic[ModelT]): '''Represents a single node result from a query. Attributes: model: The SQLAlchemy model instance. computed_values: A dictionary of computed values for this node. node_key: A callable to generate a key for a query node type. ''' def value(self, key: QueryNodeType) -> Any: '''Retrieves the value for a given query node type. If the key represents a computed or transformed value, it's fetched from `computed_values`. Otherwise, it's retrieved as an attribute from the `model`. Args: key: The query node type representing the desired value. Returns: The value corresponding to the key. ''' pass def copy_with(self, model: Any) -> Self: '''Creates a copy of this NodeResult with a new model. Args: model: The new model instance to use. Returns: A new NodeResult instance with the updated model. ''' pass
4
3
13
3
3
8
2
2.1
1
1
0
0
2
0
2
2
40
9
10
3
7
21
10
3
7
2
1
1
3
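The dispatch inside NodeResult.value() is easiest to see with stand-in objects. The SimpleNamespace keys below only mimic the attribute paths value() inspects (key.value.is_computed, key.metadata.data.is_transform, key.value.model_field_name); the real QueryNodeType is richer, and the import from the private _executor module is an assumption based on this record's file path.

from types import SimpleNamespace
from strawchemy.sqlalchemy._executor import NodeResult  # assumed importable private module

class FakeUser:  # stand-in model
    name = 'ada'

node = NodeResult(model=FakeUser(), computed_values={'name_count': 3}, node_key=lambda key: 'name_count')

computed_key = SimpleNamespace(value=SimpleNamespace(is_computed=True, model_field_name='name'), metadata=SimpleNamespace(data=SimpleNamespace(is_transform=False)))
plain_key = SimpleNamespace(value=SimpleNamespace(is_computed=False, model_field_name='name'), metadata=SimpleNamespace(data=SimpleNamespace(is_transform=False)))

assert node.value(computed_key) == 3    # computed -> looked up via node_key in computed_values
assert node.value(plain_key) == 'ada'   # plain -> getattr on the model
clone = node.copy_with(FakeUser())      # dataclasses.replace keeps computed_values and node_key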
327,954
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_executor.py
strawchemy.sqlalchemy._executor.QueryExecutor
from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, Union from collections import defaultdict from dataclasses import dataclass import dataclasses from .typing import AnyAsyncSession, AnySyncSession, DeclarativeT @dataclass class QueryExecutor(Generic[DeclarativeT]): """Executes SQLAlchemy queries and converts the results into QueryResult objects. This class provides methods for executing SQLAlchemy queries and converting the results into QueryResult objects. It supports applying unique constraints, handling root aggregations, and fetching results as either a list or a single item. """ base_statement: Select[tuple[DeclarativeT]] scope: QueryScope[Any] apply_unique: bool = False root_aggregation_functions: list[Label[Any]] = dataclasses.field(default_factory=list) execution_options: Optional[dict[str, Any]] = None def _to_query_result(self, result: Result[tuple[DeclarativeT, Any]], fetch: Literal['one_or_none', 'all']) -> QueryResult[DeclarativeT]: """Converts a SQLAlchemy result to a QueryResult object. Args: result: The SQLAlchemy result to convert. fetch: Whether to fetch one or all results. Returns: A QueryResult object containing the nodes and computed values. """ nodes: list[DeclarativeT] = [] computed: list[dict[str, Any]] = [] if self.apply_unique: result = result.unique() if fetch == 'all': rows = result.all() else: item = result.one_or_none() rows = [] if item is None else [item] for row in rows: obj, *computed_values = row _, *computed_fields = row._fields nodes.append(obj) computed.append(dict(zip(computed_fields, computed_values))) root_aggregations_set = {function.name for function in self.root_aggregation_functions} first_computed = computed[0] if computed else {} query_computed_values = {name: value for name, value in first_computed.items() if name in root_aggregations_set} return QueryResult(nodes=nodes, node_computed_values=computed, query_computed_values=defaultdict(lambda: None) | query_computed_values, node_key=self.scope.key) def statement(self) -> Union[Select[tuple[DeclarativeT]], StatementLambdaElement]: """Returns the SQLAlchemy statement to be executed. Returns: The SQLAlchemy statement. """ statement = self.base_statement if self.execution_options: statement = statement.execution_options(**self.execution_options) return statement
@dataclass class QueryExecutor(Generic[DeclarativeT]): '''Executes SQLAlchemy queries and converts the results into QueryResult objects. This class provides methods for executing SQLAlchemy queries and converting the results into QueryResult objects. It supports applying unique constraints, handling root aggregations, and fetching results as either a list or a single item. ''' def _to_query_result(self, result: Result[tuple[DeclarativeT, Any]], fetch: Literal['one_or_none', 'all']) -> QueryResult[DeclarativeT]: '''Converts a SQLAlchemy result to a QueryResult object. Args: result: The SQLAlchemy result to convert. fetch: Whether to fetch one or all results. Returns: A QueryResult object containing the nodes and computed values. ''' pass def statement(self) -> Union[Select[tuple[DeclarativeT]], StatementLambdaElement]: '''Returns the SQLAlchemy statement to be executed. Returns: The SQLAlchemy statement. ''' pass
4
3
24
3
16
6
4
0.43
1
7
1
2
2
0
2
2
62
9
37
19
32
16
29
17
26
6
1
1
8
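The row unpacking inside _to_query_result() relies on SQLAlchemy Row objects supporting tuple unpacking and exposing a _fields attribute. A namedtuple reproduces that shape, so the core pattern can be illustrated standalone; the column names below are hypothetical.

from collections import namedtuple

Row = namedtuple('Row', ['obj', 'name_count', 'name_upper'])  # stand-in for a SQLAlchemy Row
row = Row(obj='<User ada>', name_count=3, name_upper='ADA')

obj, *computed_values = row        # first element is the mapped entity
_, *computed_fields = row._fields  # remaining labels name the computed columns
print(dict(zip(computed_fields, computed_values)))  # {'name_count': 3, 'name_upper': 'ADA'}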
327,955
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_executor.py
strawchemy.sqlalchemy._executor.QueryResult
from strawchemy.dto import ModelT from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, Union from dataclasses import dataclass from typing_extensions import Self from .exceptions import QueryResultError import dataclasses from collections import defaultdict @dataclass class QueryResult(Generic[ModelT]): """Represents the result of a GraphQL query. This class holds the nodes (data objects) returned by the query, computed values for each node, and computed values for the query itself. Attributes: nodes: A sequence of data objects of type ModelT. node_computed_values: A sequence of dictionaries containing computed values for each node. query_computed_values: A defaultdict containing computed values for the query. """ node_key: Callable[[QueryNodeType], str] = lambda key: str(key) nodes: Sequence[ModelT] = dataclasses.field(default_factory=list) node_computed_values: Sequence[dict[str, Any]] = dataclasses.field(default_factory=list) query_computed_values: defaultdict[str, Any] = dataclasses.field(default_factory=lambda: defaultdict(lambda: None)) def __post_init__(self) -> None: if not self.node_computed_values: self.node_computed_values = [{} for _ in range(len(self.nodes))] def __iter__(self) -> Generator[NodeResult[ModelT]]: """Iterates over the query results, yielding NodeResult instances. Yields: NodeResult[ModelT]: An individual result node. """ for model, computed_values in zip(self.nodes, self.node_computed_values): yield NodeResult(model, computed_values, self.node_key) def filter_in(self, **kwargs: Sequence[Any]) -> Self: """Filters the query results based on attribute values. Keeps only the nodes where the specified attributes are present in the provided sequences of values. Args: **kwargs: Keyword arguments where keys are attribute names of the model and values are sequences of allowed values for that attribute. Returns: A new QueryResult instance with the filtered nodes. """ filtered = [(model, computed_values) for model, computed_values in zip(self.nodes, self.node_computed_values) if all((getattr(model, key) in value for key, value in kwargs.items()))] nodes, computed_values = list(map(list, zip(*filtered))) if filtered else ([], []) return dataclasses.replace(self, nodes=nodes, node_computed_values=computed_values) def value(self, key: QueryNodeType) -> Any: """Retrieves a query-level computed value. Args: key: The query node type representing the desired query-level computed value. Returns: The computed value for the query. """ return self.query_computed_values[self.node_key(key)] def one(self) -> NodeResult[ModelT]: """Returns the single result node. Raises: QueryResultError: If the number of nodes is not exactly one. Returns: The single NodeResult. """ if len(self.nodes) != 1 or len(self.node_computed_values) != 1: msg = f'Expected one item, got {len(self.nodes)}' raise QueryResultError(msg) return NodeResult(self.nodes[0], self.node_computed_values[0], self.node_key) def one_or_none(self) -> Optional[NodeResult[ModelT]]: """Returns the single result node, or None if there isn't exactly one result. Returns: The single NodeResult or None. """ try: return self.one() except QueryResultError: return None
@dataclass class QueryResult(Generic[ModelT]): '''Represents the result of a GraphQL query. This class holds the nodes (data objects) returned by the query, computed values for each node, and computed values for the query itself. Attributes: nodes: A sequence of data objects of type ModelT. node_computed_values: A sequence of dictionaries containing computed values for each node. query_computed_values: A defaultdict containing computed values for the query. ''' def __post_init__(self) -> None: pass def __iter__(self) -> Generator[NodeResult[ModelT]]: '''Iterates over the query results, yielding NodeResult instances. Yields: NodeResult[ModelT]: An individual result node. ''' pass def filter_in(self, **kwargs: Sequence[Any]) -> Self: '''Filters the query results based on attribute values. Keeps only the nodes where the specified attributes are present in the provided sequences of values. Args: **kwargs: Keyword arguments where keys are attribute names of the model and values are sequences of allowed values for that attribute. Returns: A new QueryResult instance with the filtered nodes. ''' pass def value(self, key: QueryNodeType) -> Any: '''Retrieves a query-level computed value. Args: key: The query node type representing the desired query-level computed value. Returns: The computed value for the query. ''' pass def one(self) -> NodeResult[ModelT]: '''Returns the single result node. Raises: QueryResultError: If the number of nodes is not exactly one. Returns: The single NodeResult. ''' pass def one_or_none(self) -> Optional[NodeResult[ModelT]]: '''Returns the single result node, or None if there isn't exactly one result. Returns: The single NodeResult or None. ''' pass
8
6
11
2
4
5
2
1.29
1
10
2
0
6
0
6
6
89
18
31
17
24
40
27
15
20
2
1
1
11
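Since every QueryResult field has a default, the class can be exercised on its own for a quick sanity check of filter_in(), one() and one_or_none(). The import from the private _executor module and the User stand-in are assumptions for illustration.

from strawchemy.sqlalchemy._executor import QueryResult  # assumed importable private module

class User:  # stand-in model
    def __init__(self, name):
        self.name = name

result = QueryResult(nodes=[User('ada'), User('bob')])  # __post_init__ fills node_computed_values
adas = result.filter_in(name=['ada'])                   # keep nodes whose .name is in the given sequence
assert adas.one().model.name == 'ada'                   # exactly one node expected, else QueryResultError
assert result.filter_in(name=['zoe']).one_or_none() is None  # empty result -> None instead of raising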
327,956
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_executor.py
strawchemy.sqlalchemy._executor.SyncQueryExecutor
from dataclasses import dataclass from .typing import AnyAsyncSession, AnySyncSession, DeclarativeT from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, Union @dataclass class SyncQueryExecutor(QueryExecutor[DeclarativeT]): """Extends QueryExecutor to provide synchronous query execution. This class inherits the query building capabilities of `QueryExecutor` and adapts them for a synchronous environment. It uses an `AnySyncSession` to execute the SQLAlchemy statements and retrieve results. The primary methods `execute`, `list`, and `get_one_or_none` are implemented as synchronous methods. """ def execute(self, session: AnySyncSession) -> Result[tuple[DeclarativeT, Any]]: """Executes the SQLAlchemy statement. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnySyncSession to use. Returns: The result of the execution. """ return session.execute(self.statement()) def list(self, session: AnySyncSession) -> QueryResult[DeclarativeT]: """Executes the statement and returns a QueryResult object containing all results. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnySyncSession to use. Returns: A QueryResult object containing all results. """ return self._to_query_result(self.execute(session), 'all') def get_one_or_none(self, session: AnySyncSession) -> QueryResult[DeclarativeT]: """Executes the statement and returns a QueryResult object containing at most one result. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnySyncSession to use. Returns: A QueryResult object containing at most one result. """ return self._to_query_result(self.execute(session), 'one_or_none')
@dataclass class SyncQueryExecutor(QueryExecutor[DeclarativeT]): '''Extends QueryExecutor to provide synchronous query execution. This class inherits the query building capabilities of `QueryExecutor` and adapts them for a synchronous environment. It uses an `AnySyncSession` to execute the SQLAlchemy statements and retrieve results. The primary methods `execute`, `list`, and `get_one_or_none` are implemented as synchronous methods. ''' def execute(self, session: AnySyncSession) -> Result[tuple[DeclarativeT, Any]]: '''Executes the SQLAlchemy statement. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnySyncSession to use. Returns: The result of the execution. ''' pass def list(self, session: AnySyncSession) -> QueryResult[DeclarativeT]: '''Executes the statement and returns a QueryResult object containing all results. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnySyncSession to use. Returns: A QueryResult object containing all results. ''' pass def get_one_or_none(self, session: AnySyncSession) -> QueryResult[DeclarativeT]: '''Executes the statement and returns a QueryResult object containing at most one result. The statement to be executed is determined by the `self.statement()` method. Args: session: The SQLAlchemy AnySyncSession to use. Returns: A QueryResult object containing at most one result. ''' pass
5
4
13
3
2
8
1
4.43
1
3
1
0
3
0
3
5
52
14
7
4
3
31
7
4
3
1
2
0
3
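A minimal standalone sketch of the synchronous execution pattern `SyncQueryExecutor` adapts: build the statement once, run it on a sync `Session`, then shape the result in an 'all' or 'one_or_none' mode. The `User` model, engine URL, and helper names are illustrative assumptions, not strawchemy API.

from typing import Optional

from sqlalchemy import create_engine, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class User(Base):
    __tablename__ = "user"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]

def list_all(session: Session) -> list[User]:
    # mirrors the 'all' result mode: execute once, fetch every row
    return list(session.execute(select(User)).scalars().all())

def get_one_or_none(session: Session, user_id: int) -> Optional[User]:
    # mirrors the 'one_or_none' result mode
    return session.execute(select(User).where(User.id == user_id)).scalar_one_or_none()

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(User(name="ada"))
    session.commit()
    print(list_all(session), get_one_or_none(session, 1))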
327,957
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.AggregationJoin
from sqlalchemy.sql import ColumnElement, SQLColumnExpression
from sqlalchemy.orm import QueryableAttribute, RelationshipDirection, RelationshipProperty, aliased, class_mapper, raiseload
from sqlalchemy.sql.elements import NamedColumn
from sqlalchemy.orm.util import AliasedClass
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast


class AggregationJoin(Join):
    """Represents a join specifically for aggregation purposes, often involving a subquery.

    This class extends `Join` and is used when aggregations (e.g., counts, sums)
    need to be performed on related entities. It manages a subquery that computes
    these aggregations and ensures that columns in the subquery have unique names.

    Attributes:
        subquery_alias: An aliased class representing the subquery used for aggregation.
        _column_names: Internal tracking of column names within the subquery to ensure uniqueness.
    """

    def __init__(self, target: Union[Union[QueryableAttribute[Any], NamedFromClause], AliasedClass[Any]], node: QueryNodeType, subquery_alias: AliasedClass[Any], onclause: Optional[_OnClauseArgument]=None, is_outer: bool=False, order_nodes: Optional[list[QueryNodeType]]=None) -> None:
        super().__init__(target, node, onclause, is_outer, order_nodes)
        self.subquery_alias = subquery_alias
        self._column_names: defaultdict[str, int] = defaultdict(int)
        for column in self._inner_select.selected_columns:
            if isinstance(column, NamedColumn):
                self._column_names[column.name] = 1

    @property
    def _inner_select(self) -> Select[Any]:
        """The inner SELECT statement of the subquery used for aggregation."""
        if isinstance(self.selectable, CTE):
            return cast('Select[Any]', self.selectable.element)
        self_join = cast('AliasedReturnsRows', self.selectable)
        return cast('Select[Any]', cast('Subquery', self_join.element).element)

    def _existing_function_column(self, new_column: ColumnElement[Any]) -> Optional[ColumnElement[Any]]:
        """Checks if an equivalent column (typically a function call) already exists in the subquery.

        This is used to avoid adding duplicate aggregate functions to the subquery.

        Args:
            new_column: The new column (potentially a function) to check.

        Returns:
            The existing column if a match is found, otherwise None.
        """
        for column in self._inner_select.selected_columns:
            base_columns = column.base_columns
            new_base_columns = new_column.base_columns
            if len(base_columns) != len(new_base_columns):
                continue
            for first, other in zip(base_columns, new_base_columns):
                if not first.compare(other):
                    break
            else:
                return column
        return None

    def _ensure_unique_name(self, column: ColumnElement[Any]) -> ColumnElement[Any]:
        """Ensures that the given column has a unique name within the subquery.

        If a column with the same name already exists, it appends a suffix (e.g., '_1').

        Args:
            column: The column to ensure has a unique name.

        Returns:
            The column, possibly relabeled to ensure uniqueness.
        """
        if not isinstance(column, NamedColumn):
            return column
        if self._column_names[column.name]:
            name = f'{column.name}_{self._column_names[column.name]}'
            self._column_names[column.name] += 1
        else:
            name = column.name
        return column.label(name)

    def add_column_to_subquery(self, column: ColumnElement[Any]) -> None:
        """Adds a new column to the aggregation subquery.

        The column name is made unique before adding. The subquery
        (CTE or Lateral) is then rebuilt with the new column.

        Args:
            column: The column to add to the subquery.
        """
        new_sub_select = self._inner_select.add_columns(self._ensure_unique_name(column))
        if isinstance(self.selectable, Lateral):
            new_sub_select = new_sub_select.lateral(self.name)
        else:
            new_sub_select = new_sub_select.cte(self.name)
        if isinstance(self.target, AliasedClass):
            inspect(self.target).selectable = new_sub_select
        else:
            self.target = new_sub_select

    def upsert_column_to_subquery(self, column: ColumnElement[Any]) -> tuple[ColumnElement[Any], bool]:
        """Adds a column to the subquery if an equivalent one doesn't already exist.

        If an equivalent column (e.g., the same aggregate function on the same
        base column) is already present, it returns the existing column.
        Otherwise, it adds the new column and returns it.

        Args:
            column: The column to potentially add to the subquery.

        Returns:
            A tuple containing the column (either existing or newly added) and
            a boolean indicating whether the column was newly added (True) or
            already existed (False).
        """
        if (existing := self._existing_function_column(column)) is not None:
            return (existing, False)
        self.add_column_to_subquery(column)
        return (column, True)
class AggregationJoin(Join):
    '''Represents a join specifically for aggregation purposes, often involving a subquery.

    This class extends `Join` and is used when aggregations (e.g., counts, sums)
    need to be performed on related entities. It manages a subquery that computes
    these aggregations and ensures that columns in the subquery have unique names.

    Attributes:
        subquery_alias: An aliased class representing the subquery used for aggregation.
        _column_names: Internal tracking of column names within the subquery to ensure uniqueness.
    '''

    def __init__(self, target: Union[Union[QueryableAttribute[Any], NamedFromClause], AliasedClass[Any]], node: QueryNodeType, subquery_alias: AliasedClass[Any], onclause: Optional[_OnClauseArgument]=None, is_outer: bool=False, order_nodes: Optional[list[QueryNodeType]]=None) -> None:
        pass

    @property
    def _inner_select(self) -> Select[Any]:
        '''The inner SELECT statement of the subquery used for aggregation.'''
        pass

    def _existing_function_column(self, new_column: ColumnElement[Any]) -> Optional[ColumnElement[Any]]:
        '''Checks if an equivalent column (typically a function call) already exists in the subquery.

        This is used to avoid adding duplicate aggregate functions to the subquery.

        Args:
            new_column: The new column (potentially a function) to check.

        Returns:
            The existing column if a match is found, otherwise None.
        '''
        pass

    def _ensure_unique_name(self, column: ColumnElement[Any]) -> ColumnElement[Any]:
        '''Ensures that the given column has a unique name within the subquery.

        If a column with the same name already exists, it appends a suffix (e.g., '_1').

        Args:
            column: The column to ensure has a unique name.

        Returns:
            The column, possibly relabeled to ensure uniqueness.
        '''
        pass

    def add_column_to_subquery(self, column: ColumnElement[Any]) -> None:
        '''Adds a new column to the aggregation subquery.

        The column name is made unique before adding. The subquery
        (CTE or Lateral) is then rebuilt with the new column.

        Args:
            column: The column to add to the subquery.
        '''
        pass

    def upsert_column_to_subquery(self, column: ColumnElement[Any]) -> tuple[ColumnElement[Any], bool]:
        '''Adds a column to the subquery if an equivalent one doesn't already exist.

        If an equivalent column (e.g., the same aggregate function on the same
        base column) is already present, it returns the existing column.
        Otherwise, it adds the new column and returns it.

        Args:
            column: The column to potentially add to the subquery.

        Returns:
            A tuple containing the column (either existing or newly added) and
            a boolean indicating whether the column was newly added (True) or
            already existed (False).
        '''
        pass
8
6
17
2
9
5
3
0.69
1
8
0
0
6
3
6
16
120
22
58
28
42
40
46
17
39
5
1
3
18
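Because SQLAlchemy selectables are immutable, `add_column_to_subquery` has to rebuild the inner SELECT and re-materialise the CTE. A simplified standalone sketch of that rebuild step, with illustrative table and label names:

from sqlalchemy import Column, Integer, MetaData, Table, func, select

md = MetaData()
orders = Table("orders", md,
               Column("id", Integer, primary_key=True),
               Column("user_id", Integer),
               Column("amount", Integer))

# initial aggregation subquery: one aggregate per user
inner = select(orders.c.user_id, func.count().label("count")).group_by(orders.c.user_id)
agg = inner.cte("orders_agg")

# rebuild with one more aggregate column: extend the inner SELECT,
# then re-create the CTE under the same name
inner = inner.add_columns(func.sum(orders.c.amount).label("sum"))
agg = inner.cte("orders_agg")
print(select(agg).compile())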
327,958
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.Conjunction
from dataclasses import dataclass
from sqlalchemy.sql import ColumnElement, SQLColumnExpression
import dataclasses
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select


@dataclass
class Conjunction:
    """Represents a group of SQLAlchemy filter expressions and their associated joins.

    A conjunction typically corresponds to a set of conditions that are ANDed
    together in a WHERE clause. It also tracks the common join path required
    by these expressions to ensure correct query construction.

    Attributes:
        expressions: A list of SQLAlchemy boolean column elements representing
            the filter conditions.
        joins: A list of `Join` objects required to evaluate the expressions.
        common_join_path: A list of `QueryNodeType` objects representing the
            deepest common path in the query graph shared by all expressions
            in this conjunction. This helps in optimizing join structures.
    """

    expressions: list[ColumnElement[bool]] = dataclasses.field(default_factory=list)
    joins: list[Join] = dataclasses.field(default_factory=list)
    common_join_path: list[QueryNodeType] = dataclasses.field(default_factory=list)

    def has_many_predicates(self) -> bool:
        """Checks if the conjunction contains multiple filter predicates.

        This is true if there's more than one expression, or if the single
        expression is itself a `BooleanClauseList` (e.g., an `and_` or `or_`)
        containing multiple sub-expressions.

        Returns:
            True if there are multiple predicates, False otherwise.
        """
        if not self.expressions:
            return False
        return len(self.expressions) > 1 or (isinstance(self.expressions[0], BooleanClauseList) and len(self.expressions[0]) > 1)
@dataclass
class Conjunction:
    '''Represents a group of SQLAlchemy filter expressions and their associated joins.

    A conjunction typically corresponds to a set of conditions that are ANDed
    together in a WHERE clause. It also tracks the common join path required
    by these expressions to ensure correct query construction.

    Attributes:
        expressions: A list of SQLAlchemy boolean column elements representing
            the filter conditions.
        joins: A list of `Join` objects required to evaluate the expressions.
        common_join_path: A list of `QueryNodeType` objects representing the
            deepest common path in the query graph shared by all expressions
            in this conjunction. This helps in optimizing join structures.
    '''

    def has_many_predicates(self) -> bool:
        '''Checks if the conjunction contains multiple filter predicates.

        This is true if there's more than one expression, or if the single
        expression is itself a `BooleanClauseList` (e.g., an `and_` or `or_`)
        containing multiple sub-expressions.

        Returns:
            True if there are multiple predicates, False otherwise.
        '''
        pass
3
2
15
2
6
7
2
1.9
0
1
0
0
1
0
1
1
35
6
10
5
8
19
8
5
6
2
0
1
2
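A quick check of the rule `has_many_predicates` encodes: a single `and_()`/`or_()` expression is a `BooleanClauseList` whose length counts its sub-clauses, so it still qualifies as "many". Column names are illustrative:

from sqlalchemy import and_, column
from sqlalchemy.sql.elements import BooleanClauseList

expr = and_(column("a") > 1, column("b") < 2)
# one expression object, but two predicates inside it
print(isinstance(expr, BooleanClauseList), len(expr))  # True 2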
327,959
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.DistinctOn
from sqlalchemy.orm import QueryableAttribute, RelationshipDirection, RelationshipProperty, aliased, class_mapper, raiseload
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast
from .exceptions import TranspilingError
from strawchemy.strawberry.dto import BooleanFilterDTO, EnumDTO, Filter, GraphQLFieldDefinition, OrderByDTO, OrderByEnum, QueryNode
from dataclasses import dataclass


@dataclass
class DistinctOn:
    """Manages the DISTINCT ON clause for a SQLAlchemy query.

    This class is responsible for generating the expressions for a `DISTINCT ON`
    clause. It ensures that the fields used in `DISTINCT ON` are compatible with
    the database and align with the initial fields of any `ORDER BY` clause,
    which is a requirement for `DISTINCT ON` in PostgreSQL.

    Attributes:
        query_graph: The `QueryGraph` instance providing context about the overall
            query structure, including selected fields and ordering, which is
            necessary to validate and construct the `DISTINCT ON` clause.
    """

    query_graph: QueryGraph[Any]

    @property
    def _distinct_on_fields(self) -> list[GraphQLFieldDefinition]:
        """Extracts the fields relevant for the DISTINCT ON clause.

        These fields are derived from the `distinct_on` attribute of the `query_graph`.

        Returns:
            A list of `GraphQLFieldDefinition` instances for the DISTINCT ON clause.
        """
        return [enum.field_definition for enum in self.query_graph.distinct_on]

    @property
    def expressions(self) -> list[QueryableAttribute[Any]]:
        """Creates DISTINCT ON expressions from the fields specified in the query graph.

        This method retrieves the fields intended for `DISTINCT ON` using
        `_distinct_on_fields`. It then validates these fields against the
        `order_by_nodes` from the `query_graph`. For `DISTINCT ON` to be valid
        (especially in PostgreSQL), the expressions in `DISTINCT ON` must match
        the leftmost expressions in the `ORDER BY` clause.

        Returns:
            A list of SQLAlchemy `QueryableAttribute` objects that can be used
            in a `SELECT.distinct(*attributes)` call.

        Raises:
            TranspilingError: If the `DISTINCT ON` fields do not correspond to
                the leftmost `ORDER BY` fields, or if `ORDER BY` is not specified
                when `DISTINCT ON` is used (and the database requires it).
        """
        for i, distinct_field in enumerate(self._distinct_on_fields):
            if i > len(self.query_graph.order_by_nodes) - 1:
                break
            if self.query_graph.order_by_nodes[i].value.model_field is distinct_field.model_field:
                continue
            msg = 'Distinct on fields must match the leftmost order by fields'
            raise TranspilingError(msg)
        return [field.model_field.adapt_to_entity(inspect(self.query_graph.scope.root_alias)) for field in self._distinct_on_fields]

    def __bool__(self) -> bool:
        """Checks if any DISTINCT ON fields are specified in the query graph.

        Returns:
            True if `query_graph.distinct_on` is populated, False otherwise.
        """
        return bool(self.expressions)
@dataclass
class DistinctOn:
    '''Manages the DISTINCT ON clause for a SQLAlchemy query.

    This class is responsible for generating the expressions for a `DISTINCT ON`
    clause. It ensures that the fields used in `DISTINCT ON` are compatible with
    the database and align with the initial fields of any `ORDER BY` clause,
    which is a requirement for `DISTINCT ON` in PostgreSQL.

    Attributes:
        query_graph: The `QueryGraph` instance providing context about the overall
            query structure, including selected fields and ordering, which is
            necessary to validate and construct the `DISTINCT ON` clause.
    '''

    @property
    def _distinct_on_fields(self) -> list[GraphQLFieldDefinition]:
        '''Extracts the fields relevant for the DISTINCT ON clause.

        These fields are derived from the `distinct_on` attribute of the `query_graph`.

        Returns:
            A list of `GraphQLFieldDefinition` instances for the DISTINCT ON clause.
        '''
        pass

    @property
    def expressions(self) -> list[QueryableAttribute[Any]]:
        '''Creates DISTINCT ON expressions from the fields specified in the query graph.

        This method retrieves the fields intended for `DISTINCT ON` using
        `_distinct_on_fields`. It then validates these fields against the
        `order_by_nodes` from the `query_graph`. For `DISTINCT ON` to be valid
        (especially in PostgreSQL), the expressions in `DISTINCT ON` must match
        the leftmost expressions in the `ORDER BY` clause.

        Returns:
            A list of SQLAlchemy `QueryableAttribute` objects that can be used
            in a `SELECT.distinct(*attributes)` call.

        Raises:
            TranspilingError: If the `DISTINCT ON` fields do not correspond to
                the leftmost `ORDER BY` fields, or if `ORDER BY` is not specified
                when `DISTINCT ON` is used (and the database requires it).
        '''
        pass

    def __bool__(self) -> bool:
        '''Checks if any DISTINCT ON fields are specified in the query graph.

        Returns:
            True if `query_graph.distinct_on` is populated, False otherwise.
        '''
        pass
7
4
15
2
5
8
2
1.65
0
6
2
0
3
0
3
3
65
12
20
8
14
33
15
6
11
4
0
2
6
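The PostgreSQL constraint `DistinctOn.expressions` validates can be seen directly in SQL: the `DISTINCT ON` expressions must be the leftmost `ORDER BY` expressions. A standalone sketch with an illustrative table:

from sqlalchemy import Column, Integer, MetaData, String, Table, select
from sqlalchemy.dialects import postgresql

md = MetaData()
t = Table("event", md,
          Column("user_id", Integer),
          Column("ts", Integer),
          Column("kind", String))

stmt = (
    select(t)
    .distinct(t.c.user_id)                 # DISTINCT ON (user_id)
    .order_by(t.c.user_id, t.c.ts.desc())  # user_id must come first
)
print(stmt.compile(dialect=postgresql.dialect()))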
327,960
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.HookApplier
from dataclasses import dataclass
from sqlalchemy.orm.util import AliasedClass
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select
from .typing import DeclarativeT, OrderBySpec
import dataclasses
from collections import defaultdict


@dataclass
class HookApplier:
    """Manages and applies query hooks to SQLAlchemy SELECT statements.

    This class is responsible for invoking registered `QueryHook` instances
    at appropriate points during the construction of a SQLAlchemy query.
    Hooks can modify the statement, for example, by adding columns, applying
    transformations, or changing loader options, based on the current
    `QueryNodeType` being processed.

    Attributes:
        scope: The `QueryScope` providing context (e.g., root model, database
            features) that might be relevant for hook execution.
        hooks: A `defaultdict` mapping `QueryNodeType` instances to a list of
            `QueryHook` objects. This allows multiple hooks to be registered
            and applied for the same node type.
    """

    scope: QueryScope[Any]
    hooks: defaultdict[QueryNodeType, list[QueryHook[Any]]] = dataclasses.field(default_factory=lambda: defaultdict(list))

    def apply(self, statement: Select[tuple[DeclarativeT]], node: QueryNodeType, alias: AliasedClass[Any], loading_mode: ColumnLoadingMode, in_subquery: bool=False) -> tuple[Select[tuple[DeclarativeT]], list[_AbstractLoad]]:
        """Applies registered hooks for a given node to the SELECT statement.

        This method iterates through all `QueryHook` instances registered for the
        specified `node`. For each hook, it applies transformations to the
        `statement` and collects SQLAlchemy loader options.

        The application process for each hook involves:
        1. `hook.apply_hook(statement, alias)`: For general statement modifications.
        2. `hook.load_columns(statement, alias, loading_mode)`: For adding columns
           or defining column loading strategies. Loader options are collected.
        3. `hook.load_relationships(...)`: If `in_subquery` is False, this allows
           hooks to define relationship loading strategies. The target alias for
           the relationship is resolved using `self.scope.alias_from_relation_node`.
           Loader options are collected.

        Args:
            statement: The SQLAlchemy `Select` statement to modify.
            node: The `QueryNodeType` identifying which set of hooks to apply.
            alias: The `AliasedClass` representing the current ORM context for the hooks.
            loading_mode: Specifies the general strategy for loading columns,
                which hooks can customize.
            in_subquery: If True, relationship loading hooks are skipped, as they
                are typically not applicable within subqueries. Defaults to False.

        Returns:
            A tuple containing the modified `Select` statement and a list of
            SQLAlchemy loader options (`_AbstractLoad`) accumulated from the hooks.
        """
        options: list[_AbstractLoad] = []
        for hook in self.hooks[node]:
            statement = hook.apply_hook(statement, alias)
            statement, column_options = hook.load_columns(statement, alias, loading_mode)
            options.extend(column_options)
            if not in_subquery:
                options.extend(hook.load_relationships(self.scope.alias_from_relation_node(node, 'target')))
        return (statement, options)
@dataclass
class HookApplier:
    '''Manages and applies query hooks to SQLAlchemy SELECT statements.

    This class is responsible for invoking registered `QueryHook` instances
    at appropriate points during the construction of a SQLAlchemy query.
    Hooks can modify the statement, for example, by adding columns, applying
    transformations, or changing loader options, based on the current
    `QueryNodeType` being processed.

    Attributes:
        scope: The `QueryScope` providing context (e.g., root model, database
            features) that might be relevant for hook execution.
        hooks: A `defaultdict` mapping `QueryNodeType` instances to a list of
            `QueryHook` objects. This allows multiple hooks to be registered
            and applied for the same node type.
    '''

    def apply(self, statement: Select[tuple[DeclarativeT]], node: QueryNodeType, alias: AliasedClass[Any], loading_mode: ColumnLoadingMode, in_subquery: bool=False) -> tuple[Select[tuple[DeclarativeT]], list[_AbstractLoad]]:
        '''Applies registered hooks for a given node to the SELECT statement.

        This method iterates through all `QueryHook` instances registered for the
        specified `node`. For each hook, it applies transformations to the
        `statement` and collects SQLAlchemy loader options.

        The application process for each hook involves:
        1. `hook.apply_hook(statement, alias)`: For general statement modifications.
        2. `hook.load_columns(statement, alias, loading_mode)`: For adding columns
           or defining column loading strategies. Loader options are collected.
        3. `hook.load_relationships(...)`: If `in_subquery` is False, this allows
           hooks to define relationship loading strategies. The target alias for
           the relationship is resolved using `self.scope.alias_from_relation_node`.
           Loader options are collected.

        Args:
            statement: The SQLAlchemy `Select` statement to modify.
            node: The `QueryNodeType` identifying which set of hooks to apply.
            alias: The `AliasedClass` representing the current ORM context for the hooks.
            loading_mode: Specifies the general strategy for loading columns,
                which hooks can customize.
            in_subquery: If True, relationship loading hooks are skipped, as they
                are typically not applicable within subqueries. Defaults to False.

        Returns:
            A tuple containing the modified `Select` statement and a list of
            SQLAlchemy loader options (`_AbstractLoad`) accumulated from the hooks.
        '''
        pass
3
2
44
4
16
24
3
1.76
0
4
0
0
1
0
1
1
66
8
21
13
12
37
12
6
10
3
0
2
3
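A generic sketch of the statement-threading loop `HookApplier.apply` performs, reduced to plain callables; the `Hook` signature here is an illustrative simplification of strawchemy's `QueryHook`, not its actual protocol:

from typing import Any, Callable

# each hook receives the current statement and returns a (possibly new)
# statement plus loader options to accumulate
Hook = Callable[[Any], tuple[Any, list[Any]]]

def apply_hooks(statement: Any, hooks: list[Hook]) -> tuple[Any, list[Any]]:
    options: list[Any] = []
    for hook in hooks:
        statement, hook_options = hook(statement)
        options.extend(hook_options)
    return statement, options

# usage: a hook that leaves the statement alone but contributes an option
print(apply_hooks("SELECT ...", [lambda s: (s, ["raiseload('*')"])]))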
327,961
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.Join
from sqlalchemy.orm import QueryableAttribute, RelationshipDirection, RelationshipProperty, aliased, class_mapper, raiseload
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast
from typing_extensions import Self
from sqlalchemy.orm.util import AliasedClass


class Join:
    """Represents a join to be applied to a SQLAlchemy query.

    This class encapsulates information about a join, including the target
    entity, the corresponding query node, join conditions, and ordering
    information.

    Attributes:
        target: The SQLAlchemy entity, CTE, or aliased class to join with.
        node: The query node type representing this join in the query graph.
        onclause: Optional custom ON clause for the join.
        is_outer: Whether this join is an outer join (LEFT OUTER JOIN).
        order_nodes: List of query nodes that define the order within this join,
            particularly relevant for ordered relationships.
    """

    def __init__(self, target: Union[Union[QueryableAttribute[Any], NamedFromClause], AliasedClass[Any]], node: QueryNodeType, onclause: Optional[_OnClauseArgument]=None, is_outer: bool=False, order_nodes: Optional[list[QueryNodeType]]=None) -> None:
        self.target = target
        self.node = node
        self.onclause = onclause
        self.is_outer = is_outer
        self.order_nodes = order_nodes if order_nodes is not None else []

    @property
    def _relationship(self) -> RelationshipProperty[Any]:
        """The SQLAlchemy RelationshipProperty associated with this join node."""
        return cast('RelationshipProperty[Any]', self.node.value.model_field.property)

    @property
    def selectable(self) -> NamedFromClause:
        """The SQLAlchemy selectable (table, CTE, etc.) for this join target."""
        if isinstance(self.target, AliasedClass):
            return cast('NamedFromClause', inspect(self.target).selectable)
        return cast('NamedFromClause', self.target)

    @property
    def order(self) -> int:
        """The order (depth level) of this join in the query graph."""
        return self.node.level

    @property
    def name(self) -> str:
        """The name of the selectable for this join."""
        return self.selectable.name

    @property
    def to_many(self) -> bool:
        """Whether this join represents a to-many relationship."""
        return self._relationship.direction in {RelationshipDirection.MANYTOMANY, RelationshipDirection.ONETOMANY}

    def __gt__(self, other: Self) -> bool:
        """Compares this join with another based on their order (depth)."""
        return self.order > other.order

    def __lt__(self, other: Self) -> bool:
        """Compares this join with another based on their order (depth)."""
        return self.order < other.order

    def __le__(self, other: Self) -> bool:
        """Compares this join with another based on their order (depth)."""
        return self.order <= other.order

    def __ge__(self, other: Self) -> bool:
        """Compares this join with another based on their order (depth)."""
        return self.order >= other.order
class Join:
    '''Represents a join to be applied to a SQLAlchemy query.

    This class encapsulates information about a join, including the target
    entity, the corresponding query node, join conditions, and ordering
    information.

    Attributes:
        target: The SQLAlchemy entity, CTE, or aliased class to join with.
        node: The query node type representing this join in the query graph.
        onclause: Optional custom ON clause for the join.
        is_outer: Whether this join is an outer join (LEFT OUTER JOIN).
        order_nodes: List of query nodes that define the order within this join,
            particularly relevant for ordered relationships.
    '''

    def __init__(self, target: Union[Union[QueryableAttribute[Any], NamedFromClause], AliasedClass[Any]], node: QueryNodeType, onclause: Optional[_OnClauseArgument]=None, is_outer: bool=False, order_nodes: Optional[list[QueryNodeType]]=None) -> None:
        pass

    @property
    def _relationship(self) -> RelationshipProperty[Any]:
        '''The SQLAlchemy RelationshipProperty associated with this join node.'''
        pass

    @property
    def selectable(self) -> NamedFromClause:
        '''The SQLAlchemy selectable (table, CTE, etc.) for this join target.'''
        pass

    @property
    def order(self) -> int:
        '''The order (depth level) of this join in the query graph.'''
        pass

    @property
    def name(self) -> str:
        '''The name of the selectable for this join.'''
        pass

    @property
    def to_many(self) -> bool:
        '''Whether this join represents a to-many relationship.'''
        pass

    def __gt__(self, other: Self) -> bool:
        '''Compares this join with another based on their order (depth).'''
        pass

    def __lt__(self, other: Self) -> bool:
        '''Compares this join with another based on their order (depth).'''
        pass

    def __le__(self, other: Self) -> bool:
        '''Compares this join with another based on their order (depth).'''
        pass

    def __ge__(self, other: Self) -> bool:
        '''Compares this join with another based on their order (depth).'''
        pass
16
10
5
0
4
1
1
0.48
0
5
0
1
10
5
10
10
74
12
42
28
19
20
27
16
16
2
0
1
12
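A toy illustration of why `Join` defines its rich comparisons on node depth: sorting joins by level yields parents before children, which is the order the joins must be chained in. The stand-in class and paths are illustrative:

import functools

@functools.total_ordering
class PathedJoin:
    # stand-in for Join: compare on depth so sorted() yields parents first
    def __init__(self, path: str, level: int) -> None:
        self.path, self.level = path, level

    def __eq__(self, other: "PathedJoin") -> bool:
        return self.level == other.level

    def __lt__(self, other: "PathedJoin") -> bool:
        return self.level < other.level

joins = [PathedJoin("author.books", 2), PathedJoin("author", 1)]
print([j.path for j in sorted(joins)])  # ['author', 'author.books']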
327,962
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.OrderBy
from .typing import DeclarativeT, OrderBySpec
from sqlalchemy.sql import ColumnElement, SQLColumnExpression
from dataclasses import dataclass
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select
import dataclasses
from strawchemy.strawberry.dto import BooleanFilterDTO, EnumDTO, Filter, GraphQLFieldDefinition, OrderByDTO, OrderByEnum, QueryNode
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast


@dataclass
class OrderBy:
    """Manages the ORDER BY clause components for a SQLAlchemy query.

    This class stores the columns to order by, their respective ordering
    directions (ASC, DESC, with NULLS FIRST/LAST handling), and any joins
    required to access these columns. It also considers database-specific
    features for NULL ordering.

    Attributes:
        db_features: An instance of `DatabaseFeatures` providing information about
            the capabilities of the target database (e.g., support for NULLS FIRST/LAST).
        columns: A list of tuples, where each tuple contains a SQLAlchemy column
            expression and an `OrderByEnum` value specifying the ordering for that column.
        joins: A list of `Join` objects required to access the columns specified
            in the ORDER BY clause.
    """

    db_features: DatabaseFeatures
    columns: list[OrderBySpec] = dataclasses.field(default_factory=list)
    joins: list[Join] = dataclasses.field(default_factory=list)

    def _order_by(self, column: SQLColumnExpression[Any], order_by: OrderByEnum) -> list[UnaryExpression[Any]]:
        """Creates order by expressions for a given column and ordering direction.

        Args:
            column: The column or attribute to order by.
            order_by: The order by enum value (ASC, DESC, etc.).

        Returns:
            A list of unary expressions representing the order by clause.
        """
        expressions: list[UnaryExpression[Any]] = []
        if order_by is OrderByEnum.ASC:
            expressions.append(column.asc())
        elif order_by is OrderByEnum.DESC:
            expressions.append(column.desc())
        elif order_by is OrderByEnum.ASC_NULLS_FIRST and self.db_features.supports_null_ordering:
            expressions.append(column.asc().nulls_first())
        elif order_by is OrderByEnum.ASC_NULLS_FIRST:
            expressions.extend([column.is_(null()).desc(), column.asc()])
        elif order_by is OrderByEnum.ASC_NULLS_LAST and self.db_features.supports_null_ordering:
            expressions.append(column.asc().nulls_last())
        elif order_by is OrderByEnum.ASC_NULLS_LAST:
            expressions.extend([column.is_(null()).asc(), column.asc()])
        elif order_by is OrderByEnum.DESC_NULLS_FIRST and self.db_features.supports_null_ordering:
            expressions.append(column.desc().nulls_first())
        elif order_by is OrderByEnum.DESC_NULLS_FIRST:
            expressions.extend([column.is_(null()).desc(), column.desc()])
        elif order_by is OrderByEnum.DESC_NULLS_LAST and self.db_features.supports_null_ordering:
            expressions.append(column.desc().nulls_last())
        elif order_by is OrderByEnum.DESC_NULLS_LAST:
            expressions.extend([column.is_(null()).asc(), column.desc()])
        return expressions

    @property
    def expressions(self) -> list[UnaryExpression[Any]]:
        """Generates a list of SQLAlchemy UnaryExpression objects for the ORDER BY clause.

        This method iterates through the `columns` and uses the `_order_by` method
        to convert each column and its ordering specification into the appropriate
        SQLAlchemy expression (e.g., `column.asc()`, `column.desc().nulls_first()`).

        Returns:
            A list of SQLAlchemy UnaryExpression objects ready to be applied to a query.
        """
        expressions: list[UnaryExpression[Any]] = []
        for column, order_by in self.columns:
            expressions.extend(self._order_by(column, order_by))
        return expressions
@dataclass
class OrderBy:
    '''Manages the ORDER BY clause components for a SQLAlchemy query.

    This class stores the columns to order by, their respective ordering
    directions (ASC, DESC, with NULLS FIRST/LAST handling), and any joins
    required to access these columns. It also considers database-specific
    features for NULL ordering.

    Attributes:
        db_features: An instance of `DatabaseFeatures` providing information about
            the capabilities of the target database (e.g., support for NULLS FIRST/LAST).
        columns: A list of tuples, where each tuple contains a SQLAlchemy column
            expression and an `OrderByEnum` value specifying the ordering for that column.
        joins: A list of `Join` objects required to access the columns specified
            in the ORDER BY clause.
    '''

    def _order_by(self, column: SQLColumnExpression[Any], order_by: OrderByEnum) -> list[UnaryExpression[Any]]:
        '''Creates order by expressions for a given column and ordering direction.

        Args:
            column: The column or attribute to order by.
            order_by: The order by enum value (ASC, DESC, etc.).

        Returns:
            A list of unary expressions representing the order by clause.
        '''
        pass

    @property
    def expressions(self) -> list[UnaryExpression[Any]]:
        '''Generates a list of SQLAlchemy UnaryExpression objects for the ORDER BY clause.

        This method iterates through the `columns` and uses the `_order_by` method
        to convert each column and its ordering specification into the appropriate
        SQLAlchemy expression (e.g., `column.asc()`, `column.desc().nulls_first()`).

        Returns:
            A list of SQLAlchemy UnaryExpression objects ready to be applied to a query.
        '''
        pass
5
3
23
2
14
7
7
0.79
0
3
1
0
2
0
2
2
68
9
33
9
29
26
23
8
20
11
0
1
13
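The fallback branches in `_order_by` can be reproduced standalone: when a backend lacks native NULLS FIRST/LAST, ordering on an `IS NULL` expression first gives the same effect. The table is illustrative:

from sqlalchemy import Column, Integer, MetaData, Table, null, select

md = MetaData()
t = Table("score", md, Column("points", Integer))

# ASC NULLS FIRST without native support: NULL rows sort first because
# "points IS NULL" is true (and DESC puts true before false)
stmt = select(t).order_by(t.c.points.is_(null()).desc(), t.c.points.asc())
print(stmt.compile())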
327,963
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.Query
from .typing import DeclarativeT, OrderBySpec
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast
import dataclasses
from sqlalchemy.sql import ColumnElement, SQLColumnExpression
from dataclasses import dataclass
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select


@dataclass
class Query:
    """Encapsulates all components required to build a SQLAlchemy query.

    This class acts as a container for various parts of a query, such as
    database-specific features, distinct clauses, joins, filtering conditions
    (WHERE), ordering (ORDER BY), root-level aggregation functions, and
    pagination (limit/offset). It provides a structured way to assemble these
    components before generating the final SQLAlchemy `Select` statement.

    Attributes:
        db_features: An instance of `DatabaseFeatures` providing information
            about the capabilities of the target database.
        distinct_on: A `DistinctOn` object managing the expressions for a
            `DISTINCT ON` clause.
        joins: A list of `Join` objects representing the joins to be applied.
        where: An optional `Where` object containing the filter conditions.
        order_by: An optional `OrderBy` object specifying the sorting criteria.
        root_aggregation_functions: A list of SQLAlchemy `Label` objects for
            aggregations performed at the root level of the query (e.g., total counts).
        limit: An optional integer specifying the maximum number of rows to return.
        offset: An optional integer specifying the number of rows to skip before
            starting to return rows.
        use_distinct_on: A boolean flag indicating whether `DISTINCT ON` should be
            actively applied. This can depend on database support and query structure.
    """

    db_features: DatabaseFeatures
    distinct_on: DistinctOn
    joins: list[Join] = dataclasses.field(default_factory=list)
    where: Optional[Where] = None
    order_by: Optional[OrderBy] = None
    root_aggregation_functions: list[Label[Any]] = dataclasses.field(default_factory=list)
    limit: Optional[int] = None
    offset: Optional[int] = None
    use_distinct_on: bool = False

    def _distinct_on(self, statement: Select[Any], order_by_expressions: list[UnaryExpression[Any]]) -> Select[Any]:
        """Applies DISTINCT ON expressions to the SELECT statement.

        If `self.use_distinct_on` is True, this method modifies the given `statement`
        to include `DISTINCT ON` behavior. It retrieves distinct expressions from
        `self.distinct_on`.

        Crucially, for `DISTINCT ON` to work correctly (especially in PostgreSQL),
        the columns in the `ORDER BY` clause must be available in the `SELECT` list
        if they are part of the `DISTINCT ON` criteria or if `DISTINCT ON` is used
        at all with an `ORDER BY`. This method ensures that any such necessary
        columns from `order_by_expressions` are added to the statement's selected
        columns before applying `.distinct()`.

        Args:
            statement: The SQLAlchemy `Select` statement to modify.
            order_by_expressions: A list of `UnaryExpression` objects representing
                the `ORDER BY` clause, used to ensure necessary columns are selected.

        Returns:
            The modified `Select` statement, potentially with added columns and
            a `.distinct()` clause applied.
        """
        distinct_expressions = self.distinct_on.expressions if self.distinct_on else []
        if self.use_distinct_on:
            statement = statement.add_columns(
                *[
                    expression.element
                    for expression in order_by_expressions
                    if isinstance(expression.element, ColumnElement)
                    and not any(elem.compare(expression.element) for elem in statement.selected_columns)
                ]
            )
            statement = statement.distinct(*distinct_expressions)
        return statement

    @property
    def joins_have_many(self) -> bool:
        """Checks if any of the configured joins are to-many relationships.

        Returns:
            True if at least one join in `self.joins` has its `to_many` attribute
            set to True, indicating a join across a one-to-many or many-to-many
            relationship. False otherwise.
        """
        return next((True for join in self.joins if join.to_many), False)

    def statement(self, base_statement: Select[tuple[DeclarativeT]]) -> Select[tuple[DeclarativeT]]:
        """Constructs the final SQLAlchemy Select statement from the query components.

        This method takes a base SELECT statement (usually selecting from the
        root entity) and applies all configured query parts in a specific order:
        1. Joins (sorted by their defined order).
        2. WHERE clause conditions.
        3. ORDER BY clauses.
        4. DISTINCT ON clauses (if applicable, potentially modifying selected columns).
        5. LIMIT and OFFSET for pagination.
        6. Root-level aggregation functions are added to the selected columns.

        Args:
            base_statement: The initial SQLAlchemy `Select` object, typically
                selecting from the primary model or its alias.

        Returns:
            The fully constructed SQLAlchemy `Select` statement, incorporating
            all joins, filters, ordering, pagination, and aggregations.
        """
        sorted_joins = sorted(self.joins)
        distinct_expressions = self.distinct_on.expressions if self.distinct_on else []
        order_by_expressions = self.order_by.expressions if self.order_by else []
        for join in sorted_joins:
            base_statement = base_statement.join(join.target, onclause=join.onclause, isouter=join.is_outer)
        if self.where and self.where.expressions:
            base_statement = base_statement.where(*self.where.expressions)
        if order_by_expressions:
            base_statement = base_statement.order_by(*order_by_expressions)
        if distinct_expressions:
            base_statement = self._distinct_on(base_statement, order_by_expressions)
        if self.limit is not None:
            base_statement = base_statement.limit(self.limit)
        if self.offset is not None:
            base_statement = base_statement.offset(self.offset)
        return base_statement.add_columns(*self.root_aggregation_functions)
@dataclass
class Query:
    '''Encapsulates all components required to build a SQLAlchemy query.

    This class acts as a container for various parts of a query, such as
    database-specific features, distinct clauses, joins, filtering conditions
    (WHERE), ordering (ORDER BY), root-level aggregation functions, and
    pagination (limit/offset). It provides a structured way to assemble these
    components before generating the final SQLAlchemy `Select` statement.

    Attributes:
        db_features: An instance of `DatabaseFeatures` providing information
            about the capabilities of the target database.
        distinct_on: A `DistinctOn` object managing the expressions for a
            `DISTINCT ON` clause.
        joins: A list of `Join` objects representing the joins to be applied.
        where: An optional `Where` object containing the filter conditions.
        order_by: An optional `OrderBy` object specifying the sorting criteria.
        root_aggregation_functions: A list of SQLAlchemy `Label` objects for
            aggregations performed at the root level of the query (e.g., total counts).
        limit: An optional integer specifying the maximum number of rows to return.
        offset: An optional integer specifying the number of rows to skip before
            starting to return rows.
        use_distinct_on: A boolean flag indicating whether `DISTINCT ON` should be
            actively applied. This can depend on database support and query structure.
    '''

    def _distinct_on(self, statement: Select[Any], order_by_expressions: list[UnaryExpression[Any]]) -> Select[Any]:
        '''Applies DISTINCT ON expressions to the SELECT statement.

        If `self.use_distinct_on` is True, this method modifies the given `statement`
        to include `DISTINCT ON` behavior. It retrieves distinct expressions from
        `self.distinct_on`.

        Crucially, for `DISTINCT ON` to work correctly (especially in PostgreSQL),
        the columns in the `ORDER BY` clause must be available in the `SELECT` list
        if they are part of the `DISTINCT ON` criteria or if `DISTINCT ON` is used
        at all with an `ORDER BY`. This method ensures that any such necessary
        columns from `order_by_expressions` are added to the statement's selected
        columns before applying `.distinct()`.

        Args:
            statement: The SQLAlchemy `Select` statement to modify.
            order_by_expressions: A list of `UnaryExpression` objects representing
                the `ORDER BY` clause, used to ensure necessary columns are selected.

        Returns:
            The modified `Select` statement, potentially with added columns and
            a `.distinct()` clause applied.
        '''
        pass

    @property
    def joins_have_many(self) -> bool:
        '''Checks if any of the configured joins are to-many relationships.

        Returns:
            True if at least one join in `self.joins` has its `to_many` attribute
            set to True, indicating a join across a one-to-many or many-to-many
            relationship. False otherwise.
        '''
        pass

    def statement(self, base_statement: Select[tuple[DeclarativeT]]) -> Select[tuple[DeclarativeT]]:
        '''Constructs the final SQLAlchemy Select statement from the query components.

        This method takes a base SELECT statement (usually selecting from the
        root entity) and applies all configured query parts in a specific order:
        1. Joins (sorted by their defined order).
        2. WHERE clause conditions.
        3. ORDER BY clauses.
        4. DISTINCT ON clauses (if applicable, potentially modifying selected columns).
        5. LIMIT and OFFSET for pagination.
        6. Root-level aggregation functions are added to the selected columns.

        Args:
            base_statement: The initial SQLAlchemy `Select` object, typically
                selecting from the primary model or its alias.

        Returns:
            The fully constructed SQLAlchemy `Select` statement, incorporating
            all joins, filters, ordering, pagination, and aggregations.
        '''
        pass
6
4
28
4
11
14
4
1.49
0
4
0
0
3
0
3
3
123
17
43
18
38
64
35
16
31
9
0
1
13
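A standalone sketch of the clause-application order `Query.statement` follows (joins, then WHERE, ORDER BY, LIMIT/OFFSET, then extra root-aggregation columns), using illustrative tables:

from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table, func, select

md = MetaData()
users = Table("users", md, Column("id", Integer, primary_key=True))
posts = Table("posts", md,
              Column("id", Integer, primary_key=True),
              Column("user_id", ForeignKey("users.id")))

stmt = (
    select(users)
    .join(posts, posts.c.user_id == users.c.id)          # 1. joins
    .where(posts.c.id > 10)                              # 2. where
    .order_by(users.c.id)                                # 3. order by
    .limit(20)                                           # 5. limit
    .offset(40)                                          #    offset
    .add_columns(func.count().over().label("total"))     # 6. root aggregation
)
print(stmt.compile())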
327,964
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.QueryGraph
from strawchemy.graph import merge_trees
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast
import dataclasses
from strawchemy.strawberry.dto import BooleanFilterDTO, EnumDTO, Filter, GraphQLFieldDefinition, OrderByDTO, OrderByEnum, QueryNode
from dataclasses import dataclass
from functools import cached_property
from .typing import DeclarativeT, OrderBySpec
from strawchemy.constants import AGGREGATIONS_KEY, NODES_KEY


@dataclass
class QueryGraph(Generic[DeclarativeT]):
    """Represents the structure and components of a GraphQL query to be translated to SQLAlchemy.

    This class holds information about the selected fields (selection_tree),
    ordering, distinct clauses, and filters. It processes these components to
    build various join trees (root, where, subquery) necessary for constructing
    the final SQLAlchemy query.

    Attributes:
        scope: The QueryScope, providing context about the root model and database features.
        selection_tree: The root node of the GraphQL query's selection set.
        order_by: A sequence of OrderByDTOs specifying how the results should be ordered.
        distinct_on: A list of EnumDTOs specifying fields for a DISTINCT ON clause.
        dto_filter: A BooleanFilterDTO representing the filtering conditions.
        query_filter: The processed Filter object derived from dto_filter.
        where_join_tree: The join tree required by the WHERE clause filters.
        subquery_join_tree: The join tree required by subqueries (often for
            aggregations or complex filters).
        root_join_tree: The main join tree representing all required joins for the query.
        order_by_nodes: A list of query nodes involved in the ORDER BY clause.
    """

    scope: QueryScope[DeclarativeT]
    selection_tree: Optional[QueryNodeType] = None
    order_by: Sequence[OrderByDTO] = dataclasses.field(default_factory=list)
    distinct_on: list[EnumDTO] = dataclasses.field(default_factory=list)
    dto_filter: Optional[BooleanFilterDTO] = None
    query_filter: Optional[Filter] = dataclasses.field(init=False, default=None)
    where_join_tree: Optional[QueryNodeType] = dataclasses.field(init=False, default=None)
    subquery_join_tree: Optional[QueryNodeType] = dataclasses.field(init=False, default=None)
    root_join_tree: QueryNodeType = dataclasses.field(init=False)
    order_by_nodes: list[QueryNodeType] = dataclasses.field(init=False, default_factory=list)

    def __post_init__(self) -> None:
        """Initializes various join trees based on the selection, filters, and ordering.

        This method constructs the `root_join_tree`, `where_join_tree`, and
        `subquery_join_tree` by merging trees derived from the selection set,
        filters, order by clauses, and distinct on clauses.
        """
        self.root_join_tree = self.resolved_selection_tree()
        if self.dto_filter is not None:
            self.where_join_tree, self.query_filter = self.dto_filter.filters_tree()
            self.subquery_join_tree = self.where_join_tree
            self.root_join_tree = merge_trees(self.root_join_tree, self.where_join_tree, match_on='value_equality')
        if self.order_by_tree:
            self.root_join_tree = merge_trees(self.root_join_tree, self.order_by_tree, match_on='value_equality')
            self.subquery_join_tree = merge_trees(self.subquery_join_tree, self.order_by_tree, match_on='value_equality') if self.subquery_join_tree else self.order_by_tree
            self.order_by_nodes = sorted(self.order_by_tree.leaves())

    def resolved_selection_tree(self) -> QueryNodeType:
        """Resolves the selection tree by adding root aggregations and selection functions.

        This method processes the selection tree to include root aggregations
        and selection functions, ensuring that all necessary nodes are included
        for the query.

        Returns:
            The resolved selection tree.
        """
        tree = self.selection_tree
        if tree and tree.graph_metadata.metadata.root_aggregations:
            tree = tree.find_child(lambda child: child.value.name == NODES_KEY) if tree else None
        if tree is None:
            tree = QueryNode.root_node(self.scope.model)
            for field in self.scope.id_field_definitions(self.scope.model):
                tree.insert_child(field)
        for node in tree.leaves(iteration_mode='breadth_first'):
            if node.value.is_function:
                self.scope.selection_function_nodes.add(node)
        return tree

    @cached_property
    def order_by_tree(self) -> Optional[QueryNodeType]:
        """Creates a query node tree from the order by DTOs of this query graph.

        Returns:
            A query node tree representing the order by clauses, or None if no
            order by DTOs are present.
        """
        merged_tree: Optional[QueryNodeType] = None
        max_order: int = 0
        for order_by_dto in self.order_by:
            tree = order_by_dto.tree()
            orders: list[int] = []
            for leaf in sorted(tree.leaves(iteration_mode='breadth_first')):
                leaf.insert_order += max_order
                orders.append(leaf.insert_order)
            merged_tree = tree if merged_tree is None else merge_trees(merged_tree, tree, match_on='value_equality')
            max_order = max(orders) + 1
        return merged_tree

    def root_aggregation_tree(self) -> Optional[QueryNodeType]:
        if self.selection_tree:
            return self.selection_tree.find_child(lambda child: child.value.name == AGGREGATIONS_KEY)
        return None
@dataclass
class QueryGraph(Generic[DeclarativeT]):
    '''Represents the structure and components of a GraphQL query to be translated to SQLAlchemy.

    This class holds information about the selected fields (selection_tree),
    ordering, distinct clauses, and filters. It processes these components to
    build various join trees (root, where, subquery) necessary for constructing
    the final SQLAlchemy query.

    Attributes:
        scope: The QueryScope, providing context about the root model and database features.
        selection_tree: The root node of the GraphQL query's selection set.
        order_by: A sequence of OrderByDTOs specifying how the results should be ordered.
        distinct_on: A list of EnumDTOs specifying fields for a DISTINCT ON clause.
        dto_filter: A BooleanFilterDTO representing the filtering conditions.
        query_filter: The processed Filter object derived from dto_filter.
        where_join_tree: The join tree required by the WHERE clause filters.
        subquery_join_tree: The join tree required by subqueries (often for
            aggregations or complex filters).
        root_join_tree: The main join tree representing all required joins for the query.
        order_by_nodes: A list of query nodes involved in the ORDER BY clause.
    '''

    def __post_init__(self) -> None:
        '''Initializes various join trees based on the selection, filters, and ordering.

        This method constructs the `root_join_tree`, `where_join_tree`, and
        `subquery_join_tree` by merging trees derived from the selection set,
        filters, order by clauses, and distinct on clauses.
        '''
        pass

    def resolved_selection_tree(self) -> QueryNodeType:
        '''Resolves the selection tree by adding root aggregations and selection functions.

        This method processes the selection tree to include root aggregations
        and selection functions, ensuring that all necessary nodes are included
        for the query.

        Returns:
            The resolved selection tree.
        '''
        pass

    @cached_property
    def order_by_tree(self) -> Optional[QueryNodeType]:
        '''Creates a query node tree from the order by DTOs of this query graph.

        Returns:
            A query node tree representing the order by clauses, or None if no
            order by DTOs are present.
        '''
        pass

    def root_aggregation_tree(self) -> Optional[QueryNodeType]:
        pass
7
4
18
2
12
5
4
0.6
1
3
1
0
4
0
4
4
108
15
58
24
52
35
49
23
44
7
1
2
17
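A toy, dict-based sketch of the tree-merging idea behind `root_join_tree`: selection, filter, and order-by requirements are merged so each relationship path appears once. This is a stand-in, not strawchemy's graph API:

def merge(a: dict, b: dict) -> dict:
    # recursively union two requirement trees keyed by relationship name
    out = {**a}
    for key, sub in b.items():
        out[key] = merge(out.get(key, {}), sub)
    return out

selection = {"author": {"books": {}}}
filters = {"author": {"publisher": {}}}
print(merge(selection, filters))  # {'author': {'books': {}, 'publisher': {}}}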
327,965
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.SubqueryBuilder
from sqlalchemy.sql import ColumnElement, SQLColumnExpression
from functools import cached_property
import dataclasses
from sqlalchemy import CTE, AliasedReturnsRows, BooleanClauseList, Label, Lateral, Select, Subquery, UnaryExpression, func, inspect, null, select
from sqlalchemy.orm.util import AliasedClass
from .typing import DeclarativeT, OrderBySpec
from sqlalchemy.sql.elements import NamedColumn
from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast
from sqlalchemy.orm import QueryableAttribute, RelationshipDirection, RelationshipProperty, aliased, class_mapper, raiseload
from dataclasses import dataclass


@dataclass
class SubqueryBuilder(Generic[DeclarativeT]):
    """Builds and manages aliased subqueries, often for DISTINCT ON emulation.

    This utility class is responsible for constructing aliased subqueries,
    particularly useful for emulating `DISTINCT ON` behavior with window
    functions (e.g., `ROW_NUMBER()`). It handles creating an alias for the
    target model, generating unique names for helper columns (like a rank
    column), and defining conditions based on these columns.

    Attributes:
        scope: The `QueryScope` providing context for the model being queried.
        hook_applier: A `HookApplier` instance for applying query hooks.
        db_features: `DatabaseFeatures` for database-specific logic.
        alias: An `AliasedClass` for `scope.model`, initialized in `__post_init__`.
    """

    scope: QueryScope[Any]
    hook_applier: HookApplier
    db_features: DatabaseFeatures
    alias: AliasedClass[DeclarativeT] = dataclasses.field(init=False)

    def __post_init__(self) -> None:
        """Initializes `self.alias` with an aliased version of the scoped model.

        This method creates an `AliasedClass` for the model specified in
        `self.scope.model`. The alias is named using `self.name` (typically the
        table name) and is created with `flat=True` to prevent nesting if the
        model is already an alias.
        """
        self.alias = aliased(class_mapper(self.scope.model), name=self.name, flat=True)

    @cached_property
    def _distinct_on_rank_column(self) -> str:
        """Provides a unique name for the rank column used in DISTINCT ON emulation.

        This name is generated using `self.scope.key("distinct_on_rank")` to
        ensure it doesn't clash with other column names within the current query
        scope. The result is cached for efficiency.

        Returns:
            A string representing the unique name for the rank column.
        """
        return self.scope.key('distinct_on_rank')

    def distinct_on_condition(self, aliased_subquery: AliasedClass[DeclarativeT]) -> ColumnElement[bool]:
        """Generates a SQLAlchemy filter condition to select rows with rank 1.

        This method is typically used after a window function (like `ROW_NUMBER()`)
        has assigned ranks to rows. It creates a condition to filter for rows where
        the column named by `self._distinct_on_rank_column` in the provided
        `aliased_subquery` is equal to 1.

        Args:
            aliased_subquery: The `AliasedClass` instance of the subquery that
                contains the rank column.

        Returns:
            A SQLAlchemy `ColumnElement[bool]` representing the filter condition
            (e.g., `rank_column == 1`).
        """
        return inspect(aliased_subquery).selectable.columns[self._distinct_on_rank_column] == 1

    @property
    def name(self) -> str:
        """The name for the subquery alias, typically the model's table name.

        Returns:
            The `__tablename__` attribute of the model in `self.scope`.
        """
        return self.scope.model.__tablename__

    def build(self, query_graph: QueryGraph[DeclarativeT], query: Query) -> AliasedClass[DeclarativeT]:
        """Builds a subquery for complex query scenarios.

        This method constructs a subquery based on the provided `query_graph`
        and `query` object. It's primarily used to handle situations like:
        - Emulating `DISTINCT ON` with a `ROW_NUMBER()` window function when
          `query.distinct_on` is set but native `DISTINCT ON` is not used
          (`query.use_distinct_on` is False).
        - Ensuring correct pagination (limit/offset) when applied with complex
          joins or when `DISTINCT ON` is active.

        The process involves:
        1. Selecting an initial set of columns from `self.alias` based on `query_graph`.
        2. If `query.distinct_on` is set but `query.use_distinct_on` is False,
           adding a `ROW_NUMBER()` window function partitioned by distinct
           expressions and ordered by order_by expressions. The rank column is
           named using `self._distinct_on_rank_column`.
        3. Applying query hooks to the selected columns.
        4. Applying the main query components (joins, where, order by, limit,
           offset) from the `query` object to the subquery statement.
        5. Materializing the resulting statement as a named subquery.

        Args:
            query_graph: The `QueryGraph` instance providing the overall query
                structure, including selection trees and distinct/order by nodes.
            query: The `Query` object containing specific components like filters,
                ordering, pagination settings, and the `use_distinct_on` flag.

        Returns:
            An `AliasedClass` representing the built subquery, ready to be used
            in further query construction (e.g., by joining it).
        """
        statement = select(inspect(self.alias)).options(raiseload('*'))
        only_columns: list[Union[QueryableAttribute[Any], NamedColumn[Any]]] = [
            *self.scope.inspect(query_graph.root_join_tree).selection(self.alias),
            *[self.scope.aliased_attribute(node) for node in query_graph.order_by_nodes if not node.value.is_computed],
        ]
        if (aggregation_tree := query_graph.root_aggregation_tree()):
            only_columns.extend(
                self.scope.aliased_attribute(child)
                for child in aggregation_tree.leaves()
                if child.value.is_function_arg
            )
        for function_node in self.scope.referenced_function_nodes:
            only_columns.append(self.scope.columns[function_node])
            self.scope.columns[function_node] = self.scope.scoped_column(inspect(self.alias).selectable, self.scope.key(function_node))
        if query.distinct_on and not query.use_distinct_on:
            order_by_expressions = query.order_by.expressions if query.order_by else []
            rank = func.row_number().over(partition_by=query.distinct_on.expressions, order_by=order_by_expressions or None).label(self._distinct_on_rank_column)
            only_columns.append(rank)
        statement = statement.with_only_columns(*only_columns)
        statement = dataclasses.replace(query, root_aggregation_functions=[]).statement(statement)
        statement, _ = self.hook_applier.apply(statement, node=query_graph.root_join_tree.root, alias=self.scope.root_alias, loading_mode='add', in_subquery=True)
        return aliased(class_mapper(self.scope.model), statement.subquery(self.name), name=self.name)
@dataclass
class SubqueryBuilder(Generic[DeclarativeT]):
    '''Builds and manages aliased subqueries, often for DISTINCT ON emulation.

    This utility class is responsible for constructing aliased subqueries,
    particularly useful for emulating `DISTINCT ON` behavior with window
    functions (e.g., `ROW_NUMBER()`). It handles creating an alias for the
    target model, generating unique names for helper columns (like a rank
    column), and defining conditions based on these columns.

    Attributes:
        scope: The `QueryScope` providing context for the model being queried.
        hook_applier: A `HookApplier` instance for applying query hooks.
        db_features: `DatabaseFeatures` for database-specific logic.
        alias: An `AliasedClass` for `scope.model`, initialized in `__post_init__`.
    '''

    def __post_init__(self) -> None:
        '''Initializes `self.alias` with an aliased version of the scoped model.

        This method creates an `AliasedClass` for the model specified in
        `self.scope.model`. The alias is named using `self.name` (typically the
        table name) and is created with `flat=True` to prevent nesting if the
        model is already an alias.
        '''
        pass

    @cached_property
    def _distinct_on_rank_column(self) -> str:
        '''Provides a unique name for the rank column used in DISTINCT ON emulation.

        This name is generated using `self.scope.key("distinct_on_rank")` to
        ensure it doesn't clash with other column names within the current query
        scope. The result is cached for efficiency.

        Returns:
            A string representing the unique name for the rank column.
        '''
        pass

    def distinct_on_condition(self, aliased_subquery: AliasedClass[DeclarativeT]) -> ColumnElement[bool]:
        '''Generates a SQLAlchemy filter condition to select rows with rank 1.

        This method is typically used after a window function (like `ROW_NUMBER()`)
        has assigned ranks to rows. It creates a condition to filter for rows where
        the column named by `self._distinct_on_rank_column` in the provided
        `aliased_subquery` is equal to 1.

        Args:
            aliased_subquery: The `AliasedClass` instance of the subquery that
                contains the rank column.

        Returns:
            A SQLAlchemy `ColumnElement[bool]` representing the filter condition
            (e.g., `rank_column == 1`).
        '''
        pass

    @property
    def name(self) -> str:
        '''The name for the subquery alias, typically the model's table name.

        Returns:
            The `__tablename__` attribute of the model in `self.scope`.
        '''
        pass

    def build(self, query_graph: QueryGraph[DeclarativeT], query: Query) -> AliasedClass[DeclarativeT]:
        '''Builds a subquery for complex query scenarios.

        This method constructs a subquery based on the provided `query_graph`
        and `query` object. It's primarily used to handle situations like:
        - Emulating `DISTINCT ON` with a `ROW_NUMBER()` window function when
          `query.distinct_on` is set but native `DISTINCT ON` is not used
          (`query.use_distinct_on` is False).
        - Ensuring correct pagination (limit/offset) when applied with complex
          joins or when `DISTINCT ON` is active.

        The process involves:
        1. Selecting an initial set of columns from `self.alias` based on `query_graph`.
        2. If `query.distinct_on` is set but `query.use_distinct_on` is False,
           adding a `ROW_NUMBER()` window function partitioned by distinct
           expressions and ordered by order_by expressions. The rank column is
           named using `self._distinct_on_rank_column`.
        3. Applying query hooks to the selected columns.
        4. Applying the main query components (joins, where, order by, limit,
           offset) from the `query` object to the subquery statement.
        5. Materializing the resulting statement as a named subquery.

        Args:
            query_graph: The `QueryGraph` instance providing the overall query
                structure, including selection trees and distinct/order by nodes.
            query: The `Query` object containing specific components like filters,
                ordering, pagination settings, and the `use_distinct_on` flag.

        Returns:
            An `AliasedClass` representing the built subquery, ready to be used
            in further query construction (e.g., by joining it).
        '''
        pass
9
6
22
3
9
11
2
1.34
1
6
2
0
5
0
5
5
140
23
50
16
42
67
29
13
23
5
1
1
9
327,966
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_query.py
strawchemy.sqlalchemy._query.Where
from typing_extensions import Self
from dataclasses import dataclass
from sqlalchemy.sql import ColumnElement, SQLColumnExpression
import dataclasses


@dataclass
class Where:
    """Represents the WHERE clause of a SQLAlchemy query.

    This class encapsulates the filter conditions (as a `Conjunction`) and any
    additional joins specifically required by these conditions.

    Attributes:
        conjunction: A `Conjunction` object holding the filter expressions and
            their associated joins.
        joins: A list of `Join` objects that are specific to the WHERE clause,
            beyond those already in the conjunction.
    """

    conjunction: Conjunction = dataclasses.field(default_factory=Conjunction)
    joins: list[Join] = dataclasses.field(default_factory=list)

    @property
    def expressions(self) -> list[ColumnElement[bool]]:
        """The list of SQLAlchemy boolean filter expressions."""
        return self.conjunction.expressions

    def clear_expressions(self) -> None:
        """Clears all filter expressions from the WHERE clause."""
        self.conjunction.expressions.clear()

    @classmethod
    def from_expressions(cls, *expressions: ColumnElement[bool]) -> Self:
        """Creates a `Where` clause instance from one or more SQLAlchemy expressions.

        Args:
            *expressions: SQLAlchemy boolean column elements to be used as
                filter conditions.

        Returns:
            A new `Where` instance populated with the given expressions.
        """
        return cls(Conjunction(list(expressions)))
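A hedged usage sketch for the record above, showing how a `Where`-style container is consumed: collect boolean column elements, then unpack them into `Select.where()`. The real `Conjunction` and `Join` live elsewhere in strawchemy and also track joins; the `Conjunction` below is a minimal stand-in invented solely to make the snippet runnable.

from dataclasses import dataclass, field

from sqlalchemy import Column, Integer, MetaData, String, Table, select
from sqlalchemy.sql import ColumnElement


@dataclass
class Conjunction:  # minimal stand-in, not strawchemy's class
    expressions: list[ColumnElement[bool]] = field(default_factory=list)


metadata = MetaData()
users = Table("users", metadata, Column("id", Integer), Column("name", String))

# Accumulate filter expressions, then apply them all at once.
conjunction = Conjunction([users.c.id > 1, users.c.name == "ada"])
statement = select(users).where(*conjunction.expressions)
print(statement)  # SELECT ... WHERE users.id > :id_1 AND users.name = :name_1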
@dataclass
class Where:
    '''Represents the WHERE clause of a SQLAlchemy query.

    This class encapsulates the filter conditions (as a `Conjunction`) and any
    additional joins specifically required by these conditions.

    Attributes:
        conjunction: A `Conjunction` object holding the filter expressions and
            their associated joins.
        joins: A list of `Join` objects that are specific to the WHERE clause,
            beyond those already in the conjunction.
    '''

    @property
    def expressions(self) -> list[ColumnElement[bool]]:
        '''The list of SQLAlchemy boolean filter expressions.'''
        pass

    def clear_expressions(self) -> None:
        '''Clears all filter expressions from the WHERE clause.'''
        pass

    @classmethod
    def from_expressions(cls, *expressions: ColumnElement[bool]) -> Self:
        '''Creates a `Where` clause instance from one or more SQLAlchemy expressions.

        Args:
            *expressions: SQLAlchemy boolean column elements to be used as
                filter conditions.

        Returns:
            A new `Where` instance populated with the given expressions.
        '''
        pass
7
4
6
1
2
3
1
1.64
0
3
1
0
2
0
3
3
37
8
11
8
5
18
9
6
5
1
0
0
3
327,967
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_scope.py
strawchemy.sqlalchemy._scope.AggregationFunctionInfo
from typing_extensions import Self, TypeAlias, override
from .exceptions import TranspilingError
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Optional, Union
from sqlalchemy.orm import DeclarativeBase, Mapper, MapperProperty, QueryableAttribute, RelationshipProperty, aliased
from dataclasses import dataclass
from sqlalchemy import ColumnElement, FromClause, Function, Label, Select, func, inspect


@dataclass
class AggregationFunctionInfo:
    """Information about a SQL function and its application context.

    A helper class that encapsulates information about how a SQL function
    should be applied in query building. Used internally by NodeInspect to map
    GraphQL functions to their SQLAlchemy equivalents.

    Attributes:
        sqla_function: The SQLAlchemy function generator (e.g., func.count, func.sum)
        apply_on_column: Whether the function should be applied to a column
            True for functions like MIN, MAX that operate on columns
            False for functions like COUNT that can operate independently
    """

    functions_map: ClassVar[dict[str, FunctionGenerator]] = {
        'count': func.count,
        'min': func.min,
        'max': func.max,
        'sum': func.sum,
        'avg': func.avg,
        'stddev_samp': func.stddev_samp,
        'stddev_pop': func.stddev_pop,
        'var_samp': func.var_samp,
        'var_pop': func.var_pop,
    }

    sqla_function: FunctionGenerator
    apply_on_column: bool
    visitor: Optional[_FunctionVisitor] = None

    @classmethod
    def from_name(cls, name: str, visitor: Optional[_FunctionVisitor] = None) -> Self:
        """Creates an AggregationFunctionInfo instance from a function name.

        Looks up the provided `name` in the `cls.functions_map` to find the
        corresponding SQLAlchemy function generator. It determines if the
        function typically applies to a column (e.g., MIN, MAX) or can operate
        on a wildcard (e.g., COUNT).

        Args:
            name: The name of the aggregation function (e.g., "count", "min").
            visitor: An optional callable to transform the generated SQLAlchemy
                function expression.

        Returns:
            An instance of `AggregationFunctionInfo` configured for the named
            function.

        Raises:
            TranspilingError: If the `name` is not a known function.
        """
        if name not in cls.functions_map:
            msg = f'Unknown function {name}'
            raise TranspilingError(msg)
        apply_on_column = name != 'count'
        return cls(sqla_function=cls.functions_map[name], apply_on_column=apply_on_column, visitor=visitor)

    def apply(self, *args: Union[QueryableAttribute[Any], ColumnElement[Any]]) -> ColumnElement[Any]:
        """Applies the configured SQLAlchemy function to the given arguments.

        Constructs a SQLAlchemy function call using `self.sqla_function` and
        the provided `args`. If a `visitor` was configured for this instance,
        it is applied to the resulting function expression.

        Args:
            *args: The arguments to pass to the SQLAlchemy function. These are
                typically column expressions or other SQL elements.

        Returns:
            A SQLAlchemy `ColumnElement` representing the function call.
        """
        func = self.sqla_function(*args)
        if self.visitor:
            func = self.visitor(func)
        return func
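Assuming `AggregationFunctionInfo` exactly as defined in the record above, a quick sketch of the lookup-and-apply flow against a plain Core table; the `orders` table is hypothetical.

from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
orders = Table("orders", metadata, Column("amount", Integer))

# Look up the SQL generator for "sum" and apply it to a column.
info = AggregationFunctionInfo.from_name("sum")
print(select(info.apply(orders.c.amount)))  # SELECT sum(orders.amount) AS sum_1 FROM orders

# COUNT is the one entry that can run without a column argument.
count_info = AggregationFunctionInfo.from_name("count")
print(count_info.apply_on_column)  # False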
@dataclass
class AggregationFunctionInfo:
    '''Information about a SQL function and its application context.

    A helper class that encapsulates information about how a SQL function
    should be applied in query building. Used internally by NodeInspect to map
    GraphQL functions to their SQLAlchemy equivalents.

    Attributes:
        sqla_function: The SQLAlchemy function generator (e.g., func.count, func.sum)
        apply_on_column: Whether the function should be applied to a column
            True for functions like MIN, MAX that operate on columns
            False for functions like COUNT that can operate independently
    '''

    @classmethod
    def from_name(cls, name: str, visitor: Optional[_FunctionVisitor] = None) -> Self:
        '''Creates an AggregationFunctionInfo instance from a function name.

        Looks up the provided `name` in the `cls.functions_map` to find the
        corresponding SQLAlchemy function generator. It determines if the
        function typically applies to a column (e.g., MIN, MAX) or can operate
        on a wildcard (e.g., COUNT).

        Args:
            name: The name of the aggregation function (e.g., "count", "min").
            visitor: An optional callable to transform the generated SQLAlchemy
                function expression.

        Returns:
            An instance of `AggregationFunctionInfo` configured for the named
            function.

        Raises:
            TranspilingError: If the `name` is not a known function.
        '''
        pass

    def apply(self, *args: Union[QueryableAttribute[Any], ColumnElement[Any]]) -> ColumnElement[Any]:
        '''Applies the configured SQLAlchemy function to the given arguments.

        Constructs a SQLAlchemy function call using `self.sqla_function` and
        the provided `args`. If a `visitor` was configured for this instance,
        it is applied to the resulting function expression.

        Args:
            *args: The arguments to pass to the SQLAlchemy function. These are
                typically column expressions or other SQL elements.

        Returns:
            A SQLAlchemy `ColumnElement` representing the function call.
        '''
        pass
5
3
21
4
6
12
2
1.26
0
3
1
0
1
0
2
2
73
12
27
9
23
34
16
8
13
2
0
1
4
327,968
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_scope.py
strawchemy.sqlalchemy._scope.ColumnTransform
from dataclasses import dataclass
from sqlalchemy.dialects import postgresql
from sqlalchemy import cast as sqla_cast
from sqlalchemy import ColumnElement, FromClause, Function, Label, Select, func, inspect
from sqlalchemy.orm import DeclarativeBase, Mapper, MapperProperty, QueryableAttribute, RelationshipProperty, aliased
from typing_extensions import Self, TypeAlias, override
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Optional, Union


@dataclass(frozen=True)
class ColumnTransform:
    """Represents a transformed SQLAlchemy column attribute.

    This dataclass typically stores a `QueryableAttribute` that has undergone
    some transformation, such as being labeled or having a function applied
    (e.g., for JSON extraction). Instances are usually created via its
    classmethod constructors like `_new` (for labeling) or `extract_json`.

    The main purpose is to encapsulate the transformed attribute along with
    the context (via `QueryScope` and `QueryNodeType`) in which the
    transformation occurred, ensuring unique naming and dialect-specific
    handling.

    Attributes:
        attribute: The transformed `QueryableAttribute`.
    """

    attribute: QueryableAttribute[Any]

    @classmethod
    def _new(cls, attribute: Union[Function[Any], QueryableAttribute[Any]], node: QueryNodeType, scope: QueryScope[Any]) -> Self:
        """Creates a ColumnTransform by labeling an attribute or function.

        This factory method takes a SQLAlchemy `Function` or
        `QueryableAttribute` and applies a unique label to it based on the
        `QueryNodeType` and `QueryScope`. The label is generated using
        `scope.key(node)`.

        Args:
            attribute: The SQLAlchemy function or attribute to be labeled.
            node: The query node associated with this attribute/function.
            scope: The current query scope, used for generating a unique
                key/label.

        Returns:
            A new `ColumnTransform` instance with the labeled attribute.
        """
        return cls(attribute.label(scope.key(node)))

    @classmethod
    def extract_json(cls, attribute: QueryableAttribute[Any], node: QueryNodeType, scope: QueryScope[Any]) -> Self:
        """Creates a ColumnTransform for extracting a value from a JSON column.

        This factory method generates a SQLAlchemy expression to extract a
        value from a JSON-like column (`attribute`) based on a JSON path
        specified in `node.metadata.data.json_path`.

        The extraction logic is dialect-specific:
        - For PostgreSQL (`scope.dialect == "postgresql"`), it uses
          `func.jsonb_path_query_first`, coalescing to an empty JSONB object
          (`{}`) if the path does not exist or the value is null.
        - For other dialects, it uses the `->` operator (common for JSON
          extraction), coalescing to an empty JSON object
          (`func.json_object()`) on null/missing.

        The resulting transformation is then labeled using `cls._new` to
        ensure a unique column name in the query.

        Args:
            attribute: The `QueryableAttribute` representing the JSON column.
            node: The query node containing metadata, specifically the
                `json_path` under `node.metadata.data.json_path`.
            scope: The current query scope, used for dialect-specific logic
                and for labeling the transformed attribute.

        Returns:
            A new `ColumnTransform` instance with the JSON extraction
            expression, appropriately labeled.
        """
        if scope.dialect == 'postgresql':
            transform = func.coalesce(
                func.jsonb_path_query_first(attribute, sqla_cast(node.metadata.data.json_path, postgresql.JSONPATH)),
                sqla_cast({}, postgresql.JSONB),
            )
        else:
            transform = func.coalesce(attribute.op('->')(node.metadata.data.json_path), func.json_object())
        return cls._new(transform, node, scope)
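Since `QueryNodeType` and `QueryScope` are internal to strawchemy, this sketch reproduces only the two dialect-specific expressions that `extract_json` builds, against a hypothetical `docs` table; the path value is illustrative.

from sqlalchemy import Column, MetaData, Table, func
from sqlalchemy import cast as sqla_cast
from sqlalchemy.dialects import postgresql

metadata = MetaData()
docs = Table("docs", metadata, Column("payload", postgresql.JSONB))
json_path = "$.user.name"  # hypothetical JSON path

# PostgreSQL branch: jsonb_path_query_first, coalesced to an empty JSONB object.
pg_expr = func.coalesce(
    func.jsonb_path_query_first(docs.c.payload, sqla_cast(json_path, postgresql.JSONPATH)),
    sqla_cast({}, postgresql.JSONB),
)

# Generic branch: the -> operator, coalesced to json_object().
generic_expr = func.coalesce(docs.c.payload.op("->")(json_path), func.json_object())

print(pg_expr.compile(dialect=postgresql.dialect()))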
@dataclass(frozen=True)
class ColumnTransform:
    '''Represents a transformed SQLAlchemy column attribute.

    This dataclass typically stores a `QueryableAttribute` that has undergone
    some transformation, such as being labeled or having a function applied
    (e.g., for JSON extraction). Instances are usually created via its
    classmethod constructors like `_new` (for labeling) or `extract_json`.

    The main purpose is to encapsulate the transformed attribute along with
    the context (via `QueryScope` and `QueryNodeType`) in which the
    transformation occurred, ensuring unique naming and dialect-specific
    handling.

    Attributes:
        attribute: The transformed `QueryableAttribute`.
    '''

    @classmethod
    def _new(cls, attribute: Union[Function[Any], QueryableAttribute[Any]], node: QueryNodeType, scope: QueryScope[Any]) -> Self:
        '''Creates a ColumnTransform by labeling an attribute or function.

        This factory method takes a SQLAlchemy `Function` or
        `QueryableAttribute` and applies a unique label to it based on the
        `QueryNodeType` and `QueryScope`. The label is generated using
        `scope.key(node)`.

        Args:
            attribute: The SQLAlchemy function or attribute to be labeled.
            node: The query node associated with this attribute/function.
            scope: The current query scope, used for generating a unique
                key/label.

        Returns:
            A new `ColumnTransform` instance with the labeled attribute.
        '''
        pass

    @classmethod
    def extract_json(cls, attribute: QueryableAttribute[Any], node: QueryNodeType, scope: QueryScope[Any]) -> Self:
        '''Creates a ColumnTransform for extracting a value from a JSON column.

        This factory method generates a SQLAlchemy expression to extract a
        value from a JSON-like column (`attribute`) based on a JSON path
        specified in `node.metadata.data.json_path`.

        The extraction logic is dialect-specific:
        - For PostgreSQL (`scope.dialect == "postgresql"`), it uses
          `func.jsonb_path_query_first`, coalescing to an empty JSONB object
          (`{}`) if the path does not exist or the value is null.
        - For other dialects, it uses the `->` operator (common for JSON
          extraction), coalescing to an empty JSON object
          (`func.json_object()`) on null/missing.

        The resulting transformation is then labeled using `cls._new` to
        ensure a unique column name in the query.

        Args:
            attribute: The `QueryableAttribute` representing the JSON column.
            node: The query node containing metadata, specifically the
                `json_path` under `node.metadata.data.json_path`.
            scope: The current query scope, used for dialect-specific logic
                and for labeling the transformed attribute.

        Returns:
            A new `ColumnTransform` instance with the JSON extraction
            expression, appropriately labeled.
        '''
        pass
6
3
27
4
7
16
2
2.59
0
2
1
0
0
0
2
2
75
14
17
8
10
44
9
4
6
2
0
1
3
327,969
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_scope.py
strawchemy.sqlalchemy._scope.NodeInspect
from sqlalchemy import ColumnElement, FromClause, Function, Label, Select, func, inspect
from sqlalchemy.orm.util import AliasedClass
from sqlalchemy import distinct as sqla_distinct
from sqlalchemy.orm import DeclarativeBase, Mapper, MapperProperty, QueryableAttribute, RelationshipProperty, aliased
from strawchemy.constants import NODES_KEY
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Optional, Union
from strawchemy.strawberry.dto import GraphQLFieldDefinition, QueryNode


class NodeInspect:
    """Inspection helper for SQLAlchemy query nodes.

    Provides functionality to inspect and process SQLAlchemy query nodes
    within a QueryScope context. Handles function mapping, foreign key
    resolution, and property access for query nodes.

    Attributes:
        node (QueryNodeType): The query node being inspected
        scope (QueryScope): The query scope providing context for inspection

    Key Responsibilities:
    - Maps GraphQL functions to corresponding SQL functions
    - Resolves foreign key relationships between nodes
    - Provides access to node properties and children
    - Generates SQL expressions for functions and selections
    - Handles column and ID selection for query building

    The class works closely with QueryScope to provide context-aware
    inspection capabilities and is primarily used by the Transpiler class to
    build SQL queries from GraphQL queries.

    Example:
        >>> node = QueryNodeType(...)
        >>> scope = QueryScope(...)
        >>> inspector = NodeInspect(node, scope)
        >>> inspector.output_functions(alias)  # Get SQL function expressions
        >>> inspector.selection()  # Get columns (plus IDs and FKs) for selection
    """

    def __init__(self, node: QueryNodeType, scope: QueryScope[Any]) -> None:
        """Initializes the NodeInspect helper.

        Args:
            node: The query node (`QueryNodeType`) to be inspected.
            scope: The `QueryScope` providing context for the inspection, such
                as aliases, dialect, and parent relationships.
        """
        self.node = node
        self.scope = scope

    def _foreign_keys_selection(self, alias: Optional[AliasedClass[Any]] = None) -> list[QueryableAttribute[Any]]:
        """Selects local foreign key columns for child relationships of the current node.

        Iterates through the children of the current `self.node`. If a child
        represents a relationship (is_relation is True and
        model_field.property is a RelationshipProperty), it identifies the
        local columns involved in that relationship. These local columns
        (foreign keys on the parent side of the relationship) are then adapted
        to the provided `alias` (or an alias inferred from the parent node if
        `alias` is None).

        Args:
            alias: The `AliasedClass` to which the foreign key attributes
                should be adapted. If None, defaults to the alias of the
                parent of `self.node`.

        Returns:
            A list of `QueryableAttribute` objects representing the aliased
            local foreign key columns.
        """
        selected_fks: list[QueryableAttribute[Any]] = []
        alias_insp = inspect(alias or self.scope.alias_from_relation_node(self.node, 'parent'))
        for child in self.node.children:
            if not child.value.is_relation or not isinstance(child.value.model_field.property, RelationshipProperty):
                continue
            # Adapt each local FK column exactly once per relationship child.
            selected_fks.extend(
                alias_insp.mapper.attrs[column.key].class_attribute.adapt_to_entity(alias_insp)
                for column in child.value.model_field.property.local_columns
                if column.key is not None
            )
        return selected_fks

    def _transform_column(self, node: QueryNodeType, attribute: QueryableAttribute[Any]) -> Union[QueryableAttribute[Any], ColumnTransform]:
        """Applies transformations to a column attribute if necessary.

        Currently, this method checks if the `node.metadata.data.json_path` is
        set. If it is, it applies a JSON extraction transformation using
        `ColumnTransform.extract_json`. Otherwise, it returns the original
        attribute.

        Args:
            node: The `QueryNodeType` providing metadata for potential
                transformations (e.g., JSON path).
            attribute: The `QueryableAttribute` to potentially transform.

        Returns:
            The transformed attribute (as a `ColumnTransform` instance) if a
            transformation was applied, or the original `QueryableAttribute`
            otherwise.
        """
        transform: Optional[ColumnTransform] = None
        if node.metadata.data.json_path:
            transform = ColumnTransform.extract_json(attribute, node, self.scope)
        return attribute if transform is None else transform

    @property
    def children(self) -> list[NodeInspect]:
        """Provides `NodeInspect` instances for all children of the current node.

        Returns:
            A list of `NodeInspect` objects, each initialized with a child of
            `self.node` and the same `self.scope`.
        """
        return [NodeInspect(child, self.scope) for child in self.node.children]

    @property
    def value(self) -> GraphQLFieldDefinition:
        """The `GraphQLFieldDefinition` associated with the current node.

        This is a direct accessor to `self.node.value`.

        Returns:
            The `GraphQLFieldDefinition` of the current node.
        """
        return self.node.value

    @property
    def mapper(self) -> Mapper[Any]:
        """The SQLAlchemy `Mapper` for the model associated with the current node.

        If the node's value (`self.value`) has a `model_field` (e.g., it's an
        attribute of a model), it returns the mapper from that field's
        property. Otherwise (e.g., it's a root model type), it returns the
        mapper directly from `self.value.model`.

        Returns:
            The SQLAlchemy `Mapper` object.
        """
        if self.value.has_model_field:
            return self.value.model_field.property.mapper.mapper
        return self.value.model.__mapper__

    @property
    def key(self) -> str:
        """Generates a base key for the current node.

        The key is constructed based on whether the node represents a
        function, is a root node, or is a model field.
        - If it's a function, the function name is used as a prefix.
        - If it's a root node, the model's table name is used as a suffix.
        - If it's a model field, the field's key is used as a suffix.

        Returns:
            A string key representing the node.
        """
        prefix = f'{function.function}_' if (function := self.value.function()) else ''
        if self.node.is_root:
            suffix = self.value.model.__tablename__
        else:
            suffix = self.value.model_field.key if self.value.has_model_field else ''
        return f'{prefix}{suffix}'

    @property
    def name(self) -> str:
        """Generates a potentially qualified name for the current node.

        If the node has a parent, the name is constructed by prefixing the
        parent's key (obtained via
        `NodeInspect(self.node.parent, self.scope).key`) to the current node's
        `key`, separated by '__'. If there is no parent, it simply returns the
        node's `key`. This helps create unique names in nested structures.

        Returns:
            A string name, potentially qualified by its parent's key.
        """
        if self.node.parent and (parent_key := NodeInspect(self.node.parent, self.scope).key):
            return f'{parent_key}__{self.key}'
        return self.key

    @property
    def is_data_root(self) -> bool:
        """Determines if the current node acts as a data root in an aggregation query.

        A node is considered a data root if:
        - It's part of a query with root aggregations
          (`self.node.graph_metadata.metadata.root_aggregations` is True),
        - AND its field name is the standard 'nodes' key (`NODES_KEY`),
        - AND it has a parent node which is itself a root node
          (`self.node.parent.is_root`).

        Alternatively, if the node itself is marked as a root
        (`self.node.is_root`), it's also considered a data root.

        This is typically used to identify the primary entity collection
        within queries that involve aggregations at the root level (e.g.,
        total count alongside a list of items).

        Returns:
            True if the node is a data root, False otherwise.
        """
        return (
            self.node.graph_metadata.metadata.root_aggregations
            and self.value.name == NODES_KEY
            and self.node.parent
            and self.node.parent.is_root
        ) or self.node.is_root

    def output_functions(self, alias: AliasedClass[Any], visit_func: _FunctionVisitor = lambda func: func) -> dict[QueryNodeType, Label[Any]]:
        """Generates labeled SQLAlchemy function expressions for output.

        This method processes the function defined in `self.value.function()`.
        It uses `AggregationFunctionInfo` to get the SQLAlchemy function.
        - If `apply_on_column` is True (e.g., MIN, MAX), it iterates through
          the children of the current node (which represent function
          arguments), adapts each child's model field to the given `alias`,
          applies the function, labels it with a unique key from the scope,
          and stores it.
        - If `apply_on_column` is False (e.g., COUNT), it applies the function
          (often to a wildcard or no specific column), labels it, and stores
          it.

        The `visit_func` can be used to further transform the generated
        SQLAlchemy function expression before labeling.

        Args:
            alias: The `AliasedClass` to which function arguments (if any)
                should be adapted.
            visit_func: An optional callable to transform the generated
                SQLAlchemy function expression before labeling. Defaults to an
                identity function.

        Returns:
            A dictionary mapping `QueryNodeType` (representing the function or
            its argument node) to the corresponding labeled SQLAlchemy
            function expression (`Label`).
        """
        functions: dict[QueryNodeType, Label[Any]] = {}
        function_info = AggregationFunctionInfo.from_name(self.value.function(strict=True).function, visitor=visit_func)
        if function_info.apply_on_column:
            for arg_child in self.children:
                arg = self.mapper.attrs[arg_child.value.model_field_name].class_attribute.adapt_to_entity(inspect(alias))
                functions[arg_child.node] = function_info.apply(arg).label(self.scope.key(arg_child.node))
        else:
            functions[self.node] = visit_func(function_info.sqla_function()).label(self.scope.key(self.node))
        return functions

    def filter_function(self, alias: AliasedClass[Any], distinct: Optional[bool] = None) -> tuple[QueryNodeType, Label[Any]]:
        """Generates a labeled SQLAlchemy function expression for use in filters.

        Similar to `output_functions`, but tailored for filter conditions. It
        retrieves the function using `AggregationFunctionInfo`. Arguments for
        the function are derived from the children of the current node,
        adapted to the given `alias`. If `distinct` is True,
        `sqlalchemy.distinct()` is applied to the arguments.

        The label for the function is determined by the scope key of either
        the first child (if there's only one, implying the function applies to
        that child's attribute) or the current node itself.

        Args:
            alias: The `AliasedClass` to adapt function arguments to.
            distinct: If True, applies `sqlalchemy.distinct()` to the function
                arguments. Defaults to None (no distinct).

        Returns:
            A tuple containing:
            - `QueryNodeType`: The node associated with the function (either
              the current node or its first child).
            - `Label[Any]`: The labeled SQLAlchemy function expression.
        """
        function_info = AggregationFunctionInfo.from_name(self.value.function(strict=True).function)
        argument_attributes = [
            self.mapper.attrs[arg_child.value.model_field_name].class_attribute.adapt_to_entity(inspect(alias))
            for arg_child in self.children
        ]
        function_args = (sqla_distinct(*argument_attributes),) if distinct else argument_attributes
        if len(self.children) == 1:
            function_node = self.children[0].node
            label_name = self.scope.key(function_node)
        else:
            function_node = self.node
            label_name = self.scope.key(self.node)
        return (function_node, function_info.apply(*function_args).label(label_name))

    def columns(self, alias: Optional[AliasedClass[Any]] = None) -> tuple[list[QueryableAttribute[Any]], list[ColumnTransform]]:
        """Extracts regular columns and transformed columns for the current node.

        Iterates through the children of the current node (`self.node`). For
        each child that is not a relation and not a computed field:
        1. It gets the aliased attribute using
           `self.scope.aliased_attribute()`.
        2. It attempts to transform the column using
           `self._transform_column()`.
        3. If transformed (e.g., JSON extraction), it's added to the
           `transforms` list.
        4. Otherwise, the regular aliased attribute is added to the `columns`
           list.

        After processing all children, it ensures that ID attributes for the
        current node (obtained via `self.scope.aliased_id_attributes()`) are
        included in the `columns` list if they haven't been added already (to
        avoid duplicates if ID fields were explicitly requested).

        Args:
            alias: The `AliasedClass` to which the column attributes should be
                adapted. If None, the scope will infer the appropriate alias.

        Returns:
            A tuple containing two lists:
            - The first list contains `QueryableAttribute` objects for regular
              columns.
            - The second list contains `ColumnTransform` objects for
              transformed columns.
        """
        columns: list[QueryableAttribute[Any]] = []
        transforms: list[ColumnTransform] = []
        property_set: set[MapperProperty[Any]] = set()
        for child in self.node.children:
            if not child.value.is_relation and not child.value.is_computed:
                attribute = self.scope.aliased_attribute(child, alias)
                property_set.add(attribute.property)
                transformed = self._transform_column(child, attribute)
                if isinstance(transformed, ColumnTransform):
                    transforms.append(transformed)
                else:
                    columns.append(transformed)
        id_attributes = self.scope.aliased_id_attributes(self.node, alias)
        columns.extend(attribute for attribute in id_attributes if attribute.property not in property_set)
        return (columns, transforms)

    def foreign_key_columns(self, side: RelationshipSide, alias: Optional[AliasedClass[Any]] = None) -> list[QueryableAttribute[Any]]:
        """Retrieves foreign key columns for the current node's relationship.

        This method identifies the foreign key columns involved in the
        relationship represented by `self.node.value.model_field.property`.
        The `side` argument determines whether to fetch local columns (if
        `side` is "parent") or remote columns otherwise. The columns are then
        adapted to the provided `alias` (or an alias inferred from the node
        and side if `alias` is None).

        Args:
            side: Specifies which side of the relationship to get keys from
                ("parent" for local, "target" for remote).
            alias: The `AliasedClass` to adapt the foreign key attributes to.
                If None, an alias is inferred based on the node and
                relationship side.

        Returns:
            A list of `QueryableAttribute` objects representing the aliased
            foreign key columns.

        Raises:
            AssertionError: If `self.node.value.model_field.property` is not a
                `RelationshipProperty`.
        """
        alias_insp = inspect(alias or self.scope.alias_from_relation_node(self.node, side))
        relationship = self.node.value.model_field.property
        assert isinstance(relationship, RelationshipProperty)
        columns = relationship.local_columns if side == 'parent' else relationship.remote_side
        return [
            alias_insp.mapper.attrs[column.key].class_attribute.adapt_to_entity(alias_insp)
            for column in columns
            if column.key is not None
        ]

    def selection(self, alias: Optional[AliasedClass[Any]] = None) -> list[QueryableAttribute[Any]]:
        """Computes the full list of attributes to select for the current node.

        This combines the regular columns (and transformed columns, though
        only the `QueryableAttribute` part is returned here) obtained from
        `self.columns(alias)` with the foreign key columns needed for
        relationships, obtained from `self._foreign_keys_selection(alias)`.

        Args:
            alias: The `AliasedClass` to adapt attributes to. If None, aliases
                are inferred by the called methods.

        Returns:
            A list of `QueryableAttribute` objects representing all columns to
            be selected for the current node, including necessary foreign
            keys.
        """
        columns, _ = self.columns(alias)
        return [*columns, *self._foreign_keys_selection(alias)]
class NodeInspect:
    '''Inspection helper for SQLAlchemy query nodes.

    Provides functionality to inspect and process SQLAlchemy query nodes
    within a QueryScope context. Handles function mapping, foreign key
    resolution, and property access for query nodes.

    Attributes:
        node (QueryNodeType): The query node being inspected
        scope (QueryScope): The query scope providing context for inspection

    Key Responsibilities:
    - Maps GraphQL functions to corresponding SQL functions
    - Resolves foreign key relationships between nodes
    - Provides access to node properties and children
    - Generates SQL expressions for functions and selections
    - Handles column and ID selection for query building

    The class works closely with QueryScope to provide context-aware
    inspection capabilities and is primarily used by the Transpiler class to
    build SQL queries from GraphQL queries.

    Example:
        >>> node = QueryNodeType(...)
        >>> scope = QueryScope(...)
        >>> inspector = NodeInspect(node, scope)
        >>> inspector.output_functions(alias)  # Get SQL function expressions
        >>> inspector.selection()  # Get columns (plus IDs and FKs) for selection
    '''

    def __init__(self, node: QueryNodeType, scope: QueryScope[Any]) -> None:
        '''Initializes the NodeInspect helper.

        Args:
            node: The query node (`QueryNodeType`) to be inspected.
            scope: The `QueryScope` providing context for the inspection, such
                as aliases, dialect, and parent relationships.
        '''
        pass

    def _foreign_keys_selection(self, alias: Optional[AliasedClass[Any]] = None) -> list[QueryableAttribute[Any]]:
        '''Selects local foreign key columns for child relationships of the current node.

        Iterates through the children of the current `self.node`. If a child
        represents a relationship (is_relation is True and
        model_field.property is a RelationshipProperty), it identifies the
        local columns involved in that relationship. These local columns
        (foreign keys on the parent side of the relationship) are then adapted
        to the provided `alias` (or an alias inferred from the parent node if
        `alias` is None).

        Args:
            alias: The `AliasedClass` to which the foreign key attributes
                should be adapted. If None, defaults to the alias of the
                parent of `self.node`.

        Returns:
            A list of `QueryableAttribute` objects representing the aliased
            local foreign key columns.
        '''
        pass

    def _transform_column(self, node: QueryNodeType, attribute: QueryableAttribute[Any]) -> Union[QueryableAttribute[Any], ColumnTransform]:
        '''Applies transformations to a column attribute if necessary.

        Currently, this method checks if the `node.metadata.data.json_path` is
        set. If it is, it applies a JSON extraction transformation using
        `ColumnTransform.extract_json`. Otherwise, it returns the original
        attribute.

        Args:
            node: The `QueryNodeType` providing metadata for potential
                transformations (e.g., JSON path).
            attribute: The `QueryableAttribute` to potentially transform.

        Returns:
            The transformed attribute (as a `ColumnTransform` instance) if a
            transformation was applied, or the original `QueryableAttribute`
            otherwise.
        '''
        pass

    @property
    def children(self) -> list[NodeInspect]:
        '''Provides `NodeInspect` instances for all children of the current node.

        Returns:
            A list of `NodeInspect` objects, each initialized with a child of
            `self.node` and the same `self.scope`.
        '''
        pass

    @property
    def value(self) -> GraphQLFieldDefinition:
        '''The `GraphQLFieldDefinition` associated with the current node.

        This is a direct accessor to `self.node.value`.

        Returns:
            The `GraphQLFieldDefinition` of the current node.
        '''
        pass

    @property
    def mapper(self) -> Mapper[Any]:
        '''The SQLAlchemy `Mapper` for the model associated with the current node.

        If the node's value (`self.value`) has a `model_field` (e.g., it's an
        attribute of a model), it returns the mapper from that field's
        property. Otherwise (e.g., it's a root model type), it returns the
        mapper directly from `self.value.model`.

        Returns:
            The SQLAlchemy `Mapper` object.
        '''
        pass

    @property
    def key(self) -> str:
        '''Generates a base key for the current node.

        The key is constructed based on whether the node represents a
        function, is a root node, or is a model field.
        - If it's a function, the function name is used as a prefix.
        - If it's a root node, the model's table name is used as a suffix.
        - If it's a model field, the field's key is used as a suffix.

        Returns:
            A string key representing the node.
        '''
        pass

    @property
    def name(self) -> str:
        '''Generates a potentially qualified name for the current node.

        If the node has a parent, the name is constructed by prefixing the
        parent's key (obtained via
        `NodeInspect(self.node.parent, self.scope).key`) to the current node's
        `key`, separated by '__'. If there is no parent, it simply returns the
        node's `key`. This helps create unique names in nested structures.

        Returns:
            A string name, potentially qualified by its parent's key.
        '''
        pass

    @property
    def is_data_root(self) -> bool:
        '''Determines if the current node acts as a data root in an aggregation query.

        A node is considered a data root if:
        - It's part of a query with root aggregations
          (`self.node.graph_metadata.metadata.root_aggregations` is True),
        - AND its field name is the standard 'nodes' key (`NODES_KEY`),
        - AND it has a parent node which is itself a root node
          (`self.node.parent.is_root`).

        Alternatively, if the node itself is marked as a root
        (`self.node.is_root`), it's also considered a data root.

        This is typically used to identify the primary entity collection
        within queries that involve aggregations at the root level (e.g.,
        total count alongside a list of items).

        Returns:
            True if the node is a data root, False otherwise.
        '''
        pass

    def output_functions(self, alias: AliasedClass[Any], visit_func: _FunctionVisitor = lambda func: func) -> dict[QueryNodeType, Label[Any]]:
        '''Generates labeled SQLAlchemy function expressions for output.

        This method processes the function defined in `self.value.function()`.
        It uses `AggregationFunctionInfo` to get the SQLAlchemy function.
        - If `apply_on_column` is True (e.g., MIN, MAX), it iterates through
          the children of the current node (which represent function
          arguments), adapts each child's model field to the given `alias`,
          applies the function, labels it with a unique key from the scope,
          and stores it.
        - If `apply_on_column` is False (e.g., COUNT), it applies the function
          (often to a wildcard or no specific column), labels it, and stores
          it.

        The `visit_func` can be used to further transform the generated
        SQLAlchemy function expression before labeling.

        Args:
            alias: The `AliasedClass` to which function arguments (if any)
                should be adapted.
            visit_func: An optional callable to transform the generated
                SQLAlchemy function expression before labeling. Defaults to an
                identity function.

        Returns:
            A dictionary mapping `QueryNodeType` (representing the function or
            its argument node) to the corresponding labeled SQLAlchemy
            function expression (`Label`).
        '''
        pass

    def filter_function(self, alias: AliasedClass[Any], distinct: Optional[bool] = None) -> tuple[QueryNodeType, Label[Any]]:
        '''Generates a labeled SQLAlchemy function expression for use in filters.

        Similar to `output_functions`, but tailored for filter conditions. It
        retrieves the function using `AggregationFunctionInfo`. Arguments for
        the function are derived from the children of the current node,
        adapted to the given `alias`. If `distinct` is True,
        `sqlalchemy.distinct()` is applied to the arguments.

        The label for the function is determined by the scope key of either
        the first child (if there's only one, implying the function applies to
        that child's attribute) or the current node itself.

        Args:
            alias: The `AliasedClass` to adapt function arguments to.
            distinct: If True, applies `sqlalchemy.distinct()` to the function
                arguments. Defaults to None (no distinct).

        Returns:
            A tuple containing:
            - `QueryNodeType`: The node associated with the function (either
              the current node or its first child).
            - `Label[Any]`: The labeled SQLAlchemy function expression.
        '''
        pass

    def columns(self, alias: Optional[AliasedClass[Any]] = None) -> tuple[list[QueryableAttribute[Any]], list[ColumnTransform]]:
        '''Extracts regular columns and transformed columns for the current node.

        Iterates through the children of the current node (`self.node`). For
        each child that is not a relation and not a computed field:
        1. It gets the aliased attribute using
           `self.scope.aliased_attribute()`.
        2. It attempts to transform the column using
           `self._transform_column()`.
        3. If transformed (e.g., JSON extraction), it's added to the
           `transforms` list.
        4. Otherwise, the regular aliased attribute is added to the `columns`
           list.

        After processing all children, it ensures that ID attributes for the
        current node (obtained via `self.scope.aliased_id_attributes()`) are
        included in the `columns` list if they haven't been added already (to
        avoid duplicates if ID fields were explicitly requested).

        Args:
            alias: The `AliasedClass` to which the column attributes should be
                adapted. If None, the scope will infer the appropriate alias.

        Returns:
            A tuple containing two lists:
            - The first list contains `QueryableAttribute` objects for regular
              columns.
            - The second list contains `ColumnTransform` objects for
              transformed columns.
        '''
        pass

    def foreign_key_columns(self, side: RelationshipSide, alias: Optional[AliasedClass[Any]] = None) -> list[QueryableAttribute[Any]]:
        '''Retrieves foreign key columns for the current node's relationship.

        This method identifies the foreign key columns involved in the
        relationship represented by `self.node.value.model_field.property`.
        The `side` argument determines whether to fetch local columns (if
        `side` is "parent") or remote columns otherwise. The columns are then
        adapted to the provided `alias` (or an alias inferred from the node
        and side if `alias` is None).

        Args:
            side: Specifies which side of the relationship to get keys from
                ("parent" for local, "target" for remote).
            alias: The `AliasedClass` to adapt the foreign key attributes to.
                If None, an alias is inferred based on the node and
                relationship side.

        Returns:
            A list of `QueryableAttribute` objects representing the aliased
            foreign key columns.

        Raises:
            AssertionError: If `self.node.value.model_field.property` is not a
                `RelationshipProperty`.
        '''
        pass

    def selection(self, alias: Optional[AliasedClass[Any]] = None) -> list[QueryableAttribute[Any]]:
        '''Computes the full list of attributes to select for the current node.

        This combines the regular columns (and transformed columns, though
        only the `QueryableAttribute` part is returned here) obtained from
        `self.columns(alias)` with the foreign key columns needed for
        relationships, obtained from `self._foreign_keys_selection(alias)`.

        Args:
            alias: The `AliasedClass` to adapt attributes to. If None, aliases
                are inferred by the called methods.

        Returns:
            A list of `QueryableAttribute` objects representing all columns to
            be selected for the current node, including necessary foreign
            keys.
        '''
        pass
21
15
24
3
9
12
2
1.55
0
11
4
0
14
2
14
14
382
61
126
62
93
195
84
43
69
5
0
3
33
327,970
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_scope.py
strawchemy.sqlalchemy._scope.QueryScope
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Optional, Union
from strawchemy.dto.types import DTOConfig, Purpose
from collections import defaultdict
from strawchemy.strawberry.dto import GraphQLFieldDefinition, QueryNode
from .inspector import SQLAlchemyInspector
from sqlalchemy import ColumnElement, FromClause, Function, Label, Select, func, inspect
from strawchemy.graph import Node
from .typing import DeclarativeT
from sqlalchemy.orm.util import AliasedClass
from sqlalchemy.orm import DeclarativeBase, Mapper, MapperProperty, QueryableAttribute, RelationshipProperty, aliased
from .exceptions import TranspilingError
from typing_extensions import Self, TypeAlias, override


class QueryScope(Generic[DeclarativeT]):
    """Manages the context for building SQLAlchemy queries from GraphQL queries.

    The QueryScope class is responsible for maintaining the state and context
    required to transpile a GraphQL query into a SQLAlchemy query. It manages
    aliases for tables and relationships, tracks selected columns, and
    provides utilities for generating SQL expressions.

    Key Responsibilities:
    - Manages aliases for SQLAlchemy models and relationships.
    - Tracks selected columns and functions within the query.
    - Provides methods for generating aliased attributes and literal columns.
    - Supports nested scopes for subqueries and related entities.
    - Maintains a mapping of relationship properties to their aliases.
    - Generates unique names for columns and functions within the scope.

    The class is used by the Transpiler to build complex SQL queries by
    providing context-aware access to model attributes and relationships. It
    ensures that all parts of the query are correctly aliased and referenced,
    preventing naming conflicts and ensuring the query is valid.

    Example:
        >>> from sqlalchemy.orm import declarative_base
        >>> from sqlalchemy import Column, Integer, String
        >>> Base = declarative_base()
        >>> class User(Base):
        ...     __tablename__ = 'users'
        ...     id = Column(Integer, primary_key=True)
        ...     name = Column(String)
        >>> scope = QueryScope(User, dialect="postgresql")
        >>> user_alias = scope.root_alias
        >>> print(user_alias.name)
        users
    """

    def __init__(
        self,
        model: type[DeclarativeT],
        dialect: SupportedDialect,
        root_alias: Optional[AliasedClass[DeclarativeBase]] = None,
        parent: Optional[QueryScope[Any]] = None,
        alias_map: Optional[dict[tuple[QueryNodeType, RelationshipSide], AliasedClass[Any]]] = None,
        inspector: Optional[SQLAlchemyInspector] = None,
    ) -> None:
        """Initializes the QueryScope.

        Sets up the initial state for the query scope, including the root
        model, dialect, parent scope (if any), and alias mappings.

        Args:
            model: The primary SQLAlchemy model class for this scope.
            dialect: The SQL dialect being targeted (e.g., "postgresql",
                "sqlite").
            root_alias: An optional pre-defined `AliasedClass` for the root
                model. If None, a new alias is created from the `model`.
            parent: An optional parent `QueryScope` if this is a nested scope
                (e.g., for a subquery or relationship).
            alias_map: An optional dictionary to pre-populate the mapping of
                (query node, relationship side) tuples to `AliasedClass`
                instances.
            inspector: An optional `SQLAlchemyInspector` instance. If None, a
                new one is created using the model's registry.
        """
        self._parent: Optional[QueryScope[Any]] = parent
        self._root_alias = root_alias if root_alias is not None else aliased(model.__mapper__, name=model.__tablename__, flat=True)
        self._node_alias_map: dict[tuple[QueryNodeType, RelationshipSide], AliasedClass[Any]] = alias_map or {}
        self._node_keys: dict[QueryNodeType, str] = {}
        self._keys_set: set[str] = set()
        self._literal_name_counts: defaultdict[str, int] = defaultdict(int)
        self._literal_namespace: str = '__strawchemy'
        self._inspector = inspector or SQLAlchemyInspector([model.registry])
        self.dialect: SupportedDialect = dialect
        self.model = model
        self.level: int = self._parent.level + 1 if self._parent else 0
        self.columns: dict[QueryNodeType, NamedColumn[Any]] = {}
        self.selection_function_nodes: set[QueryNodeType] = set()
        self.order_by_function_nodes: set[QueryNodeType] = set()
        self.where_function_nodes: set[QueryNodeType] = set()

    def _add_scope_id(self, name: str) -> str:
        return name if self.is_root else f'{name}_{self.level}'

    def _node_key(self, node: QueryNodeType) -> str:
        if (name := self._node_keys.get(node)):
            return name
        node_inspect = self.inspect(node)
        scoped_name = node_inspect.name
        parent_prefix = ''
        for parent in node.iter_parents():
            if scoped_name not in self._keys_set:
                self._node_keys[node] = scoped_name
                break
            parent_name = self.inspect(parent).name
            parent_prefix = f'{parent_prefix}__{parent_name}' if parent_prefix else parent_name
            scoped_name = f'{parent_prefix}__{node_inspect.key}'
        return scoped_name

    @property
    def referenced_function_nodes(self) -> set[QueryNodeType]:
        """Gets the set of query nodes that represent functions referenced in the query.

        This includes function nodes that are used in WHERE clauses and also
        selected for output, OR function nodes used in ORDER BY clauses. This
        helps in identifying all function calls that need to be part of the
        generated query.

        Returns:
            A set of `QueryNodeType` objects representing referenced
            functions.
        """
        return (self.where_function_nodes & self.selection_function_nodes) | self.order_by_function_nodes

    @property
    def is_root(self) -> bool:
        """Checks if the current query scope is the root scope.

        A scope is considered the root scope if it does not have a parent
        scope.

        Returns:
            True if this is the root scope, False otherwise.
        """
        return self._parent is None

    @property
    def root_alias(self) -> AliasedClass[Any]:
        return self._root_alias

    def inspect(self, node: QueryNodeType) -> NodeInspect:
        return NodeInspect(node, self)

    def alias_from_relation_node(self, node: QueryNodeType, side: RelationshipSide) -> AliasedClass[Any]:
        node_inspect = self.inspect(node)
        if (side == 'parent' and node.parent and self.inspect(node.parent).is_data_root) or node_inspect.is_data_root:
            return self._root_alias
        if not node.value.is_relation:
            msg = 'Node must be a relation node'
            raise TranspilingError(msg)
        attribute = node.value.model_field
        if (alias := self._node_alias_map.get((node, side))) is not None:
            return alias
        mapper = attribute.parent.mapper if side == 'parent' else attribute.entity.mapper
        alias = aliased(mapper.class_, name=self.key(node), flat=True)
        self.set_relation_alias(node, side, alias)
        return alias

    def aliased_attribute(self, node: QueryNodeType, alias: Optional[AliasedClass[Any]] = None) -> QueryableAttribute[Any]:
        """Adapts a model field to an aliased entity for query building.

        This method is a core component of the GraphQL to SQL transpilation
        process, handling the adaptation of model fields to their aliased
        representations in the generated SQL query. It manages both explicit
        aliases and inferred aliases based on parent-child relationships in
        the query structure.

        The method works in conjunction with other QueryScope methods to
        ensure consistent alias handling across the query:
        - Uses alias_from_relation_node for relationship traversal
        - Integrates with aliased_id_attributes for primary key handling
        - Supports the overall query building process in the Transpiler

        Args:
            node: The SQLAlchemy query node containing the model field to be
                aliased. Must be a valid query node with a model field
                reference.
            alias: An optional explicit alias to use for adaptation. If None,
                the alias will be inferred based on the node's position in the
                query structure.

        Returns:
            QueryableAttribute[Any]: The adapted attribute ready for use in
                SQL expressions. The attribute will be properly aliased
                according to the query context.

        Raises:
            AttributeError: If the node does not have a valid model field
                reference.
            TranspilingError: If there are issues with the node's relationship
                structure.

        Example:
            >>> node = QueryNodeType(...)  # Node with model field reference
            >>> scope = QueryScope(User, dialect="postgresql")  # Query scope for User model
            >>> # Get attribute with explicit alias
            >>> attr = scope.aliased_attribute(node, aliased(User))
            >>> # Get attribute with inferred alias
            >>> attr = scope.aliased_attribute(node)
        """
        model_field: QueryableAttribute[RelationshipProperty[Any]] = node.value.model_field
        if alias is not None:
            return model_field.adapt_to_entity(inspect(alias))
        parent = node.find_parent(lambda node: not node.value.is_computed, strict=True)
        if model_field.parent.is_aliased_class:
            return model_field
        if not node.value.is_relation:
            parent_alias = self.alias_from_relation_node(parent, 'target')
            return model_field.adapt_to_entity(inspect(parent_alias))
        parent_alias = self._root_alias if self.inspect(parent).is_data_root else self.alias_from_relation_node(parent, 'target')
        model_field = model_field.adapt_to_entity(inspect(parent_alias))
        child_alias = self.alias_from_relation_node(node, 'target')
        return model_field.of_type(child_alias)

    def aliased_id_attributes(self, node: QueryNodeType, alias: Optional[AliasedClass[Any]] = None) -> list[QueryableAttribute[Any]]:
        """Retrieves aliased primary key (ID) attributes for a given node.

        This method determines the correct mapper for the node (root mapper
        for root nodes, node's own mapper otherwise) and fetches its primary
        key attributes using `SQLAlchemyInspector.pk_attributes()`.

        The retrieved PK attributes are then adapted to an appropriate alias:
        - If an explicit `alias` is provided, all PKs are adapted to it.
        - If the `node` is the root of the query, PKs are adapted to the
          scope's `_root_alias`.
        - For non-root nodes (typically representing relationships), PKs are
          adapted to the target alias of that relationship, obtained via
          `self.alias_from_relation_node(node, "target")`.

        This ensures that ID columns are correctly referenced in the query,
        whether for direct selection or for joins.

        Args:
            node: The `QueryNodeType` for which to get aliased ID attributes.
            alias: An optional explicit `AliasedClass` to adapt the ID
                attributes to. If None, the alias is inferred based on the
                node's context.

        Returns:
            A list of `QueryableAttribute` objects representing the aliased
            primary key attributes.
        """
        mapper = inspect(self._root_alias).mapper if node.is_root else self.inspect(node).mapper
        columns = SQLAlchemyInspector.pk_attributes(mapper)
        if alias is not None:
            return [pk_attribute.adapt_to_entity(inspect(alias)) for pk_attribute in columns]
        if node.is_root:
            columns = [pk_attribute.adapt_to_entity(inspect(self._root_alias)) for pk_attribute in columns]
        else:
            parent_alias = self.alias_from_relation_node(node, 'target')
            columns = [pk_attribute.adapt_to_entity(inspect(parent_alias)) for pk_attribute in columns]
        return columns

    def scoped_column(self, clause: Union[Select[Any], FromClause], column_name: str) -> Label[Any]:
        """Retrieves a column from a SELECT or FROM clause and labels it with a scope-specific ID.

        This is used to ensure that columns selected from subqueries or CTEs
        have unique names within the current query scope. The original column
        is fetched from the `clause.selected_columns` (for `Select`) or
        `clause.columns` (for `FromClause`) and then labeled using
        `_add_scope_id` to append a scope level identifier if not in the root
        scope.

        Args:
            clause: The SQLAlchemy `Select` or `FromClause` object from which
                to retrieve the column.
            column_name: The name of the column to retrieve and label.

        Returns:
            A `Label` object representing the scope-labeled column.
        """
        columns = clause.selected_columns if isinstance(clause, Select) else clause.columns
        return columns[column_name].label(self._add_scope_id(column_name))

    def set_relation_alias(self, node: QueryNodeType, side: RelationshipSide, alias: AliasedClass[Any]) -> None:
        """Stores an alias for a specific relationship node and side.

        This method updates the internal `_node_alias_map` to associate the
        given `alias` with the tuple `(node, side)`. This map is used to
        retrieve previously established aliases for relationships, preventing
        redundant alias creation and ensuring consistency.

        Args:
            node: The `QueryNodeType` representing the relationship.
            side: The `RelationshipSide` ("parent" or "target") for which this
                alias applies.
            alias: The `AliasedClass` to store for this node and side.
        """
        self._node_alias_map[node, side] = alias

    def id_field_definitions(self, model: type[DeclarativeBase]) -> list[GraphQLFieldDefinition]:
        """Generates GraphQL field definitions for the ID attributes of a model.

        This method first gets the aliased ID attributes for the given `model`
        (treated as a root node for this purpose) using
        `self.aliased_id_attributes()`. Then, for each aliased ID attribute,
        it uses the scope's `_inspector` to create a `GraphQLFieldDefinition`
        suitable for read purposes.

        Args:
            model: The SQLAlchemy model class for which to generate ID field
                definitions.

        Returns:
            A list of `GraphQLFieldDefinition` objects for the model's ID
            fields.
        """
        root = QueryNode.root_node(model)
        return [
            GraphQLFieldDefinition.from_field(self._inspector.field_definition(pk, DTOConfig(Purpose.READ)))
            for pk in self.aliased_id_attributes(root)
        ]

    def key(self, element: Union[str, QueryNodeType]) -> str:
        """Generates a unique key for a query element or node.

        The key is used to uniquely identify elements within the query scope,
        ensuring proper referencing and preventing naming conflicts. The key
        generation strategy differs based on the input type:
        - For QueryNodeType: Generates a scoped name based on the node's
          position in the query structure, incorporating parent relationships
          and function prefixes
        - For string elements: Creates a unique name by appending a counter to
          prevent collisions with identical names

        Args:
            element: The element to generate a key for. Can be either:
                - A QueryNodeType: A node in the query structure
                - A string: A literal element name

        Returns:
            str: A unique key string that identifies the element within the
                query scope. The key is scoped to the current query level to
                maintain uniqueness across nested scopes.

        Example:
            >>> scope = QueryScope(User, dialect="postgresql")
            >>> node = QueryNodeType(...)
            >>> scope.key(node)  # Returns a unique key for the node
            >>> scope.key("column_name")  # Returns a unique key for the literal
        """
        if isinstance(element, Node):
            scoped_name = self._node_key(element)
        else:
            scoped_name = f'{self._literal_namespace}_{element}_{self._literal_name_counts[element]}'
            self._literal_name_counts[element] += 1
        self._keys_set.add(scoped_name)
        return self._add_scope_id(scoped_name)

    def replace(self, model: Optional[type[DeclarativeT]] = None, alias: Optional[AliasedClass[Any]] = None) -> None:
        if model is not None:
            self.model = model
        if alias is not None:
            self._root_alias = alias

    def sub(self, model: type[DeclarativeSubT], alias: AliasedClass[Any]) -> QueryScope[DeclarativeSubT]:
        return QueryScope(
            model=model,
            root_alias=alias,
            parent=self,
            alias_map=self._node_alias_map,
            inspector=self._inspector,
            dialect=self.dialect,
        )

    @override
    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.model},{self.level}>'
class QueryScope(Generic[DeclarativeT]): '''Manages the context for building SQLAlchemy queries from GraphQL queries. The QueryScope class is responsible for maintaining the state and context required to transpile a GraphQL query into a SQLAlchemy query. It manages aliases for tables and relationships, tracks selected columns, and provides utilities for generating SQL expressions. Key Responsibilities: - Manages aliases for SQLAlchemy models and relationships. - Tracks selected columns and functions within the query. - Provides methods for generating aliased attributes and literal columns. - Supports nested scopes for subqueries and related entities. - Maintains a mapping of relationship properties to their aliases. - Generates unique names for columns and functions within the scope. The class is used by the Transpiler to build complex SQL queries by providing context-aware access to model attributes and relationships. It ensures that all parts of the query are correctly aliased and referenced, preventing naming conflicts and ensuring the query is valid. Example: >>> from sqlalchemy.orm import declarative_base >>> from sqlalchemy import Column, Integer, String >>> Base = declarative_base() >>> class User(Base): ... __tablename__ = 'users' ... id = Column(Integer, primary_key=True) ... name = Column(String) >>> scope = QueryScope(User) >>> user_alias = scope.root_alias >>> print(user_alias.name) users ''' def __init__(self, model: type[DeclarativeT], dialect: SupportedDialect, root_alias: Optional[AliasedClass[DeclarativeBase]]=None, parent: Optional[QueryScope[Any]]=None, alias_map: Optional[dict[tuple[QueryNodeType, RelationshipSide], AliasedClass[Any]]]=None, inspector: Optional[SQLAlchemyInspector]=None) -> None: '''Initializes the QueryScope. Sets up the initial state for the query scope, including the root model, dialect, parent scope (if any), and alias mappings. Args: model: The primary SQLAlchemy model class for this scope. dialect: The SQL dialect being targeted (e.g., "postgresql", "sqlite"). root_alias: An optional pre-defined `AliasedClass` for the root model. If None, a new alias is created from the `model`. parent: An optional parent `QueryScope` if this is a nested scope (e.g., for a subquery or relationship). alias_map: An optional dictionary to pre-populate the mapping of (query node, relationship side) tuples to `AliasedClass` instances. inspector: An optional `SQLAlchemyInspector` instance. If None, a new one is created using the model's registry. ''' pass def _add_scope_id(self, name: str) -> str: pass def _node_key(self, node: QueryNodeType) -> str: pass @property def referenced_function_nodes(self) -> set[QueryNodeType]: '''Gets the set of query nodes that represent functions referenced in the query. This includes function nodes that are used in WHERE clauses and also selected for output, OR function nodes used in ORDER BY clauses. This helps in identifying all function calls that need to be part of the generated query. Returns: A set of `QueryNodeType` objects representing referenced functions. ''' pass @property def is_root(self) -> bool: '''Checks if the current query scope is the root scope. A scope is considered the root scope if it does not have a parent scope. Returns: True if this is the root scope, False otherwise. 
''' pass @property def root_alias(self) -> AliasedClass[Any]: pass def inspect(self, node: QueryNodeType) -> NodeInspect: pass def alias_from_relation_node(self, node: QueryNodeType, side: RelationshipSide) -> AliasedClass[Any]: pass def aliased_attribute(self, node: QueryNodeType, alias: Optional[AliasedClass[Any]]=None) -> QueryableAttribute[Any]: '''Adapts a model field to an aliased entity for query building. This method is a core component of the GraphQL to SQL transpilation process, handling the adaptation of model fields to their aliased representations in the generated SQL query. It manages both explicit aliases and inferred aliases based on parent-child relationships in the query structure. The method works in conjunction with other QueryScope methods to ensure consistent alias handling across the query: - Uses alias_from_relation_node for relationship traversal - Integrates with aliased_id_attributes for primary key handling - Supports the overall query building process in the Transpiler Args: node: The SQLAlchemy query node containing the model field to be aliased. Must be a valid query node with a model field reference. alias: An optional explicit alias to use for adaptation. If None, the alias will be inferred based on the node's position in the query structure. Returns: QueryableAttribute[Any]: The adapted attribute ready for use in SQL expressions. The attribute will be properly aliased according to the query context. Raises: AttributeError: If the node does not have a valid model field reference. TranspilingError: If there are issues with the node's relationship structure. Example: >>> node = QueryNodeType(...) # Node with model field reference >>> scope = QueryScope(User) # Query scope for User model >>> # Get attribute with explicit alias >>> attr = scope.aliased_attribute(node, aliased(User)) >>> # Get attribute with inferred alias >>> attr = scope.aliased_attribute(node) ''' pass def aliased_id_attributes(self, node: QueryNodeType, alias: Optional[AliasedClass[Any]]=None) -> list[QueryableAttribute[Any]]: '''Retrieves aliased primary key (ID) attributes for a given node. This method determines the correct mapper for the node (root mapper for root nodes, node's own mapper otherwise) and fetches its primary key attributes using `SQLAlchemyInspector.pk_attributes()`. The retrieved PK attributes are then adapted to an appropriate alias: - If an explicit `alias` is provided, all PKs are adapted to it. - If the `node` is the root of the query, PKs are adapted to the scope's `_root_alias`. - For non-root nodes (typically representing relationships), PKs are adapted to the target alias of that relationship, obtained via `self.alias_from_relation_node(node, "target")`. This ensures that ID columns are correctly referenced in the query, whether for direct selection or for joins. Args: node: The `QueryNodeType` for which to get aliased ID attributes. alias: An optional explicit `AliasedClass` to adapt the ID attributes to. If None, the alias is inferred based on the node's context. Returns: A list of `QueryableAttribute` objects representing the aliased primary key attributes. ''' pass def scoped_column(self, clause: Union[Select[Any], FromClause], column_name: str) -> Label[Any]: '''Retrieves a column from a SELECT or FROM clause and labels it with a scope-specific ID. This is used to ensure that columns selected from subqueries or CTEs have unique names within the current query scope. 
The original column is fetched from the `clause.selected_columns` (for `Select`) or `clause.columns` (for `FromClause`) and then labeled using `_add_scope_id` to append a scope level identifier if not in the root scope. Args: clause: The SQLAlchemy `Select` or `FromClause` object from which to retrieve the column. column_name: The name of the column to retrieve and label. Returns: A `Label` object representing the scope-labeled column. ''' pass def set_relation_alias(self, node: QueryNodeType, side: RelationshipSide, alias: AliasedClass[Any]) -> None: '''Stores an alias for a specific relationship node and side. This method updates the internal `_node_alias_map` to associate the given `alias` with the tuple `(node, side)`. This map is used to retrieve previously established aliases for relationships, preventing redundant alias creation and ensuring consistency. Args: node: The `QueryNodeType` representing the relationship. side: The `RelationshipSide` ("parent" or "target") for which this alias applies. alias: The `AliasedClass` to store for this node and side. ''' pass def id_field_definitions(self, model: type[DeclarativeBase]) -> list[GraphQLFieldDefinition]: '''Generates GraphQL field definitions for the ID attributes of a model. This method first gets the aliased ID attributes for the given `model` (treated as a root node for this purpose) using `self.aliased_id_attributes()`. Then, for each aliased ID attribute, it uses the scope's `_inspector` to create a `GraphQLFieldDefinition` suitable for read purposes. Args: model: The SQLAlchemy model class for which to generate ID field definitions. Returns: A list of `GraphQLFieldDefinition` objects for the model's ID fields. ''' pass def key(self, element: Union[str, QueryNodeType]) -> str: '''Generates a unique key for a query element or node. The key is used to uniquely identify elements within the query scope, ensuring proper referencing and preventing naming conflicts. The key generation strategy differs based on the input type: - For QueryNodeType: Generates a scoped name based on the node's position in the query structure, incorporating parent relationships and function prefixes - For string elements: Creates a unique name by appending a counter to prevent collisions with identical names Args: element: The element to generate a key for. Can be either: - A QueryNodeType: A node in the query structure - A string: A literal element name Returns: str: A unique key string that identifies the element within the query scope. The key is scoped to the current query level to maintain uniqueness across nested scopes. Example: >>> scope = QueryScope(User) >>> node = QueryNodeType(...) >>> scope.key(node) # Returns a unique key for the node >>> scope.key("column_name") # Returns a unique key for the literal ''' pass def replace(self, model: Optional[type[DeclarativeT]]=None, alias: Optional[AliasedClass[Any]]=None) -> None: pass def sub(self, model: type[DeclarativeSubT], alias: AliasedClass[Any]) -> QueryScope[DeclarativeSubT]: pass @override def __repr__(self) -> str: pass
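Under the hood, the alias bookkeeping that `root_alias` and `aliased_attribute` describe reduces to SQLAlchemy's attribute adaptation. A hedged sketch of that mechanism with a throwaway model (this is plain SQLAlchemy, not strawchemy's own API):

```python
from sqlalchemy import inspect, select
from sqlalchemy.orm import DeclarativeBase, Mapped, aliased, mapped_column

class Base(DeclarativeBase):
    pass

class User(Base):
    __tablename__ = "users"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]

user_alias = aliased(User)
# Adapt the mapped attribute onto the alias, as the scope does when it
# resolves a node against its root alias or a relationship alias.
adapted = User.name.adapt_to_entity(inspect(user_alias))
print(select(adapted))  # SELECT users_1.name FROM users AS users_1
```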
22
10
19
2
8
8
2
1.24
1
17
8
0
17
15
17
17
370
58
139
74
101
173
103
52
85
5
1
2
39
327,971
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/_transpiler.py
strawchemy.sqlalchemy._transpiler.QueryTranspiler
from .exceptions import TranspilingError from .typing import DeclarativeT, OrderBySpec, QueryExecutorT import dataclasses from strawchemy.constants import AGGREGATIONS_KEY from strawchemy.strawberry.filters import GraphQLComparison from ._executor import SyncQueryExecutor from collections import defaultdict from strawchemy.strawberry.dto import AggregationFilter, BooleanFilterDTO, EnumDTO, Filter, OrderByDTO, OrderByEnum, OrderByRelationFilterDTO, QueryNode from typing_extensions import Self, override from ._query import AggregationJoin, Conjunction, DistinctOn, HookApplier, Join, OrderBy, Query, QueryGraph, SubqueryBuilder, Where from contextlib import contextmanager from ._scope import QueryScope from typing import TYPE_CHECKING, Any, Generic, Optional, Union, cast from sqlalchemy import Dialect, Label, Select, and_, column, func, inspect, literal_column, not_, null, or_, select, text, true from sqlalchemy.orm import Mapper, RelationshipProperty, aliased, class_mapper, contains_eager, load_only, raiseload from sqlalchemy.sql.elements import ColumnElement from .inspector import SQLAlchemyGraphQLInspector class QueryTranspiler(Generic[DeclarativeT]): """Transpiles a GraphQL query into a SQLAlchemy query.""" def __init__(self, model: type[DeclarativeT], dialect: Dialect, statement: Optional[Select[tuple[DeclarativeT]]]=None, scope: Optional[QueryScope[DeclarativeT]]=None, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[Any]]]]=None, deterministic_ordering: bool=False) -> None: """Initializes the QueryTranspiler. Args: model: The SQLAlchemy model to transpile queries for. dialect: The SQLAlchemy dialect to use. statement: An optional base SQLAlchemy statement to build upon. scope: An optional existing QueryScope. query_hooks: Optional hooks to apply during query transpilation. deterministic_ordering: Whether to ensure deterministic ordering of results. """ supported_dialect = cast('SupportedDialect', dialect.name) self._inspector = SQLAlchemyGraphQLInspector(supported_dialect, [model.registry]) self._aggregation_prefix: str = 'aggregation' self._aggregation_joins: dict[QueryNodeType, AggregationJoin] = {} self._statement = statement self._deterministic_ordering = deterministic_ordering self.dialect = dialect self.scope = scope or QueryScope(model, supported_dialect, inspector=self._inspector) self._hook_applier = HookApplier(self.scope, query_hooks or defaultdict(list)) def _base_statement(self) -> Select[tuple[DeclarativeT]]: """Creates the base select statement for the query. If a `self._statement` was provided during initialization, this method joins the root alias of the current scope to an aliased subquery derived from `self._statement`. Otherwise, it returns a simple select from the root alias. Returns: The base SQLAlchemy Select statement. """ if self._statement is not None: root_mapper = class_mapper(self.scope.model) alias = self._statement.subquery().alias() aliased_cls = aliased(root_mapper, alias) on_clause = and_(*[getattr(self.scope.root_alias, attr.key) == getattr(aliased_cls, attr.key) for attr in self._inspector.pk_attributes(root_mapper)]) return select(self.scope.root_alias).join(alias, onclause=on_clause) return select(self.scope.root_alias) @contextmanager def _sub_scope(self, model: type[Any], root_alias: AliasedClass[Any]) -> Iterator[Self]: """Creates a new scope for a sub-query. Args: model: The SQLAlchemy model to create a scope for. root_alias: The aliased class to use as the root of the scope. Yields: A new transpiler instance with the sub-scope. 
""" current_scope, sub_scope = (self.scope, self.scope.sub(model, root_alias)) try: self.scope = sub_scope yield self finally: self.scope = current_scope def _literal_column(self, from_name: str, column_name: str) -> Label[Any]: """Creates a literal column label, ensuring proper quoting. This method is used to generate a column label that correctly handles quoting for identifiers, especially when dealing with names that might contain special characters or reserved keywords, by constructing a temporary select statement. Args: from_name: The name of the table or alias from which the column originates. column_name: The name of the column. Returns: A SQLAlchemy Label construct for the column. """ temp_statement = select(column(column_name)).select_from(text(from_name)).alias(from_name) return literal_column(str(temp_statement.c[column_name])).label(column_name) def _filter_to_expressions(self, dto_filter: GraphQLComparison, override: Optional[ColumnElement[Any]]=None, not_null_check: bool=False) -> list[ColumnElement[bool]]: """Converts a DTO filter to a list of SQLAlchemy expressions. Args: dto_filter: The DTO filter to convert. override: An optional column element to override the filter attribute. not_null_check: Whether to add a not-null check to the expressions. Returns: A list of SQLAlchemy boolean expressions. """ expressions: list[ColumnElement[bool]] = [] attribute = override if override is not None else self.scope.aliased_attribute(dto_filter.field_node) expressions = dto_filter.to_expressions(self.dialect, attribute) if not_null_check: expressions.append(attribute.is_not(null())) return expressions def _gather_joins(self, tree: QueryNodeType, is_outer: bool=False) -> list[Join]: """Gathers all joins needed for a query tree. Args: tree: The query tree to gather joins from. is_outer: Whether to create outer joins. Returns: A list of join objects for the query tree. """ joins: list[Join] = [self._join(child, is_outer=is_outer) for child in tree.iter_breadth_first() if not child.value.is_computed and child.value.is_relation and (not child.is_root)] return joins def _gather_conjonctions(self, query: Sequence[Union[Union[Filter, AggregationFilter], GraphQLComparison]], not_null_check: bool=False) -> Conjunction: """Gathers all conjunctions from a sequence of filters. Args: query: A sequence of filters to gather conjunctions from. not_null_check: Whether to add not null checks to the expressions. Returns: A conjunction object containing the gathered expressions, joins and common join path. 
""" bool_expressions: list[ColumnElement[bool]] = [] joins: list[Join] = [] common_join_path: list[QueryNodeType] = [] node_path: list[QueryNodeType] = [] for value in query: if isinstance(value, AggregationFilter): node_path = value.field_node.path_from_root() lateral_join, aggregation_expressions = self._aggregation_filter(value) if lateral_join is not None: joins.append(lateral_join) bool_expressions.extend(aggregation_expressions) elif isinstance(value, GraphQLComparison): node_path = value.field_node.path_from_root() bool_expressions.extend(self._filter_to_expressions(value, not_null_check=not_null_check)) else: conjunction = self._conjonctions(value, not_null_check) common_join_path = QueryNode.common_path(common_join_path, conjunction.common_join_path) joins.extend(conjunction.joins) if conjunction.expressions: and_expression = and_(*conjunction.expressions) bool_expressions.append(and_expression.self_group() if conjunction.has_many_predicates() else and_expression) if not isinstance(value, AggregationFilter): common_join_path = QueryNode.common_path(node_path, common_join_path) return Conjunction(bool_expressions, joins, common_join_path) def _conjonctions(self, query: Filter, allow_null: bool=False) -> Conjunction: """Processes a filter's AND, OR, and NOT conditions into a conjunction. Args: query: The filter to process. allow_null: Whether to allow null values in the filter conditions. Returns: A conjunction object containing the processed expressions, joins and common join path. """ bool_expressions: list[ColumnElement[bool]] = [] and_conjunction = self._gather_conjonctions(query.and_, allow_null) or_conjunction = self._gather_conjonctions(query.or_, allow_null) common_path = QueryNode.common_path(and_conjunction.common_join_path, or_conjunction.common_join_path) joins = [*and_conjunction.joins, *or_conjunction.joins] if query.not_: not_conjunction = self._gather_conjonctions([query.not_], not_null_check=True) common_path = [node for node in common_path if all((not_node != node for not_node in not_conjunction.common_join_path))] joins.extend(not_conjunction.joins) and_conjunction.expressions.append(not_(and_(*not_conjunction.expressions))) if and_conjunction.expressions: and_expression = and_(*and_conjunction.expressions) if or_conjunction.expressions and and_conjunction.has_many_predicates(): and_expression = and_expression.self_group() bool_expressions.append(and_expression) if or_conjunction.expressions: or_expression = or_(*or_conjunction.expressions) if and_conjunction.expressions and or_conjunction.has_many_predicates(): or_expression = or_expression.self_group() bool_expressions.append(or_expression) return Conjunction(bool_expressions, joins, common_path) def _aggregation_filter(self, aggregation: AggregationFilter) -> tuple[Optional[Join], list[ColumnElement[bool]]]: """Creates a join and filter expressions for an aggregation filter. Args: aggregation: The aggregation filter to process. Returns: A tuple containing: - The join object if a new join is needed, None otherwise. - A list of boolean expressions for the filter. 
""" aggregation_name = self.scope.key(self._aggregation_prefix) aggregation_node_inspect = self.scope.inspect(aggregation.field_node) aggregation_node = aggregation.field_node.find_parent(lambda node: node.value.is_aggregate, strict=True) aggregated_alias: AliasedClass[Any] = aliased(aggregation_node_inspect.mapper.class_) aggregated_alias_inspected = inspect(aggregated_alias) root_relation = self.scope.aliased_attribute(aggregation_node).of_type(aggregated_alias) bool_expressions: list[ColumnElement[bool]] = [] if (function_column := self.scope.columns.get(aggregation.field_node)) is not None: bool_expressions.extend(aggregation.predicate.to_expressions(self.dialect, function_column)) return (None, bool_expressions) if (join := self._aggregation_joins.get(aggregation_node)): function_node, function = aggregation_node_inspect.filter_function(join.subquery_alias, distinct=aggregation.distinct) _, created = join.upsert_column_to_subquery(function) function_column = self.scope.scoped_column(join.selectable, self.scope.key(function_node)) if created: self.scope.columns[function_node] = function_column self.scope.where_function_nodes.add(function_node) bool_expressions.extend(aggregation.predicate.to_expressions(self.dialect, function_column)) return (None, bool_expressions) function_node, function = aggregation_node_inspect.filter_function(aggregated_alias, distinct=aggregation.distinct) if self._inspector.db_features.supports_lateral: statement = select(function).where(root_relation.expression).select_from(aggregated_alias_inspected).lateral(aggregation_name) join = AggregationJoin(target=statement, onclause=true(), node=aggregation_node, subquery_alias=aggregated_alias) else: statement = select(function).select_from(aggregated_alias_inspected) join = self._aggregation_cte_join(node=aggregation_node, alias=aggregated_alias, statement=statement, cte_name=aggregation_name) function_column = self._literal_column(aggregation_name, self.scope.key(function_node)) bool_expressions.extend(aggregation.predicate.to_expressions(self.dialect, function_column)) self.scope.columns[function_node] = function_column self.scope.where_function_nodes.add(function_node) self._aggregation_joins[aggregation_node] = join return (join, bool_expressions) def _upsert_aggregations(self, aggregation_node: QueryNodeType, existing_joins: list[Join]) -> tuple[list[NamedColumn[Any]], Optional[Join]]: """Upserts aggregations. 
        Args:
            aggregation_node: The aggregate query node to build function columns for.
            existing_joins: Joins already gathered for the query, checked for a reusable aggregation join.

        Returns:
            A tuple of the aggregation function columns and the newly created join, or None when an existing join was reused.
        """
        node_inspect = self.scope.inspect(aggregation_node)
        functions: dict[QueryNodeType, ColumnElement[Any]] = {}
        function_columns: list[NamedColumn[Any]] = []
        new_join: Optional[Join] = None
        existing_join = next((join for join in existing_joins if isinstance(join, AggregationJoin) and join.node == aggregation_node), None)
        alias = existing_join.subquery_alias if existing_join else aliased(node_inspect.mapper)
        for child_inspect in node_inspect.children:
            child_functions = child_inspect.output_functions(alias)
            for node in set(child_functions) & set(self.scope.columns):
                child_functions.pop(node)
                function_columns.append(self.scope.columns[node])
            functions.update(child_functions)
        if not functions:
            return (function_columns, None)
        if existing_join:
            for node, function in functions.items():
                _, created = existing_join.upsert_column_to_subquery(function)
                function_column = self.scope.scoped_column(existing_join.selectable, self.scope.key(node))
                if created:
                    self.scope.columns[node] = function_column
                function_columns.append(function_column)
        else:
            if self._inspector.db_features.supports_lateral:
                new_join = self._aggregation_lateral_join(aggregation_node, functions.values(), alias)
            else:
                new_join = self._aggregation_cte_join(node=aggregation_node, alias=alias, statement=select(*functions.values()), cte_name=self.scope.key(self._aggregation_prefix))
            for node in functions:
                function_column = self.scope.scoped_column(new_join.selectable, self.scope.key(node))
                self.scope.columns[node] = function_column
                function_columns.append(function_column)
        return (function_columns, new_join)

    def _select_child(self, statement: Select[tuple[DeclarativeT]], node: QueryNodeType) -> tuple[Select[tuple[DeclarativeT]], _AbstractLoad]:
        """Applies the load options to the statement for the given node.

        Load is applied based on whether it's a relation or not. If it's a relation, it calls itself recursively for each child node and applies the load options to the statement.

        Args:
            statement: The statement to be modified
            node: The node to apply the load options for

        Returns:
            A tuple of the modified statement and the eager-load options to attach to the parent load
        """
        columns, column_transforms = self.scope.inspect(node).columns()
        for column_transform in column_transforms:
            statement = statement.add_columns(column_transform.attribute)
        eager_options: list[_AbstractLoad] = []
        load = contains_eager(self.scope.aliased_attribute(node))
        if columns:
            eager_options = [load_only(*columns)]
        node_alias = self.scope.alias_from_relation_node(node, 'target')
        statement, hook_options = self._hook_applier.apply(statement, node, node_alias, 'undefer')
        eager_options.extend(hook_options)
        load = load.options(*eager_options)
        for child in node.children:
            if not child.value.is_relation or child.value.is_computed:
                continue
            statement, column_options = self._select_child(statement, child)
            if column_options:
                load = load.options(column_options)
        return (statement, load)

    def _root_aggregation_functions(self, selection_tree: QueryNodeType) -> list[Label[Any]]:
        """Build a list of root aggregations, given a QueryNodeType representing the selection tree.
:param selection_tree: The selection tree to build root aggregations from :return: A list of Labels representing the root aggregations """ if (aggregation_tree := selection_tree.find_child(lambda child: child.value.name == AGGREGATIONS_KEY)): return [function for child in aggregation_tree.children for function in self.scope.inspect(child).output_functions(self.scope.root_alias, lambda func: func.over()).values()] return [] def _join(self, node: QueryNodeType, is_outer: bool=False) -> Join: """Creates a join object for a query node. Args: node: The query node to create a join for. is_outer: Whether to create an outer join. Returns: A join object containing the join information. """ aliased_attribute = self.scope.aliased_attribute(node) relation_filter = node.metadata.data.relation_filter if not relation_filter: return Join(aliased_attribute, node=node, is_outer=is_outer) relationship = node.value.model_field.property assert isinstance(relationship, RelationshipProperty) target_mapper: Mapper[Any] = relationship.mapper.mapper target_alias = aliased(target_mapper, flat=True) order_by = relation_filter.order_by if isinstance(relation_filter, OrderByRelationFilterDTO) else () with self._sub_scope(target_mapper.class_, target_alias): query_graph = QueryGraph(self.scope, order_by=order_by) query = self._build_query(query_graph, limit=relation_filter.limit, offset=relation_filter.offset) if self._inspector.db_features.supports_lateral: join = self._lateral_join(node, target_alias, query, is_outer) else: join = self._cte_join(node, target_alias, query, is_outer) join.order_nodes = query_graph.order_by_nodes return join def _lateral_join(self, node: QueryNodeType, target_alias: AliasedClass[Any], query: Query, is_outer: bool) -> Join: """Creates a LATERAL join for a given node. Args: node: The query node representing the relation to join. target_alias: The aliased class for the target of the join. query: The subquery definition for the lateral join. is_outer: Whether to perform an outer join. Returns: A Join object representing the lateral join. """ target_insp = inspect(target_alias) aliased_attribute = self.scope.aliased_attribute(node) node_inspect = self.scope.inspect(node) name = self.scope.key(node) root_relation = aliased_attribute.of_type(target_insp) base_statement = select(target_insp).with_only_columns(*node_inspect.selection(target_alias)) statement = query.statement(base_statement).where(root_relation).lateral(name) lateral_alias = aliased(target_insp.mapper, statement, name=name, flat=True) self.scope.set_relation_alias(node, 'target', lateral_alias) return Join(statement, node=node, is_outer=is_outer, onclause=true()) def _cte_join(self, node: QueryNodeType, target_alias: AliasedClass[Any], query: Query, is_outer: bool) -> Join: """Creates a CTE-based join for a given node. This is used when LATERAL joins are not supported by the database. Args: node: The query node representing the relation to join. target_alias: The aliased class for the target of the join. query: The subquery definition for the CTE. is_outer: Whether to perform an outer join. Returns: A Join object representing the CTE-based join. 
""" aliased_attribute = self.scope.aliased_attribute(node) remote_fks = self.scope.inspect(node).foreign_key_columns('target', target_alias) rank_column: Optional[Label[int]] = None if query.order_by or query.limit is not None or query.offset is not None: rank_column = func.dense_rank().over(partition_by=remote_fks, order_by=query.order_by.expressions if query.order_by else None).label(name='rank') name = self.scope.key(node) query_wihtout_limit_offset = dataclasses.replace(query, offset=None, limit=None) node_inspect = self.scope.inspect(node) remote_fks = node_inspect.foreign_key_columns('target', target_alias) selection = node_inspect.selection(target_alias) base_statement = select(*selection, *remote_fks).group_by(*remote_fks, *selection).where(and_(*[fk.is_not(null()) for fk in remote_fks])) if rank_column is not None: base_statement = base_statement.add_columns(rank_column) statement = query_wihtout_limit_offset.statement(base_statement).cte(name) cte_alias = aliased(target_alias, statement, name=name) self.scope.set_relation_alias(node, 'target', cte_alias) limit_offset_condition: list[ColumnElement[bool]] = [] if rank_column is not None: rank_column = self.scope.scoped_column(statement, rank_column.name) if query.offset is not None: limit_offset_condition.append(rank_column > query.offset) if query.limit is not None: limit_offset_condition.append(rank_column <= (query.offset + query.limit if query.offset else query.limit)) return Join(statement, node, onclause=and_(aliased_attribute, *limit_offset_condition), is_outer=is_outer) def _aggregation_lateral_join(self, node: QueryNodeType, function_columns: Iterable[ColumnElement[Any]], alias: AliasedClass[Any]) -> AggregationJoin: """Creates an aggregation join object for a query node. Args: node: The query node to create an aggregation join for. function_columns: The columns to include in the aggregation. alias: The alias to use for the joined table. Returns: An aggregation join object containing the join information. """ lateral_name = self.scope.key(self._aggregation_prefix) root_relation = self.scope.aliased_attribute(node).of_type(inspect(alias)) lateral_statement = select(*function_columns).where(root_relation).lateral(lateral_name) return AggregationJoin(target=lateral_statement, onclause=true(), node=node, subquery_alias=alias) def _aggregation_cte_join(self, node: QueryNodeType, alias: AliasedClass[Any], statement: Select[Any], cte_name: str) -> AggregationJoin: """Creates an aggregation join using a Common Table Expression (CTE). This method is used for aggregations when LATERAL joins are not supported or not suitable. Args: node: The query node to create an aggregation join for. alias: The aliased class for the target of the aggregation. statement: The SQLAlchemy select statement for the aggregation. cte_name: The name to use for the CTE. Returns: An AggregationJoin object representing the CTE-based aggregation join. """ remote_fks = self.scope.inspect(node).foreign_key_columns('target', alias) cte_statement = statement.add_columns(*remote_fks).group_by(*remote_fks).where(and_(*[fk.is_not(null()) for fk in remote_fks])).cte(cte_name) cte_alias = aliased(alias, cte_statement) return AggregationJoin(target=cte_alias, onclause=self.scope.aliased_attribute(node).of_type(cte_alias), node=node, subquery_alias=alias) def _where(self, query_filter: Filter, allow_null: bool=False) -> Where: """Creates WHERE expressions and joins from a filter. Args: query_filter: The filter to create expressions from. 
            allow_null: Whether to allow null values in the filter conditions.

        Returns:
            A Where object bundling:
                - The boolean expressions for the WHERE clause
                - The joins needed for those expressions
        """
        conjunction = self._conjonctions(query_filter, allow_null)
        return Where(conjunction, [*conjunction.joins, *[self._join(node) for node in conjunction.common_join_path if not node.is_root and node.value.is_relation]])

    def _order_by(self, order_by_nodes: list[QueryNodeType], existing_joins: list[Join]) -> OrderBy:
        """Creates ORDER BY expressions and joins from a list of nodes.

        Args:
            order_by_nodes: The nodes to create order by expressions from.
            existing_joins: List of existing joins to check for reuse.

        Returns:
            An OrderBy object bundling:
                - The ordering columns for the ORDER BY clause
                - Any new joins needed for those columns
        """
        columns: list[tuple[SQLColumnExpression[Any], OrderByEnum]] = []
        joins: list[Join] = []
        seen_aggregation_nodes: set[QueryNodeType] = set()
        for node in order_by_nodes:
            if node.value.is_function_arg and node.find_parent(lambda node: node.value.is_aggregate, strict=True) in seen_aggregation_nodes:
                continue
            if node.value.is_function:
                self.scope.order_by_function_nodes.add(node)
            if node.metadata.data.order_by is None:
                msg = 'Missing order by value'
                raise TranspilingError(msg)
            if node.value.is_function_arg or node.value.is_function:
                first_aggregate_parent = node.find_parent(lambda node: node.value.is_aggregate, strict=True)
                function_columns, new_join = self._upsert_aggregations(first_aggregate_parent, existing_joins)
                columns.extend([(function_column, node.metadata.data.order_by) for function_column in function_columns])
                seen_aggregation_nodes.add(first_aggregate_parent)
                if new_join:
                    joins.append(new_join)
            else:
                columns.append((self.scope.aliased_attribute(node), node.metadata.data.order_by))
        if not columns and self._deterministic_ordering:
            pk_aliases = [pk_attribute.adapt_to_entity(inspect(self.scope.root_alias)) for pk_attribute in self._inspector.pk_attributes(self.scope.model.__mapper__)]
            columns.extend([(id_col, OrderByEnum.ASC) for id_col in pk_aliases])
        return OrderBy(self._inspector.db_features, columns, joins)

    def _select(self, selection_tree: QueryNodeType) -> tuple[Select[tuple[DeclarativeT]], list[Join]]:
        """Builds the main SELECT statement based on the selection tree.

        This method constructs the core SQLAlchemy SELECT statement, incorporating selected columns, aggregations, and eager loading options for relations defined in the `selection_tree`.

        Args:
            selection_tree: The query node tree representing the fields to select.

        Returns:
            A tuple containing:
                - The constructed SQLAlchemy Select statement.
                - A list of Join objects required for aggregations.
""" aggregation_joins: list[Join] = [] statement = self._base_statement() root_columns, column_transforms = self.scope.inspect(selection_tree).columns() for column_transform in column_transforms: statement = statement.add_columns(column_transform.attribute) for node in selection_tree.iter_depth_first(): if node.value.is_aggregate: function_columns, new_join = self._upsert_aggregations(node, aggregation_joins) statement = statement.add_columns(*function_columns) if new_join: aggregation_joins.append(new_join) root_options = [load_only(*root_columns)] if root_columns else [] statement, hook_options = self._hook_applier.apply(statement, selection_tree.root, self.scope.root_alias, 'undefer') root_options.extend(hook_options) for child in selection_tree.children: if not child.value.is_relation or child.value.is_computed: continue statement, options = self._select_child(statement, child) root_options.append(options) statement = statement.options(raiseload('*'), *root_options) return (statement, aggregation_joins) def _use_distinct_rank(self, query_graph: QueryGraph[DeclarativeT]) -> bool: """Determines if DISTINCT ON should be implemented using RANK() window function. This is necessary when the database dialect does not natively support DISTINCT ON, or when specific ordering is required with DISTINCT ON. Args: query_graph: The query graph containing distinct_on and order_by information. Returns: True if RANK() should be used for distinct operation, False otherwise. """ if self._inspector.db_features.supports_distinct_on: return bool(query_graph.distinct_on and (query_graph.order_by_tree or self._deterministic_ordering)) return bool(query_graph.distinct_on) def _relation_order_by(self, query_graph: QueryGraph[DeclarativeT], query: Query) -> list[OrderBySpec]: """Generates ORDER BY specifications for related entities in the query. Ensures consistent ordering for joined relations, especially when deterministic ordering is enabled or specific order nodes are defined for a join. Args: query_graph: The query graph containing selection and ordering information. query: The current Query object being built. Returns: A list of OrderBySpec tuples for related entities. """ selected_tree = query_graph.resolved_selection_tree() order_by_spec: list[OrderBySpec] = [] for join in sorted(query.joins): if isinstance(join, AggregationJoin) or join.node in query_graph.order_by_nodes or (not selected_tree.find_child(lambda node, _join=join: node.value.model_field is _join.node.value.model_field)): continue if not join.order_nodes and self._deterministic_ordering: order_by_spec.extend([(attribute, OrderByEnum.ASC) for attribute in self.scope.aliased_id_attributes(join.node)]) elif join.order_nodes: order_by_spec.extend([(self.scope.scoped_column(join.selectable, node.value.model_field_name), node.metadata.data.order_by) for node in join.order_nodes if node.metadata.data.order_by]) return order_by_spec def _build_query(self, query_graph: QueryGraph[DeclarativeT], limit: Optional[int]=None, offset: Optional[int]=None, allow_null: bool=False) -> Query: """Constructs the final SQLAlchemy Query object from a QueryGraph. This method orchestrates the assembly of the SELECT statement, WHERE clauses, ORDER BY clauses, LIMIT/OFFSET, DISTINCT ON logic, and all necessary JOINs (including subquery and aggregation joins). Args: query_graph: The graph representation of the query to build. limit: Optional limit for pagination. offset: Optional offset for pagination. allow_null: Whether to allow nulls in filter conditions. 
Returns: The fully constructed Query object. """ joins: list[Join] = [] subquery_join_nodes: set[QueryNodeType] = set() distinct_on_rank = self._use_distinct_rank(query_graph) query = Query(self._inspector.db_features, limit=limit, offset=offset, distinct_on=DistinctOn(query_graph), use_distinct_on=not distinct_on_rank) subquery_needed = self.scope.is_root and (limit is not None or offset is not None or distinct_on_rank) subquery_builder = SubqueryBuilder(self.scope, self._hook_applier, self._inspector.db_features) if subquery_needed: self.scope.replace(alias=subquery_builder.alias) if query_graph.query_filter: query.where = self._where(query_graph.query_filter, allow_null) joins.extend(query.where.joins) subquery_join_nodes = {join.node for join in query.where.joins} if query_graph.order_by_tree or self._deterministic_ordering: query.order_by = self._order_by(query_graph.order_by_nodes, joins) joins.extend(query.order_by.joins) if query_graph.subquery_join_tree: joins.extend([join for join in self._gather_joins(query_graph.subquery_join_tree, is_outer=True) if join.node not in subquery_join_nodes]) if subquery_needed: subquery_alias = subquery_builder.build(query_graph, dataclasses.replace(query, joins=joins)) self.scope.replace(alias=subquery_alias) query.offset = None query.joins = self._gather_joins(query_graph.root_join_tree, is_outer=True) query.order_by = self._order_by(query_graph.order_by_nodes, query.joins) query.joins.extend(query.order_by.joins) if distinct_on_rank: query.where = Where.from_expressions(subquery_builder.distinct_on_condition(subquery_alias)) elif query.where: query.where.clear_expressions() else: query.joins = joins + [join for join in self._gather_joins(query_graph.root_join_tree, is_outer=True) if join.node not in subquery_join_nodes] if query.order_by: query.order_by.columns.extend(self._relation_order_by(query_graph, query)) if query_graph.selection_tree and query_graph.selection_tree.graph_metadata.metadata.root_aggregations: query.root_aggregation_functions = self._root_aggregation_functions(query_graph.selection_tree) return query def select_executor(self, selection_tree: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, executor_cls: type[QueryExecutorT]=SyncQueryExecutor, execution_options: Optional[dict[str, Any]]=None) -> QueryExecutorT: """Creates a QueryExecutor that executes a SQLAlchemy query based on a selection tree. This method builds a QueryExecutor that can execute a SQLAlchemy query with various options like filtering, ordering, pagination, and aggregations. The query is built from a selection tree that defines which fields to select and how they relate to each other. Args: selection_tree: Tree structure defining fields to select and their relationships. If None, only ID fields are selected. dto_filter: Filter conditions to apply to the query. order_by: List of fields and directions to sort the results by. limit: Maximum number of results to return. offset: Number of results to skip before returning. distinct_on: Fields to apply DISTINCT ON to. allow_null: Whether to allow null values in filter conditions. executor_cls: Executor type to return. Defaults to SyncQueryExecutor. execution_options: Options for statement execution. Returns: A QueryExecutor instance that can execute the built query. 
Example: ```python # Create an executor that selects user data with filtering and ordering executor = transpiler.select_executor( selection_tree=user_fields_tree, dto_filter=BooleanFilterDTO(field="age", op="gt", value=18), order_by=[OrderByDTO(field="name", direction="ASC")], limit=10 ) results = await executor.execute() # If using an async executor ``` """ query_graph = QueryGraph(self.scope, selection_tree=selection_tree, dto_filter=dto_filter, order_by=order_by or [], distinct_on=distinct_on or []) query = self._build_query(query_graph, limit, offset, allow_null) statement, aggregation_joins = self._select(query_graph.resolved_selection_tree()) query.joins.extend(aggregation_joins) statement = query.statement(statement) return executor_cls(base_statement=statement, apply_unique=query.joins_have_many, root_aggregation_functions=query.root_aggregation_functions, scope=self.scope, execution_options=execution_options) def filter_expressions(self, dto_filter: BooleanFilterDTO) -> list[ColumnElement[bool]]: query_graph = QueryGraph(self.scope, dto_filter=dto_filter) query = self._build_query(query_graph) return query.where.expressions if query.where else [] @override def __repr__(self) -> str: return f'<{self.__class__.__name__} {self.scope.model}>'
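The `_cte_join` path above emulates per-relation LIMIT/OFFSET on backends without LATERAL support by ranking rows within each parent partition. A standalone sketch of that pattern, using throwaway models rather than the transpiler's internals:

```python
from sqlalchemy import ForeignKey, func, select
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

class Base(DeclarativeBase):
    pass

class Address(Base):
    __tablename__ = "addresses"
    id: Mapped[int] = mapped_column(primary_key=True)
    user_id: Mapped[int] = mapped_column(ForeignKey("users.id"))

# Rank each address within its owning user, mirroring how _cte_join
# partitions by the relation's remote foreign keys.
rank = (
    func.dense_rank()
    .over(partition_by=Address.user_id, order_by=Address.id)
    .label("rank")
)
ranked = select(Address.id, Address.user_id, rank).cte("ranked_addresses")
# "First 3 addresses per user" becomes a plain predicate on the rank column,
# the same shape as the limit_offset_condition built in _cte_join.
first_three = select(ranked).where(ranked.c.rank <= 3)
```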
class QueryTranspiler(Generic[DeclarativeT]): '''Transpiles a GraphQL query into a SQLAlchemy query.''' def __init__(self, model: type[DeclarativeT], dialect: Dialect, statement: Optional[Select[tuple[DeclarativeT]]]=None, scope: Optional[QueryScope[DeclarativeT]]=None, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[Any]]]]=None, deterministic_ordering: bool=False) -> None: '''Initializes the QueryTranspiler. Args: model: The SQLAlchemy model to transpile queries for. dialect: The SQLAlchemy dialect to use. statement: An optional base SQLAlchemy statement to build upon. scope: An optional existing QueryScope. query_hooks: Optional hooks to apply during query transpilation. deterministic_ordering: Whether to ensure deterministic ordering of results. ''' pass def _base_statement(self) -> Select[tuple[DeclarativeT]]: '''Creates the base select statement for the query. If a `self._statement` was provided during initialization, this method joins the root alias of the current scope to an aliased subquery derived from `self._statement`. Otherwise, it returns a simple select from the root alias. Returns: The base SQLAlchemy Select statement. ''' pass @contextmanager def _sub_scope(self, model: type[Any], root_alias: AliasedClass[Any]) -> Iterator[Self]: '''Creates a new scope for a sub-query. Args: model: The SQLAlchemy model to create a scope for. root_alias: The aliased class to use as the root of the scope. Yields: A new transpiler instance with the sub-scope. ''' pass def _literal_column(self, from_name: str, column_name: str) -> Label[Any]: '''Creates a literal column label, ensuring proper quoting. This method is used to generate a column label that correctly handles quoting for identifiers, especially when dealing with names that might contain special characters or reserved keywords, by constructing a temporary select statement. Args: from_name: The name of the table or alias from which the column originates. column_name: The name of the column. Returns: A SQLAlchemy Label construct for the column. ''' pass def _filter_to_expressions(self, dto_filter: GraphQLComparison, override: Optional[ColumnElement[Any]]=None, not_null_check: bool=False) -> list[ColumnElement[bool]]: '''Converts a DTO filter to a list of SQLAlchemy expressions. Args: dto_filter: The DTO filter to convert. override: An optional column element to override the filter attribute. not_null_check: Whether to add a not-null check to the expressions. Returns: A list of SQLAlchemy boolean expressions. ''' pass def _gather_joins(self, tree: QueryNodeType, is_outer: bool=False) -> list[Join]: '''Gathers all joins needed for a query tree. Args: tree: The query tree to gather joins from. is_outer: Whether to create outer joins. Returns: A list of join objects for the query tree. ''' pass def _gather_conjonctions(self, query: Sequence[Union[Union[Filter, AggregationFilter], GraphQLComparison]], not_null_check: bool=False) -> Conjunction: '''Gathers all conjunctions from a sequence of filters. Args: query: A sequence of filters to gather conjunctions from. not_null_check: Whether to add not null checks to the expressions. Returns: A conjunction object containing the gathered expressions, joins and common join path. ''' pass def _conjonctions(self, query: Filter, allow_null: bool=False) -> Conjunction: '''Processes a filter's AND, OR, and NOT conditions into a conjunction. Args: query: The filter to process. allow_null: Whether to allow null values in the filter conditions. 
        Returns:
            A conjunction object containing the processed expressions, joins and common join path.
        '''
        pass

    def _aggregation_filter(self, aggregation: AggregationFilter) -> tuple[Optional[Join], list[ColumnElement[bool]]]:
        '''Creates a join and filter expressions for an aggregation filter.

        Args:
            aggregation: The aggregation filter to process.

        Returns:
            A tuple containing:
                - The join object if a new join is needed, None otherwise.
                - A list of boolean expressions for the filter.
        '''
        pass

    def _upsert_aggregations(self, aggregation_node: QueryNodeType, existing_joins: list[Join]) -> tuple[list[NamedColumn[Any]], Optional[Join]]:
        '''Upserts aggregations.

        Args:
            aggregation_node: The aggregate query node to build function columns for.
            existing_joins: Joins already gathered for the query, checked for a reusable aggregation join.

        Returns:
            A tuple of the aggregation function columns and the newly created join, or None when an existing join was reused.
        '''
        pass

    def _select_child(self, statement: Select[tuple[DeclarativeT]], node: QueryNodeType) -> tuple[Select[tuple[DeclarativeT]], _AbstractLoad]:
        '''Applies the load options to the statement for the given node.

        Load is applied based on whether it's a relation or not. If it's a relation, it calls itself recursively for each child node and applies the load options to the statement.

        Args:
            statement: The statement to be modified
            node: The node to apply the load options for

        Returns:
            A tuple of the modified statement and the eager-load options to attach to the parent load
        '''
        pass

    def _root_aggregation_functions(self, selection_tree: QueryNodeType) -> list[Label[Any]]:
        '''Build a list of root aggregations, given a QueryNodeType representing the selection tree.

        :param selection_tree: The selection tree to build root aggregations from
        :return: A list of Labels representing the root aggregations
        '''
        pass

    def _join(self, node: QueryNodeType, is_outer: bool=False) -> Join:
        '''Creates a join object for a query node.

        Args:
            node: The query node to create a join for.
            is_outer: Whether to create an outer join.

        Returns:
            A join object containing the join information.
        '''
        pass

    def _lateral_join(self, node: QueryNodeType, target_alias: AliasedClass[Any], query: Query, is_outer: bool) -> Join:
        '''Creates a LATERAL join for a given node.

        Args:
            node: The query node representing the relation to join.
            target_alias: The aliased class for the target of the join.
            query: The subquery definition for the lateral join.
            is_outer: Whether to perform an outer join.

        Returns:
            A Join object representing the lateral join.
        '''
        pass

    def _cte_join(self, node: QueryNodeType, target_alias: AliasedClass[Any], query: Query, is_outer: bool) -> Join:
        '''Creates a CTE-based join for a given node.

        This is used when LATERAL joins are not supported by the database.

        Args:
            node: The query node representing the relation to join.
            target_alias: The aliased class for the target of the join.
            query: The subquery definition for the CTE.
            is_outer: Whether to perform an outer join.

        Returns:
            A Join object representing the CTE-based join.
        '''
        pass

    def _aggregation_lateral_join(self, node: QueryNodeType, function_columns: Iterable[ColumnElement[Any]], alias: AliasedClass[Any]) -> AggregationJoin:
        '''Creates an aggregation join object for a query node.

        Args:
            node: The query node to create an aggregation join for.
            function_columns: The columns to include in the aggregation.
            alias: The alias to use for the joined table.

        Returns:
            An aggregation join object containing the join information.
        '''
        pass

    def _aggregation_cte_join(self, node: QueryNodeType, alias: AliasedClass[Any], statement: Select[Any], cte_name: str) -> AggregationJoin:
        '''Creates an aggregation join using a Common Table Expression (CTE).

        This method is used for aggregations when LATERAL joins are not supported or not suitable.

        Args:
            node: The query node to create an aggregation join for.
            alias: The aliased class for the target of the aggregation.
            statement: The SQLAlchemy select statement for the aggregation.
            cte_name: The name to use for the CTE.

        Returns:
            An AggregationJoin object representing the CTE-based aggregation join.
        '''
        pass

    def _where(self, query_filter: Filter, allow_null: bool=False) -> Where:
        '''Creates WHERE expressions and joins from a filter.

        Args:
            query_filter: The filter to create expressions from.
            allow_null: Whether to allow null values in the filter conditions.

        Returns:
            A Where object bundling:
                - The boolean expressions for the WHERE clause
                - The joins needed for those expressions
        '''
        pass

    def _order_by(self, order_by_nodes: list[QueryNodeType], existing_joins: list[Join]) -> OrderBy:
        '''Creates ORDER BY expressions and joins from a list of nodes.

        Args:
            order_by_nodes: The nodes to create order by expressions from.
            existing_joins: List of existing joins to check for reuse.

        Returns:
            An OrderBy object bundling:
                - The ordering columns for the ORDER BY clause
                - Any new joins needed for those columns
        '''
        pass

    def _select(self, selection_tree: QueryNodeType) -> tuple[Select[tuple[DeclarativeT]], list[Join]]:
        '''Builds the main SELECT statement based on the selection tree.

        This method constructs the core SQLAlchemy SELECT statement, incorporating selected columns, aggregations, and eager loading options for relations defined in the `selection_tree`.

        Args:
            selection_tree: The query node tree representing the fields to select.

        Returns:
            A tuple containing:
                - The constructed SQLAlchemy Select statement.
                - A list of Join objects required for aggregations.
        '''
        pass

    def _use_distinct_rank(self, query_graph: QueryGraph[DeclarativeT]) -> bool:
        '''Determines if DISTINCT ON should be implemented using RANK() window function.

        This is necessary when the database dialect does not natively support DISTINCT ON, or when specific ordering is required with DISTINCT ON.

        Args:
            query_graph: The query graph containing distinct_on and order_by information.

        Returns:
            True if RANK() should be used for distinct operation, False otherwise.
        '''
        pass

    def _relation_order_by(self, query_graph: QueryGraph[DeclarativeT], query: Query) -> list[OrderBySpec]:
        '''Generates ORDER BY specifications for related entities in the query.

        Ensures consistent ordering for joined relations, especially when deterministic ordering is enabled or specific order nodes are defined for a join.

        Args:
            query_graph: The query graph containing selection and ordering information.
            query: The current Query object being built.

        Returns:
            A list of OrderBySpec tuples for related entities.
        '''
        pass

    def _build_query(self, query_graph: QueryGraph[DeclarativeT], limit: Optional[int]=None, offset: Optional[int]=None, allow_null: bool=False) -> Query:
        '''Constructs the final SQLAlchemy Query object from a QueryGraph.

        This method orchestrates the assembly of the SELECT statement, WHERE clauses, ORDER BY clauses, LIMIT/OFFSET, DISTINCT ON logic, and all necessary JOINs (including subquery and aggregation joins).

        Args:
            query_graph: The graph representation of the query to build.
            limit: Optional limit for pagination.
offset: Optional offset for pagination. allow_null: Whether to allow nulls in filter conditions. Returns: The fully constructed Query object. ''' pass def select_executor(self, selection_tree: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, executor_cls: type[QueryExecutorT]=SyncQueryExecutor, execution_options: Optional[dict[str, Any]]=None) -> QueryExecutorT: '''Creates a QueryExecutor that executes a SQLAlchemy query based on a selection tree. This method builds a QueryExecutor that can execute a SQLAlchemy query with various options like filtering, ordering, pagination, and aggregations. The query is built from a selection tree that defines which fields to select and how they relate to each other. Args: selection_tree: Tree structure defining fields to select and their relationships. If None, only ID fields are selected. dto_filter: Filter conditions to apply to the query. order_by: List of fields and directions to sort the results by. limit: Maximum number of results to return. offset: Number of results to skip before returning. distinct_on: Fields to apply DISTINCT ON to. allow_null: Whether to allow null values in filter conditions. executor_cls: Executor type to return. Defaults to SyncQueryExecutor. execution_options: Options for statement execution. Returns: A QueryExecutor instance that can execute the built query. Example: ```python # Create an executor that selects user data with filtering and ordering executor = transpiler.select_executor( selection_tree=user_fields_tree, dto_filter=BooleanFilterDTO(field="age", op="gt", value=18), order_by=[OrderByDTO(field="name", direction="ASC")], limit=10 ) results = await executor.execute() # If using an async executor ``` ''' pass def filter_expressions(self, dto_filter: BooleanFilterDTO) -> list[ColumnElement[bool]]: pass @override def __repr__(self) -> str: pass
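`_root_aggregation_functions` relies on the window-function form of aggregates so that root-level totals can ride along each selected row without a GROUP BY collapsing the result set. A minimal sketch of that trick (the model is a placeholder, not strawchemy's API):

```python
from sqlalchemy import func, select
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

class Base(DeclarativeBase):
    pass

class User(Base):
    __tablename__ = "users"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]

# An empty OVER() turns the aggregate into a window function, so every
# selected row also carries the total count of the result set.
total = func.count().over().label("total_count")
stmt = select(User.id, User.name, total)
```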
29
25
32
3
20
9
4
0.44
1
36
24
0
26
8
26
26
867
107
526
207
455
234
342
159
315
10
1
3
99
327,972
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/exceptions.py
strawchemy.sqlalchemy.exceptions.QueryHookError
class QueryHookError(Exception): """Raised when an error occurs within a query hook's execution."""
class QueryHookError(Exception): '''Raised when an error occurs within a query hook's execution.''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
10
2
0
1
1
0
1
1
1
0
0
3
0
0
327,973
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/exceptions.py
strawchemy.sqlalchemy.exceptions.QueryResultError
class QueryResultError(Exception): """Raised when an error occurs during query result processing or mapping."""
class QueryResultError(Exception): '''Raised when an error occurs during query result processing or mapping.''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
10
2
0
1
1
0
1
1
1
0
0
3
0
0
327,974
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/exceptions.py
strawchemy.sqlalchemy.exceptions.TranspilingError
class TranspilingError(Exception): """Raised when an error occurs during transpiling."""
class TranspilingError(Exception): '''Raised when an error occurs during transpiling.''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
10
2
0
1
1
0
1
1
1
0
0
3
0
0
327,975
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/hook.py
strawchemy.sqlalchemy.hook.QueryHook
from sqlalchemy.orm import ColumnProperty, RelationshipProperty, joinedload, selectinload, undefer from contextvars import ContextVar from sqlalchemy.orm.util import AliasedClass from .exceptions import QueryHookError from .typing import DeclarativeT from sqlalchemy.orm.strategy_options import _AbstractLoad from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, Optional from dataclasses import dataclass, field @dataclass class QueryHook(Generic[DeclarativeT]): """Base class for defining custom query modifications and data loading strategies. `QueryHook` instances are used to dynamically alter SQLAlchemy queries, primarily for specifying which columns and relationships should be eagerly loaded. This is often driven by the fields requested in a GraphQL query, made available via the `info` context variable. Attributes: load: A sequence defining which attributes (columns or relationships) to load. Relationships can be specified with nested load options. Example: `[User.name, (User.addresses, [Address.street])]` info_var: A class-level `ContextVar` to access the Strawberry `Info` object, providing context about the current GraphQL request. """ info_var: ClassVar[ContextVar[Optional[Info[Any, Any]]]] = ContextVar('info', default=None) load: Sequence[LoadType] = field(default_factory=list) _columns: list[InstrumentedAttribute[Any]] = field(init=False, default_factory=list) _relationships: list[tuple[InstrumentedAttribute[Any], Sequence[LoadType]]] = field(init=False, default_factory=list) def __post_init__(self) -> None: for attribute in self.load: is_mapping = isinstance(attribute, tuple) if not is_mapping: if isinstance(attribute.property, ColumnProperty): self._columns.append(attribute) if isinstance(attribute.property, RelationshipProperty): self._relationships.append((attribute, [])) continue self._relationships.append(attribute) self._check_relationship_load_spec(self._relationships) def _check_relationship_load_spec(self, load_spec: list[tuple[InstrumentedAttribute[Any], Sequence[LoadType]]]) -> None: """Recursively validates relationship load specifications. Ensures that the primary attribute in each part of a relationship load specification is indeed a SQLAlchemy `RelationshipProperty`. Args: load_spec: The relationship load specification to validate, typically `self._relationships` or a nested part of it. Raises: QueryHookError: If an attribute intended to specify a relationship is not a `RelationshipProperty`. """ for key, attributes in load_spec: for attribute in attributes: if isinstance(attribute, list): self._check_relationship_load_spec(attribute) if not isinstance(key.property, RelationshipProperty): msg = f'Keys of mappings passed in `load` param must be relationship attributes: {key}' raise QueryHookError(msg) def _load_relationships(self, load_spec: RelationshipLoadSpec, parent_alias: Optional[AliasedClass[Any]]=None) -> _AbstractLoad: """Constructs SQLAlchemy loader options for a relationship. Generates `joinedload` or `selectinload` options based on the `load_spec`. It supports loading specific columns of the related model (`load_only`) and applying further nested loader options. Args: load_spec: A tuple containing the relationship attribute and a sequence of attributes or nested relationships to load for it. parent_alias: The aliased class of the parent entity. If `None`, `joinedload` is used for the relationship. Otherwise, `selectinload` is used from the `parent_alias`. Returns: A SQLAlchemy `_AbstractLoad` object representing the loader strategy. 
""" relationship, attributes = load_spec alias_relationship = getattr(parent_alias, relationship.key) if parent_alias else relationship load = joinedload(alias_relationship) if parent_alias is None else selectinload(alias_relationship) columns = [] children_loads: list[_AbstractLoad] = [] for attribute in attributes: if isinstance(attribute, tuple): children_loads.append(self._load_relationships(attribute)) else: columns.append(attribute) if columns: load = load.load_only(*columns) if children_loads: load = load.options(*children_loads) return load @property def info(self) -> Info[Any, Any]: """Provides access to the Strawberry GraphQL Info object. Retrieves the `Info` object from the `info_var` context variable. This object contains details about the current GraphQL request, which can be used to tailor the query. Returns: The Strawberry `Info` object. Raises: QueryHookError: If the `Info` object is not set in the context. """ if (info := self.info_var.get()): return info msg = 'info context is not available' raise QueryHookError(msg) def load_relationships(self, alias: AliasedClass[Any]) -> list[_AbstractLoad]: """Generates loader options for all configured relationships. Iterates over `self._relationships` and calls `_load_relationships` for each to create the appropriate SQLAlchemy loader options. Args: alias: The `AliasedClass` representing the entity to which these relationships are attached and should be loaded from. Returns: A list of SQLAlchemy `_AbstractLoad` objects. """ return [self._load_relationships(load_spec, alias) for load_spec in self._relationships] def load_columns(self, statement: Select[tuple[DeclarativeT]], alias: AliasedClass[Any], mode: ColumnLoadingMode) -> tuple[Select[tuple[DeclarativeT]], list[_AbstractLoad]]: """Applies column loading strategies to the SELECT statement. Modifies the given SQLAlchemy `Select` statement to ensure specified columns (from `self._columns`) are loaded. If `mode` is "undefer", it generates `undefer` options for the columns. If `mode` is "add", it adds the columns directly to the statement's selected entities. Args: statement: The SQLAlchemy `Select` statement to modify. alias: The `AliasedClass` of the entity from which columns are loaded. mode: The column loading mode, either "undefer" or "add". Returns: A tuple containing the potentially modified `Select` statement and a list of SQLAlchemy `_AbstractLoad` options (e.g., `undefer` options). """ load_options: list[_AbstractLoad] = [] for column in self._columns: alias_attribute = getattr(alias, column.key) if mode == 'undefer': load_options.append(undefer(alias_attribute)) else: statement = statement.add_columns(alias_attribute) return (statement, load_options) def apply_hook(self, statement: Select[tuple[DeclarativeT]], alias: AliasedClass[DeclarativeT]) -> Select[tuple[DeclarativeT]]: """Applies custom modifications to the SELECT statement. This method is intended to be overridden by subclasses to implement specific query alteration logic beyond column and relationship loading, such as adding filters, joins, or other clauses. By default, this base implementation returns the statement unchanged. Args: statement: The SQLAlchemy `Select` statement to modify. alias: The `AliasedClass` for the primary entity of the query. Returns: The (potentially) modified `Select` statement. """ return statement
@dataclass class QueryHook(Generic[DeclarativeT]): '''Base class for defining custom query modifications and data loading strategies. `QueryHook` instances are used to dynamically alter SQLAlchemy queries, primarily for specifying which columns and relationships should be eagerly loaded. This is often driven by the fields requested in a GraphQL query, made available via the `info` context variable. Attributes: load: A sequence defining which attributes (columns or relationships) to load. Relationships can be specified with nested load options. Example: `[User.name, (User.addresses, [Address.street])]` info_var: A class-level `ContextVar` to access the Strawberry `Info` object, providing context about the current GraphQL request. ''' def __post_init__(self) -> None: pass def _check_relationship_load_spec(self, load_spec: list[tuple[InstrumentedAttribute[Any], Sequence[LoadType]]]) -> None: '''Recursively validates relationship load specifications. Ensures that the primary attribute in each part of a relationship load specification is indeed a SQLAlchemy `RelationshipProperty`. Args: load_spec: The relationship load specification to validate, typically `self._relationships` or a nested part of it. Raises: QueryHookError: If an attribute intended to specify a relationship is not a `RelationshipProperty`. ''' pass def _load_relationships(self, load_spec: RelationshipLoadSpec, parent_alias: Optional[AliasedClass[Any]]=None) -> _AbstractLoad: '''Constructs SQLAlchemy loader options for a relationship. Generates `joinedload` or `selectinload` options based on the `load_spec`. It supports loading specific columns of the related model (`load_only`) and applying further nested loader options. Args: load_spec: A tuple containing the relationship attribute and a sequence of attributes or nested relationships to load for it. parent_alias: The aliased class of the parent entity. If `None`, `joinedload` is used for the relationship. Otherwise, `selectinload` is used from the `parent_alias`. Returns: A SQLAlchemy `_AbstractLoad` object representing the loader strategy. ''' pass @property def info(self) -> Info[Any, Any]: '''Provides access to the Strawberry GraphQL Info object. Retrieves the `Info` object from the `info_var` context variable. This object contains details about the current GraphQL request, which can be used to tailor the query. Returns: The Strawberry `Info` object. Raises: QueryHookError: If the `Info` object is not set in the context. ''' pass def load_relationships(self, alias: AliasedClass[Any]) -> list[_AbstractLoad]: '''Generates loader options for all configured relationships. Iterates over `self._relationships` and calls `_load_relationships` for each to create the appropriate SQLAlchemy loader options. Args: alias: The `AliasedClass` representing the entity to which these relationships are attached and should be loaded from. Returns: A list of SQLAlchemy `_AbstractLoad` objects. ''' pass def load_columns(self, statement: Select[tuple[DeclarativeT]], alias: AliasedClass[Any], mode: ColumnLoadingMode) -> tuple[Select[tuple[DeclarativeT]], list[_AbstractLoad]]: '''Applies column loading strategies to the SELECT statement. Modifies the given SQLAlchemy `Select` statement to ensure specified columns (from `self._columns`) are loaded. If `mode` is "undefer", it generates `undefer` options for the columns. If `mode` is "add", it adds the columns directly to the statement's selected entities. Args: statement: The SQLAlchemy `Select` statement to modify. 
alias: The `AliasedClass` of the entity from which columns are loaded. mode: The column loading mode, either "undefer" or "add". Returns: A tuple containing the potentially modified `Select` statement and a list of SQLAlchemy `_AbstractLoad` options (e.g., `undefer` options). ''' pass def apply_hook(self, statement: Select[tuple[DeclarativeT]], alias: AliasedClass[DeclarativeT]) -> Select[tuple[DeclarativeT]]: '''Applies custom modifications to the SELECT statement. This method is intended to be overridden by subclasses to implement specific query alteration logic beyond column and relationship loading, such as adding filters, joins, or other clauses. By default, this base implementation returns the statement unchanged. Args: statement: The SQLAlchemy `Select` statement to modify. alias: The `AliasedClass` for the primary entity of the query. Returns: The (potentially) modified `Select` statement. ''' pass
10
7
21
3
9
9
3
1.13
1
5
1
6
7
0
7
7
178
31
69
37
52
78
56
27
48
7
1
3
24
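A minimal usage sketch for the QueryHook record above. The `User`/`Address` models are illustrative stand-ins defined locally (they are not part of strawchemy), and the sketch assumes `QueryHook` is importable from `strawchemy.sqlalchemy.hook` as recorded in this entry. Plain column attributes in `load` are collected for eager column loading, while `(relationship, [nested...])` tuples declare nested relationship loads:

from sqlalchemy import ForeignKey
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship

from strawchemy.sqlalchemy.hook import QueryHook  # module path taken from this record


class Base(DeclarativeBase):
    pass


class Address(Base):
    __tablename__ = "address"
    id: Mapped[int] = mapped_column(primary_key=True)
    street: Mapped[str]
    user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))


class User(Base):
    __tablename__ = "user"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]
    addresses: Mapped[list[Address]] = relationship()


# User.name is a ColumnProperty and loads directly; the tuple limits the
# Address relationship load to Address.street.
hook = QueryHook(load=[User.name, (User.addresses, [Address.street])])

Passing a column attribute as the key of such a tuple would raise QueryHookError, since `_check_relationship_load_spec` only accepts relationship attributes there.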
327,976
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/inspector.py
strawchemy.sqlalchemy.inspector.SQLAlchemyGraphQLInspector
from sqlalchemy.orm import NO_VALUE, DeclarativeBase, QueryableAttribute, registry from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar from strawchemy.strawberry.filters.inputs import make_full_json_comparison_input, make_sqlite_json_comparison_input from strawchemy.constants import GEO_INSTALLED from strawchemy.config.databases import DatabaseFeatures from sqlalchemy.types import ARRAY from strawchemy.dto.inspectors.sqlalchemy import SQLAlchemyInspector from strawchemy.strawberry.filters import ArrayComparison, DateComparison, DateTimeComparison, EqualityComparison, GraphQLComparison, OrderComparison, TextComparison, TimeComparison, TimeDeltaComparison class SQLAlchemyGraphQLInspector(SQLAlchemyInspector): """Inspects SQLAlchemy models to determine appropriate GraphQL filter types. This inspector extends `SQLAlchemyInspector` to provide mappings from SQLAlchemy model attributes and Python types to specific GraphQL comparison filter input types (e.g., `TextComparison`, `OrderComparison`). It takes into account the database dialect's features (via `DatabaseFeatures`) to select suitable filters, for example, for JSON or geospatial types. Custom filter mappings can also be provided through `filter_overrides`. Key methods `get_field_comparison` and `get_type_comparison` are used to retrieve the corresponding filter types. """ def __init__(self, dialect: SupportedDialect, registries: Optional[list[registry]]=None, filter_overrides: Optional[FilterMap]=None) -> None: """Initializes the SQLAlchemyGraphQLInspector. Args: dialect: The SQL dialect of the target database (e.g., "postgresql", "sqlite"). registries: An optional list of SQLAlchemy registries to inspect. If None, the default registry is used. filter_overrides: An optional mapping to override or extend the default Python type to GraphQL filter type mappings. """ super().__init__(registries) self.db_features = DatabaseFeatures.new(dialect) self.filters_map = self._filter_map() self.filters_map |= filter_overrides or {} def _filter_map(self) -> FilterMap: """Constructs the map of Python types to GraphQL filter comparison types. Starts with a default set of filters (`_DEFAULT_FILTERS_MAP`). If GeoAlchemy is installed (`GEO_INSTALLED`), it adds mappings for geospatial types to `GeoComparison`. It then adds mappings for `dict` to appropriate JSON comparison types based on whether the dialect is SQLite or another database that supports more advanced JSON operations. Returns: The constructed `FilterMap`. """ filters_map = _DEFAULT_FILTERS_MAP if GEO_INSTALLED: from geoalchemy2 import WKBElement, WKTElement from shapely import Geometry from strawchemy.strawberry.filters.geo import GeoComparison filters_map |= {(Geometry, WKBElement, WKTElement): GeoComparison} if self.db_features.dialect == 'sqlite': filters_map[dict, Dict] = make_sqlite_json_comparison_input() else: filters_map[dict, Dict] = make_full_json_comparison_input() return filters_map @classmethod def _is_specialized(cls, type_: type[Any]) -> bool: """Checks if a generic type is fully specialized. A type is considered specialized if it has no type parameters (`__parameters__`) or if all its type parameters are concrete types (not `TypeVar`). Args: type_: The type to check. Returns: True if the type is specialized, False otherwise. 
""" return not hasattr(type_, '__parameters__') or all((not isinstance(param, TypeVar) for param in type_.__parameters__)) @classmethod def _filter_type(cls, type_: type[Any], sqlalchemy_filter: type[GraphQLComparison]) -> type[GraphQLComparison]: """Potentially specializes a generic GraphQL filter type with a Python type. If the provided `sqlalchemy_filter` is a generic type (e.g., `OrderComparison[T]`) and is not yet specialized, this method specializes it using `type_` (e.g., `OrderComparison[int]`). If `sqlalchemy_filter` is already specialized or not generic, it's returned as is. Args: type_: The Python type to use for specialization if needed. sqlalchemy_filter: The GraphQL filter type, which might be generic. Returns: The (potentially specialized) GraphQL filter type. """ return sqlalchemy_filter if cls._is_specialized(sqlalchemy_filter) else sqlalchemy_filter[type_] def get_field_comparison(self, field_definition: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]) -> type[GraphQLComparison]: """Determines the GraphQL comparison filter type for a DTO field. This method inspects the type of the given DTO field. For `ARRAY` types on PostgreSQL, it returns a specialized `ArrayComparison`. Otherwise, it delegates to `get_type_comparison` using the Python type of the model field. Args: field_definition: The DTO field definition, which contains information about the model attribute and its type. Returns: The GraphQL comparison filter type suitable for the field. """ field_type = field_definition.model_field.type if isinstance(field_type, ARRAY) and self.db_features.dialect == 'postgresql': return ArrayComparison[field_type.item_type.python_type] return self.get_type_comparison(self.model_field_type(field_definition)) def get_type_comparison(self, type_: type[Any]) -> type[GraphQLComparison]: """Determines the GraphQL comparison filter type for a Python type. It iterates through the `self.filters_map` (which includes default and dialect-specific filters) to find a filter type that matches the provided Python `type_`. If a direct match or a superclass match is found, the corresponding filter type is returned, potentially specialized using `_filter_type`. If no specific filter is found in the map, it defaults to `EqualityComparison` specialized with the given `type_`. Args: type_: The Python type for which to find a GraphQL filter. Returns: The GraphQL comparison filter type suitable for the Python type. """ for types, sqlalchemy_filter in self.filters_map.items(): if issubclass(type_, types): return self._filter_type(type_, sqlalchemy_filter) return EqualityComparison[type_]
class SQLAlchemyGraphQLInspector(SQLAlchemyInspector): '''Inspects SQLAlchemy models to determine appropriate GraphQL filter types. This inspector extends `SQLAlchemyInspector` to provide mappings from SQLAlchemy model attributes and Python types to specific GraphQL comparison filter input types (e.g., `TextComparison`, `OrderComparison`). It takes into account the database dialect's features (via `DatabaseFeatures`) to select suitable filters, for example, for JSON or geospatial types. Custom filter mappings can also be provided through `filter_overrides`. Key methods `get_field_comparison` and `get_type_comparison` are used to retrieve the corresponding filter types. ''' def __init__(self, dialect: SupportedDialect, registries: Optional[list[registry]]=None, filter_overrides: Optional[FilterMap]=None) -> None: '''Initializes the SQLAlchemyGraphQLInspector. Args: dialect: The SQL dialect of the target database (e.g., "postgresql", "sqlite"). registries: An optional list of SQLAlchemy registries to inspect. If None, the default registry is used. filter_overrides: An optional mapping to override or extend the default Python type to GraphQL filter type mappings. ''' pass def _filter_map(self) -> FilterMap: '''Constructs the map of Python types to GraphQL filter comparison types. Starts with a default set of filters (`_DEFAULT_FILTERS_MAP`). If GeoAlchemy is installed (`GEO_INSTALLED`), it adds mappings for geospatial types to `GeoComparison`. It then adds mappings for `dict` to appropriate JSON comparison types based on whether the dialect is SQLite or another database that supports more advanced JSON operations. Returns: The constructed `FilterMap`. ''' pass @classmethod def _is_specialized(cls, type_: type[Any]) -> bool: '''Checks if a generic type is fully specialized. A type is considered specialized if it has no type parameters (`__parameters__`) or if all its type parameters are concrete types (not `TypeVar`). Args: type_: The type to check. Returns: True if the type is specialized, False otherwise. ''' pass @classmethod def _filter_type(cls, type_: type[Any], sqlalchemy_filter: type[GraphQLComparison]) -> type[GraphQLComparison]: '''Potentially specializes a generic GraphQL filter type with a Python type. If the provided `sqlalchemy_filter` is a generic type (e.g., `OrderComparison[T]`) and is not yet specialized, this method specializes it using `type_` (e.g., `OrderComparison[int]`). If `sqlalchemy_filter` is already specialized or not generic, it's returned as is. Args: type_: The Python type to use for specialization if needed. sqlalchemy_filter: The GraphQL filter type, which might be generic. Returns: The (potentially specialized) GraphQL filter type. ''' pass def get_field_comparison(self, field_definition: DTOFieldDefinition[DeclarativeBase, QueryableAttribute[Any]]) -> type[GraphQLComparison]: '''Determines the GraphQL comparison filter type for a DTO field. This method inspects the type of the given DTO field. For `ARRAY` types on PostgreSQL, it returns a specialized `ArrayComparison`. Otherwise, it delegates to `get_type_comparison` using the Python type of the model field. Args: field_definition: The DTO field definition, which contains information about the model attribute and its type. Returns: The GraphQL comparison filter type suitable for the field. ''' pass def get_type_comparison(self, type_: type[Any]) -> type[GraphQLComparison]: '''Determines the GraphQL comparison filter type for a Python type. 
It iterates through the `self.filters_map` (which includes default and dialect-specific filters) to find a filter type that matches the provided Python `type_`. If a direct match or a superclass match is found, the corresponding filter type is returned, potentially specialized using `_filter_type`. If no specific filter is found in the map, it defaults to `EqualityComparison` specialized with the given `type_`. Args: type_: The Python type for which to find a GraphQL filter. Returns: The GraphQL comparison filter type suitable for the Python type. ''' pass
9
7
20
3
7
11
2
1.72
1
12
6
0
4
2
6
74
141
27
43
24
24
74
31
15
21
3
7
2
12
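A standalone sketch of the generic-specialization check behind `_is_specialized` and `_filter_type` in the record above: a comparison type still carrying `TypeVar` parameters gets specialized with the inspected Python type, while an already-specialized one is used as-is. `OrderComparison` here is an illustrative stand-in, not the library's class:

from typing import Any, Generic, TypeVar

T = TypeVar("T")


class OrderComparison(Generic[T]):
    """Illustrative stand-in for a generic comparison filter type."""


def is_specialized(type_: type[Any]) -> bool:
    # Mirrors the record's logic: no __parameters__ at all, or none of the
    # remaining parameters is still an unbound TypeVar.
    return not hasattr(type_, "__parameters__") or all(
        not isinstance(param, TypeVar) for param in type_.__parameters__
    )


print(is_specialized(OrderComparison))       # False -> would become OrderComparison[int]
print(is_specialized(OrderComparison[int]))  # True  -> used as-is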
327,977
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/repository/_async.py
strawchemy.sqlalchemy.repository._async.SQLAlchemyGraphQLAsyncRepository
from strawchemy.sqlalchemy._executor import AsyncQueryExecutor, QueryResult from strawchemy.strawberry.mutation.input import UpsertData from ._base import InsertData, MutationData, SQLAlchemyGraphQLRepository from sqlalchemy.orm import RelationshipProperty from sqlalchemy import ColumnElement, Row, and_, delete, inspect, select, update from strawchemy.sqlalchemy._transpiler import QueryTranspiler from strawchemy.strawberry.mutation.types import RelationType from typing import TYPE_CHECKING, Any, NamedTuple, Optional, TypeVar from collections import defaultdict, namedtuple from strawchemy.sqlalchemy.typing import AnyAsyncSession, DeclarativeT class SQLAlchemyGraphQLAsyncRepository(SQLAlchemyGraphQLRepository[DeclarativeT, AnyAsyncSession]): async def _insert_many(self, data: InsertData) -> Sequence[Row[Any]]: if self._dialect.insert_executemany_returning_sort_by_parameter_order and (not (self._dialect.name == 'postgresql' and data.is_upsert)): results = await self.session.execute(self._insert_statement(data).returning(*data.model_type.__mapper__.primary_key, sort_by_parameter_order=True), data.values) return results.all() rows: Sequence[Row[Any]] = [] conn = await self.session.connection() for value in data.values: cursor = await conn.execute(self._insert_statement(data).values(**value)) assert cursor.inserted_primary_key is not None rows.append(cursor.inserted_primary_key) return rows async def _insert_nested(self, data: InsertData, level: LevelInput) -> None: """Inserts multiple records for a given model type and updates related instances. This internal method performs a bulk insert operation for the specified SQLAlchemy model type using the provided values. After insertion, it retrieves the primary keys of the newly created records and updates the corresponding instance objects within the provided `level` input with these keys. It also handles updating foreign keys for to-one relationships where applicable. Args: data: An InsertData object containing the model type, values to insert, and optional upsert configuration for handling conflicts. level: The input level containing information about the instances being created and their relationships, used to update instances with generated primary and foreign keys. 
""" instance_ids: Sequence[Row[Any]] = await self._insert_many(data) pk_names = [pk.name for pk in data.model_type.__mapper__.primary_key] pk_index, fk_index = (0, 0) for relation_input in level.inputs: if not isinstance(relation_input.instance, data.model_type): continue for column in data.model_type.__mapper__.primary_key: setattr(relation_input.instance, column.key, instance_ids[pk_index][pk_names.index(column.key)]) pk_index += 1 if relation_input.relation.relation_type is RelationType.TO_MANY: continue prop = relation_input.relation.attribute assert isinstance(prop, RelationshipProperty) assert prop.local_remote_pairs for local, remote in prop.local_remote_pairs: assert local.key assert remote.key setattr(relation_input.relation.parent, local.key, instance_ids[fk_index][pk_names.index(remote.key)]) fk_index += 1 async def _delete_where(self, alias: AliasedClass[Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]: alias_insp = inspect(alias) model_pks = [getattr(alias, pk.key) for pk in alias_insp.mapper.primary_key] if self._dialect.delete_returning: statement = delete(alias_insp).returning(*model_pks) if where: statement = statement.where(*where) result = await self.session.execute(statement, execution_options=execution_options or {}) return result.all() affected_statement, delete_statement = (select(*model_pks), delete(alias_insp)) if where: affected_statement, delete_statement = (affected_statement.where(*where), delete_statement.where(*where)) affected_rows = (await self.session.execute(affected_statement)).all() conn = await self.session.connection() await conn.execute(delete_statement, execution_options=execution_options or {}) return affected_rows async def _update_where(self, alias: AliasedClass[Any], values: dict[str, Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]: alias_insp = inspect(alias) model_pks = [getattr(alias, pk.key) for pk in alias_insp.mapper.primary_key] if self._dialect.update_returning: statement = update(alias_insp).values(**values).returning(*model_pks) if where: statement = statement.where(*where) result = await self.session.execute(statement, execution_options=execution_options or {}) return result.all() affected_statement, update_statement = (select(*model_pks), update(alias_insp).values(**values)) if where: affected_statement, update_statement = (affected_statement.where(*where), update_statement.where(*where)) affected_rows = (await self.session.execute(affected_statement)).all() conn = await self.session.connection() await conn.execute(update_statement, execution_options=execution_options or {}) return affected_rows async def _create_nested_to_one_relations(self, data: Input[DeclarativeT]) -> None: """Creates nested related objects for to-one relationships. Iterates through the input data levels filtered for 'create' operations on to-one relationships. It groups the instances to be created by their model type and then calls `_insert` for each type to perform bulk insertions. Args: data: The processed input data containing nested structures and relationship information. 
""" for level in data.filter_by_level(RelationType.TO_ONE, ['create', 'upsert']): insert_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list) upsert_data_map: dict[type[DeclarativeBase], UpsertData] = {} for create_input in level.inputs: insert_params[create_input.relation.related].append(self._to_dict(create_input.instance)) if create_input.relation.upsert is not None: upsert_data_map[create_input.relation.related] = create_input.relation.upsert for model_type, values in insert_params.items(): await self._insert_nested(InsertData(model_type, values, upsert_data_map.get(model_type)), level) async def _update_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None: """Updates foreign keys to connect existing related objects for to-many relationships. Iterates through the input data levels filtered for 'set' operations on to-many relationships. For each relationship, it prepares bulk update statements to set the foreign keys on the related models, linking them to the parent objects (either newly created or existing). Args: data: The processed input data containing relationship information. created_ids: A sequence of RowLike objects containing the primary keys of the main objects created or updated in the parent operation. Used to link the 'set' relations to the correct parent. """ for level in data.filter_by_level(RelationType.TO_MANY, ['add', 'remove']): update_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list) for level_input in level.inputs: relation = level_input.relation prop = relation.attribute assert prop.local_remote_pairs parent = created_ids[relation.input_index] if relation.level == 1 else relation.parent update_params[relation.related].extend([{column.key: getattr(relation_model, column.key) for column in relation_model.__mapper__.primary_key} | {remote.key: getattr(parent, local.key) for local, remote in prop.local_remote_pairs if local.key and remote.key} for relation_model in relation.add]) update_params[relation.related].extend([{column.key: getattr(relation_model, column.key) for column in relation_model.__mapper__.primary_key} | {remote.key: None for local, remote in prop.local_remote_pairs if local.key and remote.key} for relation_model in relation.remove]) for model_type, values in update_params.items(): await self.session.execute(update(model_type), values) async def _set_to_many_relations(self, mode: InsertOrUpdate, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None: for level in data.filter_by_level(RelationType.TO_MANY, ['set']): remove_old_ids: defaultdict[type[DeclarativeBase], defaultdict[str, list[Any]]] = defaultdict(lambda: defaultdict(list)) set_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list) for level_input in level.inputs: relation = level_input.relation prop = relation.attribute assert prop.local_remote_pairs parent = created_ids[relation.input_index] if relation.level == 1 else relation.parent if relation.level == 1 and mode in {'update_by_pks', 'update_where'}: for local, remote in prop.local_remote_pairs: remove_old_ids[relation.related][remote.key].append(getattr(parent, local.key)) for relation_model in relation.set or []: set_params[relation.related].append({column.key: getattr(relation_model, column.key) for column in relation_model.__mapper__.primary_key} | {remote.key: getattr(parent, local.key) for local, remote in prop.local_remote_pairs if local.key and remote.key}) for model_type, set_values 
in set_params.items(): if (current_ids := remove_old_ids[model_type]): remove_previous_stmt = update(model_type).where(and_(*[model_type.__mapper__.attrs[key].class_attribute.in_(ids) for key, ids in current_ids.items()])) await self.session.execute(remove_previous_stmt, dict.fromkeys(current_ids)) await self.session.execute(update(model_type), set_values) async def _create_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None: """Creates and connects new related objects for to-many relationships. Iterates through the input data levels filtered for 'create' operations on to-many relationships. It prepares the data for the new related objects, including setting the foreign keys based on the parent object's primary key, and then calls `_insert` to perform bulk insertions. Args: data: The processed input data containing nested structures and relationship information. created_ids: A sequence of RowLike objects containing the primary keys of the main objects created in the parent operation. Used to set foreign keys on the newly created related objects. """ for level in data.filter_by_level(RelationType.TO_MANY, ['create', 'upsert']): insert_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list) upsert_data_map: dict[type[DeclarativeBase], UpsertData] = {} for create_input in level.inputs: relation = create_input.relation prop = relation.attribute assert prop.local_remote_pairs parent = created_ids[relation.input_index] if relation.level == 1 else relation.parent fks = {remote.key: getattr(parent, local.key) for local, remote in prop.local_remote_pairs if local.key and remote.key} insert_params[relation.related].append(self._to_dict(create_input.instance) | fks) if create_input.relation.upsert is not None: upsert_data_map[create_input.relation.related] = create_input.relation.upsert for model_type, values in insert_params.items(): await self._insert_nested(InsertData(model_type, values, upsert_data_map.get(model_type)), level) async def _execute_insert_or_update(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]: values = [self._to_dict(instance) for instance in data.input.instances] if data.mode == 'insert': return await self._insert_many(InsertData(self.model, values)) if data.mode == 'upsert': return await self._insert_many(InsertData(self.model, values, UpsertData(update_fields=data.upsert_update_fields or [], conflict_constraint=data.upsert_conflict_fields))) pks = [column.key for column in self.model.__mapper__.primary_key] pk_tuple = namedtuple('AsRow', pks) if data.mode == 'update_by_pks': await self.session.execute(update(self.model), values) return [pk_tuple(*[instance[name] for name in pks]) for instance in values] transpiler = QueryTranspiler(self.model, self._dialect, statement=self.statement) where_expressions = transpiler.filter_expressions(data.dto_filter) if data.dto_filter else None return await self._update_where(transpiler.scope.root_alias, values[0], where_expressions) async def _mutate(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]: self._connect_to_one_relations(data.input) data.input.add_non_input_relations() async with self.session.begin_nested() as transaction: await self._create_nested_to_one_relations(data.input) instance_ids = await self._execute_insert_or_update(data) await self._create_to_many_relations(data.input, instance_ids) await self._update_to_many_relations(data.input, instance_ids) await self._set_to_many_relations(data.mode, data.input, instance_ids) await 
transaction.commit() return instance_ids async def _list_by_ids(self, id_rows: Sequence[_RowLike], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: """Retrieves multiple records by their primary keys with optional selection. Fetches records from the repository's main model that match the provided primary key combinations. Allows specifying a GraphQL selection set to shape the returned records. Args: id_rows: A sequence of RowLike objects, each containing the primary key values for one record to retrieve. selection: An optional QueryNodeType representing the GraphQL selection set to apply to the query. Returns: A QueryResult containing the list of fetched records matching the provided IDs, structured according to the selection. """ executor = self._get_query_executor(AsyncQueryExecutor, selection=selection) id_fields = executor.scope.id_field_definitions(self.model) executor.base_statement = executor.base_statement.where(*[field.model_field.in_([getattr(row, field.model_field_name) for row in id_rows]) for field in id_fields]) return await executor.list(self.session) async def list(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]: """Retrieves a list of records based on filtering, ordering, and pagination. Fetches records from the repository's main model, applying optional filtering, ordering, pagination (limit/offset), and distinct constraints. Supports GraphQL selection sets for optimized data retrieval and query hooks for customization. Args: selection: An optional QueryNodeType representing the GraphQL selection set. dto_filter: An optional filter object derived from GraphQL input. order_by: An optional list of ordering criteria. limit: An optional integer limiting the number of results. offset: An optional integer specifying the starting point for results. distinct_on: An optional list of fields for DISTINCT ON clause (if supported). allow_null: If True, allows certain operations even if parts of the filter path are null (implementation specific to executor). query_hooks: Optional hooks to modify the query at different stages. execution_options: Optional dictionary of execution options passed to SQLAlchemy. **kwargs: Additional keyword arguments (currently unused but allows extension). Returns: A QueryResult containing the list of fetched records and potentially pagination info or total count, structured according to the selection. 
""" executor = self._get_query_executor(executor_type=AsyncQueryExecutor, selection=selection, dto_filter=dto_filter, order_by=order_by, limit=limit, offset=offset, distinct_on=distinct_on, allow_null=allow_null, query_hooks=query_hooks, execution_options=execution_options) return await executor.list(self.session) async def get_one(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]: """Retrieves a single record based on filtering and ordering criteria. Fetches a single record matching the provided filters. If multiple records match, ordering, limit, and offset can be used to pinpoint one. Returns None if no record matches. Supports GraphQL selection sets and query hooks. Args: selection: An optional QueryNodeType representing the GraphQL selection set. dto_filter: An optional filter object derived from GraphQL input. order_by: An optional list of ordering criteria. limit: An optional integer limiting the number of potential matches considered (usually 1 for get_one). offset: An optional integer specifying the starting point. distinct_on: An optional list of fields for DISTINCT ON clause. allow_null: If True, allows certain operations even if parts of the filter path are null. query_hooks: Optional hooks to modify the query. execution_options: Optional dictionary of execution options. **kwargs: Additional keyword arguments passed to the query executor setup. Returns: A QueryResult containing the single fetched record or None, structured according to the selection. """ executor = self._get_query_executor(executor_type=AsyncQueryExecutor, selection=selection, dto_filter=dto_filter, order_by=order_by, limit=limit, offset=offset, distinct_on=distinct_on, allow_null=allow_null, query_hooks=query_hooks, execution_options=execution_options, **kwargs) return await executor.get_one_or_none(self.session) async def get_by_id(self, selection: Optional[QueryNodeType]=None, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]: """Retrieves a single record by its primary key(s). Fetches a single record matching the provided primary key values passed as keyword arguments. Returns None if no record matches. Supports GraphQL selection sets and query hooks. Args: selection: An optional QueryNodeType representing the GraphQL selection set. query_hooks: Optional hooks to modify the query. execution_options: Optional dictionary of execution options. **kwargs: Keyword arguments where keys are the primary key field names and values are the corresponding primary key values. Returns: A QueryResult containing the single fetched record or None, structured according to the selection. 
""" executor = self._get_query_executor(AsyncQueryExecutor, selection=selection, query_hooks=query_hooks, execution_options=execution_options) executor.base_statement = executor.base_statement.where(*[field_def.model_field == kwargs.pop(field_def.name) for field_def in executor.scope.id_field_definitions(self.model)]) return await executor.get_one_or_none(self.session) async def create(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: """Creates one or more records with nested relationships and returns them. Takes processed input data, performs the creation using `_create_many`, and then fetches the newly created records using `_list_by_ids` based on the returned primary keys and the provided selection set. Args: data: The processed input data for creation. selection: An optional QueryNodeType representing the GraphQL selection set for the returned data. Returns: A QueryResult containing the newly created records, structured according to the selection. """ created_ids = await self._mutate(MutationData('insert', data)) return await self._list_by_ids(created_ids, selection) async def upsert(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None, dto_filter: Optional[BooleanFilterDTO]=None) -> QueryResult[DeclarativeT]: created_ids = await self._mutate(MutationData('upsert', data, dto_filter=dto_filter, upsert_update_fields=update_fields, upsert_conflict_fields=conflict_fields)) return await self._list_by_ids(created_ids, selection) async def update_by_ids(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: """Updates one or more records with nested relationships and returns them. Takes processed input data, performs the update using `_update_many`, and then fetches the updated records using `_list_by_ids` based on the returned primary keys and the provided selection set. Args: data: The processed input data for update. Must include primary keys. selection: An optional QueryNodeType representing the GraphQL selection set for the returned data. Returns: A QueryResult containing the updated records, structured according to the selection. """ updated_ids = await self._mutate(MutationData('update_by_pks', data)) return await self._list_by_ids(updated_ids, selection) async def update_by_filter(self, data: Input[DeclarativeT], dto_filter: BooleanFilterDTO, selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: updated_ids = await self._mutate(MutationData('update_where', data, dto_filter)) return await self._list_by_ids(updated_ids, selection) async def delete(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, execution_options: Optional[dict[str, Any]]=None) -> QueryResult[DeclarativeT]: async with self.session.begin_nested() as transaction: transpiler = QueryTranspiler(self.model, self._dialect, statement=self.statement) where_expressions = transpiler.filter_expressions(dto_filter) if dto_filter else None to_be_deleted = await self.list(selection, dto_filter=dto_filter) affected_rows = await self._delete_where(transpiler.scope.root_alias, where_expressions, execution_options) await transaction.commit() return to_be_deleted.filter_in(**self._rows_to_filter_dict(affected_rows))
class SQLAlchemyGraphQLAsyncRepository(SQLAlchemyGraphQLRepository[DeclarativeT, AnyAsyncSession]): async def _insert_many(self, data: InsertData) -> Sequence[Row[Any]]: pass async def _insert_nested(self, data: InsertData, level: LevelInput) -> None: '''Inserts multiple records for a given model type and updates related instances. This internal method performs a bulk insert operation for the specified SQLAlchemy model type using the provided values. After insertion, it retrieves the primary keys of the newly created records and updates the corresponding instance objects within the provided `level` input with these keys. It also handles updating foreign keys for to-one relationships where applicable. Args: data: An InsertData object containing the model type, values to insert, and optional upsert configuration for handling conflicts. level: The input level containing information about the instances being created and their relationships, used to update instances with generated primary and foreign keys. ''' pass async def _delete_where(self, alias: AliasedClass[Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]: pass async def _update_where(self, alias: AliasedClass[Any], values: dict[str, Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]: pass async def _create_nested_to_one_relations(self, data: Input[DeclarativeT]) -> None: '''Creates nested related objects for to-one relationships. Iterates through the input data levels filtered for 'create' operations on to-one relationships. It groups the instances to be created by their model type and then calls `_insert` for each type to perform bulk insertions. Args: data: The processed input data containing nested structures and relationship information. ''' pass async def _update_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None: '''Updates foreign keys to connect or disconnect existing related objects for to-many relationships. Iterates through the input data levels filtered for 'add' and 'remove' operations on to-many relationships. For each relationship, it prepares bulk update statements that set the foreign keys on the related models for 'add' (linking them to the parent objects, either newly created or existing) and clear them for 'remove'. Args: data: The processed input data containing relationship information. created_ids: A sequence of RowLike objects containing the primary keys of the main objects created or updated in the parent operation. Used to link the 'add'/'remove' relations to the correct parent. ''' pass async def _set_to_many_relations(self, mode: InsertOrUpdate, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None: pass async def _create_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None: '''Creates and connects new related objects for to-many relationships. Iterates through the input data levels filtered for 'create' operations on to-many relationships. It prepares the data for the new related objects, including setting the foreign keys based on the parent object's primary key, and then calls `_insert` to perform bulk insertions. Args: data: The processed input data containing nested structures and relationship information. created_ids: A sequence of RowLike objects containing the primary keys of the main objects created in the parent operation. Used to set foreign keys on the newly created related objects. 
''' pass async def _execute_insert_or_update(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]: pass async def _mutate(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]: pass async def _list_by_ids(self, id_rows: Sequence[_RowLike], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: '''Retrieves multiple records by their primary keys with optional selection. Fetches records from the repository's main model that match the provided primary key combinations. Allows specifying a GraphQL selection set to shape the returned records. Args: id_rows: A sequence of RowLike objects, each containing the primary key values for one record to retrieve. selection: An optional QueryNodeType representing the GraphQL selection set to apply to the query. Returns: A QueryResult containing the list of fetched records matching the provided IDs, structured according to the selection. ''' pass async def list(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]: '''Retrieves a list of records based on filtering, ordering, and pagination. Fetches records from the repository's main model, applying optional filtering, ordering, pagination (limit/offset), and distinct constraints. Supports GraphQL selection sets for optimized data retrieval and query hooks for customization. Args: selection: An optional QueryNodeType representing the GraphQL selection set. dto_filter: An optional filter object derived from GraphQL input. order_by: An optional list of ordering criteria. limit: An optional integer limiting the number of results. offset: An optional integer specifying the starting point for results. distinct_on: An optional list of fields for DISTINCT ON clause (if supported). allow_null: If True, allows certain operations even if parts of the filter path are null (implementation specific to executor). query_hooks: Optional hooks to modify the query at different stages. execution_options: Optional dictionary of execution options passed to SQLAlchemy. **kwargs: Additional keyword arguments (currently unused but allows extension). Returns: A QueryResult containing the list of fetched records and potentially pagination info or total count, structured according to the selection. ''' pass async def get_one(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]: '''Retrieves a single record based on filtering and ordering criteria. Fetches a single record matching the provided filters. If multiple records match, ordering, limit, and offset can be used to pinpoint one. Returns None if no record matches. Supports GraphQL selection sets and query hooks. Args: selection: An optional QueryNodeType representing the GraphQL selection set. dto_filter: An optional filter object derived from GraphQL input. order_by: An optional list of ordering criteria. 
limit: An optional integer limiting the number of potential matches considered (usually 1 for get_one). offset: An optional integer specifying the starting point. distinct_on: An optional list of fields for DISTINCT ON clause. allow_null: If True, allows certain operations even if parts of the filter path are null. query_hooks: Optional hooks to modify the query. execution_options: Optional dictionary of execution options. **kwargs: Additional keyword arguments passed to the query executor setup. Returns: A QueryResult containing the single fetched record or None, structured according to the selection. ''' pass async def get_by_id(self, selection: Optional[QueryNodeType]=None, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]: '''Retrieves a single record by its primary key(s). Fetches a single record matching the provided primary key values passed as keyword arguments. Returns None if no record matches. Supports GraphQL selection sets and query hooks. Args: selection: An optional QueryNodeType representing the GraphQL selection set. query_hooks: Optional hooks to modify the query. execution_options: Optional dictionary of execution options. **kwargs: Keyword arguments where keys are the primary key field names and values are the corresponding primary key values. Returns: A QueryResult containing the single fetched record or None, structured according to the selection. ''' pass async def create(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: '''Creates one or more records with nested relationships and returns them. Takes processed input data, performs the creation using `_mutate`, and then fetches the newly created records using `_list_by_ids` based on the returned primary keys and the provided selection set. Args: data: The processed input data for creation. selection: An optional QueryNodeType representing the GraphQL selection set for the returned data. Returns: A QueryResult containing the newly created records, structured according to the selection. ''' pass async def upsert(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None, dto_filter: Optional[BooleanFilterDTO]=None) -> QueryResult[DeclarativeT]: pass async def update_by_ids(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: '''Updates one or more records with nested relationships and returns them. Takes processed input data, performs the update using `_mutate`, and then fetches the updated records using `_list_by_ids` based on the returned primary keys and the provided selection set. Args: data: The processed input data for update. Must include primary keys. selection: An optional QueryNodeType representing the GraphQL selection set for the returned data. Returns: A QueryResult containing the updated records, structured according to the selection. ''' pass async def update_by_filter(self, data: Input[DeclarativeT], dto_filter: BooleanFilterDTO, selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]: pass async def delete(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, execution_options: Optional[dict[str, Any]]=None) -> QueryResult[DeclarativeT]: pass
20
10
28
2
18
8
3
0.42
1
21
13
0
19
2
19
27
547
56
346
167
260
146
180
97
160
9
2
4
58
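A synchronous, self-contained sketch of the RETURNING fallback that `_update_where` in the record above applies: dialects exposing `update_returning` get a single UPDATE ... RETURNING, while others first SELECT the affected primary keys and then run the UPDATE. The `Fruit` table and values are illustrative only, and an in-memory SQLite database stands in for the application database:

from sqlalchemy import create_engine, select, update
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Fruit(Base):
    __tablename__ = "fruit"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Fruit(name="apple"), Fruit(name="pear")])
    session.commit()

    where = [Fruit.name == "apple"]
    if engine.dialect.update_returning:
        stmt = update(Fruit).where(*where).values(name="banana").returning(Fruit.id)
        affected = session.execute(stmt).all()
    else:
        # Fallback: capture the affected primary keys before mutating them.
        affected = session.execute(select(Fruit.id).where(*where)).all()
        session.execute(update(Fruit).where(*where).values(name="banana"))
    session.commit()
    print(affected)  # [(1,)]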
327,978
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/repository/_base.py
strawchemy.sqlalchemy.repository._base.InsertData
from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union from strawchemy.exceptions import StrawchemyError from sqlalchemy import Column, Function, Insert, Row, func, insert @dataclass(frozen=True) class InsertData: model_type: type[DeclarativeBase] values: list[dict[str, Any]] upsert_data: Optional[UpsertData] = None @property def is_upsert(self) -> bool: return self.upsert_data is not None @property def upsert_data_or_raise(self) -> UpsertData: if self.upsert_data is None: msg = 'UpsertData is required' raise StrawchemyError(msg) return self.upsert_data def conflict_target_columns(self) -> list[Column[Any]]: if self.upsert_data_or_raise.conflict_constraint: return list(self.upsert_data_or_raise.conflict_constraint.value.columns) return list(self.model_type.__mapper__.primary_key) def upsert_set(self, dialect: SupportedDialect, columns: ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]) -> Mapping[Column[Any], Union[KeyedColumnElement[Any], Function[Any]]]: update_fields_set = {dto_field.field_definition.model_field_name for dto_field in self.upsert_data_or_raise.update_fields} or {name for value_dict in self.values for name in value_dict} mapper = self.model_type.__mapper__ update_fields = {mapper.columns[name]: value for name, value in columns.items() if name in update_fields_set} if dialect == 'mysql' and (auto_increment_pk_column := next((column for column in self.model_type.__mapper__.primary_key if column.autoincrement), None)) is not None: update_fields = {auto_increment_pk_column: func.last_insert_id(auto_increment_pk_column)} | update_fields return update_fields
@dataclass(frozen=True) class InsertData: @property def is_upsert(self) -> bool: pass @property def upsert_data_or_raise(self) -> UpsertData: pass def conflict_target_columns(self) -> list[Column[Any]]: pass def upsert_set(self, dialect: SupportedDialect, columns: ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]) -> Mapping[Column[Any], Union[KeyedColumnElement[Any], Function[Any]]]: pass
8
0
8
0
8
0
2
0
0
7
2
0
4
0
4
4
41
4
37
16
28
0
22
10
17
2
0
1
7
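A short sketch of the upsert guard recorded for InsertData above, assuming the import paths shown in this entry resolve; `Fruit` is an illustrative mapped class. A plain insert reports `is_upsert` as False and `upsert_data_or_raise` raises; when upsert data is present but names no conflict constraint, `conflict_target_columns` falls back to the primary key columns:

from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

from strawchemy.exceptions import StrawchemyError  # paths taken from this record
from strawchemy.sqlalchemy.repository._base import InsertData


class Base(DeclarativeBase):
    pass


class Fruit(Base):
    __tablename__ = "fruit"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]


# A plain insert carries no upsert configuration.
data = InsertData(model_type=Fruit, values=[{"name": "apple"}])
assert not data.is_upsert
try:
    data.upsert_data_or_raise
except StrawchemyError as exc:
    print(exc)  # 'UpsertData is required'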
327,979
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/repository/_base.py
strawchemy.sqlalchemy.repository._base.MutationData
from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union from strawchemy.sqlalchemy.typing import DeclarativeT, QueryExecutorT, SessionT from dataclasses import dataclass @dataclass(frozen=True) class MutationData(Generic[DeclarativeT]): mode: InsertOrUpdate input: Input[DeclarativeT] dto_filter: Optional[BooleanFilterDTO] = None upsert_update_fields: Optional[list[EnumDTO]] = None upsert_conflict_fields: Optional[EnumDTO] = None
@dataclass(frozen=True) class MutationData(Generic[DeclarativeT]): pass
2
0
0
0
0
0
0
0
1
0
0
0
0
0
0
2
6
0
6
4
5
0
6
4
5
0
1
0
0
327,980
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/repository/_base.py
strawchemy.sqlalchemy.repository._base.SQLAlchemyGraphQLRepository
from strawchemy.strawberry.mutation.types import RelationType
from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union
from strawchemy.exceptions import StrawchemyError
from strawchemy.dto.inspectors.sqlalchemy import SQLAlchemyInspector
from sqlalchemy import Column, Function, Insert, Row, func, insert
from sqlalchemy.dialects import mysql, postgresql, sqlite
from strawchemy.sqlalchemy.typing import DeclarativeT, QueryExecutorT, SessionT
from strawchemy.sqlalchemy._transpiler import QueryTranspiler
from collections import defaultdict
from sqlalchemy.orm import RelationshipProperty


class SQLAlchemyGraphQLRepository(Generic[DeclarativeT, SessionT]):

    def __init__(self, model: type[DeclarativeT], session: SessionT, statement: Optional[Select[tuple[DeclarativeT]]]=None, execution_options: Optional[dict[str, Any]]=None, deterministic_ordering: bool=False) -> None:
        self.model = model
        self.session = session
        self.statement = statement
        self.execution_options = execution_options
        self.deterministic_ordering = deterministic_ordering
        self._dialect = session.get_bind().dialect

    def _get_query_executor(self, executor_type: type[QueryExecutorT], selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None) -> QueryExecutorT:
        transpiler = QueryTranspiler(self.model, self._dialect, query_hooks=query_hooks, statement=self.statement, deterministic_ordering=self.deterministic_ordering)
        return transpiler.select_executor(selection_tree=selection, dto_filter=dto_filter, order_by=order_by, limit=limit, offset=offset, distinct_on=distinct_on, allow_null=allow_null, executor_cls=executor_type, execution_options=execution_options if execution_options is not None else self.execution_options)

    def _insert_statement(self, data: InsertData) -> Insert:
        if not data.is_upsert:
            return insert(data.model_type)
        if self._dialect.name == 'postgresql':
            statement = postgresql.insert(data.model_type)
            statement = statement.on_conflict_do_update(set_=data.upsert_set(self._dialect.name, statement.excluded), index_elements=data.conflict_target_columns())
        elif self._dialect.name == 'sqlite':
            statement = sqlite.insert(data.model_type)
            statement = statement.on_conflict_do_update(set_=data.upsert_set(self._dialect.name, statement.excluded), index_elements=data.conflict_target_columns())
        elif self._dialect.name == 'mysql':
            statement = mysql.insert(data.model_type)
            statement = statement.on_duplicate_key_update(data.upsert_set(self._dialect.name, statement.inserted))
        else:
            msg = f'This dialect does not support upsert statements: {self._dialect.name}'
            raise StrawchemyError(msg)
        return statement

    def _to_dict(self, model: DeclarativeBase) -> dict[str, Any]:
        return {field: getattr(model, field) for field in model.__mapper__.columns.keys() if field in SQLAlchemyInspector.loaded_attributes(model)}

    def _connect_to_one_relations(self, data: Input[DeclarativeT]) -> None:
        for relation in data.relations:
            prop = relation.attribute
            if not relation.set and relation.set is not None or not isinstance(prop, RelationshipProperty) or relation.relation_type is not RelationType.TO_ONE:
                continue
            assert prop.local_remote_pairs
            for local, remote in prop.local_remote_pairs:
                assert local.key
                assert remote.key
                value = getattr(relation.set[0], remote.key) if relation.set else None
                setattr(relation.parent, local.key, value)

    def _rows_to_filter_dict(self, rows: Sequence[Row[Any]]) -> dict[str, list[Any]]:
        filter_dict = defaultdict(list)
        for row in rows:
            for key, value in row._asdict().items():
                filter_dict[key].append(value)
        return filter_dict
class SQLAlchemyGraphQLRepository(Generic[DeclarativeT, SessionT]):
    def __init__(self, model: type[DeclarativeT], session: SessionT, statement: Optional[Select[tuple[DeclarativeT]]]=None, execution_options: Optional[dict[str, Any]]=None, deterministic_ordering: bool=False) -> None:
        pass

    def _get_query_executor(self, executor_type: type[QueryExecutorT], selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None) -> QueryExecutorT:
        pass

    def _insert_statement(self, data: InsertData) -> Insert:
        pass

    def _to_dict(self, model: DeclarativeBase) -> dict[str, Any]:
        pass

    def _connect_to_one_relations(self, data: Input[DeclarativeT]) -> None:
        pass

    def _rows_to_filter_dict(self, rows: Sequence[Row[Any]]) -> dict[str, list[Any]]:
        pass
7
0
16
0
16
0
3
0.02
1
19
10
2
6
6
6
8
102
6
95
42
69
2
43
23
36
5
1
2
17
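The `_insert_statement` method in the record above is the piece most likely to trip readers up, since each SQL dialect spells upsert differently. Below is a minimal, standalone sketch of the same dispatch, assuming a hypothetical `User` model; only the `on_conflict_do_update` / `on_duplicate_key_update` dialect APIs mirror the record's code, everything else is illustrative:

from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class User(Base):  # hypothetical model, standing in for data.model_type
    __tablename__ = "user"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str] = mapped_column()


def upsert_statement(dialect_name: str):
    # Same branching as _insert_statement: each dialect has its own
    # insert construct and conflict clause.
    if dialect_name == "postgresql":
        stmt = postgresql.insert(User)
        # INSERT ... ON CONFLICT (id) DO UPDATE SET name = excluded.name
        return stmt.on_conflict_do_update(index_elements=["id"], set_={"name": stmt.excluded["name"]})
    if dialect_name == "sqlite":
        stmt = sqlite.insert(User)
        return stmt.on_conflict_do_update(index_elements=["id"], set_={"name": stmt.excluded["name"]})
    if dialect_name == "mysql":
        stmt = mysql.insert(User)
        # INSERT ... ON DUPLICATE KEY UPDATE name = VALUES(name)
        return stmt.on_duplicate_key_update(name=stmt.inserted["name"])
    raise NotImplementedError(f"no upsert support for dialect: {dialect_name}")


# Compile against an explicit dialect; dialect-specific clauses do not
# render with the default string compiler.
print(upsert_statement("postgresql").compile(dialect=postgresql.dialect()))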
327,981
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/sqlalchemy/repository/_sync.py
strawchemy.sqlalchemy.repository._sync.SQLAlchemyGraphQLSyncRepository
from sqlalchemy import ColumnElement, Row, and_, delete, inspect, select, update
from strawchemy.sqlalchemy._transpiler import QueryTranspiler
from strawchemy.sqlalchemy.typing import AnySyncSession, DeclarativeT
from typing import TYPE_CHECKING, Any, NamedTuple, Optional, TypeVar
from strawchemy.strawberry.mutation.types import RelationType
from collections import defaultdict, namedtuple
from sqlalchemy.orm import RelationshipProperty
from ._base import InsertData, MutationData, SQLAlchemyGraphQLRepository
from strawchemy.sqlalchemy._executor import QueryResult, SyncQueryExecutor
from strawchemy.strawberry.mutation.input import UpsertData


class SQLAlchemyGraphQLSyncRepository(SQLAlchemyGraphQLRepository[DeclarativeT, AnySyncSession]):

    def _insert_many(self, data: InsertData) -> Sequence[Row[Any]]:
        if self._dialect.insert_executemany_returning_sort_by_parameter_order and (not (self._dialect.name == 'postgresql' and data.is_upsert)):
            results = self.session.execute(self._insert_statement(data).returning(*data.model_type.__mapper__.primary_key, sort_by_parameter_order=True), data.values)
            return results.all()
        rows: Sequence[Row[Any]] = []
        conn = self.session.connection()
        for value in data.values:
            cursor = conn.execute(self._insert_statement(data).values(**value))
            assert cursor.inserted_primary_key is not None
            rows.append(cursor.inserted_primary_key)
        return rows

    def _insert_nested(self, data: InsertData, level: LevelInput) -> None:
        """Inserts multiple records for a given model type and updates related instances.

        This internal method performs a bulk insert operation for the specified SQLAlchemy model type using the provided values. After insertion, it retrieves the primary keys of the newly created records and updates the corresponding instance objects within the provided `level` input with these keys. It also handles updating foreign keys for to-one relationships where applicable.

        Args:
            data: An InsertData object containing the model type, values to insert, and optional upsert configuration for handling conflicts.
            level: The input level containing information about the instances being created and their relationships, used to update instances with generated primary and foreign keys.
        """
        instance_ids: Sequence[Row[Any]] = self._insert_many(data)
        pk_names = [pk.name for pk in data.model_type.__mapper__.primary_key]
        pk_index, fk_index = (0, 0)
        for relation_input in level.inputs:
            if not isinstance(relation_input.instance, data.model_type):
                continue
            for column in data.model_type.__mapper__.primary_key:
                setattr(relation_input.instance, column.key, instance_ids[pk_index][pk_names.index(column.key)])
            pk_index += 1
            if relation_input.relation.relation_type is RelationType.TO_MANY:
                continue
            prop = relation_input.relation.attribute
            assert isinstance(prop, RelationshipProperty)
            assert prop.local_remote_pairs
            for local, remote in prop.local_remote_pairs:
                assert local.key
                assert remote.key
                setattr(relation_input.relation.parent, local.key, instance_ids[fk_index][pk_names.index(remote.key)])
            fk_index += 1

    def _delete_where(self, alias: AliasedClass[Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]:
        alias_insp = inspect(alias)
        model_pks = [getattr(alias, pk.key) for pk in alias_insp.mapper.primary_key]
        if self._dialect.delete_returning:
            statement = delete(alias_insp).returning(*model_pks)
            if where:
                statement = statement.where(*where)
            result = self.session.execute(statement, execution_options=execution_options or {})
            return result.all()
        affected_statement, delete_statement = (select(*model_pks), delete(alias_insp))
        if where:
            affected_statement, delete_statement = (affected_statement.where(*where), delete_statement.where(*where))
        affected_rows = self.session.execute(affected_statement).all()
        conn = self.session.connection()
        conn.execute(delete_statement, execution_options=execution_options or {})
        return affected_rows

    def _update_where(self, alias: AliasedClass[Any], values: dict[str, Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]:
        alias_insp = inspect(alias)
        model_pks = [getattr(alias, pk.key) for pk in alias_insp.mapper.primary_key]
        if self._dialect.update_returning:
            statement = update(alias_insp).values(**values).returning(*model_pks)
            if where:
                statement = statement.where(*where)
            result = self.session.execute(statement, execution_options=execution_options or {})
            return result.all()
        affected_statement, update_statement = (select(*model_pks), update(alias_insp).values(**values))
        if where:
            affected_statement, update_statement = (affected_statement.where(*where), update_statement.where(*where))
        affected_rows = self.session.execute(affected_statement).all()
        conn = self.session.connection()
        conn.execute(update_statement, execution_options=execution_options or {})
        return affected_rows

    def _create_nested_to_one_relations(self, data: Input[DeclarativeT]) -> None:
        """Creates nested related objects for to-one relationships.

        Iterates through the input data levels filtered for 'create' and 'upsert' operations on to-one relationships. It groups the instances to be created by their model type and then calls `_insert_nested` for each type to perform bulk insertions.

        Args:
            data: The processed input data containing nested structures and relationship information.
        """
        for level in data.filter_by_level(RelationType.TO_ONE, ['create', 'upsert']):
            insert_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list)
            upsert_data_map: dict[type[DeclarativeBase], UpsertData] = {}
            for create_input in level.inputs:
                insert_params[create_input.relation.related].append(self._to_dict(create_input.instance))
                if create_input.relation.upsert is not None:
                    upsert_data_map[create_input.relation.related] = create_input.relation.upsert
            for model_type, values in insert_params.items():
                self._insert_nested(InsertData(model_type, values, upsert_data_map.get(model_type)), level)

    def _update_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None:
        """Updates foreign keys to connect or disconnect existing related objects for to-many relationships.

        Iterates through the input data levels filtered for 'add' and 'remove' operations on to-many relationships. For each relationship, it prepares bulk update statements to set the foreign keys on the related models, linking them to the parent objects (either newly created or existing).

        Args:
            data: The processed input data containing relationship information.
            created_ids: A sequence of RowLike objects containing the primary keys of the main objects created or updated in the parent operation. Used to link the relations to the correct parent.
        """
        for level in data.filter_by_level(RelationType.TO_MANY, ['add', 'remove']):
            update_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list)
            for level_input in level.inputs:
                relation = level_input.relation
                prop = relation.attribute
                assert prop.local_remote_pairs
                parent = created_ids[relation.input_index] if relation.level == 1 else relation.parent
                update_params[relation.related].extend([{column.key: getattr(relation_model, column.key) for column in relation_model.__mapper__.primary_key} | {remote.key: getattr(parent, local.key) for local, remote in prop.local_remote_pairs if local.key and remote.key} for relation_model in relation.add])
                update_params[relation.related].extend([{column.key: getattr(relation_model, column.key) for column in relation_model.__mapper__.primary_key} | {remote.key: None for local, remote in prop.local_remote_pairs if local.key and remote.key} for relation_model in relation.remove])
            for model_type, values in update_params.items():
                self.session.execute(update(model_type), values)

    def _set_to_many_relations(self, mode: InsertOrUpdate, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None:
        for level in data.filter_by_level(RelationType.TO_MANY, ['set']):
            remove_old_ids: defaultdict[type[DeclarativeBase], defaultdict[str, list[Any]]] = defaultdict(lambda: defaultdict(list))
            set_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list)
            for level_input in level.inputs:
                relation = level_input.relation
                prop = relation.attribute
                assert prop.local_remote_pairs
                parent = created_ids[relation.input_index] if relation.level == 1 else relation.parent
                if relation.level == 1 and mode in {'update_by_pks', 'update_where'}:
                    for local, remote in prop.local_remote_pairs:
                        remove_old_ids[relation.related][remote.key].append(getattr(parent, local.key))
                for relation_model in relation.set or []:
                    set_params[relation.related].append({column.key: getattr(relation_model, column.key) for column in relation_model.__mapper__.primary_key} | {remote.key: getattr(parent, local.key) for local, remote in prop.local_remote_pairs if local.key and remote.key})
            for model_type, set_values in set_params.items():
                if (current_ids := remove_old_ids[model_type]):
                    remove_previous_stmt = update(model_type).where(and_(*[model_type.__mapper__.attrs[key].class_attribute.in_(ids) for key, ids in current_ids.items()]))
                    self.session.execute(remove_previous_stmt, dict.fromkeys(current_ids))
                self.session.execute(update(model_type), set_values)

    def _create_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None:
        """Creates and connects new related objects for to-many relationships.

        Iterates through the input data levels filtered for 'create' and 'upsert' operations on to-many relationships. It prepares the data for the new related objects, including setting the foreign keys based on the parent object's primary key, and then calls `_insert_nested` to perform bulk insertions.

        Args:
            data: The processed input data containing nested structures and relationship information.
            created_ids: A sequence of RowLike objects containing the primary keys of the main objects created in the parent operation. Used to set foreign keys on the newly created related objects.
        """
        for level in data.filter_by_level(RelationType.TO_MANY, ['create', 'upsert']):
            insert_params: defaultdict[type[DeclarativeBase], list[dict[str, Any]]] = defaultdict(list)
            upsert_data_map: dict[type[DeclarativeBase], UpsertData] = {}
            for create_input in level.inputs:
                relation = create_input.relation
                prop = relation.attribute
                assert prop.local_remote_pairs
                parent = created_ids[relation.input_index] if relation.level == 1 else relation.parent
                fks = {remote.key: getattr(parent, local.key) for local, remote in prop.local_remote_pairs if local.key and remote.key}
                insert_params[relation.related].append(self._to_dict(create_input.instance) | fks)
                if create_input.relation.upsert is not None:
                    upsert_data_map[create_input.relation.related] = create_input.relation.upsert
            for model_type, values in insert_params.items():
                self._insert_nested(InsertData(model_type, values, upsert_data_map.get(model_type)), level)

    def _execute_insert_or_update(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]:
        values = [self._to_dict(instance) for instance in data.input.instances]
        if data.mode == 'insert':
            return self._insert_many(InsertData(self.model, values))
        if data.mode == 'upsert':
            return self._insert_many(InsertData(self.model, values, UpsertData(update_fields=data.upsert_update_fields or [], conflict_constraint=data.upsert_conflict_fields)))
        pks = [column.key for column in self.model.__mapper__.primary_key]
        pk_tuple = namedtuple('AsRow', pks)
        if data.mode == 'update_by_pks':
            self.session.execute(update(self.model), values)
            return [pk_tuple(*[instance[name] for name in pks]) for instance in values]
        transpiler = QueryTranspiler(self.model, self._dialect, statement=self.statement)
        where_expressions = transpiler.filter_expressions(data.dto_filter) if data.dto_filter else None
        return self._update_where(transpiler.scope.root_alias, values[0], where_expressions)

    def _mutate(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]:
        self._connect_to_one_relations(data.input)
        data.input.add_non_input_relations()
        with self.session.begin_nested() as transaction:
            self._create_nested_to_one_relations(data.input)
            instance_ids = self._execute_insert_or_update(data)
            self._create_to_many_relations(data.input, instance_ids)
            self._update_to_many_relations(data.input, instance_ids)
            self._set_to_many_relations(data.mode, data.input, instance_ids)
            transaction.commit()
        return instance_ids

    def _list_by_ids(self, id_rows: Sequence[_RowLike], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        """Retrieves multiple records by their primary keys with optional selection.

        Fetches records from the repository's main model that match the provided primary key combinations. Allows specifying a GraphQL selection set.

        Args:
            id_rows: A sequence of RowLike objects, each containing the primary key values for one record to retrieve.
            selection: An optional QueryNodeType representing the GraphQL selection set to apply to the query.

        Returns:
            A QueryResult containing the list of fetched records matching the provided IDs, structured according to the selection.
        """
        executor = self._get_query_executor(SyncQueryExecutor, selection=selection)
        id_fields = executor.scope.id_field_definitions(self.model)
        executor.base_statement = executor.base_statement.where(*[field.model_field.in_([getattr(row, field.model_field_name) for row in id_rows]) for field in id_fields])
        return executor.list(self.session)

    def list(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]:
        """Retrieves a list of records based on filtering, ordering, and pagination.

        Fetches records from the repository's main model, applying optional filtering, ordering, pagination (limit/offset), and distinct constraints. Supports GraphQL selection sets for optimized data retrieval and query hooks for customization.

        Args:
            selection: An optional QueryNodeType representing the GraphQL selection set.
            dto_filter: An optional filter object derived from GraphQL input.
            order_by: An optional list of ordering criteria.
            limit: An optional integer limiting the number of results.
            offset: An optional integer specifying the starting point for results.
            distinct_on: An optional list of fields for DISTINCT ON clause (if supported).
            allow_null: If True, allows certain operations even if parts of the filter path are null (implementation specific to executor).
            query_hooks: Optional hooks to modify the query at different stages.
            execution_options: Optional dictionary of execution options passed to SQLAlchemy.
            **kwargs: Additional keyword arguments (currently unused but allows extension).

        Returns:
            A QueryResult containing the list of fetched records and potentially pagination info or total count, structured according to the selection.
        """
        executor = self._get_query_executor(executor_type=SyncQueryExecutor, selection=selection, dto_filter=dto_filter, order_by=order_by, limit=limit, offset=offset, distinct_on=distinct_on, allow_null=allow_null, query_hooks=query_hooks, execution_options=execution_options)
        return executor.list(self.session)

    def get_one(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]:
        """Retrieves a single record based on filtering and ordering criteria.

        Fetches a single record matching the provided filters. If multiple records match, ordering, limit, and offset can be used to pinpoint one. Returns None if no record matches. Supports GraphQL selection sets and query hooks.

        Args:
            selection: An optional QueryNodeType representing the GraphQL selection set.
            dto_filter: An optional filter object derived from GraphQL input.
            order_by: An optional list of ordering criteria.
            limit: An optional integer limiting the number of potential matches considered (usually 1 for get_one).
            offset: An optional integer specifying the starting point.
            distinct_on: An optional list of fields for DISTINCT ON clause.
            allow_null: If True, allows certain operations even if parts of the filter path are null.
            query_hooks: Optional hooks to modify the query.
            execution_options: Optional dictionary of execution options.
            **kwargs: Additional keyword arguments passed to the query executor setup.

        Returns:
            A QueryResult containing the single fetched record or None, structured according to the selection.
        """
        executor = self._get_query_executor(executor_type=SyncQueryExecutor, selection=selection, dto_filter=dto_filter, order_by=order_by, limit=limit, offset=offset, distinct_on=distinct_on, allow_null=allow_null, query_hooks=query_hooks, execution_options=execution_options, **kwargs)
        return executor.get_one_or_none(self.session)

    def get_by_id(self, selection: Optional[QueryNodeType]=None, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]:
        """Retrieves a single record by its primary key(s).

        Fetches a single record matching the provided primary key values passed as keyword arguments. Returns None if no record matches. Supports GraphQL selection sets and query hooks.

        Args:
            selection: An optional QueryNodeType representing the GraphQL selection set.
            query_hooks: Optional hooks to modify the query.
            execution_options: Optional dictionary of execution options.
            **kwargs: Keyword arguments where keys are the primary key field names and values are the corresponding primary key values.

        Returns:
            A QueryResult containing the single fetched record or None, structured according to the selection.
        """
        executor = self._get_query_executor(SyncQueryExecutor, selection=selection, query_hooks=query_hooks, execution_options=execution_options)
        executor.base_statement = executor.base_statement.where(*[field_def.model_field == kwargs.pop(field_def.name) for field_def in executor.scope.id_field_definitions(self.model)])
        return executor.get_one_or_none(self.session)

    def create(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        """Creates one or more records with nested relationships and returns them.

        Takes processed input data, performs the creation using `_mutate`, and then fetches the newly created records using `_list_by_ids` based on the returned primary keys and the provided selection set.

        Args:
            data: The processed input data for creation.
            selection: An optional QueryNodeType representing the GraphQL selection set for the returned data.

        Returns:
            A QueryResult containing the newly created records, structured according to the selection.
        """
        created_ids = self._mutate(MutationData('insert', data))
        return self._list_by_ids(created_ids, selection)

    def upsert(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None, dto_filter: Optional[BooleanFilterDTO]=None) -> QueryResult[DeclarativeT]:
        created_ids = self._mutate(MutationData('upsert', data, dto_filter=dto_filter, upsert_update_fields=update_fields, upsert_conflict_fields=conflict_fields))
        return self._list_by_ids(created_ids, selection)

    def update_by_ids(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        """Updates one or more records with nested relationships and returns them.

        Takes processed input data, performs the update using `_mutate`, and then fetches the updated records using `_list_by_ids` based on the returned primary keys and the provided selection set.

        Args:
            data: The processed input data for update. Must include primary keys.
            selection: An optional QueryNodeType representing the GraphQL selection set for the returned data.

        Returns:
            A QueryResult containing the updated records, structured according to the selection.
        """
        updated_ids = self._mutate(MutationData('update_by_pks', data))
        return self._list_by_ids(updated_ids, selection)

    def update_by_filter(self, data: Input[DeclarativeT], dto_filter: BooleanFilterDTO, selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        updated_ids = self._mutate(MutationData('update_where', data, dto_filter))
        return self._list_by_ids(updated_ids, selection)

    def delete(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, execution_options: Optional[dict[str, Any]]=None) -> QueryResult[DeclarativeT]:
        with self.session.begin_nested() as transaction:
            transpiler = QueryTranspiler(self.model, self._dialect, statement=self.statement)
            where_expressions = transpiler.filter_expressions(dto_filter) if dto_filter else None
            to_be_deleted = self.list(selection, dto_filter=dto_filter)
            affected_rows = self._delete_where(transpiler.scope.root_alias, where_expressions, execution_options)
            transaction.commit()
        return to_be_deleted.filter_in(**self._rows_to_filter_dict(affected_rows))
class SQLAlchemyGraphQLSyncRepository(SQLAlchemyGraphQLRepository[DeclarativeT, AnySyncSession]):
    def _insert_many(self, data: InsertData) -> Sequence[Row[Any]]:
        pass

    def _insert_nested(self, data: InsertData, level: LevelInput) -> None:
        '''Inserts multiple records for a given model type and updates related instances.

        This internal method performs a bulk insert operation for the specified SQLAlchemy model type using the provided values. After insertion, it retrieves the primary keys of the newly created records and updates the corresponding instance objects within the provided `level` input with these keys. It also handles updating foreign keys for to-one relationships where applicable.

        Args:
            data: An InsertData object containing the model type, values to insert, and optional upsert configuration for handling conflicts.
            level: The input level containing information about the instances being created and their relationships, used to update instances with generated primary and foreign keys.
        '''
        pass

    def _delete_where(self, alias: AliasedClass[Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]:
        pass

    def _update_where(self, alias: AliasedClass[Any], values: dict[str, Any], where: Optional[list[ColumnElement[bool]]]=None, execution_options: Optional[dict[str, Any]]=None) -> Sequence[Row[Any]]:
        pass

    def _create_nested_to_one_relations(self, data: Input[DeclarativeT]) -> None:
        '''Creates nested related objects for to-one relationships.

        Iterates through the input data levels filtered for 'create' and 'upsert' operations on to-one relationships. It groups the instances to be created by their model type and then calls `_insert_nested` for each type to perform bulk insertions.

        Args:
            data: The processed input data containing nested structures and relationship information.
        '''
        pass

    def _update_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None:
        '''Updates foreign keys to connect or disconnect existing related objects for to-many relationships.

        Iterates through the input data levels filtered for 'add' and 'remove' operations on to-many relationships. For each relationship, it prepares bulk update statements to set the foreign keys on the related models, linking them to the parent objects (either newly created or existing).

        Args:
            data: The processed input data containing relationship information.
            created_ids: A sequence of RowLike objects containing the primary keys of the main objects created or updated in the parent operation. Used to link the relations to the correct parent.
        '''
        pass

    def _set_to_many_relations(self, mode: InsertOrUpdate, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None:
        pass

    def _create_to_many_relations(self, data: Input[DeclarativeT], created_ids: Sequence[_RowLike]) -> None:
        '''Creates and connects new related objects for to-many relationships.

        Iterates through the input data levels filtered for 'create' and 'upsert' operations on to-many relationships. It prepares the data for the new related objects, including setting the foreign keys based on the parent object's primary key, and then calls `_insert_nested` to perform bulk insertions.

        Args:
            data: The processed input data containing nested structures and relationship information.
            created_ids: A sequence of RowLike objects containing the primary keys of the main objects created in the parent operation. Used to set foreign keys on the newly created related objects.
        '''
        pass

    def _execute_insert_or_update(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]:
        pass

    def _mutate(self, data: MutationData[DeclarativeT]) -> Sequence[_RowLike]:
        pass

    def _list_by_ids(self, id_rows: Sequence[_RowLike], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        '''Retrieves multiple records by their primary keys with optional selection.

        Fetches records from the repository's main model that match the provided primary key combinations. Allows specifying a GraphQL selection set.

        Args:
            id_rows: A sequence of RowLike objects, each containing the primary key values for one record to retrieve.
            selection: An optional QueryNodeType representing the GraphQL selection set to apply to the query.

        Returns:
            A QueryResult containing the list of fetched records matching the provided IDs, structured according to the selection.
        '''
        pass

    def list(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]:
        '''Retrieves a list of records based on filtering, ordering, and pagination.

        Fetches records from the repository's main model, applying optional filtering, ordering, pagination (limit/offset), and distinct constraints. Supports GraphQL selection sets for optimized data retrieval and query hooks for customization.

        Args:
            selection: An optional QueryNodeType representing the GraphQL selection set.
            dto_filter: An optional filter object derived from GraphQL input.
            order_by: An optional list of ordering criteria.
            limit: An optional integer limiting the number of results.
            offset: An optional integer specifying the starting point for results.
            distinct_on: An optional list of fields for DISTINCT ON clause (if supported).
            allow_null: If True, allows certain operations even if parts of the filter path are null (implementation specific to executor).
            query_hooks: Optional hooks to modify the query at different stages.
            execution_options: Optional dictionary of execution options passed to SQLAlchemy.
            **kwargs: Additional keyword arguments (currently unused but allows extension).

        Returns:
            A QueryResult containing the list of fetched records and potentially pagination info or total count, structured according to the selection.
        '''
        pass

    def get_one(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None, distinct_on: Optional[list[EnumDTO]]=None, allow_null: bool=False, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]:
        '''Retrieves a single record based on filtering and ordering criteria.

        Fetches a single record matching the provided filters. If multiple records match, ordering, limit, and offset can be used to pinpoint one. Returns None if no record matches. Supports GraphQL selection sets and query hooks.

        Args:
            selection: An optional QueryNodeType representing the GraphQL selection set.
            dto_filter: An optional filter object derived from GraphQL input.
            order_by: An optional list of ordering criteria.
            limit: An optional integer limiting the number of potential matches considered (usually 1 for get_one).
            offset: An optional integer specifying the starting point.
            distinct_on: An optional list of fields for DISTINCT ON clause.
            allow_null: If True, allows certain operations even if parts of the filter path are null.
            query_hooks: Optional hooks to modify the query.
            execution_options: Optional dictionary of execution options.
            **kwargs: Additional keyword arguments passed to the query executor setup.

        Returns:
            A QueryResult containing the single fetched record or None, structured according to the selection.
        '''
        pass

    def get_by_id(self, selection: Optional[QueryNodeType]=None, query_hooks: Optional[defaultdict[QueryNodeType, list[QueryHook[DeclarativeBase]]]]=None, execution_options: Optional[dict[str, Any]]=None, **kwargs: Any) -> QueryResult[DeclarativeT]:
        '''Retrieves a single record by its primary key(s).

        Fetches a single record matching the provided primary key values passed as keyword arguments. Returns None if no record matches. Supports GraphQL selection sets and query hooks.

        Args:
            selection: An optional QueryNodeType representing the GraphQL selection set.
            query_hooks: Optional hooks to modify the query.
            execution_options: Optional dictionary of execution options.
            **kwargs: Keyword arguments where keys are the primary key field names and values are the corresponding primary key values.

        Returns:
            A QueryResult containing the single fetched record or None, structured according to the selection.
        '''
        pass

    def create(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        '''Creates one or more records with nested relationships and returns them.

        Takes processed input data, performs the creation using `_mutate`, and then fetches the newly created records using `_list_by_ids` based on the returned primary keys and the provided selection set.

        Args:
            data: The processed input data for creation.
            selection: An optional QueryNodeType representing the GraphQL selection set for the returned data.

        Returns:
            A QueryResult containing the newly created records, structured according to the selection.
        '''
        pass

    def upsert(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None, dto_filter: Optional[BooleanFilterDTO]=None) -> QueryResult[DeclarativeT]:
        pass

    def update_by_ids(self, data: Input[DeclarativeT], selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        '''Updates one or more records with nested relationships and returns them.

        Takes processed input data, performs the update using `_mutate`, and then fetches the updated records using `_list_by_ids` based on the returned primary keys and the provided selection set.

        Args:
            data: The processed input data for update. Must include primary keys.
            selection: An optional QueryNodeType representing the GraphQL selection set for the returned data.

        Returns:
            A QueryResult containing the updated records, structured according to the selection.
        '''
        pass

    def update_by_filter(self, data: Input[DeclarativeT], dto_filter: BooleanFilterDTO, selection: Optional[QueryNodeType]=None) -> QueryResult[DeclarativeT]:
        pass

    def delete(self, selection: Optional[QueryNodeType]=None, dto_filter: Optional[BooleanFilterDTO]=None, execution_options: Optional[dict[str, Any]]=None) -> QueryResult[DeclarativeT]:
        pass
20
10
28
2
18
8
3
0.42
1
21
13
0
19
2
19
27
545
56
344
165
260
146
180
97
160
9
2
4
58
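`_delete_where` and `_update_where` in the record above share a capability probe: use `RETURNING` when the dialect advertises it, otherwise select the affected primary keys first and then run the mutation. A runnable sketch of that fallback against an in-memory SQLite database (the table and rows are illustrative assumptions; only the `dialect.delete_returning` probe and the two code paths mirror the record):

from sqlalchemy import Column, Integer, MetaData, Table, create_engine, delete, select

metadata = MetaData()
users = Table("users", metadata, Column("id", Integer, primary_key=True))

engine = create_engine("sqlite://")
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(users.insert(), [{"id": 1}, {"id": 2}, {"id": 3}])
    where = users.c.id > 1
    if engine.dialect.delete_returning:
        # Fast path: DELETE ... RETURNING id gives back the affected keys.
        rows = conn.execute(delete(users).where(where).returning(users.c.id)).all()
    else:
        # Fallback: capture the keys with a SELECT, then delete.
        rows = conn.execute(select(users.c.id).where(where)).all()
        conn.execute(delete(users).where(where))
    print(rows)  # primary keys of the deleted rows: [(2,), (3,)]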
327,982
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_field.py
strawchemy.strawberry._field.StrawchemyCreateMutationField
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union, cast, get_args, get_origin
from strawberry.annotation import StrawberryAnnotation
from strawchemy.constants import DATA_KEY, DISTINCT_ON_KEY, FILTER_KEY, LIMIT_KEY, NODES_KEY, OFFSET_KEY, ORDER_BY_KEY, UPSERT_CONFLICT_FIELDS, UPSERT_UPDATE_FIELDS
from .repository import StrawchemyAsyncRepository
from strawberry.types.arguments import StrawberryArgument
from collections.abc import Sequence
from strawchemy.strawberry.mutation.input import Input
from typing_extensions import Self, TypeAlias, TypeIs, override
from strawchemy.validation.base import InputValidationError


class StrawchemyCreateMutationField(_StrawchemyInputMutationField, _StrawchemyMutationField):

    def _create_resolver(self, info: Info, data: Union[AnyMappedDTO, Sequence[AnyMappedDTO]]) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        repository = self._get_repository(info)
        try:
            input_data = Input(data, self._validation)
        except InputValidationError as error:
            return error.graphql_type()
        if isinstance(repository, StrawchemyAsyncRepository):
            return self._input_result_async(repository.create(input_data), input_data)
        return self._input_result_sync(repository.create(input_data), input_data)

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        if self.is_list:
            return [StrawberryArgument(DATA_KEY, None, type_annotation=StrawberryAnnotation(list[self._input_type]))]
        return [StrawberryArgument(DATA_KEY, None, type_annotation=StrawberryAnnotation(self._input_type))]

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        return self._create_resolver(info, *args, **kwargs)
class StrawchemyCreateMutationField(_StrawchemyInputMutationField, _StrawchemyMutationField):
    def _create_resolver(self, info: Info, data: Union[AnyMappedDTO, Sequence[AnyMappedDTO]]) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        pass

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass
6
0
6
0
6
0
2
0
2
7
3
0
3
0
3
36
24
2
22
13
12
0
16
6
12
3
3
1
6
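The `_create_resolver` in the record above returns either a plain value or a coroutine depending on which repository flavour it was handed; the GraphQL executor awaits the latter. A stripped-down, runnable sketch of that dual dispatch with stand-in repository classes (nothing here is strawchemy API):

import asyncio


class SyncRepo:  # stand-in for a sync repository
    def create(self, data):
        return {"created": data}


class AsyncRepo:  # stand-in for an async repository
    async def create(self, data):
        return {"created": data}


async def _result_async(call):
    # Wraps the repository coroutine so the resolver can hand back an awaitable.
    return await call


def create_resolver(repo, data):
    # Mirrors the pattern: branch on the repository flavour and return
    # either the result or an awaitable for the executor to await.
    if isinstance(repo, AsyncRepo):
        return _result_async(repo.create(data))
    return repo.create(data)


print(create_resolver(SyncRepo(), 1))            # {'created': 1}
print(asyncio.run(create_resolver(AsyncRepo(), 2)))  # {'created': 2}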
327,983
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_field.py
strawchemy.strawberry._field.StrawchemyDeleteMutationField
from strawchemy.strawberry.dto import BooleanFilterDTO, EnumDTO, MappedStrawberryGraphQLDTO, OrderByDTO, StrawchemyDTOAttributes
from .exceptions import StrawchemyFieldError
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union, cast, get_args, get_origin
from strawberry.types.base import StrawberryList, StrawberryOptional, StrawberryType, WithStrawberryObjectDefinition
from .repository import StrawchemyAsyncRepository
from strawberry.types.arguments import StrawberryArgument
from strawchemy.constants import DATA_KEY, DISTINCT_ON_KEY, FILTER_KEY, LIMIT_KEY, NODES_KEY, OFFSET_KEY, ORDER_BY_KEY, UPSERT_CONFLICT_FIELDS, UPSERT_UPDATE_FIELDS
from strawberry.annotation import StrawberryAnnotation
from typing_extensions import Self, TypeAlias, TypeIs, override


class StrawchemyDeleteMutationField(StrawchemyField, _StrawchemyMutationField):

    def __init__(self, input_type: Optional[type[BooleanFilterDTO]]=None, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.is_root_field = True
        self._input_type = input_type

    def _delete_resolver(self, info: Info, filter_input: Optional[BooleanFilterDTO]=None) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        repository = self._get_repository(info)
        if isinstance(repository, StrawchemyAsyncRepository):
            return self._list_result_async(repository.delete(filter_input))
        return self._list_result_sync(repository.delete(filter_input))

    @override
    def _validate_type(self, type_: Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]) -> None:
        if not _is_list(type_):
            msg = f'Type of delete mutation must be a list: {self.name}'
            raise StrawchemyFieldError(msg)

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        if self._input_type:
            return [StrawberryArgument(python_name='filter_input', graphql_name=FILTER_KEY, default=None, type_annotation=StrawberryAnnotation(self._input_type))]
        return []

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        return self._delete_resolver(info, *args, **kwargs)
class StrawchemyDeleteMutationField(StrawchemyField, _StrawchemyMutationField):
    def __init__(self, input_type: Optional[type[BooleanFilterDTO]]=None, *args: Any, **kwargs: Any) -> None:
        pass

    def _delete_resolver(self, info: Info, filter_input: Optional[BooleanFilterDTO]=None) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass

    @override
    def _validate_type(self, type_: Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]) -> None:
        pass

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        pass

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass
9
0
8
0
7
0
2
0.02
2
8
3
0
5
2
5
37
46
4
41
24
21
1
20
10
14
2
2
1
8
327,984
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_field.py
strawchemy.strawberry._field.StrawchemyField
from strawchemy.strawberry.dto import BooleanFilterDTO, EnumDTO, MappedStrawberryGraphQLDTO, OrderByDTO, StrawchemyDTOAttributes
from strawchemy.dto.types import DTOConfig, Purpose
from inspect import isclass
import dataclasses
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union, cast, get_args, get_origin
from collections.abc import Sequence
from strawchemy.constants import DATA_KEY, DISTINCT_ON_KEY, FILTER_KEY, LIMIT_KEY, NODES_KEY, OFFSET_KEY, ORDER_BY_KEY, UPSERT_CONFLICT_FIELDS, UPSERT_UPDATE_FIELDS
from functools import cached_property
from strawchemy.dto.base import MappedDTO
from typing_extensions import Self, TypeAlias, TypeIs, override
from strawchemy.utils import is_type_hint_optional
from .exceptions import StrawchemyFieldError
from strawberry.annotation import StrawberryAnnotation
from ._utils import dto_model_from_type, strawberry_contained_types, strawberry_contained_user_type
from .repository import StrawchemyAsyncRepository
from strawberry.types.arguments import StrawberryArgument
from strawberry.types import get_object_definition
from strawberry.types.base import StrawberryList, StrawberryOptional, StrawberryType, WithStrawberryObjectDefinition
from strawchemy.types import DefaultOffsetPagination
from strawberry.types.field import UNRESOLVED, StrawberryField


class StrawchemyField(StrawberryField):
    """A custom field class for Strawberry GraphQL that allows explicit handling of resolver arguments.

    This class extends the default Strawberry field functionality by allowing the specification of a list of arguments that the resolver function accepts, instead of pulling them from the function signature. This is useful for scenarios where you want to have fine-grained control over the resolver arguments or when integrating with other systems that require explicit argument definitions.

    Attributes:
        arguments: A list of StrawberryArgument instances representing the arguments that the resolver function accepts.
    """

    @override
    def __init__(
        self,
        config: StrawchemyConfig,
        repository_type: AnyRepository,
        filter_type: Optional[type[BooleanFilterDTO]] = None,
        order_by: Optional[type[OrderByDTO]] = None,
        distinct_on: Optional[type[EnumDTO]] = None,
        pagination: Union[bool, DefaultOffsetPagination] = False,
        root_aggregations: bool = False,
        registry_namespace: Optional[dict[str, Any]] = None,
        filter_statement: Optional[FilterStatementCallable] = None,
        query_hook: Optional[Union[QueryHookCallable[Any], Sequence[QueryHookCallable[Any]]]] = None,
        execution_options: Optional[dict[str, Any]] = None,
        id_field_name: str = 'id',
        arguments: Optional[list[StrawberryArgument]] = None,
        python_name: Optional[str] = None,
        graphql_name: Optional[str] = None,
        type_annotation: Optional[StrawberryAnnotation] = None,
        origin: Optional[Union[Union[Union[type, Callable[..., Any]], staticmethod[Any, Any]], classmethod[Any, Any, Any]]] = None,
        is_subscription: bool = False,
        description: Optional[str] = None,
        base_resolver: Optional[StrawberryResolver[Any]] = None,
        permission_classes: list[type[BasePermission]] = (),
        default: object = dataclasses.MISSING,
        default_factory: Union[Callable[[], Any], object] = dataclasses.MISSING,
        metadata: Optional[Mapping[Any, Any]] = None,
        deprecation_reason: Optional[str] = None,
        directives: Sequence[object] = (),
        extensions: list[FieldExtension] = (),
        root_field: bool = False,
    ) -> None:
        self.type_annotation = type_annotation
        self.registry_namespace = registry_namespace
        self.is_root_field = root_field
        self.root_aggregations = root_aggregations
        self.distinct_on = distinct_on
        self.query_hook = query_hook
        self.pagination: Union[DefaultOffsetPagination, Literal[False]] = DefaultOffsetPagination() if pagination is True else pagination
        self.id_field_name = id_field_name
        self._filter = filter_type
        self._order_by = order_by
        self._description = description
        self._filter_statement = filter_statement
        self._execution_options = execution_options
        self._config = config
        self._repository_type = repository_type
        super().__init__(python_name, graphql_name, type_annotation, origin, is_subscription, description, base_resolver, permission_classes, default, default_factory, metadata, deprecation_reason, directives, extensions)
        self._arguments = arguments

    def _type_or_annotation(self) -> Union[Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], object], str]:
        type_ = self.type
        if type_ is UNRESOLVED and self.type_annotation:
            type_ = self.type_annotation.annotation
        return type_

    @property
    def _strawchemy_type(self) -> type[StrawchemyTypeWithStrawberryObjectDefinition]:
        return cast('type[StrawchemyTypeWithStrawberryObjectDefinition]', self.type)

    def _get_repository(self, info: Info[Any, Any]) -> Union[StrawchemySyncRepository[Any], StrawchemyAsyncRepository[Any]]:
        return self._repository_type(self._strawchemy_type, session=self._config.session_getter(info), info=info, auto_snake_case=self._config.auto_snake_case, root_aggregations=self.root_aggregations, filter_statement=self.filter_statement(info), execution_options=self._execution_options, deterministic_ordering=self._config.deterministic_ordering)

    async def _list_result_async(self, repository_call: Awaitable[GraphQLResult[Any, Any]]) -> _ListResolverResult:
        return (await repository_call).graphql_list(root_aggregations=self.root_aggregations)

    def _list_result_sync(self, repository_call: GraphQLResult[Any, Any]) -> _ListResolverResult:
        return repository_call.graphql_list(root_aggregations=self.root_aggregations)

    async def _get_by_id_result_async(self, repository_call: Awaitable[GraphQLResult[Any, Any]]) -> _GetByIdResolverResult:
        result = await repository_call
        return result.graphql_type_or_none() if self.is_optional else result.graphql_type()

    def _get_by_id_result_sync(self, repository_call: GraphQLResult[Any, Any]) -> _GetByIdResolverResult:
        return repository_call.graphql_type_or_none() if self.is_optional else repository_call.graphql_type()

    def _get_by_id_resolver(self, info: Info, **kwargs: Any) -> Union[_GetByIdResolverResult, Coroutine[_GetByIdResolverResult, Any, Any]]:
        repository = self._get_repository(info)
        if isinstance(repository, StrawchemyAsyncRepository):
            return self._get_by_id_result_async(repository.get_by_id(**kwargs))
        return self._get_by_id_result_sync(repository.get_by_id(**kwargs))

    def _list_resolver(self, info: Info, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> Union[_ListResolverResult, Coroutine[_ListResolverResult, Any, Any]]:
        repository = self._get_repository(info)
        if isinstance(repository, StrawchemyAsyncRepository):
            return self._list_result_async(repository.list(filter_input, order_by, distinct_on, limit, offset))
        return self._list_result_sync(repository.list(filter_input, order_by, distinct_on, limit, offset))

    def _validate_type(self, type_: Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]) -> None:
        for inner_type in strawberry_contained_types(type_):
            if self.root_aggregations and issubclass(inner_type, StrawchemyDTOAttributes) and (not inner_type.__strawchemy_is_root_aggregation_type__):
                msg = f'The `{self.name}` field is defined with `root_aggregations` enabled but the field type is not a root aggregation type.'
                raise StrawchemyFieldError(msg)

    @classmethod
    def _is_strawchemy_type(cls, type_: Any) -> TypeIs[Union[MappedStrawberryGraphQLDTO[Any], type[MappedStrawberryGraphQLDTO[Any]]]]:
        return isinstance(type_, MappedStrawberryGraphQLDTO) or (isclass(type_) and issubclass(type_, MappedStrawberryGraphQLDTO))

    @cached_property
    def filter(self) -> Optional[type[BooleanFilterDTO]]:
        inner_type = strawberry_contained_user_type(self.type)
        if self._filter is None and self._is_strawchemy_type(inner_type):
            return inner_type.__strawchemy_filter__
        return self._filter

    @cached_property
    def order_by(self) -> Optional[type[OrderByDTO]]:
        inner_type = strawberry_contained_user_type(self.type)
        if self._order_by is None and self._is_strawchemy_type(inner_type):
            return inner_type.__strawchemy_order_by__
        return self._order_by

    def auto_arguments(self) -> list[StrawberryArgument]:
        arguments: list[StrawberryArgument] = []
        inner_type = strawberry_contained_user_type(self.type)
        if self.is_list:
            if self.pagination:
                arguments.extend([StrawberryArgument(LIMIT_KEY, None, type_annotation=StrawberryAnnotation(Optional[int]), default=self.pagination.limit), StrawberryArgument(OFFSET_KEY, None, type_annotation=StrawberryAnnotation(int), default=self.pagination.offset)])
            if self.filter:
                arguments.append(StrawberryArgument(python_name='filter_input', graphql_name=FILTER_KEY, type_annotation=StrawberryAnnotation(Optional[self.filter]), default=None))
            if self.order_by:
                arguments.append(StrawberryArgument(ORDER_BY_KEY, None, type_annotation=StrawberryAnnotation(Optional[list[self.order_by]]), default=None))
            if self.distinct_on:
                arguments.append(StrawberryArgument(DISTINCT_ON_KEY, None, type_annotation=StrawberryAnnotation(Optional[list[self.distinct_on]]), default=None))
        elif issubclass(inner_type, MappedDTO):
            model = dto_model_from_type(inner_type)
            id_fields = list(self._config.inspector.id_field_definitions(model, DTOConfig(Purpose.READ)))
            if len(id_fields) == 1:
                field = id_fields[0][1]
                arguments.append(StrawberryArgument(self.id_field_name, None, type_annotation=StrawberryAnnotation(field.type_)))
            else:
                arguments.extend([StrawberryArgument(name, None, type_annotation=StrawberryAnnotation(field.type_)) for name, field in self._config.inspector.id_field_definitions(model, DTOConfig(Purpose.READ))])
        return arguments

    def filter_statement(self, info: Info[Any, Any]) -> Optional[Select[tuple[DeclarativeBase]]]:
        return self._filter_statement(info) if self._filter_statement else None

    @cached_property
    def is_list(self) -> bool:
        return True if self.root_aggregations else _is_list(self._type_or_annotation())

    @cached_property
    def is_optional(self) -> bool:
        type_ = self._type_or_annotation()
        return isinstance(type_, StrawberryOptional) or is_type_hint_optional(type_)

    @property
    @override
    def is_basic_field(self) -> bool:
        return not self.is_root_field

    @cached_property
    @override
    def is_async(self) -> bool:
        return issubclass(self._repository_type, StrawchemyAsyncRepository)

    @override
    def __copy__(self) -> Self:
        new_field = type(self)(python_name=self.python_name, graphql_name=self.graphql_name, type_annotation=self.type_annotation, origin=self.origin, is_subscription=self.is_subscription, description=self.description, base_resolver=self.base_resolver, permission_classes=self.permission_classes[:] if self.permission_classes is not None else [], default=self.default_value, default_factory=self.default_factory, metadata=self.metadata.copy() if self.metadata is not None else None, deprecation_reason=self.deprecation_reason, directives=self.directives[:] if self.directives is not None else [], extensions=self.extensions[:] if self.extensions is not None else [], filter_statement=self._filter_statement, query_hook=self.query_hook, id_field_name=self.id_field_name, repository_type=self._repository_type, root_aggregations=self.root_aggregations, filter_type=self._filter, order_by=self._order_by, distinct_on=self.distinct_on, pagination=self.pagination, registry_namespace=self.registry_namespace, execution_options=self._execution_options, config=self._config)
        new_field._arguments = self._arguments[:] if self._arguments is not None else None
        return new_field

    @property
    @override
    def type(self) -> Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Literal[UNRESOLVED]]:
        return super().type

    @type.setter
    def type(self, type_: Any) -> None:
        current_annotation = self.type_annotation.annotation if self.type_annotation else UNRESOLVED
        if type_ is UNRESOLVED and current_annotation is not UNRESOLVED:
            return
        self.type_annotation = StrawberryAnnotation.from_annotation(type_, namespace=self.registry_namespace)

    @property
    @override
    def description(self) -> Optional[str]:
        if self._description is not None:
            return self._description
        definition = get_object_definition(strawberry_contained_user_type(self.type), strict=False)
        named_template = 'Fetch {object} from the {name} collection'
        if not definition or definition.is_input:
            return None
        if not self.is_list:
            description = named_template.format(object='object', name=definition.name)
            return description if self.base_resolver else f'{description} by id'
        if self.root_aggregations:
            nodes_field = next((field for field in definition.fields if field.python_name == NODES_KEY))
            definition = get_object_definition(strawberry_contained_user_type(nodes_field.type), strict=True)
            return named_template.format(object='aggregation data', name=definition.name)
        return named_template.format(object='objects', name=definition.name)

    @description.setter
    def description(self, value: str) -> None:
        self._description = value

    @property
    @override
    def arguments(self) -> list[StrawberryArgument]:
        if self.base_resolver:
            return super().arguments
        if not self._arguments:
            self._arguments = self.auto_arguments()
        return self._arguments

    @arguments.setter
    def arguments(self, value: list[StrawberryArgument]) -> None:
        args_prop = super(StrawchemyField, self.__class__).arguments
        return args_prop.fset(self, value)

    @override
    def resolve_type(self, *, type_definition: Optional[StrawberryObjectDefinition]=None) -> Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]:
        type_ = super().resolve_type(type_definition=type_definition)
        self._validate_type(type_)
        return type_

    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[Union[Union[Union[Union[_ListResolverResult, Coroutine[_ListResolverResult, Any, Any]], _GetByIdResolverResult], Coroutine[_GetByIdResolverResult, Any, Any]], _CreateOrUpdateResolverResult], Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        if self.is_list:
            return self._list_resolver(info, *args, **kwargs)
        return self._get_by_id_resolver(info, *args, **kwargs)

    @override
    def get_result(self, source: Any, info: Optional[Info[Any, Any]], args: list[Any], kwargs: dict[str, Any]) -> Union[Awaitable[Any], Any]:
        if self.is_root_field and self.base_resolver is None:
            assert info
            return self.resolver(info, *args, **kwargs)
        return super().get_result(source, info, args, kwargs)
class StrawchemyField(StrawberryField):
    '''A custom field class for Strawberry GraphQL that allows explicit handling of resolver arguments.

    This class extends the default Strawberry field functionality by allowing the
    specification of a list of arguments that the resolver function accepts, instead
    of pulling them from the function signature. This is useful for scenarios where
    you want to have fine-grained control over the resolver arguments or when
    integrating with other systems that require explicit argument definitions.

    Attributes:
        arguments: A list of StrawberryArgument instances representing the arguments
            that the resolver function accepts.
    '''

    @override
    def __init__(self, config: StrawchemyConfig, repository_type: AnyRepository, filter_type: Optional[type[BooleanFilterDTO]]=None, order_by: Optional[type[OrderByDTO]]=None, distinct_on: Optional[type[EnumDTO]]=None, pagination: Union[bool, DefaultOffsetPagination]=False, root_aggregations: bool=False, registry_namespace: Optional[dict[str, Any]]=None, filter_statement: Optional[FilterStatementCallable]=None, query_hook: Optional[Union[QueryHookCallable[Any], Sequence[QueryHookCallable[Any]]]]=None, execution_options: Optional[dict[str, Any]]=None, id_field_name: str='id', arguments: Optional[list[StrawberryArgument]]=None, python_name: Optional[str]=None, graphql_name: Optional[str]=None, type_annotation: Optional[StrawberryAnnotation]=None, origin: Optional[Union[Union[Union[type, Callable[..., Any]], staticmethod[Any, Any]], classmethod[Any, Any, Any]]]=None, is_subscription: bool=False, description: Optional[str]=None, base_resolver: Optional[StrawberryResolver[Any]]=None, permission_classes: list[type[BasePermission]]=(), default:
        pass

    def _type_or_annotation(self) -> Union[Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], object], str]:
        pass

    @property
    def _strawchemy_type(self) -> type[StrawchemyTypeWithStrawberryObjectDefinition]:
        pass

    def _get_repository(self, info: Info[Any, Any]) -> Union[StrawchemySyncRepository[Any], StrawchemyAsyncRepository[Any]]:
        pass

    async def _list_result_async(self, repository_call: Awaitable[GraphQLResult[Any, Any]]) -> _ListResolverResult:
        pass

    def _list_result_sync(self, repository_call: GraphQLResult[Any, Any]) -> _ListResolverResult:
        pass

    async def _get_by_id_result_async(self, repository_call: Awaitable[GraphQLResult[Any, Any]]) -> _GetByIdResolverResult:
        pass

    def _get_by_id_result_sync(self, repository_call: GraphQLResult[Any, Any]) -> _GetByIdResolverResult:
        pass

    def _get_by_id_resolver(self, info: Info, **kwargs: Any) -> Union[_GetByIdResolverResult, Coroutine[_GetByIdResolverResult, Any, Any]]:
        pass

    def _list_resolver(self, info: Info, filter_input: Optional[BooleanFilterDTO]=None, order_by: Optional[list[OrderByDTO]]=None, distinct_on: Optional[list[EnumDTO]]=None, limit: Optional[int]=None, offset: Optional[int]=None) -> Union[_ListResolverResult, Coroutine[_ListResolverResult, Any, Any]]:
        pass

    def _validate_type(self, type_: Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]) -> None:
        pass

    @classmethod
    def _is_strawchemy_type(cls, type_: Any) -> TypeIs[Union[MappedStrawberryGraphQLDTO[Any], type[MappedStrawberryGraphQLDTO[Any]]]]:
        pass

    @cached_property
    def filter(self) -> Optional[type[BooleanFilterDTO]]:
        pass

    @cached_property
    def order_by(self) -> Optional[type[OrderByDTO]]:
        pass

    def auto_arguments(self) -> list[StrawberryArgument]:
        pass

    def filter_statement(self, info: Info[Any, Any]) -> Optional[Select[tuple[DeclarativeBase]]]:
        pass

    @cached_property
    def is_list(self) -> bool:
        pass

    @cached_property
    def is_optional(self) -> bool:
        pass

    @property
    @override
    def is_basic_field(self) -> bool:
        pass

    @cached_property
    @override
    def is_async(self) -> bool:
        pass

    @override
    def __copy__(self) -> Self:
        pass

    @property
    @override
    def type(self) -> Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Literal[UNRESOLVED]]:
        pass

    @type.setter
    def type(self, value: Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Literal[UNRESOLVED]]) -> None:
        pass

    @property
    @override
    def description(self) -> Optional[str]:
        pass

    @description.setter
    def description(self, value: Optional[str]) -> None:
        pass

    @property
    @override
    def arguments(self) -> list[StrawberryArgument]:
        pass

    @arguments.setter
    def arguments(self, value: list[StrawberryArgument]) -> None:
        pass

    @override
    def resolve_type(self, *, type_definition: Optional[StrawberryObjectDefinition]=None) -> Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]:
        pass

    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[Union[Union[Union[Union[_ListResolverResult, Coroutine[_ListResolverResult, Any, Any]], _GetByIdResolverResult], Coroutine[_GetByIdResolverResult, Any, Any]], _CreateOrUpdateResolverResult], Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass

    @override
    def get_result(self, source: Any, info: Optional[Info[Any, Any]], args: list[Any], kwargs: dict[str, Any]) -> Union[Awaitable[Any], Any]:
        pass
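The docstring above describes the one design point this record documents: the field keeps an explicit `arguments` list instead of introspecting the resolver signature. A minimal standalone sketch of that pattern follows; `Argument`, `Field`, and `effective_arguments` are hypothetical illustrations, not strawchemy or strawberry APIs:

import inspect
from dataclasses import dataclass, field
from typing import Any, Callable

@dataclass
class Argument:
    name: str
    type_: Any

@dataclass
class Field:
    resolver: Callable[..., Any]
    # An explicit argument list takes precedence over signature introspection.
    arguments: list[Argument] = field(default_factory=list)

    def effective_arguments(self) -> list[Argument]:
        if self.arguments:
            return self.arguments
        sig = inspect.signature(self.resolver)
        return [Argument(p.name, p.annotation) for p in sig.parameters.values()]

def list_users(limit: int, offset: int) -> list[str]:
    return []

explicit = Field(list_users, arguments=[Argument('filter', dict)])
assert [a.name for a in explicit.effective_arguments()] == ['filter']

implicit = Field(list_users)
assert [a.name for a in implicit.effective_arguments()] == ['limit', 'offset']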
54
1
11
0
10
0
2
0.07
1
30
13
2
29
24
30
30
384
36
336
160
217
23
144
69
113
8
1
2
65
327,985
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_field.py
strawchemy.strawberry._field.StrawchemyUpdateMutationField
from typing_extensions import Self, TypeAlias, TypeIs, override
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union, cast, get_args, get_origin
from .exceptions import StrawchemyFieldError
from strawchemy.strawberry.dto import BooleanFilterDTO, EnumDTO, MappedStrawberryGraphQLDTO, OrderByDTO, StrawchemyDTOAttributes
from .repository import StrawchemyAsyncRepository
from collections.abc import Sequence
from strawberry.types.base import StrawberryList, StrawberryOptional, StrawberryType, WithStrawberryObjectDefinition
from strawberry.annotation import StrawberryAnnotation
from strawberry.types.arguments import StrawberryArgument
from strawchemy.validation.base import InputValidationError
from strawchemy.constants import DATA_KEY, DISTINCT_ON_KEY, FILTER_KEY, LIMIT_KEY, NODES_KEY, OFFSET_KEY, ORDER_BY_KEY, UPSERT_CONFLICT_FIELDS, UPSERT_UPDATE_FIELDS
from strawchemy.strawberry.mutation.input import Input

class StrawchemyUpdateMutationField(_StrawchemyInputMutationField, _StrawchemyMutationField):
    @override
    def _validate_type(self, type_: Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]) -> None:
        if self._filter is not None and (not _is_list(type_)):
            msg = f'Type of update mutation by filter must be a list: {self.name}'
            raise StrawchemyFieldError(msg)

    def _update_by_ids_resolver(self, info: Info, data: Union[AnyMappedDTO, Sequence[AnyMappedDTO]], **_: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        repository = self._get_repository(info)
        try:
            input_data = Input(data, self._validation)
        except InputValidationError as error:
            error_result = error.graphql_type()
            return [error_result] if isinstance(data, Sequence) else error_result
        if isinstance(repository, StrawchemyAsyncRepository):
            return self._input_result_async(repository.update_by_id(input_data), input_data)
        return self._input_result_sync(repository.update_by_id(input_data), input_data)

    def _update_by_filter_resolver(self, info: Info, data: AnyMappedDTO, filter_input: BooleanFilterDTO) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        repository = self._get_repository(info)
        try:
            input_data = Input(data, self._validation)
        except InputValidationError as error:
            return [error.graphql_type()]
        if isinstance(repository, StrawchemyAsyncRepository):
            return self._list_result_async(repository.update_by_filter(input_data, filter_input))
        return self._list_result_sync(repository.update_by_filter(input_data, filter_input))

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        if self.filter:
            return [StrawberryArgument(DATA_KEY, None, type_annotation=StrawberryAnnotation(self._input_type)), StrawberryArgument(python_name='filter_input', graphql_name=FILTER_KEY, type_annotation=StrawberryAnnotation(Optional[self.filter]), default=None)]
        if self.is_list:
            return [StrawberryArgument(DATA_KEY, None, type_annotation=StrawberryAnnotation(list[self._input_type]))]
        return [StrawberryArgument(DATA_KEY, None, type_annotation=StrawberryAnnotation(self._input_type))]

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        if self._filter is None:
            return self._update_by_ids_resolver(info, *args, **kwargs)
        return self._update_by_filter_resolver(info, *args, **kwargs)
class StrawchemyUpdateMutationField(_StrawchemyInputMutationField, _StrawchemyMutationField):
    @override
    def _validate_type(self, type_: Union[Union[StrawberryType, type[WithStrawberryObjectDefinition]], Any]) -> None:
        pass

    def _update_by_ids_resolver(self, info: Info, data: Union[AnyMappedDTO, Sequence[AnyMappedDTO]], **_: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass

    def _update_by_filter_resolver(self, info: Info, data: AnyMappedDTO, filter_input: BooleanFilterDTO) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        pass

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass
9
0
10
0
9
0
3
0
2
10
5
0
5
1
5
38
56
5
51
24
36
0
34
12
28
4
3
1
14
327,986
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_field.py
strawchemy.strawberry._field.StrawchemyUpsertMutationField
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union, cast, get_args, get_origin
from strawchemy.strawberry.mutation.input import Input
from .repository import StrawchemyAsyncRepository
from collections.abc import Sequence
from strawberry.annotation import StrawberryAnnotation
from strawchemy.strawberry.dto import BooleanFilterDTO, EnumDTO, MappedStrawberryGraphQLDTO, OrderByDTO, StrawchemyDTOAttributes
from strawchemy.constants import DATA_KEY, DISTINCT_ON_KEY, FILTER_KEY, LIMIT_KEY, NODES_KEY, OFFSET_KEY, ORDER_BY_KEY, UPSERT_CONFLICT_FIELDS, UPSERT_UPDATE_FIELDS
from strawchemy.validation.base import InputValidationError
from typing_extensions import Self, TypeAlias, TypeIs, override
from strawberry.types.arguments import StrawberryArgument

class StrawchemyUpsertMutationField(_StrawchemyInputMutationField, _StrawchemyMutationField):
    def __init__(self, input_type: type[MappedGraphQLDTO[T]], update_fields_enum: type[EnumDTO], conflict_fields_enum: type[EnumDTO], *args: Any, **kwargs: Any) -> None:
        super().__init__(input_type, *args, **kwargs)
        self._update_fields_enum = update_fields_enum
        self._conflict_fields_enum = conflict_fields_enum

    def _upsert_resolver(self, info: Info, data: Union[AnyMappedDTO, Sequence[AnyMappedDTO]], filter_input: Optional[BooleanFilterDTO]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        repository = self._get_repository(info)
        try:
            input_data = Input(data, self._validation)
        except InputValidationError as error:
            return error.graphql_type()
        if isinstance(repository, StrawchemyAsyncRepository):
            return self._input_result_async(repository.upsert(input_data, filter_input, update_fields, conflict_fields), input_data)
        return self._input_result_sync(repository.upsert(input_data, filter_input, update_fields, conflict_fields), input_data)

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        arguments = [StrawberryArgument(UPSERT_UPDATE_FIELDS, None, type_annotation=StrawberryAnnotation(Optional[list[self._update_fields_enum]]), default=None), StrawberryArgument(UPSERT_CONFLICT_FIELDS, None, type_annotation=StrawberryAnnotation(Optional[self._conflict_fields_enum]), default=None)]
        if self.is_list:
            arguments.append(StrawberryArgument(DATA_KEY, None, type_annotation=StrawberryAnnotation(list[self._input_type])))
        else:
            arguments.append(StrawberryArgument(DATA_KEY, None, type_annotation=StrawberryAnnotation(self._input_type)))
        return arguments

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        return self._upsert_resolver(info, *args, **kwargs)
class StrawchemyUpsertMutationField(_StrawchemyInputMutationField, _StrawchemyMutationField):
    def __init__(self, input_type: type[MappedGraphQLDTO[T]], update_fields_enum: type[EnumDTO], conflict_fields_enum: type[EnumDTO], *args: Any, **kwargs: Any) -> None:
        pass

    def _upsert_resolver(self, info: Info, data: Union[AnyMappedDTO, Sequence[AnyMappedDTO]], filter_input: Optional[BooleanFilterDTO]=None, update_fields: Optional[list[EnumDTO]]=None, conflict_fields: Optional[EnumDTO]=None) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass

    @override
    def auto_arguments(self) -> list[StrawberryArgument]:
        pass

    @override
    def resolver(self, info: Info[Any, Any], *args: Any, **kwargs: Any) -> Union[_CreateOrUpdateResolverResult, Coroutine[_CreateOrUpdateResolverResult, Any, Any]]:
        pass
7
0
14
0
14
0
2
0
2
11
5
0
4
2
4
37
63
3
60
29
37
0
22
10
17
3
3
1
7
327,987
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_field.py
strawchemy.strawberry._field._StrawchemyInputMutationField
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union, cast, get_args, get_origin

class _StrawchemyInputMutationField(StrawchemyField):
    def __init__(self, input_type: type[MappedGraphQLDTO[T]], *args: Any, validation: Optional[ValidationProtocol[T]]=None, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.is_root_field = True
        self._input_type = input_type
        self._validation = validation
class _StrawchemyInputMutationField(StrawchemyField):
    def __init__(self, input_type: type[MappedGraphQLDTO[T]], *args: Any, validation: Optional[ValidationProtocol[T]]=None, **kwargs: Any) -> None:
        pass
2
0
11
0
11
0
1
0
1
4
1
3
1
3
1
31
12
0
12
11
4
0
6
5
4
1
2
0
1
327,988
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_field.py
strawchemy.strawberry._field._StrawchemyMutationField
from strawchemy.strawberry.mutation.input import Input
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union, cast, get_args, get_origin

class _StrawchemyMutationField:
    async def _input_result_async(self, repository_call: Awaitable[GraphQLResult[Any, Any]], input_data: Input[Any]) -> _ListResolverResult:
        result = await repository_call
        return result.graphql_list() if input_data.list_input else result.graphql_type()

    def _input_result_sync(self, repository_call: GraphQLResult[Any, Any], input_data: Input[Any]) -> _ListResolverResult:
        return repository_call.graphql_list() if input_data.list_input else repository_call.graphql_type()
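The mixin above implements the same result shaping twice so a resolver can return either a plain value (sync repository) or a coroutine the GraphQL executor awaits (async repository). A minimal standalone sketch of that dispatch, with hypothetical `SyncRepo`/`AsyncRepo` stand-ins rather than strawchemy's repository classes:

import asyncio
from typing import Awaitable, Union

class SyncRepo:
    def count(self) -> int:
        return 3

class AsyncRepo:
    async def count(self) -> int:
        return 3

async def _result_async(call: Awaitable[int]) -> str:
    # Await first, then shape the result exactly like the sync path.
    return f'count={await call}'

def _result_sync(value: int) -> str:
    return f'count={value}'

def resolver(repo: Union[SyncRepo, AsyncRepo]) -> Union[str, Awaitable[str]]:
    # Async repositories yield a coroutine for the executor to await;
    # sync repositories yield the shaped value directly.
    if isinstance(repo, AsyncRepo):
        return _result_async(repo.count())
    return _result_sync(repo.count())

assert resolver(SyncRepo()) == 'count=3'
assert asyncio.run(resolver(AsyncRepo())) == 'count=3'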
class _StrawchemyMutationField:
    async def _input_result_async(self, repository_call: Awaitable[GraphQLResult[Any, Any]], input_data: Input[Any]) -> _ListResolverResult:
        pass

    def _input_result_sync(self, repository_call: GraphQLResult[Any, Any], input_data: Input[Any]) -> _ListResolverResult:
        pass
3
0
5
0
5
0
2
0
0
4
2
4
2
0
2
2
11
1
10
8
3
0
6
4
3
2
0
0
4
327,989
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_instance.py
strawchemy.strawberry._instance.MapperModelInstance
from strawberry.types.private import StrawberryPrivate

class MapperModelInstance(StrawberryPrivate): ...
class MapperModelInstance(StrawberryPrivate): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
0
1
1
1
0
2
1
1
0
1
0
0
327,990
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_registry.py
strawchemy.strawberry._registry.RegistryTypeInfo
from typing import TYPE_CHECKING, Any, ForwardRef, Literal, NewType, Optional, TypeVar, Union, cast, get_args, get_origin, overload
import dataclasses

@dataclasses.dataclass(frozen=True, eq=True)
class RegistryTypeInfo:
    name: str
    graphql_type: GraphQLType
    default_name: Optional[str] = None
    user_defined: bool = False
    override: bool = False
    pagination: Union[DefaultOffsetPagination, Literal[False]] = False
    order_by: bool = False
    scope: DTOScope | None = None
    model: type[DeclarativeBase] | None = None
    tags: frozenset[str] = dataclasses.field(default_factory=frozenset)
    exclude_from_scope: bool = False

    @property
    def scoped_id(self) -> Hashable:
        return (self.model, self.graphql_type, self.tags)
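Because the dataclass above is declared with `frozen=True, eq=True`, instances get value-based `__eq__` and `__hash__`, so they can serve as dict keys (as the registry's `_type_map` does), while `scoped_id` provides a coarser identity tuple. A minimal standalone sketch of that idea; `TypeInfo` and `User` here are illustrative names, not strawchemy's:

import dataclasses
from typing import Hashable, Optional

@dataclasses.dataclass(frozen=True, eq=True)
class TypeInfo:
    name: str
    graphql_type: str
    model: Optional[type] = None
    tags: frozenset = frozenset()

    @property
    def scoped_id(self) -> Hashable:
        # Coarser identity: same model/kind/tags collapse to one scoped entry.
        return (self.model, self.graphql_type, self.tags)

class User:
    pass

registry: dict[TypeInfo, str] = {}
info = TypeInfo('UserType', 'object', User, frozenset({'v1'}))
registry[info] = 'UserType'

# An equal (not identical) key finds the same entry, because frozen=True
# generates value-based __eq__ and __hash__.
assert registry[TypeInfo('UserType', 'object', User, frozenset({'v1'}))] == 'UserType'
assert info.scoped_id == (User, 'object', frozenset({'v1'}))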
@dataclasses.dataclass(frozen=True, eq=True)
class RegistryTypeInfo:
    @property
    def scoped_id(self) -> Hashable:
        pass
4
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
7
0
7
5
6
0
7
5
6
0
0
0
0
327,991
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_registry.py
strawchemy.strawberry._registry.StrawberryRegistry
import dataclasses
from strawberry.types import get_object_definition, has_object_definition
import strawberry
from typing import TYPE_CHECKING, Any, ForwardRef, Literal, NewType, Optional, TypeVar, Union, cast, get_args, get_origin, overload
from ._utils import strawberry_contained_types
from strawberry.types.field import StrawberryField
from collections import defaultdict

class StrawberryRegistry:
    def __init__(self, strawberry_config: StrawberryConfig) -> None:
        self.strawberry_config = strawberry_config
        self._namespaces: defaultdict[GraphQLType, dict[str, type[StrawchemyTypeWithStrawberryObjectDefinition]]] = defaultdict(dict)
        self._forward_type_refs: defaultdict[GraphQLType, defaultdict[str, list[_TypeReference]]] = defaultdict(lambda: defaultdict(list))
        self._type_refs: defaultdict[Hashable, list[_TypeReference]] = defaultdict(list)
        self._scoped_types: dict[Hashable, type[StrawchemyTypeWithStrawberryObjectDefinition]] = {}
        self._type_map: dict[RegistryTypeInfo, type[Any]] = {}
        self._names_map: defaultdict[GraphQLType, dict[str, RegistryTypeInfo]] = defaultdict(dict)
        self._tracked_type_names: defaultdict[GraphQLType, set[str]] = defaultdict(set)

    def _get_field_type_name(self, field: Union[StrawberryField, StrawberryArgument], inner_type: Any, graphql_type: GraphQLType) -> Optional[str]:
        """Get the type name of a field.

        This will handle forward references and get the object definition if available.

        Args:
            field: The field or argument to get the type name from.
            inner_type: The inner type of the field.
            graphql_type: The graphql type of the field.

        Returns:
            The type name of the field, or None if it cannot be resolved.
        """
        if field.type_annotation:
            for type_ in self._inner_types(field.type_annotation.raw_annotation):
                if isinstance(type_, (str, ForwardRef)):
                    field.type_annotation.namespace = self.namespace(graphql_type)
                    return type_.__forward_arg__ if isinstance(type_, ForwardRef) else type_
        if (field_type_def := get_object_definition(inner_type)):
            return field_type_def.name
        return None

    def _update_references(self, field: Union[StrawberryField, StrawberryArgument], graphql_type: GraphQLType) -> None:
        """Update the references of a field.

        This will resolve forward references and update the type of the field if necessary.

        Args:
            field: The field or argument to update the references of.
            graphql_type: The graphql type of the field.
        """
        for inner_type in strawberry_contained_types(field.type):
            field_type_name = self._get_field_type_name(field, inner_type, graphql_type)
            if not field_type_name:
                continue
            type_ref = _TypeReference(field)
            type_info = self.get(graphql_type, field_type_name, None)
            if type_info and (not type_info.exclude_from_scope):
                self._type_refs[type_info.scoped_id].append(type_ref)
                if (scoped_type := self._scoped_types.get(type_info.scoped_id)):
                    type_ref.update_type(scoped_type)
            if type_info is None or not type_info.override:
                self._forward_type_refs[graphql_type][field_type_name].append(type_ref)
            else:
                type_ref.update_type(self._type_map[type_info])
            if get_object_definition(inner_type):
                self._track_references(inner_type, graphql_type)

    def _track_references(self, strawberry_type: type[Union[WithStrawberryObjectDefinition, StrawberryTypeFromPydantic[PydanticModel]]], graphql_type: GraphQLType, force: bool=False) -> None:
        """Track the references of a strawberry type.

        This will recursively track the references of all fields and arguments of the given type.

        Args:
            strawberry_type: The type to track the references of.
            graphql_type: The graphql type of the type.
            force: Whether to force tracking the references even if the type has already been tracked.
        """
        object_definition = get_object_definition(strawberry_type, strict=True)
        schema_name = self.strawberry_config.name_converter.get_name_from_type(strawberry_type)
        if not force and schema_name in self._tracked_type_names[graphql_type]:
            return
        self._tracked_type_names[graphql_type].add(schema_name)
        for field in object_definition.fields:
            for argument in field.arguments:
                if any((get_object_definition(inner_type) is not None for inner_type in strawberry_contained_types(argument.type))):
                    self._update_references(argument, 'input')
            self._update_references(field, graphql_type)

    def _register_type(self, type_info: RegistryTypeInfo, strawberry_type: type[Any]) -> None:
        """Register a type in the registry.

        This will add the type to the namespace, update forward references, and track the references of the type.

        Args:
            type_info: The type info of the type to register.
            strawberry_type: The type to register.
        """
        self.namespace(type_info.graphql_type)[type_info.name] = strawberry_type
        if type_info.override or type_info.scope == 'global':
            for reference in self._forward_type_refs[type_info.graphql_type][type_info.name]:
                reference.update_type(strawberry_type)
            self._track_references(strawberry_type, type_info.graphql_type, force=type_info.override)
        if type_info.scope == 'global' and type_info.model:
            if type_info.default_name:
                self._namespaces[type_info.graphql_type][type_info.default_name] = strawberry_type
            for reference in self._type_refs[type_info.scoped_id]:
                reference.update_type(strawberry_type)
            self._scoped_types[type_info.scoped_id] = strawberry_type
        self._names_map[type_info.graphql_type][type_info.name] = type_info
        self._type_map[type_info] = strawberry_type

    @classmethod
    def _inner_types(cls, typ: Any) -> tuple[Any, ...]:
        """Get innermost types in typ.

        List[Optional[str], Union[Mapping[int, float]]] -> (str, int, float)

        Args:
            typ: A type annotation

        Returns:
            All inner types found after walked in all outer types
        """
        origin = get_origin(typ)
        if not origin or not hasattr(typ, '__args__'):
            return (typ,)
        arg_types = []
        for arg_type in get_args(typ):
            arg_types.extend(cls._inner_types(arg_type))
        return tuple(arg_types)

    def _get(self, type_info: RegistryTypeInfo) -> Optional[type[Any]]:
        """Get a type from the registry.

        This will return the type if it exists and is an override, or if it is not an override and a non-override type with the same info exists.

        Args:
            type_info: The type info of the type to get.

        Returns:
            The type if it exists, otherwise None.
        """
        if (existing := self.get(type_info.graphql_type, type_info.name, None)) and existing.override:
            return self._type_map[existing]
        if not type_info.override and (existing := self._type_map.get(type_info)):
            return existing
        return None

    def _check_conflicts(self, type_info: RegistryTypeInfo) -> None:
        """Check for conflicts in the registry.

        This will raise a ValueError if a conflict is found.

        Args:
            type_info: The type info to check for conflicts with.
        """
        if self.non_override_exists(type_info) or self.namespace('enum').get(type_info.name) or self.name_clash(type_info):
            msg = f'Type {type_info.name} is already registered'
            raise ValueError(msg)

    def __contains__(self, type_info: RegistryTypeInfo) -> bool:
        return type_info in self._type_map

    def name_clash(self, type_info: RegistryTypeInfo) -> bool:
        return type_info not in self and (existing := self.get(type_info.graphql_type, type_info.name, None)) is not None and (not existing.override) and (not type_info.override)

    @overload
    def get(self, graphql_type: GraphQLType, name: str, default: _RegistryMissing) -> RegistryTypeInfo: ...

    @overload
    def get(self, graphql_type: GraphQLType, name: str) -> RegistryTypeInfo: ...

    @overload
    def get(self, graphql_type: GraphQLType, name: str, default: T) -> Union[RegistryTypeInfo, T]: ...

    def get(self, graphql_type: GraphQLType, name: str, default: T=_RegistryMissing) -> Union[RegistryTypeInfo, T]:
        if default is _RegistryMissing:
            return self._names_map[graphql_type][name]
        return self._names_map[graphql_type].get(name, default)

    def non_override_exists(self, type_info: RegistryTypeInfo) -> bool:
        return dataclasses.replace(type_info, user_defined=True, override=False) in self or (dataclasses.replace(type_info, user_defined=False, override=False) in self and (not type_info.override) and type_info.user_defined)

    def namespace(self, graphql_type: GraphQLType) -> dict[str, type[Any]]:
        return self._namespaces[graphql_type]

    def register_type(self, type_: type[Any], type_info: RegistryTypeInfo, description: Optional[str]=None, directives: Optional[Sequence[object]]=()) -> type[Any]:
        self._check_conflicts(type_info)
        if has_object_definition(type_):
            return type_
        if (existing := self._get(type_info)):
            return existing
        strawberry_type = strawberry.type(type_, name=type_info.name, is_input=type_info.graphql_type == 'input', is_interface=type_info.graphql_type == 'interface', description=description, directives=directives)
        self._register_type(type_info, strawberry_type)
        return strawberry_type

    def register_enum(self, enum_type: type[EnumT], name: Optional[str]=None, description: Optional[str]=None, directives: Iterable[object]=()) -> type[EnumT]:
        type_name = name or f'{enum_type.__name__}Enum'
        if (existing := self.namespace('enum').get(type_name)):
            return cast('type[EnumT]', existing)
        strawberry_enum_type = strawberry.enum(cls=enum_type, name=name, description=description, directives=directives)
        self.namespace('enum')[type_name] = strawberry_enum_type
        return strawberry_enum_type
class StrawberryRegistry:
    def __init__(self, strawberry_config: StrawberryConfig) -> None:
        pass

    def _get_field_type_name(self, field: Union[StrawberryField, StrawberryArgument], inner_type: Any, graphql_type: GraphQLType) -> Optional[str]:
        '''Get the type name of a field.

        This will handle forward references and get the object definition if available.

        Args:
            field: The field or argument to get the type name from.
            inner_type: The inner type of the field.
            graphql_type: The graphql type of the field.

        Returns:
            The type name of the field, or None if it cannot be resolved.
        '''
        pass

    def _update_references(self, field: Union[StrawberryField, StrawberryArgument], graphql_type: GraphQLType) -> None:
        '''Update the references of a field.

        This will resolve forward references and update the type of the field if necessary.

        Args:
            field: The field or argument to update the references of.
            graphql_type: The graphql type of the field.
        '''
        pass

    def _track_references(self, strawberry_type: type[Union[WithStrawberryObjectDefinition, StrawberryTypeFromPydantic[PydanticModel]]], graphql_type: GraphQLType, force: bool=False) -> None:
        '''Track the references of a strawberry type.

        This will recursively track the references of all fields and arguments of the given type.

        Args:
            strawberry_type: The type to track the references of.
            graphql_type: The graphql type of the type.
            force: Whether to force tracking the references even if the type has already been tracked.
        '''
        pass

    def _register_type(self, type_info: RegistryTypeInfo, strawberry_type: type[Any]) -> None:
        '''Register a type in the registry.

        This will add the type to the namespace, update forward references, and track the references of the type.

        Args:
            type_info: The type info of the type to register.
            strawberry_type: The type to register.
        '''
        pass

    @classmethod
    def _inner_types(cls, typ: Any) -> tuple[Any, ...]:
        '''Get innermost types in typ.

        List[Optional[str], Union[Mapping[int, float]]] -> (str, int, float)

        Args:
            typ: A type annotation

        Returns:
            All inner types found after walked in all outer types
        '''
        pass

    def _get(self, type_info: RegistryTypeInfo) -> Optional[type[Any]]:
        '''Get a type from the registry.

        This will return the type if it exists and is an override, or if it is not an override and a non-override type with the same info exists.

        Args:
            type_info: The type info of the type to get.

        Returns:
            The type if it exists, otherwise None.
        '''
        pass

    def _check_conflicts(self, type_info: RegistryTypeInfo) -> None:
        '''Check for conflicts in the registry.

        This will raise a ValueError if a conflict is found.

        Args:
            type_info: The type info to check for conflicts with.
        '''
        pass

    def __contains__(self, type_info: RegistryTypeInfo) -> bool:
        pass

    def name_clash(self, type_info: RegistryTypeInfo) -> bool:
        pass

    @overload
    def get(self, graphql_type: GraphQLType, name: str, default: _RegistryMissing) -> RegistryTypeInfo:
        pass

    @overload
    def get(self, graphql_type: GraphQLType, name: str) -> RegistryTypeInfo:
        pass

    @overload
    def get(self, graphql_type: GraphQLType, name: str, default: T) -> Union[RegistryTypeInfo, T]:
        pass

    def get(self, graphql_type: GraphQLType, name: str, default: T=_RegistryMissing) -> Union[RegistryTypeInfo, T]:
        pass

    def non_override_exists(self, type_info: RegistryTypeInfo) -> bool:
        pass

    def namespace(self, graphql_type: GraphQLType) -> dict[str, type[Any]]:
        pass

    def register_type(self, type_: type[Any], type_info: RegistryTypeInfo, description: Optional[str]=None, directives: Optional[Sequence[object]]=()) -> type[Any]:
        pass

    def register_enum(self, enum_type: type[EnumT], name: Optional[str]=None, description: Optional[str]=None, directives: Iterable[object]=()) -> type[EnumT]:
        pass
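`StrawberryRegistry._inner_types` above recursively unwraps generic aliases via `get_origin`/`get_args`. A standalone re-implementation of the same walk, runnable as-is (note it also surfaces `NoneType` from `Optional`, which the docstring's example elides):

from typing import Any, Mapping, Optional, Union, get_args, get_origin

def inner_types(typ: Any) -> tuple[Any, ...]:
    # Recurse through generic aliases until plain types remain.
    origin = get_origin(typ)
    if not origin or not hasattr(typ, '__args__'):
        return (typ,)
    found: list[Any] = []
    for arg in get_args(typ):
        found.extend(inner_types(arg))
    return tuple(found)

assert inner_types(str) == (str,)
assert inner_types(list[Optional[str]]) == (str, type(None))
assert inner_types(Union[Mapping[int, float], str]) == (int, float, str)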
23
7
9
0
8
1
2
0.06
0
14
2
0
16
5
17
17
173
20
144
64
108
9
96
38
78
10
0
4
41
327,992
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/_registry.py
strawchemy.strawberry._registry._TypeReference
import dataclasses
from strawberry.types.field import StrawberryField
from strawberry.types.base import StrawberryContainer
from typing import TYPE_CHECKING, Any, ForwardRef, Literal, NewType, Optional, TypeVar, Union, cast, get_args, get_origin, overload
from copy import copy
from strawberry.annotation import StrawberryAnnotation

@dataclasses.dataclass
class _TypeReference:
    ref_holder: Union[StrawberryField, StrawberryArgument]

    @classmethod
    def _replace_contained_type(cls, container: StrawberryContainer, strawberry_type: type[WithStrawberryObjectDefinition]) -> StrawberryContainer:
        """Recursively replace the contained type in a StrawberryContainer.

        Args:
            container: The container to replace the type in.
            strawberry_type: The type to replace with.

        Returns:
            A new container with the type replaced.
        """
        container_copy = copy(container)
        if isinstance(container.of_type, StrawberryContainer):
            replaced = cls._replace_contained_type(container.of_type, strawberry_type)
        else:
            replaced = strawberry_type
        container_copy.of_type = replaced
        return container_copy

    def _set_type(self, strawberry_type: Union[type[WithStrawberryObjectDefinition], StrawberryContainer]) -> None:
        """Set the type of the referenced field or argument.

        Args:
            strawberry_type: The type to set.
        """
        if isinstance(self.ref_holder, StrawberryField):
            self.ref_holder.type = strawberry_type
        self.ref_holder.type_annotation = StrawberryAnnotation(strawberry_type, namespace=self.ref_holder.type_annotation.namespace if self.ref_holder.type_annotation else None)

    def update_type(self, strawberry_type: type[WithStrawberryObjectDefinition]) -> None:
        """Update the type of the referenced field or argument.

        If the referenced type is a container, it will recursively replace the contained type.

        Args:
            strawberry_type: The type to update to.
        """
        if isinstance(self.ref_holder.type, StrawberryContainer):
            self._set_type(self._replace_contained_type(self.ref_holder.type, strawberry_type))
        else:
            self._set_type(strawberry_type)
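`_replace_contained_type` above copies each container level and swaps only the innermost type, so shared container objects are never mutated in place. A minimal standalone analogue, with a hypothetical `Container` standing in for `StrawberryContainer`:

from copy import copy
from dataclasses import dataclass
from typing import Any

@dataclass
class Container:
    of_type: Any  # another Container, or a plain type at the innermost level

def replace_contained(container: Container, new_type: type) -> Container:
    # Copy each level on the way down; only the innermost type is swapped.
    container_copy = copy(container)
    if isinstance(container.of_type, Container):
        container_copy.of_type = replace_contained(container.of_type, new_type)
    else:
        container_copy.of_type = new_type
    return container_copy

nested = Container(Container(int))  # shape of e.g. list[list[int]]
swapped = replace_contained(nested, str)
assert swapped.of_type.of_type is str
assert nested.of_type.of_type is int  # the original graph is untouched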
@dataclasses.dataclass
class _TypeReference:
    @classmethod
    def _replace_contained_type(cls, container: StrawberryContainer, strawberry_type: type[WithStrawberryObjectDefinition]) -> StrawberryContainer:
        '''Recursively replace the contained type in a StrawberryContainer.

        Args:
            container: The container to replace the type in.
            strawberry_type: The type to replace with.

        Returns:
            A new container with the type replaced.
        '''
        pass

    def _set_type(self, strawberry_type: Union[type[WithStrawberryObjectDefinition], StrawberryContainer]) -> None:
        '''Set the type of the referenced field or argument.

        Args:
            strawberry_type: The type to set.
        '''
        pass

    def update_type(self, strawberry_type: type[WithStrawberryObjectDefinition]) -> None:
        '''Update the type of the referenced field or argument.

        If the referenced type is a container, it will recursively replace the contained type.

        Args:
            strawberry_type: The type to update to.
        '''
        pass
6
3
7
0
7
0
2
0
0
1
0
0
2
0
3
3
28
3
25
9
18
0
17
6
13
3
0
1
7
327,993
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/dto.py
strawchemy.strawberry.dto.AggregateDTO
from sqlalchemy.orm import DeclarativeBase, QueryableAttribute

class AggregateDTO(UnmappedStrawberryGraphQLDTO[DeclarativeBase]): ...
class AggregateDTO(UnmappedStrawberryGraphQLDTO[DeclarativeBase]): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
2
1
0
1
1
1
0
2
1
1
0
4
0
0
327,994
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/dto.py
strawchemy.strawberry.dto.AggregateFieldDefinition
from dataclasses import dataclass

@dataclass(eq=False, repr=False)
class AggregateFieldDefinition(GraphQLFieldDefinition):
    is_relation: bool = True
    is_aggregate: bool = True
@dataclass(eq=False, repr=False)
class AggregateFieldDefinition(GraphQLFieldDefinition):
    pass
2
0
0
0
0
0
0
0
1
0
0
0
0
0
0
26
3
0
3
3
2
0
3
3
2
0
3
0
0
327,995
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/dto.py
strawchemy.strawberry.dto.AggregateFilterDTO
from .typing import GraphQLPurpose, OrderByDTOT, QueryNodeType

class AggregateFilterDTO(GraphQLFilterDTO):
    def flatten(self, aggregation_node: QueryNodeType) -> list[AggregationFilter]:
        aggregations = []
        for name in self.dto_set_fields:
            function_filter: AggregationFunctionFilterDTO = getattr(self, name)
            function_filter.predicate.field_node = aggregation_node
            aggregation_function = function_filter.__dto_function_info__
            function_node = aggregation_node.insert_child(FunctionFieldDefinition(dto_config=self.__dto_config__, model=aggregation_node.value.model, model_field_name=aggregation_function.field_name, type_hint=function_filter.__class__, _function=aggregation_function, _model_field=aggregation_node.value.model_field))
            for arg in function_filter.arguments:
                function_node.insert_child(FunctionArgFieldDefinition.from_field(arg.__field_definitions__[arg.value], function=aggregation_function))
            aggregations.append(AggregationFilter(function_info=aggregation_function, field_node=function_node, predicate=function_filter.predicate, distinct=function_filter.distinct))
        return aggregations
class AggregateFilterDTO(GraphQLFilterDTO):
    def flatten(self, aggregation_node: QueryNodeType) -> list[AggregationFilter]:
        pass
2
0
31
0
31
0
3
0
1
5
4
0
1
1
1
4
32
0
32
9
30
0
12
8
10
3
5
2
3
327,996
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/dto.py
strawchemy.strawberry.dto.AggregationFilter
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, Optional, TypeVar, Union, overload
from .typing import GraphQLPurpose, OrderByDTOT, QueryNodeType

@dataclass
class AggregationFilter:
    function_info: FilterFunctionInfo
    predicate: EqualityComparison[Any]
    field_node: QueryNodeType
    distinct: Optional[bool] = None
@dataclass
class AggregationFilter:
    pass
2
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0
5
2
4
0
5
2
4
0
0
0
0
327,997
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/dto.py
strawchemy.strawberry.dto.AggregationFunctionFilterDTO
from sqlalchemy.orm import DeclarativeBase, QueryableAttribute
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, Optional, TypeVar, Union, overload

class AggregationFunctionFilterDTO(UnmappedStrawberryGraphQLDTO[DeclarativeBase]):
    __dto_function_info__: ClassVar[FilterFunctionInfo]
    arguments: list[_ArgumentValue]
    predicate: EqualityComparison[Any]
    distinct: Optional[bool] = None
class AggregationFunctionFilterDTO(UnmappedStrawberryGraphQLDTO[DeclarativeBase]): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
2
6
1
5
2
4
0
5
2
4
0
4
0
0
327,998
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/dto.py
strawchemy.strawberry.dto.BooleanFilterDTO
import strawberry
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, Optional, TypeVar, Union, overload
from .typing import GraphQLPurpose, OrderByDTOT, QueryNodeType
from msgspec import Struct, field, json
from typing_extensions import Self, override

class BooleanFilterDTO(GraphQLFilterDTO):
    and_: list[Self] = strawberry.field(default_factory=list, name='_and')
    or_: list[Self] = strawberry.field(default_factory=list, name='_or')
    not_: Optional[Self] = strawberry.field(default=strawberry.UNSET, name='_not')

    def filters_tree(self, _node: Optional[QueryNodeType]=None) -> tuple[QueryNodeType, Filter]:
        node = _node or QueryNode.root_node(self.__dto_model__)
        key = DTOKey.from_query_node(node)
        query = Filter(and_=[and_val.filters_tree(node)[1] for and_val in self.and_], or_=[or_val.filters_tree(node)[1] for or_val in self.or_], not_=self.not_.filters_tree(node)[1] if self.not_ else None)
        for name in self.dto_set_fields:
            value: Union[Union[EqualityComparison[Any], BooleanFilterDTO], AggregateFilterDTO] = getattr(self, name)
            field = self.__strawchemy_field_map__[key + name]
            if isinstance(value, BooleanFilterDTO):
                child, _ = node.upsert_child(field, match_on='value_equality')
                _, sub_query = value.filters_tree(child)
                if sub_query:
                    query.and_.append(sub_query)
            elif isinstance(value, AggregateFilterDTO):
                child = node.insert_child(field)
                query.and_.extend(value.flatten(child))
            else:
                value.field_node = node.insert_child(field)
                query.and_.append(value)
        return (node, query)
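`filters_tree` above folds nested `_and`/`_or`/`_not` inputs into a single `Filter` tree. A toy standalone rendering of such a tree; the `Filter` here is a hypothetical stand-in, and the real strawchemy compiles to SQLAlchemy expressions rather than strings:

from dataclasses import dataclass, field
from typing import Optional

@dataclass
class Filter:
    and_: list['Filter'] = field(default_factory=list)
    or_: list['Filter'] = field(default_factory=list)
    not_: Optional['Filter'] = None
    predicate: Optional[str] = None  # leaves carry a raw predicate

def render(f: Filter) -> str:
    # AND-join child clauses; OR and NOT become grouped sub-clauses.
    parts = [render(child) for child in f.and_]
    if f.or_:
        parts.append('(' + ' OR '.join(render(child) for child in f.or_) + ')')
    if f.not_:
        parts.append(f'NOT ({render(f.not_)})')
    if f.predicate:
        parts.append(f.predicate)
    return ' AND '.join(parts)

tree = Filter(and_=[Filter(predicate='age > 21')], or_=[Filter(predicate="name = 'a'"), Filter(predicate="name = 'b'")])
assert render(tree) == "age > 21 AND (name = 'a' OR name = 'b')"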
class BooleanFilterDTO(GraphQLFilterDTO):
    def filters_tree(self, _node: Optional[QueryNodeType]=None) -> tuple[QueryNodeType, Filter]:
        pass
2
0
23
0
23
0
6
0
1
7
5
0
1
0
1
4
28
1
27
13
25
0
21
13
19
6
5
3
6
327,999
gazorby/strawchemy
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/unseen_data/git_repos_for_analysis/gazorby_strawchemy/src/strawchemy/strawberry/dto.py
strawchemy.strawberry.dto.DTOKey
from .typing import GraphQLPurpose, OrderByDTOT, QueryNodeType
from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, Optional, TypeVar, Union, overload
from typing_extensions import Self, override
from strawchemy.graph import AnyNode, GraphMetadata, MatchOn, Node, NodeMetadata, NodeT

class DTOKey(_Key[type[Any]]):
    @override
    def to_str(self, obj: type[Any]) -> str:
        return obj.__name__

    @classmethod
    def from_dto_node(cls, node: Node[Any, Any]) -> Self:
        return cls([node.value.model])

    @classmethod
    def from_query_node(cls, node: QueryNodeType) -> Self:
        if node.is_root:
            return cls([node.value.model])
        if node.value.related_model:
            return cls([node.value.related_model])
        return cls([node.value.model])
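`DTOKey` extends a `_Key` base that is not shown in this record; `BooleanFilterDTO.filters_tree` above composes keys with `key + name` to look up field definitions. A guess at that pattern as a standalone sketch, with `Key` and its `SEP` entirely hypothetical:

class Key(list):
    SEP = ':'

    def to_str(self, obj: type) -> str:
        return obj.__name__

    def __add__(self, other: object) -> 'Key':
        # Key + 'field_name' appends one component and returns a new key.
        return Key([*self, other])

    def __str__(self) -> str:
        return self.SEP.join(self.to_str(part) if isinstance(part, type) else str(part) for part in self)

class User:
    pass

key = Key([User]) + 'name'
assert str(key) == 'User:name'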
class DTOKey(_Key[type[Any]]):
    @override
    def to_str(self, obj: type[Any]) -> str:
        pass

    @classmethod
    def from_dto_node(cls, node: Node[Any, Any]) -> Self:
        pass

    @classmethod
    def from_query_node(cls, node: QueryNodeType) -> Self:
        pass
7
0
3
0
3
0
2
0
1
4
1
0
1
0
3
13
16
2
14
7
7
0
11
4
7
3
2
1
5