language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
falconry__falcon
tests/test_utils.py
{ "start": 1769, "end": 2255 }
class ____(media.MessagePackHandler): def __init__(self): super().__init__() self.deserialize_count = 0 def deserialize(self, *args, **kwargs): result = super().deserialize(*args, **kwargs) self.deserialize_count += 1 return result async def deserialize_async(self, *args, **kwargs): result = await super().deserialize_async(*args, **kwargs) self.deserialize_count += 1 return result
TrackingMessagePackHandler
python
huggingface__transformers
src/transformers/models/deepseek_vl/modular_deepseek_vl.py
{ "start": 5260, "end": 5854 }
class ____(JanusModel): def __init__(self, config): super().__init__(config) self.config = config self.vision_model = AutoModel.from_config(config.vision_config) self.aligner = DeepseekVLAligner(config) self.language_model = AutoModel.from_config(config=config.text_config) self.gradient_checkpointing = False # Initialize weights and apply final processing. self.post_init() del self.vqmodel del self.generation_embeddings del self.generation_aligner del self.generation_head
DeepseekVLModel
python
google__flatbuffers
python/flatbuffers/builder.py
{ "start": 1531, "end": 1695 }
class ____(RuntimeError): """Error caused by using a Builder to write a Struct at a location that is not the current Offset. """ pass
StructIsNotInlineError
python
pytorch__pytorch
torch/_numpy/_dtypes.py
{ "start": 251, "end": 800 }
class ____: name = "generic" def __new__(cls, value): # NumPy scalars are modelled as 0-D arrays # so a call to np.float32(4) produces a 0-D array. from ._ndarray import asarray, ndarray if isinstance(value, str) and value in ["inf", "nan"]: value = {"inf": torch.inf, "nan": torch.nan}[value] if isinstance(value, ndarray): return value.astype(cls) else: return asarray(value, dtype=cls) ################## # abstract types # ##################
generic
python
python-pillow__Pillow
src/PIL/IptcImagePlugin.py
{ "start": 783, "end": 6444 }
class ____(ImageFile.ImageFile): format = "IPTC" format_description = "IPTC/NAA" def getint(self, key: tuple[int, int]) -> int: return _i(self.info[key]) def field(self) -> tuple[tuple[int, int] | None, int]: # # get a IPTC field header s = self.fp.read(5) if not s.strip(b"\x00"): return None, 0 tag = s[1], s[2] # syntax if s[0] != 0x1C or tag[0] not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 240]: msg = "invalid IPTC/NAA file" raise SyntaxError(msg) # field size size = s[3] if size > 132: msg = "illegal field length in IPTC/NAA file" raise OSError(msg) elif size == 128: size = 0 elif size > 128: size = _i(self.fp.read(size - 128)) else: size = i16(s, 3) return tag, size def _open(self) -> None: # load descriptive fields while True: offset = self.fp.tell() tag, size = self.field() if not tag or tag == (8, 10): break if size: tagdata = self.fp.read(size) else: tagdata = None if tag in self.info: if isinstance(self.info[tag], list): self.info[tag].append(tagdata) else: self.info[tag] = [self.info[tag], tagdata] else: self.info[tag] = tagdata # mode layers = self.info[(3, 60)][0] component = self.info[(3, 60)][1] if layers == 1 and not component: self._mode = "L" band = None else: if layers == 3 and component: self._mode = "RGB" elif layers == 4 and component: self._mode = "CMYK" if (3, 65) in self.info: band = self.info[(3, 65)][0] - 1 else: band = 0 # size self._size = self.getint((3, 20)), self.getint((3, 30)) # compression try: compression = COMPRESSION[self.getint((3, 120))] except KeyError as e: msg = "Unknown IPTC image compression" raise OSError(msg) from e # tile if tag == (8, 10): self.tile = [ ImageFile._Tile("iptc", (0, 0) + self.size, offset, (compression, band)) ] def load(self) -> Image.core.PixelAccess | None: if self.tile: args = self.tile[0].args assert isinstance(args, tuple) compression, band = args self.fp.seek(self.tile[0].offset) # Copy image data to temporary file o = BytesIO() if compression == "raw": # To simplify access to the extracted file, # prepend 
a PPM header o.write(b"P5\n%d %d\n255\n" % self.size) while True: type, size = self.field() if type != (8, 10): break while size > 0: s = self.fp.read(min(size, 8192)) if not s: break o.write(s) size -= len(s) with Image.open(o) as _im: if band is not None: bands = [Image.new("L", _im.size)] * Image.getmodebands(self.mode) bands[band] = _im _im = Image.merge(self.mode, bands) else: _im.load() self.im = _im.im self.tile = [] return ImageFile.ImageFile.load(self) Image.register_open(IptcImageFile.format, IptcImageFile) Image.register_extension(IptcImageFile.format, ".iim") def getiptcinfo( im: ImageFile.ImageFile, ) -> dict[tuple[int, int], bytes | list[bytes]] | None: """ Get IPTC information from TIFF, JPEG, or IPTC file. :param im: An image containing IPTC data. :returns: A dictionary containing IPTC information, or None if no IPTC information block was found. """ from . import JpegImagePlugin, TiffImagePlugin data = None info: dict[tuple[int, int], bytes | list[bytes]] = {} if isinstance(im, IptcImageFile): # return info dictionary right away for k, v in im.info.items(): if isinstance(k, tuple): info[k] = v return info elif isinstance(im, JpegImagePlugin.JpegImageFile): # extract the IPTC/NAA resource photoshop = im.info.get("photoshop") if photoshop: data = photoshop.get(0x0404) elif isinstance(im, TiffImagePlugin.TiffImageFile): # get raw data from the IPTC/NAA tag (PhotoShop tags the data # as 4-byte integers, so we cannot use the get method...) 
try: data = im.tag_v2._tagdata[TiffImagePlugin.IPTC_NAA_CHUNK] except KeyError: pass if data is None: return None # no properties # create an IptcImagePlugin object without initializing it class FakeImage: pass fake_im = FakeImage() fake_im.__class__ = IptcImageFile # type: ignore[assignment] iptc_im = cast(IptcImageFile, fake_im) # parse the IPTC information chunk iptc_im.info = {} iptc_im.fp = BytesIO(data) try: iptc_im._open() except (IndexError, KeyError): pass # expected failure for k, v in iptc_im.info.items(): if isinstance(k, tuple): info[k] = v return info
IptcImageFile
python
numba__numba
numba/tests/test_ndarray_subclasses.py
{ "start": 1073, "end": 2355 }
class ____(np.ndarray): # Tell Numba to not seamlessly treat this type as a regular ndarray. __numba_array_subtype_dispatch__ = True # __array__ is not needed given that this is a ndarray subclass # # def __array__(self, dtype=None): # return self # Interoperate with NumPy outside of Numba. def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): if method == "__call__": N = None scalars = [] for inp in inputs: if isinstance(inp, Number): scalars.append(inp) elif isinstance(inp, (type(self), np.ndarray)): if isinstance(inp, type(self)): scalars.append(np.ndarray(inp.shape, inp.dtype, inp)) else: scalars.append(inp) if N is not None: if N != inp.shape: raise TypeError("inconsistent sizes") else: N = inp.shape else: return NotImplemented ret = ufunc(*scalars, **kwargs) return self.__class__(ret.shape, ret.dtype, ret) else: return NotImplemented
MyArray
python
django__django
tests/migrations/test_migrations_conflict_long_name/0002_second.py
{ "start": 43, "end": 317 }
class ____(migrations.Migration): dependencies = [("migrations", "0001_initial")] operations = [ migrations.CreateModel( "Something", [ ("id", models.AutoField(primary_key=True)), ], ), ]
Migration
python
numpy__numpy
numpy/ma/tests/test_mrecords.py
{ "start": 14103, "end": 15469 }
class ____: def _create_data(self): a, b = (np.arange(10), np.random.rand(10)) ndtype = [('a', float), ('b', float)] arr = np.array(list(zip(a, b)), dtype=ndtype) mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) mrec.mask[3] = (False, True) return mrec, a, b, arr def test_view_by_itself(self): mrec = self._create_data()[0] test = mrec.view() assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, mrec) assert_equal_records(test._mask, mrec._mask) def test_view_simple_dtype(self): mrec, a, b, _ = self._create_data() ntype = (float, 2) test = mrec.view(ntype) assert_(isinstance(test, ma.MaskedArray)) assert_equal(test, np.array(list(zip(a, b)), dtype=float)) assert_(test[3, 1] is ma.masked) def test_view_flexible_type(self): mrec, _, _, arr = self._create_data() alttype = [('A', float), ('B', float)] test = mrec.view(alttype) assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, arr.view(alttype)) assert_(test['B'][3] is masked) assert_equal(test.dtype, np.dtype(alttype)) assert_(test._fill_value is None) ##############################################################################
TestView
python
scikit-learn__scikit-learn
sklearn/model_selection/_search_successive_halving.py
{ "start": 802, "end": 2414 }
class ____: """Splitter that subsamples a given fraction of the dataset""" def __init__(self, *, base_cv, fraction, subsample_test, random_state): self.base_cv = base_cv self.fraction = fraction self.subsample_test = subsample_test self.random_state = random_state def split(self, X, y, **kwargs): for train_idx, test_idx in self.base_cv.split(X, y, **kwargs): train_idx = resample( train_idx, replace=False, random_state=self.random_state, n_samples=int(self.fraction * len(train_idx)), ) if self.subsample_test: test_idx = resample( test_idx, replace=False, random_state=self.random_state, n_samples=int(self.fraction * len(test_idx)), ) yield train_idx, test_idx def _top_k(results, k, itr): # Return the best candidates of a given iteration iteration, mean_test_score, params = ( np.asarray(a) for a in (results["iter"], results["mean_test_score"], results["params"]) ) iter_indices = np.flatnonzero(iteration == itr) scores = mean_test_score[iter_indices] # argsort() places NaNs at the end of the array so we move NaNs to the # front of the array so the last `k` items are the those with the # highest scores. sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores))) return np.array(params[iter_indices][sorted_indices[-k:]])
_SubsampleMetaSplitter
python
pandas-dev__pandas
pandas/tests/indexes/datetimelike_/test_value_counts.py
{ "start": 206, "end": 3150 }
class ____: # GH#7735 def test_value_counts_unique_datetimeindex(self, tz_naive_fixture): tz = tz_naive_fixture orig = date_range("2011-01-01 09:00", freq="h", periods=10, tz=tz) self._check_value_counts_with_repeats(orig) def test_value_counts_unique_timedeltaindex(self): orig = timedelta_range("1 days 09:00:00", freq="h", periods=10) self._check_value_counts_with_repeats(orig) def test_value_counts_unique_periodindex(self): orig = period_range("2011-01-01 09:00", freq="h", periods=10) self._check_value_counts_with_repeats(orig) def _check_value_counts_with_repeats(self, orig): # create repeated values, 'n'th element is repeated by n+1 times idx = type(orig)( np.repeat(orig._values, range(1, len(orig) + 1)), dtype=orig.dtype ) exp_idx = orig[::-1] if not isinstance(exp_idx, PeriodIndex): exp_idx = exp_idx._with_freq(None) expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64", name="count") for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) tm.assert_index_equal(idx.unique(), orig) def test_value_counts_unique_datetimeindex2(self, tz_naive_fixture): tz = tz_naive_fixture idx = DatetimeIndex( [ "2013-01-01 09:00", "2013-01-01 09:00", "2013-01-01 09:00", "2013-01-01 08:00", "2013-01-01 08:00", NaT, ], tz=tz, ) self._check_value_counts_dropna(idx) def test_value_counts_unique_timedeltaindex2(self): idx = TimedeltaIndex( [ "1 days 09:00:00", "1 days 09:00:00", "1 days 09:00:00", "1 days 08:00:00", "1 days 08:00:00", NaT, ] ) self._check_value_counts_dropna(idx) def test_value_counts_unique_periodindex2(self): idx = PeriodIndex( [ "2013-01-01 09:00", "2013-01-01 09:00", "2013-01-01 09:00", "2013-01-01 08:00", "2013-01-01 08:00", NaT, ], freq="h", ) self._check_value_counts_dropna(idx) def _check_value_counts_dropna(self, idx): exp_idx = idx[[2, 3]] expected = Series([3, 2], index=exp_idx, name="count") for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) exp_idx = idx[[2, 3, -1]] expected = 
Series([3, 2, 1], index=exp_idx, name="count") for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx)
TestValueCounts
python
celery__celery
celery/app/control.py
{ "start": 2071, "end": 15646 }
class ____: """API for inspecting workers. This class provides proxy for accessing Inspect API of workers. The API is defined in :py:mod:`celery.worker.control` """ app = None def __init__(self, destination=None, timeout=1.0, callback=None, connection=None, app=None, limit=None, pattern=None, matcher=None): self.app = app or self.app self.destination = destination self.timeout = timeout self.callback = callback self.connection = connection self.limit = limit self.pattern = pattern self.matcher = matcher def _prepare(self, reply): if reply: by_node = flatten_reply(reply) if (self.destination and not isinstance(self.destination, (list, tuple))): return by_node.get(self.destination) if self.pattern: pattern = self.pattern matcher = self.matcher return {node: reply for node, reply in by_node.items() if match(node, pattern, matcher)} return by_node def _request(self, command, **kwargs): return self._prepare(self.app.control.broadcast( command, arguments=kwargs, destination=self.destination, callback=self.callback, connection=self.connection, limit=self.limit, timeout=self.timeout, reply=True, pattern=self.pattern, matcher=self.matcher, )) def report(self): """Return human readable report for each worker. Returns: Dict: Dictionary ``{HOSTNAME: {'ok': REPORT_STRING}}``. """ return self._request('report') def clock(self): """Get the Clock value on workers. >>> app.control.inspect().clock() {'celery@node1': {'clock': 12}} Returns: Dict: Dictionary ``{HOSTNAME: CLOCK_VALUE}``. """ return self._request('clock') def active(self, safe=None): """Return list of tasks currently executed by workers. Arguments: safe (Boolean): Set to True to disable deserialization. Returns: Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``. See Also: For ``TASK_INFO`` details see :func:`query_task` return value. """ return self._request('active', safe=safe) def scheduled(self, safe=None): """Return list of scheduled tasks with details. 
Returns: Dict: Dictionary ``{HOSTNAME: [TASK_SCHEDULED_INFO,...]}``. Here is the list of ``TASK_SCHEDULED_INFO`` fields: * ``eta`` - scheduled time for task execution as string in ISO 8601 format * ``priority`` - priority of the task * ``request`` - field containing ``TASK_INFO`` value. See Also: For more details about ``TASK_INFO`` see :func:`query_task` return value. """ return self._request('scheduled') def reserved(self, safe=None): """Return list of currently reserved tasks, not including scheduled/active. Returns: Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``. See Also: For ``TASK_INFO`` details see :func:`query_task` return value. """ return self._request('reserved') def stats(self): """Return statistics of worker. Returns: Dict: Dictionary ``{HOSTNAME: STAT_INFO}``. Here is the list of ``STAT_INFO`` fields: * ``broker`` - Section for broker information. * ``connect_timeout`` - Timeout in seconds (int/float) for establishing a new connection. * ``heartbeat`` - Current heartbeat value (set by client). * ``hostname`` - Node name of the remote broker. * ``insist`` - No longer used. * ``login_method`` - Login method used to connect to the broker. * ``port`` - Port of the remote broker. * ``ssl`` - SSL enabled/disabled. * ``transport`` - Name of transport used (e.g., amqp or redis) * ``transport_options`` - Options passed to transport. * ``uri_prefix`` - Some transports expects the host name to be a URL. E.g. ``redis+socket:///tmp/redis.sock``. In this example the URI-prefix will be redis. * ``userid`` - User id used to connect to the broker with. * ``virtual_host`` - Virtual host used. * ``clock`` - Value of the workers logical clock. This is a positive integer and should be increasing every time you receive statistics. * ``uptime`` - Numbers of seconds since the worker controller was started * ``pid`` - Process id of the worker instance (Main process). * ``pool`` - Pool-specific section. * ``max-concurrency`` - Max number of processes/threads/green threads. 
* ``max-tasks-per-child`` - Max number of tasks a thread may execute before being recycled. * ``processes`` - List of PIDs (or thread-id’s). * ``put-guarded-by-semaphore`` - Internal * ``timeouts`` - Default values for time limits. * ``writes`` - Specific to the prefork pool, this shows the distribution of writes to each process in the pool when using async I/O. * ``prefetch_count`` - Current prefetch count value for the task consumer. * ``rusage`` - System usage statistics. The fields available may be different on your platform. From :manpage:`getrusage(2)`: * ``stime`` - Time spent in operating system code on behalf of this process. * ``utime`` - Time spent executing user instructions. * ``maxrss`` - The maximum resident size used by this process (in kilobytes). * ``idrss`` - Amount of non-shared memory used for data (in kilobytes times ticks of execution) * ``isrss`` - Amount of non-shared memory used for stack space (in kilobytes times ticks of execution) * ``ixrss`` - Amount of memory shared with other processes (in kilobytes times ticks of execution). * ``inblock`` - Number of times the file system had to read from the disk on behalf of this process. * ``oublock`` - Number of times the file system has to write to disk on behalf of this process. * ``majflt`` - Number of page faults that were serviced by doing I/O. * ``minflt`` - Number of page faults that were serviced without doing I/O. * ``msgrcv`` - Number of IPC messages received. * ``msgsnd`` - Number of IPC messages sent. * ``nvcsw`` - Number of times this process voluntarily invoked a context switch. * ``nivcsw`` - Number of times an involuntary context switch took place. * ``nsignals`` - Number of signals received. * ``nswap`` - The number of times this process was swapped entirely out of memory. * ``total`` - Map of task names and the total number of tasks with that type the worker has accepted since start-up. """ return self._request('stats') def revoked(self): """Return list of revoked tasks. 
>>> app.control.inspect().revoked() {'celery@node1': ['16f527de-1c72-47a6-b477-c472b92fef7a']} Returns: Dict: Dictionary ``{HOSTNAME: [TASK_ID, ...]}``. """ return self._request('revoked') def registered(self, *taskinfoitems): """Return all registered tasks per worker. >>> app.control.inspect().registered() {'celery@node1': ['task1', 'task1']} >>> app.control.inspect().registered('serializer', 'max_retries') {'celery@node1': ['task_foo [serializer=json max_retries=3]', 'tasb_bar [serializer=json max_retries=3]']} Arguments: taskinfoitems (Sequence[str]): List of :class:`~celery.app.task.Task` attributes to include. Returns: Dict: Dictionary ``{HOSTNAME: [TASK1_INFO, ...]}``. """ return self._request('registered', taskinfoitems=taskinfoitems) registered_tasks = registered def ping(self, destination=None): """Ping all (or specific) workers. >>> app.control.inspect().ping() {'celery@node1': {'ok': 'pong'}, 'celery@node2': {'ok': 'pong'}} >>> app.control.inspect().ping(destination=['celery@node1']) {'celery@node1': {'ok': 'pong'}} Arguments: destination (List): If set, a list of the hosts to send the command to, when empty broadcast to all workers. Returns: Dict: Dictionary ``{HOSTNAME: {'ok': 'pong'}}``. See Also: :meth:`broadcast` for supported keyword arguments. """ if destination: self.destination = destination return self._request('ping') def active_queues(self): """Return information about queues from which worker consumes tasks. Returns: Dict: Dictionary ``{HOSTNAME: [QUEUE_INFO, QUEUE_INFO,...]}``. 
Here is the list of ``QUEUE_INFO`` fields: * ``name`` * ``exchange`` * ``name`` * ``type`` * ``arguments`` * ``durable`` * ``passive`` * ``auto_delete`` * ``delivery_mode`` * ``no_declare`` * ``routing_key`` * ``queue_arguments`` * ``binding_arguments`` * ``consumer_arguments`` * ``durable`` * ``exclusive`` * ``auto_delete`` * ``no_ack`` * ``alias`` * ``bindings`` * ``no_declare`` * ``expires`` * ``message_ttl`` * ``max_length`` * ``max_length_bytes`` * ``max_priority`` See Also: See the RabbitMQ/AMQP documentation for more details about ``queue_info`` fields. Note: The ``queue_info`` fields are RabbitMQ/AMQP oriented. Not all fields applies for other transports. """ return self._request('active_queues') def query_task(self, *ids): """Return detail of tasks currently executed by workers. Arguments: *ids (str): IDs of tasks to be queried. Returns: Dict: Dictionary ``{HOSTNAME: {TASK_ID: [STATE, TASK_INFO]}}``. Here is the list of ``TASK_INFO`` fields: * ``id`` - ID of the task * ``name`` - Name of the task * ``args`` - Positinal arguments passed to the task * ``kwargs`` - Keyword arguments passed to the task * ``type`` - Type of the task * ``hostname`` - Hostname of the worker processing the task * ``time_start`` - Time of processing start * ``acknowledged`` - True when task was acknowledged to broker * ``delivery_info`` - Dictionary containing delivery information * ``exchange`` - Name of exchange where task was published * ``routing_key`` - Routing key used when task was published * ``priority`` - Priority used when task was published * ``redelivered`` - True if the task was redelivered * ``worker_pid`` - PID of worker processing the task """ # signature used be unary: query_task(ids=[id1, id2]) # we need this to preserve backward compatibility. if len(ids) == 1 and isinstance(ids[0], (list, tuple)): ids = ids[0] return self._request('query_task', ids=ids) def conf(self, with_defaults=False): """Return configuration of each worker. 
Arguments: with_defaults (bool): if set to True, method returns also configuration options with default values. Returns: Dict: Dictionary ``{HOSTNAME: WORKER_CONFIGURATION}``. See Also: ``WORKER_CONFIGURATION`` is a dictionary containing current configuration options. See :ref:`configuration` for possible values. """ return self._request('conf', with_defaults=with_defaults) def hello(self, from_node, revoked=None): return self._request('hello', from_node=from_node, revoked=revoked) def memsample(self): """Return sample current RSS memory usage. Note: Requires the psutils library. """ return self._request('memsample') def memdump(self, samples=10): """Dump statistics of previous memsample requests. Note: Requires the psutils library. """ return self._request('memdump', samples=samples) def objgraph(self, type='Request', n=200, max_depth=10): """Create graph of uncollected objects (memory-leak debugging). Arguments: n (int): Max number of objects to graph. max_depth (int): Traverse at most n levels deep. type (str): Name of object to graph. Default is ``"Request"``. Returns: Dict: Dictionary ``{'filename': FILENAME}`` Note: Requires the objgraph library. """ return self._request('objgraph', num=n, max_depth=max_depth, type=type)
Inspect
python
walkccc__LeetCode
solutions/768. Max Chunks To Make Sorted II/768.py
{ "start": 0, "end": 334 }
class ____: def maxChunksToSorted(self, arr: list[int]) -> int: n = len(arr) ans = 0 mx = -math.inf mn = [arr[-1]] * n for i in reversed(range(n - 1)): mn[i] = min(mn[i + 1], arr[i]) for i in range(n - 1): mx = max(mx, arr[i]) if mx <= mn[i + 1]: ans += 1 return ans + 1
Solution
python
apache__airflow
providers/dbt/cloud/src/airflow/providers/dbt/cloud/hooks/dbt.py
{ "start": 3588, "end": 4397 }
class ____(Enum): """dbt Cloud Job statuses.""" QUEUED = 1 STARTING = 2 RUNNING = 3 SUCCESS = 10 ERROR = 20 CANCELLED = 30 NON_TERMINAL_STATUSES = (QUEUED, STARTING, RUNNING) TERMINAL_STATUSES = (SUCCESS, ERROR, CANCELLED) @classmethod def check_is_valid(cls, statuses: int | Sequence[int] | set[int]): """Validate input statuses are a known value.""" if isinstance(statuses, (Sequence, set)): for status in statuses: cls(status) else: cls(statuses) @classmethod def is_terminal(cls, status: int) -> bool: """Check if the input status is that of a terminal type.""" cls.check_is_valid(statuses=status) return status in cls.TERMINAL_STATUSES.value
DbtCloudJobRunStatus
python
run-llama__llama_index
llama-index-integrations/tools/llama-index-tools-box/llama_index/tools/box/search/base.py
{ "start": 3694, "end": 7008 }
class ____(BaseToolSpec): """ Provides functionalities for searching Box resources. This class allows you to search for Box resources based on various criteria specified using the `BoxSearchOptions` class. It utilizes the Box API search functionality and returns a list of `Document` objects containing information about the found resources. Attributes: spec_functions (list): A list of supported functions (always "box_search"). _box_client (BoxClient): An instance of BoxClient for interacting with Box API. _options (BoxSearchOptions): An instance of BoxSearchOptions containing search options. Methods: box_search(query: str) -> List[Document]: Performs a search for Box resources based on the provided query and configured search options. Returns a list of `Document` objects representing the found resources. """ spec_functions = ["box_search"] _box_client: BoxClient _options: BoxSearchOptions def __init__( self, box_client: BoxClient, options: BoxSearchOptions = BoxSearchOptions() ) -> None: """ Initializes a `BoxSearchToolSpec` instance. Args: box_client (BoxClient): An authenticated Box API client. options (BoxSearchOptions, optional): An instance of `BoxSearchOptions` containing search options. Defaults to `BoxSearchOptions()`. """ self._box_client = add_extra_header_to_box_client(box_client) self._options = options def box_search( self, query: str, ) -> List[Document]: """ Searches for Box resources based on the provided query and configured search options. This method utilizes the Box API search functionality to find resources matching the provided query and search options specified in the `BoxSearchOptions` object. It returns a list of `Document` objects containing information about the found resources. Args: query (str): The search query to use for searching Box resources. Returns: List[Document]: A list of `Document` objects representing the found Box resources. 
""" box_check_connection(self._box_client) box_files = search_files( box_client=self._box_client, query=query, scope=self._options.scope, file_extensions=self._options.file_extensions, created_at_range=self._options.created_at_range, updated_at_range=self._options.updated_at_range, size_range=self._options.size_range, owner_user_ids=self._options.owner_user_ids, recent_updater_user_ids=self._options.recent_updater_user_ids, ancestor_folder_ids=self._options.ancestor_folder_ids, content_types=self._options.content_types, limit=self._options.limit, offset=self._options.offset, ) box_files = get_box_files_details( box_client=self._box_client, file_ids=[file.id for file in box_files] ) docs: List[Document] = [] for file in box_files: doc = box_file_to_llama_document(file) docs.append(doc) return docs
BoxSearchToolSpec
python
wandb__wandb
wandb/vendor/pygments/lexers/robotframework.py
{ "start": 3419, "end": 5295 }
class ____(object): def __init__(self): self._table = UnknownTable() self._splitter = RowSplitter() testcases = TestCaseTable() settings = SettingTable(testcases.set_default_template) variables = VariableTable() keywords = KeywordTable() self._tables = {'settings': settings, 'setting': settings, 'metadata': settings, 'variables': variables, 'variable': variables, 'testcases': testcases, 'testcase': testcases, 'keywords': keywords, 'keyword': keywords, 'userkeywords': keywords, 'userkeyword': keywords} def tokenize(self, row): commented = False heading = False for index, value in enumerate(self._splitter.split(row)): # First value, and every second after that, is a separator. index, separator = divmod(index-1, 2) if value.startswith('#'): commented = True elif index == 0 and value.startswith('*'): self._table = self._start_table(value) heading = True for value, token in self._tokenize(value, index, commented, separator, heading): yield value, token self._table.end_row() def _start_table(self, header): name = normalize(header, remove='*') return self._tables.get(name, UnknownTable()) def _tokenize(self, value, index, commented, separator, heading): if commented: yield value, COMMENT elif separator: yield value, SEPARATOR elif heading: yield value, HEADING else: for value, token in self._table.tokenize(value, index): yield value, token
RowTokenizer
python
kamyu104__LeetCode-Solutions
Python/regular-expression-matching.py
{ "start": 1553, "end": 2875 }
class ____(object): # @return a boolean def isMatch(self, s, p): p_ptr, s_ptr, last_s_ptr, last_p_ptr = 0, 0, -1, -1 last_ptr = [] while s_ptr < len(s): if p_ptr < len(p) and (p_ptr == len(p) - 1 or p[p_ptr + 1] != '*') and \ (s_ptr < len(s) and (p[p_ptr] == s[s_ptr] or p[p_ptr] == '.')): s_ptr += 1 p_ptr += 1 elif p_ptr < len(p) - 1 and (p_ptr != len(p) - 1 and p[p_ptr + 1] == '*'): p_ptr += 2 last_ptr.append([s_ptr, p_ptr]) elif last_ptr: [last_s_ptr, last_p_ptr] = last_ptr.pop() while last_ptr and p[last_p_ptr - 2] != s[last_s_ptr] and p[last_p_ptr - 2] != '.': [last_s_ptr, last_p_ptr] = last_ptr.pop() if p[last_p_ptr - 2] == s[last_s_ptr] or p[last_p_ptr - 2] == '.': last_s_ptr += 1 s_ptr = last_s_ptr p_ptr = last_p_ptr last_ptr.append([s_ptr, p_ptr]) else: return False else: return False while p_ptr < len(p) - 1 and p[p_ptr] == '.' and p[p_ptr + 1] == '*': p_ptr += 2 return p_ptr == len(p) # recursive
Solution3
python
pennersr__django-allauth
allauth/idp/oidc/models.py
{ "start": 871, "end": 6185 }
class ____(models.Model): class GrantType(models.TextChoices): AUTHORIZATION_CODE = "authorization_code", _("Authorization code") DEVICE_CODE = "urn:ietf:params:oauth:grant-type:device_code", _("Device code") CLIENT_CREDENTIALS = "client_credentials", _("Client credentials") REFRESH_TOKEN = "refresh_token", _("Refresh token") class Type(models.TextChoices): CONFIDENTIAL = "confidential", _("Confidential") PUBLIC = "public", _("Public") id = models.CharField( primary_key=True, max_length=100, default=default_client_id, verbose_name="Client ID", ) name = models.CharField( max_length=100, ) secret = models.CharField(max_length=200, default=default_client_secret) scopes = models.TextField( help_text=_( "The scope(s) the client is allowed to request. Provide one value per line, e.g.: openid(ENTER)profile(ENTER)email(ENTER)" ), default="openid", ) default_scopes = models.TextField( help_text=_( "In case the client does not specify any scope, these default scopes are used. Provide one value per line, e.g.: openid(ENTER)profile(ENTER)email(ENTER)" ), default="", blank=True, ) type = models.CharField( max_length=20, default=Type.CONFIDENTIAL, choices=Type.choices ) grant_types = models.TextField( default=GrantType.AUTHORIZATION_CODE, help_text=_( "A list of allowed grant types. Provide one value per line, e.g.: authorization_code(ENTER)client_credentials(ENTER)refresh_token(ENTER)" ), ) redirect_uris = models.TextField( help_text="A list of allowed redirect (callback) URLs, one per line.", blank=True, default="", ) cors_origins = models.TextField( blank=True, help_text=_( "A list of allowed origins for cross-origin requests, one per line." ), default="", verbose_name="CORS allowed origins", ) allow_uri_wildcards = models.BooleanField( default=False, help_text=_( "Allow wildcards (*) in redirect URIs and CORS origins. " "When enabled, URIs can contain a single asterisk to match subdomains." 
), verbose_name="Allow URI wildcards", ) response_types = models.TextField( default="code", help_text=_( "A list of allowed response types. Provide one value per line, e.g.: code(ENTER)id_token token(ENTER)" ), ) owner = models.ForeignKey( settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE ) skip_consent = models.BooleanField( default=False, help_text="Flag to allow skip the consent screen for this client" ) created_at = models.DateTimeField(default=timezone.now) data = models.JSONField(blank=True, null=True, default=None) class Meta: verbose_name = _("client") verbose_name_plural = _("clients") def get_redirect_uris(self) -> List[str]: return _values_from_text(self.redirect_uris) def set_redirect_uris(self, uris: List[str]): self.redirect_uris = _values_to_text(uris) def get_cors_origins(self) -> List[str]: return _values_from_text(self.cors_origins) def set_cors_origins(self, uris: List[str]): self.cors_origins = _values_to_text(uris) def get_scopes(self) -> List[str]: return _values_from_text(self.scopes) def set_scopes(self, scopes: List[str]) -> None: self.scopes = _values_to_text(scopes) def get_default_scopes(self) -> List[str]: return _values_from_text(self.default_scopes) def set_default_scopes(self, scopes: List[str]) -> None: self.default_scopes = _values_to_text(scopes) def get_response_types(self) -> List[str]: return _values_from_text(self.response_types) def set_response_types(self, response_types: List[str]) -> None: self.response_types = _values_to_text(response_types) def get_grant_types(self) -> List[str]: return _values_from_text(self.grant_types) def set_grant_types(self, grant_types: List[str]): self.grant_types = _values_to_text(grant_types) def set_secret(self, secret) -> None: self.secret = make_password(secret) def check_secret(self, secret: str) -> bool: return check_password(secret, self.secret) def clean_redirect_uris(self): from allauth.idp.oidc.internal.clientkit import _validate_uri_wildcard_format uris = 
self.get_redirect_uris() for uri in uris: _validate_uri_wildcard_format(uri, self.allow_uri_wildcards) return uris def clean_cors_origins(self): from allauth.idp.oidc.internal.clientkit import _validate_uri_wildcard_format origins = self.get_cors_origins() for origin in origins: _validate_uri_wildcard_format(origin, self.allow_uri_wildcards) return origins def clean(self): # the django admin doesn't call full_clean, so we need to call them here self.clean_redirect_uris() self.clean_cors_origins() def __str__(self) -> str: return self.id
Client
python
openai__openai-python
tests/lib/test_azure.py
{ "start": 4203, "end": 31999 }
class ____: @pytest.fixture(autouse=True) def logger_with_filter(self) -> logging.Logger: logger = logging.getLogger("openai") logger.setLevel(logging.DEBUG) logger.addFilter(SensitiveHeadersFilter()) return logger @pytest.mark.respx() def test_azure_api_key_redacted(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AzureOpenAI( api_version="2024-06-01", api_key="example_api_key", azure_endpoint="https://example-resource.azure.openai.com", ) with caplog.at_level(logging.DEBUG): client.chat.completions.create(messages=[], model="gpt-4") for record in caplog.records: if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["api-key"] == "<redacted>" @pytest.mark.respx() def test_azure_bearer_token_redacted(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AzureOpenAI( api_version="2024-06-01", azure_ad_token="example_token", azure_endpoint="https://example-resource.azure.openai.com", ) with caplog.at_level(logging.DEBUG): client.chat.completions.create(messages=[], model="gpt-4") for record in caplog.records: if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["Authorization"] == "<redacted>" @pytest.mark.asyncio @pytest.mark.respx() async def test_azure_api_key_redacted_async(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" 
).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AsyncAzureOpenAI( api_version="2024-06-01", api_key="example_api_key", azure_endpoint="https://example-resource.azure.openai.com", ) with caplog.at_level(logging.DEBUG): await client.chat.completions.create(messages=[], model="gpt-4") for record in caplog.records: if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["api-key"] == "<redacted>" @pytest.mark.asyncio @pytest.mark.respx() async def test_azure_bearer_token_redacted_async( self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture ) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AsyncAzureOpenAI( api_version="2024-06-01", azure_ad_token="example_token", azure_endpoint="https://example-resource.azure.openai.com", ) with caplog.at_level(logging.DEBUG): await client.chat.completions.create(messages=[], model="gpt-4") for record in caplog.records: if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["Authorization"] == "<redacted>" @pytest.mark.parametrize( "client,base_url,api,json_data,expected", [ # Deployment-based endpoints # AzureOpenAI: No deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", "/chat/completions", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/deployments/deployment-body/chat/completions?api-version=2024-02-01", ), # AzureOpenAI: Deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), 
"https://example-resource.azure.openai.com/openai/deployments/deployment-client/", "/chat/completions", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/deployments/deployment-client/chat/completions?api-version=2024-02-01", ), # AzureOpenAI: "deployments" in the DNS name ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://deployments.example-resource.azure.openai.com", ), "https://deployments.example-resource.azure.openai.com/openai/", "/chat/completions", {"model": "deployment-body"}, "https://deployments.example-resource.azure.openai.com/openai/deployments/deployment-body/chat/completions?api-version=2024-02-01", ), # AzureOpenAI: Deployment called deployments ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployments", ), "https://example-resource.azure.openai.com/openai/deployments/deployments/", "/chat/completions", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/deployments/deployments/chat/completions?api-version=2024-02-01", ), # AzureOpenAI: base_url and azure_deployment specified; ignored b/c not supported ( AzureOpenAI( # type: ignore api_version="2024-02-01", api_key="example API key", base_url="https://example.azure-api.net/PTU/", azure_deployment="deployment-client", ), "https://example.azure-api.net/PTU/", "/chat/completions", {"model": "deployment-body"}, "https://example.azure-api.net/PTU/deployments/deployment-body/chat/completions?api-version=2024-02-01", ), # AsyncAzureOpenAI: No deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", "/chat/completions", {"model": "deployment-body"}, 
"https://example-resource.azure.openai.com/openai/deployments/deployment-body/chat/completions?api-version=2024-02-01", ), # AsyncAzureOpenAI: Deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), "https://example-resource.azure.openai.com/openai/deployments/deployment-client/", "/chat/completions", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/deployments/deployment-client/chat/completions?api-version=2024-02-01", ), # AsyncAzureOpenAI: "deployments" in the DNS name ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://deployments.example-resource.azure.openai.com", ), "https://deployments.example-resource.azure.openai.com/openai/", "/chat/completions", {"model": "deployment-body"}, "https://deployments.example-resource.azure.openai.com/openai/deployments/deployment-body/chat/completions?api-version=2024-02-01", ), # AsyncAzureOpenAI: Deployment called deployments ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployments", ), "https://example-resource.azure.openai.com/openai/deployments/deployments/", "/chat/completions", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/deployments/deployments/chat/completions?api-version=2024-02-01", ), # AsyncAzureOpenAI: base_url and azure_deployment specified; azure_deployment ignored b/c not supported ( AsyncAzureOpenAI( # type: ignore api_version="2024-02-01", api_key="example API key", base_url="https://example.azure-api.net/PTU/", azure_deployment="deployment-client", ), "https://example.azure-api.net/PTU/", "/chat/completions", {"model": "deployment-body"}, "https://example.azure-api.net/PTU/deployments/deployment-body/chat/completions?api-version=2024-02-01", ), ], ) def 
test_prepare_url_deployment_endpoint( client: Client, base_url: str, api: str, json_data: dict[str, str], expected: str ) -> None: req = client._build_request( FinalRequestOptions.construct( method="post", url=api, json_data=json_data, ) ) assert req.url == expected assert client.base_url == base_url @pytest.mark.parametrize( "client,base_url,api,json_data,expected", [ # Non-deployment endpoints # AzureOpenAI: No deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", "/models", {}, "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AzureOpenAI: No deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", "/assistants", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/assistants?api-version=2024-02-01", ), # AzureOpenAI: Deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), "https://example-resource.azure.openai.com/openai/deployments/deployment-client/", "/models", {}, "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AzureOpenAI: Deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), "https://example-resource.azure.openai.com/openai/deployments/deployment-client/", "/assistants", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/assistants?api-version=2024-02-01", ), # AzureOpenAI: "deployments" in the DNS name ( AzureOpenAI( api_version="2024-02-01", api_key="example API 
key", azure_endpoint="https://deployments.example-resource.azure.openai.com", ), "https://deployments.example-resource.azure.openai.com/openai/", "/models", {}, "https://deployments.example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AzureOpenAI: Deployment called "deployments" ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployments", ), "https://example-resource.azure.openai.com/openai/deployments/deployments/", "/models", {}, "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AzureOpenAI: base_url and azure_deployment specified; azure_deployment ignored b/c not supported ( AzureOpenAI( # type: ignore api_version="2024-02-01", api_key="example API key", base_url="https://example.azure-api.net/PTU/", azure_deployment="deployment-client", ), "https://example.azure-api.net/PTU/", "/models", {}, "https://example.azure-api.net/PTU/models?api-version=2024-02-01", ), # AsyncAzureOpenAI: No deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", "/models", {}, "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AsyncAzureOpenAI: No deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", "/assistants", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/assistants?api-version=2024-02-01", ), # AsyncAzureOpenAI: Deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), 
"https://example-resource.azure.openai.com/openai/deployments/deployment-client/", "/models", {}, "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AsyncAzureOpenAI: Deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), "https://example-resource.azure.openai.com/openai/deployments/deployment-client/", "/assistants", {"model": "deployment-body"}, "https://example-resource.azure.openai.com/openai/assistants?api-version=2024-02-01", ), # AsyncAzureOpenAI: "deployments" in the DNS name ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://deployments.example-resource.azure.openai.com", ), "https://deployments.example-resource.azure.openai.com/openai/", "/models", {}, "https://deployments.example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AsyncAzureOpenAI: Deployment called "deployments" ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployments", ), "https://example-resource.azure.openai.com/openai/deployments/deployments/", "/models", {}, "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01", ), # AsyncAzureOpenAI: base_url and azure_deployment specified; azure_deployment ignored b/c not supported ( AsyncAzureOpenAI( # type: ignore api_version="2024-02-01", api_key="example API key", base_url="https://example.azure-api.net/PTU/", azure_deployment="deployment-client", ), "https://example.azure-api.net/PTU/", "/models", {}, "https://example.azure-api.net/PTU/models?api-version=2024-02-01", ), ], ) def test_prepare_url_nondeployment_endpoint( client: Client, base_url: str, api: str, json_data: dict[str, str], expected: str ) -> None: req = client._build_request( FinalRequestOptions.construct( 
method="post", url=api, json_data=json_data, ) ) assert req.url == expected assert client.base_url == base_url @pytest.mark.parametrize( "client,base_url,json_data,expected", [ # Realtime endpoint # AzureOpenAI: No deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployment-body", ), # AzureOpenAI: Deployment specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), "https://example-resource.azure.openai.com/openai/deployments/deployment-client/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployment-client", ), # AzureOpenAI: "deployments" in the DNS name ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://deployments.azure.openai.com", ), "https://deployments.azure.openai.com/openai/", {"model": "deployment-body"}, "wss://deployments.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployment-body", ), # AzureOpenAI: Deployment called "deployments" ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployments", ), "https://example-resource.azure.openai.com/openai/deployments/deployments/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployments", ), # AzureOpenAI: base_url and azure_deployment specified; azure_deployment ignored b/c not supported ( AzureOpenAI( # type: ignore api_version="2024-02-01", api_key="example API key", 
base_url="https://example.azure-api.net/PTU/", azure_deployment="my-deployment", ), "https://example.azure-api.net/PTU/", {"model": "deployment-body"}, "wss://example.azure-api.net/PTU/realtime?api-version=2024-02-01&deployment=deployment-body", ), # AzureOpenAI: websocket_base_url specified ( AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", websocket_base_url="wss://example-resource.azure.openai.com/base", ), "https://example-resource.azure.openai.com/openai/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/base/realtime?api-version=2024-02-01&deployment=deployment-body", ), ], ) def test_prepare_url_realtime(client: AzureOpenAI, base_url: str, json_data: dict[str, str], expected: str) -> None: url, _ = client._configure_realtime(json_data["model"], {}) assert str(url) == expected assert client.base_url == base_url @pytest.mark.parametrize( "client,base_url,json_data,expected", [ # AsyncAzureOpenAI: No deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ), "https://example-resource.azure.openai.com/openai/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployment-body", ), # AsyncAzureOpenAI: Deployment specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployment-client", ), "https://example-resource.azure.openai.com/openai/deployments/deployment-client/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployment-client", ), # AsyncAzureOpenAI: "deployments" in the DNS name ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://deployments.azure.openai.com", ), 
"https://deployments.azure.openai.com/openai/", {"model": "deployment-body"}, "wss://deployments.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployment-body", ), # AsyncAzureOpenAI: Deployment called "deployments" ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", azure_deployment="deployments", ), "https://example-resource.azure.openai.com/openai/deployments/deployments/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/openai/realtime?api-version=2024-02-01&deployment=deployments", ), # AsyncAzureOpenAI: base_url and azure_deployment specified; azure_deployment ignored b/c not supported ( AsyncAzureOpenAI( # type: ignore api_version="2024-02-01", api_key="example API key", base_url="https://example.azure-api.net/PTU/", azure_deployment="deployment-client", ), "https://example.azure-api.net/PTU/", {"model": "deployment-body"}, "wss://example.azure-api.net/PTU/realtime?api-version=2024-02-01&deployment=deployment-body", ), # AsyncAzureOpenAI: websocket_base_url specified ( AsyncAzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", websocket_base_url="wss://example-resource.azure.openai.com/base", ), "https://example-resource.azure.openai.com/openai/", {"model": "deployment-body"}, "wss://example-resource.azure.openai.com/base/realtime?api-version=2024-02-01&deployment=deployment-body", ), ], ) async def test_prepare_url_realtime_async( client: AsyncAzureOpenAI, base_url: str, json_data: dict[str, str], expected: str ) -> None: url, _ = await client._configure_realtime(json_data["model"], {}) assert str(url) == expected assert client.base_url == base_url def test_client_sets_base_url(client: Client) -> None: client = AzureOpenAI( api_version="2024-02-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", 
azure_deployment="my-deployment", ) assert client.base_url == "https://example-resource.azure.openai.com/openai/deployments/my-deployment/" # (not recommended) user sets base_url to target different deployment client.base_url = "https://example-resource.azure.openai.com/openai/deployments/different-deployment/" req = client._build_request( FinalRequestOptions.construct( method="post", url="/chat/completions", json_data={"model": "placeholder"}, ) ) assert ( req.url == "https://example-resource.azure.openai.com/openai/deployments/different-deployment/chat/completions?api-version=2024-02-01" ) req = client._build_request( FinalRequestOptions.construct( method="post", url="/models", json_data={}, ) ) assert req.url == "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01" # (not recommended) user sets base_url to remove deployment client.base_url = "https://example-resource.azure.openai.com/openai/" req = client._build_request( FinalRequestOptions.construct( method="post", url="/chat/completions", json_data={"model": "deployment"}, ) ) assert ( req.url == "https://example-resource.azure.openai.com/openai/deployments/deployment/chat/completions?api-version=2024-02-01" ) req = client._build_request( FinalRequestOptions.construct( method="post", url="/models", json_data={}, ) ) assert req.url == "https://example-resource.azure.openai.com/openai/models?api-version=2024-02-01"
TestAzureLogging
python
pytorch__pytorch
torch/_higher_order_ops/partitioner.py
{ "start": 587, "end": 6338 }
class ____: def __init__( self, fw_gm: torch.fx.GraphModule, bw_gm: torch.fx.GraphModule, n_fw_outputs: int, n_intermediates: int, no_complex_exprs_at_boundary: bool, ): self.fw_gm = fw_gm self.bw_gm = bw_gm self.n_fw_outputs = n_fw_outputs self.n_intermediates = n_intermediates self.no_complex_exprs_at_boundary = no_complex_exprs_at_boundary self._reorder_fw_output() self._check_partition_boundary() def _check_partition_boundary(self) -> None: """check partitioned graph is in valid state.""" invalid_reasons = [] fw_outputs = _find_hop_subgraph_outputs(self.fw_gm) for i, out in enumerate(fw_outputs): if "val" not in out.meta: invalid_reasons.append(f"fw_gm output[{i}] doesn't have a 'val' meta.") elif not isinstance(out.meta["val"], (torch.SymInt, torch.Tensor)): invalid_reasons.append( f"fw_gm output[{i}] is of type {type(out.meta['val'])} but only SymInt or Tensor are allowed." ) elif ( isinstance(out.meta["val"], torch.SymInt) and is_complex_expr(out.meta["val"].node.expr) and self.no_complex_exprs_at_boundary ): invalid_reasons.append( f"fw_gm output[{i}] must be of type SymInt with basic symbols or " f"Tensor but got {type(out.meta['val'])} {out.meta['val']}" ) if len(fw_outputs) != self.n_fw_outputs + self.n_intermediates: invalid_reasons.append( f"len(fw_outputs) ({len(fw_outputs)}) != n_fw_outputs ({self.n_fw_outputs}) + n_intermediates ({self.n_intermediates})" # noqa: B950 ) bw_phs = list(self.bw_gm.graph.find_nodes(op="placeholder")) if len(fw_outputs) != len(bw_phs): invalid_reasons.append( f"Expect number of fw_gm's output to be the same as bw_gm's input but " f"fw_gm has {len(fw_outputs)} outputs, bw_gm takes {len(bw_phs)} inputs." 
) original_forward_outputs = fw_outputs[: self.n_fw_outputs] fw_intermediates = fw_outputs[self.n_fw_outputs :] bw_intermediates = bw_phs[: -self.n_fw_outputs] bw_grads = bw_phs[-self.n_fw_outputs :] def _match_size_or_expr( val1: Union[torch.SymInt, torch.Tensor], val2: Union[torch.SymInt, torch.Tensor], ) -> bool: if type(val1) is not type(val2): return False if isinstance(val1, torch.SymInt) and isinstance(val2, torch.SymInt): return val1.node.expr == val2.node.expr elif isinstance(val1, torch.Tensor) and isinstance(val2, torch.Tensor): return val1.size() == val2.size() return False for fw, bw in zip(fw_intermediates, bw_intermediates): if fw.name != bw.name or not _match_size_or_expr( fw.meta["val"], bw.meta["val"] ): invalid_reasons.append("fw intermediates don't match bw intermediates") for fw_out, bw_grad in zip(original_forward_outputs, bw_grads): if not _match_size_or_expr(fw_out.meta["val"], bw_grad.meta["val"]): invalid_reasons.append("fw outputs don't match bw gradients") if len(invalid_reasons) > 0: newline = "\n" raise RuntimeError( "Invalid HopPartitionedGraph. Reasons:\n", f"{newline.join(invalid_reasons)}", ) def _reorder_fw_output(self) -> None: """ Before the pass, fw_gm returns (*fw_outputs, *intermediates1) and bw_gm takes (*intermediates2, *grad_fw_outputs) as input. intermediates1 and intermediates2 share the same node names but they might be in different order. E.g. this could happen if there are inputs that contain symints. 
To simplify downstream processing, this graph pass normalizes the output of fw_gm to be consistent with the bacwkard inputs: fw_gm: - input: fw_args - output: (*fw_outputs, *intermediates) bw_gm: - input: (*intermediates, *grad_fw_outputs) - output: grad_fw_args Example: def fw_gm(x, y, z): a, b, c = f(x), g(y), k(z) return a, b, c, f_tmp, g_tmp, k_tmp , where a, b, c are fw_outputs, f_tmp, g_tmp, k_tmp are intermediates The corresponding bw_gm has the following signature: def bw_gm(f_tmp, g_tmp, k_tmp, grad_a, grad_b, grac): return grad_x, grad_y, grad_z """ fw_gm_output_nodes = _find_hop_subgraph_outputs(self.fw_gm) fw_outputs_nodes = fw_gm_output_nodes[: self.n_fw_outputs] fw_intermediates_nodes = fw_gm_output_nodes[self.n_fw_outputs :] if len(fw_intermediates_nodes) > 0: fw_intermediates_name_to_node = {n.name: n for n in fw_intermediates_nodes} # First n_intermediates placeholders bw_names: list[str] = [ ph.name for ph in list(self.bw_gm.graph.find_nodes(op="placeholder"))[ : self.n_intermediates ] ] new_fw_outputs = list(fw_outputs_nodes) + [ fw_intermediates_name_to_node[name] for name in bw_names ] output_node = self.fw_gm.graph.find_nodes(op="output")[0] output_node.args = (tuple(new_fw_outputs),) self.fw_gm.graph.lint() self.fw_gm.recompile()
HopPartitionedGraph
python
kamyu104__LeetCode-Solutions
Python/maximum-number-of-darts-inside-of-a-circular-dartboard.py
{ "start": 268, "end": 1303 }
class ____(object): def numPoints(self, points, r): """ :type points: List[List[int]] :type r: int :rtype: int """ def count_points(points, r, i): angles = [] for j in xrange(len(points)): if i == j: continue dx, dy = points[i][0]-points[j][0], points[i][1]-points[j][1] d = math.sqrt(dx**2 + dy**2) if d > 2*r: continue delta, angle = math.acos(d/(2*r)), math.atan2(dy, dx) angles.append((angle-delta, 0)), angles.append((angle+delta, 1)) angles.sort() result, count = 1, 1 for _, is_closed in angles: # angle sweep if not is_closed: count += 1 else: count -= 1 result = max(result, count) return result return max(count_points(points, r, i) for i in xrange(len(points)))
Solution
python
google__jax
tests/pallas/pallas_test.py
{ "start": 87118, "end": 90174 }
class ____(PallasBaseTest): def test_named_grid(self): def kernel(x_ref, y_ref): y_ref[...] = x_ref[...] x = jnp.arange(2 * 8 * 128, dtype=np.int32).reshape((2, 8, 128)) y = self.pallas_call( kernel, out_shape=x, in_specs=[ pl.BlockSpec((None, 8, 128), lambda i: (i, 0, 0)), ], out_specs=pl.BlockSpec((None, 8, 128), lambda i: (i, 0, 0)), grid=(("i", 2),) )(x) np.testing.assert_array_equal(y, x) def test_named_grid_reordered_names(self): def kernel(x_ref, y_ref): y_ref[...] = x_ref[...] x = jnp.arange(4 * 16 * 128, dtype=np.int32).reshape((4, 16, 128)) y = self.pallas_call( kernel, out_shape=x, in_specs=[ pl.BlockSpec((None, 8, 128), lambda i, j: (i, j, 0)), ], out_specs=pl.BlockSpec((None, 8, 128), lambda i, j: (i, j, 0)), grid=(("j", 4), ("i", 2)) )(x) np.testing.assert_array_equal(y, x) def test_can_query_named_grid_size_in_kernel_via_psum(self): def kernel(x_ref, y_ref): self.assertEqual(lax.axis_size("i"), 2) self.assertEqual(lax.axis_size("j"), 4) y_ref[...] = x_ref[...] x = jnp.arange(4 * 16 * 128, dtype=np.int32).reshape((4, 16, 128)) y = self.pallas_call( kernel, out_shape=x, in_specs=[ pl.BlockSpec((None, 8, 128), lambda i, j: (i, j, 0)), ], out_specs=pl.BlockSpec((None, 8, 128), lambda i, j: (i, j, 0)), grid=(("j", 4), ("i", 2)) )(x) np.testing.assert_array_equal(y, x) def test_can_query_named_dynamic_grid_size_in_kernel_via_psum(self): # TODO(): Enable dynamic grid size via axis_size primitive. self.skipTest("Not supported.") def kernel(x_ref, y_ref): self.assertEqual(lax.axis_size("i"), 2) self.assertEqual(lax.axis_size("j"), 4) y_ref[...] = x_ref[...] 
x = jnp.arange(4 * 8 * 128, dtype=np.int32).reshape((4, 8, 128)) @jax.jit def foo(n): return self.pallas_call( kernel, out_shape=x, in_specs=[ pl.BlockSpec((None, 8, 128), lambda i: (i, 0, 0)), ], out_specs=pl.BlockSpec((None, 8, 128), lambda i: (i, 0, 0)), grid=(("i", n),) )(x) y = foo(4) np.testing.assert_array_equal(y, x) def test_can_query_named_grid_program_id_in_kernel_via_axis_index(self): if self.INTERPRET: self.skipTest("Not supported in interpret mode.") def kernel(x_ref, y_ref): i_index = lax.axis_index("i") y_ref[...] = x_ref[...] + i_index x = jnp.arange(4 * 8 * 128, dtype=np.int32).reshape((4, 8, 128)) y = self.pallas_call( kernel, out_shape=x, in_specs=[ pl.BlockSpec((None, 8, 128), lambda i: (i, 0, 0)), ], out_specs=pl.BlockSpec((None, 8, 128), lambda i: (i, 0, 0)), grid=(("i", 4),), )(x) np.testing.assert_array_equal( y, x + jnp.arange(4, dtype=jnp.int32)[:, None, None] )
PallasCallNamedGridTest
python
pytorch__pytorch
torch/_dynamo/eval_frame.py
{ "start": 4267, "end": 4995 }
class ____(Enum): token = 0 cached_backends: dict[int, CompilerFn] = {} unset = Unset.token if DISABLE_JUSTKNOBS: _maybe_set_eval_frame = set_eval_frame else: def _maybe_set_eval_frame(callback: DynamoCallback) -> DynamoCallback: # A wrapper on set_eval_frame that is guarded by a Justknob. # Users can disable torchDynamo by setting the JK to False. if not justknobs_check("pytorch/compiler:enable_compiler_set_eval_frame"): torch._dynamo.utils.warn_once( "Dynamo disabled by Justknob: enable_compiler_set_eval_frame, skipping set_eval_frame" ) return callback else: return set_eval_frame(callback) @dataclass
Unset
python
pandas-dev__pandas
pandas/tests/series/methods/test_is_monotonic.py
{ "start": 73, "end": 838 }
class ____: def test_is_monotonic_numeric(self): ser = Series(np.random.default_rng(2).integers(0, 10, size=1000)) assert not ser.is_monotonic_increasing ser = Series(np.arange(1000)) assert ser.is_monotonic_increasing is True assert ser.is_monotonic_increasing is True ser = Series(np.arange(1000, 0, -1)) assert ser.is_monotonic_decreasing is True def test_is_monotonic_dt64(self): ser = Series(date_range("20130101", periods=10)) assert ser.is_monotonic_increasing is True assert ser.is_monotonic_increasing is True ser = Series(list(reversed(ser))) assert ser.is_monotonic_increasing is False assert ser.is_monotonic_decreasing is True
TestIsMonotonic
python
ray-project__ray
python/ray/train/lint/check_circular_imports.py
{ "start": 1893, "end": 13768 }
class ____(ast.NodeVisitor): """ An AST node visitor that collects all module-level imports from a Python source file. It traverses the AST and records module-level import statements (`import ...` and `from ... import ...`) that are not inside function or class definitions, and that are not guarded by `if TYPE_CHECKING` or `if typing.TYPE_CHECKING` blocks. """ def __init__(self, module_name: str, is_package: bool) -> None: self._module_name = module_name self._is_package = is_package self.imports: Set[Import] = set() self.type_checking_imported = False # --- private helpers --- def _is_type_checking_test(self, expr: ast.AST) -> bool: """Return True for `if TYPE_CHECKING` or `if typing.TYPE_CHECKING`.""" if ( self.type_checking_imported and isinstance(expr, ast.Name) and expr.id == "TYPE_CHECKING" ): return True elif ( isinstance(expr, ast.Attribute) and isinstance(expr.value, ast.Name) and expr.value.id == "typing" and expr.attr == "TYPE_CHECKING" ): return True return False def _get_package_parts(self) -> List[str]: parts = self._module_name.split(".") return parts if self._is_package else parts[:-1] def _to_absolute_module( self, level: int, module_str: Optional[str] ) -> Optional[str]: """Construct the absolute module string from a relative import.""" # Absolute import if level == 0: return module_str package_parts = self._get_package_parts() # If the relative import is out of bounds if level - 1 > len(package_parts): return None # Base parts based on the level base_module_parts = ( package_parts if level == 1 else package_parts[: -(level - 1)] ) # Construct absolute module string abs_module_parts = ( base_module_parts + module_str.split(".") if module_str else base_module_parts ) return ".".join(abs_module_parts) # --- parsing functions --- def visit_If(self, node: ast.If) -> None: # If the test is not TYPE_CHECKING, visit statement body if not self._is_type_checking_test(node.test): for stmt in node.body: self.visit(stmt) # Also visit conditional branches for 
stmt in node.orelse: self.visit(stmt) def visit_Import(self, node: ast.Import) -> None: for alias in node.names: if alias.name: self.imports.add( Import(module=alias.name, is_package=is_train_package(alias.name)) ) def visit_ImportFrom(self, node: ast.ImportFrom) -> None: import_str = self._to_absolute_module(node.level or 0, node.module) if not import_str: return names = [alias.name for alias in node.names] self.imports.add( Import( module=import_str, is_package=is_train_package(import_str), names=names ) ) if "TYPE_CHECKING" in names and import_str == "typing": self.type_checking_imported = True def visit_FunctionDef(self, node: ast.FunctionDef) -> None: # Skip function contents return def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None: # Skip function contents return def visit_ClassDef(self, node: ast.ClassDef) -> None: # Skip class contents return def collect_imports( module_name: str, is_package: bool, source_text: str ) -> Set[Import]: try: tree = ast.parse(source_text) except SyntaxError: print(f"Warning: Failed to parse {module_name} for circular imports") return set() collector = ImportCollector(module_name, is_package) collector.visit(tree) return collector.imports def to_module_name_and_is_package(py_file: Path) -> Tuple[str, bool]: """ Convert a Python file path to its corresponding module name and determine if it is a package. Args: py_file: The path to the Python file. Returns: Tuple[str, bool]: A tuple containing the module name as a string and a boolean indicating whether the module is a package (True if it is an __init__.py file). 
""" file_path = py_file.relative_to(get_base_dir()) module_path = file_path.with_suffix("") module_parts = module_path.parts is_package = module_parts[-1] == "__init__" if is_package: module_parts = module_parts[:-1] module_str = ".".join(module_parts) return module_str, is_package def get_file_module_imports( files: List[Path], module_match_string: Optional[str] = None ) -> Dict[str, List[Import]]: """ Collect and return the module-level imports for a list of Python files. Args: files: A list of Path objects representing Python files to analyze. module_match_string: An optional string to filter imports. Only imports containing this string will be included in the result. Returns: A dictionary mapping module names to a list of their import statements. The module names are derived from the file paths, and the import statements are filtered based on the optional module_match_string. """ module_imports: Dict[str, List[Import]] = {} # Collect the imports for each python file for py_file in files: try: module_name, is_package = to_module_name_and_is_package(py_file) src = py_file.read_text(encoding="utf-8", errors="ignore") imports = collect_imports(module_name, is_package, src) module_imports[module_name] = [ stmt for stmt in imports if module_match_string is None or module_match_string in stmt.module ] except Exception: continue return module_imports def convert_to_file_paths(imports: List[Import]) -> List[Path]: """ Convert a list of import strings to a list of file paths. Args: imports: A list of Import objects Returns: A list of file paths. 
""" base_dir = get_base_dir() file_paths = [] for imp in imports: if imp.is_package: relative_path = imp.module.replace(".", "/") + "/__init__.py" else: relative_path = imp.module.replace(".", "/") + ".py" file_paths.append(base_dir / relative_path) return file_paths def expand_to_include_reexports(import_map: Dict[str, List[Import]]) -> None: """ Expands the set of imports for a given import map to include the modules resulting from reexports. So if in the base train module, there is "from x import a, b" and x is a package, then this function will explore the __init__.py of x and include the modules a and b were reexported from in the import map. """ for module, base_imports in import_map.items(): # Get only the package imports packages = [imp for imp in base_imports if imp.is_package] package_files = convert_to_file_paths(packages) reexports = get_file_module_imports(package_files) agg_reexports = [] # Filter patch init file imports to those that only contain the right names for base_import in base_imports: if base_import.module in reexports: import_list = reexports[base_import.module] target_reexports = [ imp for imp in import_list if set(imp.names) & set(base_import.names) ] agg_reexports.extend(target_reexports) # Expand modules to include reexported modules import_map[module].extend(agg_reexports) def check_violations( base_train_patching_imports: Dict[str, List[Import]], patch_dir: Path ) -> List[str]: """ Check for circular import violations between base and patch train modules. Args: base_train_patching_imports: A dictionary mapping base train module names to their imports. patch_dir: The directory path containing patch train modules. Returns: A list of strings describing any circular import violations found. 
""" violations: List[str] = [] # Get the imports from the patch train init files patch_train_init_files = list(patch_dir.rglob("__init__.py")) patch_train_init_imports = get_file_module_imports( patch_train_init_files, module_match_string="ray.train" ) # Expand the imports to include reexports expand_to_include_reexports(base_train_patching_imports) # Process each patch train init module for violations for base_train_init_module, imports in base_train_patching_imports.items(): # Get the imports from the patch train files patch_train_files = convert_to_file_paths(imports) patch_train_file_imports = get_file_module_imports( patch_train_files, module_match_string="ray.train" ) for patch_module, imports in patch_train_file_imports.items(): # Skip if the base train init module is in the import path of the patch module if does_overlap(patch_module, base_train_init_module): continue # Skip if the patch train module init file imports the base train init module patch_init_module = ( ".".join(patch_module.split(".")[:-1]) if not is_train_package(patch_module) else patch_module ) patch_init_imports = patch_train_init_imports.get(patch_init_module, []) if any( does_overlap(imp.module, base_train_init_module) for imp in patch_init_imports ): continue for patch_import in imports: # If any of those v1 imports go through the init file, then it is a violation if does_overlap(patch_import.module, base_train_init_module): violations.append( f"circular-import-train: Circular import between {base_train_init_module} (importing {patch_module}) and {patch_module} (importing {patch_import.module}). Resolve by importing {base_train_init_module} in the __init__.py of {patch_init_module}." 
) return violations def main(): parser = argparse.ArgumentParser() parser.add_argument( "--patch_dir", default="ray/train/v2", help="Path to the directory containing patching contents", ) args = parser.parse_args() # Get train directory paths base_dir = get_base_dir() base_train_dir = get_base_train_dir() patch_train_dir = base_dir / Path(args.patch_dir) # Find and save all train packages in global TRAIN_PACKAGES for reference find_train_packages(base_train_dir, patch_train_dir) # Collect all base train init files base_train_init_files = [ f for f in base_train_dir.rglob("__init__.py") if not f.is_relative_to(patch_train_dir) ] # Get the patching imports in the base train init files dotted_module_prefix = str(patch_train_dir.relative_to(base_dir)).replace("/", ".") patching_imports = get_file_module_imports( base_train_init_files, module_match_string=dotted_module_prefix ) # Collect all violations based off the patching imports violations = check_violations(patching_imports, patch_train_dir) if violations: print("\n".join(violations)) sys.exit(1) if __name__ == "__main__": main()
ImportCollector
python
PyCQA__pylint
pylint/utils/pragma_parser.py
{ "start": 2588, "end": 2993 }
class ____(Exception): """A class for exceptions thrown by pragma_parser module.""" def __init__(self, message: str, token: str) -> None: """:args message: explain the reason why the exception has been thrown :args token: token concerned by the exception. """ self.message = message self.token = token super().__init__(self.message)
PragmaParserError
python
hynek__structlog
tests/test_testing.py
{ "start": 489, "end": 4421 }
class ____: def test_captures_logs(self): """ Log entries are captured and retain their structure. """ with testing.capture_logs() as logs: get_logger().bind(x="y").info("hello", answer=42) get_logger().bind(a="b").info("goodbye", foo={"bar": "baz"}) assert [ {"event": "hello", "log_level": "info", "x": "y", "answer": 42}, { "a": "b", "event": "goodbye", "log_level": "info", "foo": {"bar": "baz"}, }, ] == logs def get_active_procs(self): return get_config()["processors"] def test_restores_processors_on_success(self): """ Processors are patched within the contextmanager and restored on exit. """ orig_procs = self.get_active_procs() assert len(orig_procs) > 1 with testing.capture_logs(): modified_procs = self.get_active_procs() assert len(modified_procs) == 1 assert isinstance(modified_procs[0], LogCapture) restored_procs = self.get_active_procs() assert orig_procs is restored_procs assert len(restored_procs) > 1 def test_uses_processors_arg_and_restores_on_success(self): """ Processors passed with `processors` arg are active only until context exits. """ orig_procs = self.get_active_procs() assert len(orig_procs) > 1 with testing.capture_logs(processors=[contextvars.merge_contextvars]): modified_procs = self.get_active_procs() assert len(modified_procs) == 2 assert contextvars.merge_contextvars == modified_procs[0] assert isinstance(modified_procs[1], LogCapture) assert len(modified_procs) == 2 assert contextvars.merge_contextvars == modified_procs[0] assert isinstance(modified_procs[1], LogCapture) restored_procs = self.get_active_procs() assert orig_procs is restored_procs assert len(restored_procs) > 1 def test_restores_processors_on_error(self): """ Processors are restored even on errors. 
""" orig_procs = self.get_active_procs() with pytest.raises(NotImplementedError), testing.capture_logs(): raise NotImplementedError("from test") assert orig_procs is self.get_active_procs() def test_captures_bound_logers(self): """ Even logs from already bound loggers are captured and their processors restored on exit. """ logger = get_logger("bound").bind(foo="bar") logger.info("ensure logger is bound") with testing.capture_logs() as logs: logger.info("hello", answer=42) assert logs == [ { "event": "hello", "answer": 42, "foo": "bar", "log_level": "info", } ] def test_captures_log_level_mapping(self): """ exceptions and warn log levels are mapped like in regular loggers. """ structlog.configure( processors=[ structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ], logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, ) with testing.capture_logs() as logs: get_logger().exception("hello", answer=42) get_logger().warn("again", answer=23) assert [ { "event": "hello", "answer": 42, "exc_info": True, "log_level": "error", }, { "answer": 23, "event": "again", "log_level": "warning", }, ] == logs
TestCaptureLogs
python
aio-libs__aiohttp
aiohttp/payload.py
{ "start": 39348, "end": 40275 }
class ____(AsyncIterablePayload): def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None: super().__init__(value.iter_any(), *args, **kwargs) PAYLOAD_REGISTRY = PayloadRegistry() PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview)) PAYLOAD_REGISTRY.register(StringPayload, str) PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO) PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase) PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO) PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom)) PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase) PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader) # try_last for giving a chance to more specialized async interables like # multipart.BodyPartReaderPayload override the default PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last)
StreamReaderPayload
python
tensorflow__tensorflow
tensorflow/python/eager/custom_device_test.py
{ "start": 982, "end": 2433 }
class ____(test.TestCase): def setUp(self): super().setUp() context._reset_context() def testRegisterCustomDevice(self): device_name = '/job:localhost/replica:0/task:0/device:CUSTOM:0' device, device_info, arrived_flag, executed_flag = ( custom_device_testutil.GetLoggingDeviceCapsules(device_name)) context.register_custom_device(device, device_name, device_info) self.assertFalse(custom_device_testutil.FlagValue(arrived_flag)) self.assertFalse(custom_device_testutil.FlagValue(executed_flag)) with ops.device(device_name): x = constant_op.constant(1.) y = x * constant_op.constant(2.) self.assertTrue(custom_device_testutil.FlagValue(executed_flag)) # There was no copy onto the device. Actually I'm not sure how to trigger # that from Python. self.assertFalse(custom_device_testutil.FlagValue(arrived_flag)) with self.assertRaisesRegex(errors.InternalError, 'Trying to copy'): y.numpy() def testIsCustomDevice(self): device_name = '/job:localhost/replica:0/task:0/device:CUSTOM:0' device, device_info, _, _ = ( custom_device_testutil.GetLoggingDeviceCapsules(device_name)) context.register_custom_device(device, device_name, device_info) self.assertTrue(context.is_custom_device(device_name)) self.assertFalse(context.is_custom_device('cpu:0')) if __name__ == '__main__': ops.enable_eager_execution() test.main()
CustomDeviceTest
python
numba__numba
numba/tests/test_jit_module.py
{ "start": 720, "end": 5248 }
class ____(object): pass jit_module({jit_options}) """ def test_create_temp_jitted_module(self): sys_path_original = list(sys.path) sys_modules_original = dict(sys.modules) with create_temp_module(self.source_lines) as test_module: temp_module_dir = os.path.dirname(test_module.__file__) self.assertEqual(temp_module_dir, sys.path[0]) self.assertEqual(sys.path[1:], sys_path_original) self.assertTrue(test_module.__name__ in sys.modules) # Test that modifications to sys.path / sys.modules are reverted self.assertEqual(sys.path, sys_path_original) self.assertEqual(sys.modules, sys_modules_original) def test_create_temp_jitted_module_with_exception(self): try: sys_path_original = list(sys.path) sys_modules_original = dict(sys.modules) with create_temp_module(self.source_lines): raise ValueError("Something went wrong!") except ValueError: # Test that modifications to sys.path / sys.modules are reverted self.assertEqual(sys.path, sys_path_original) self.assertEqual(sys.modules, sys_modules_original) def test_jit_module(self): with create_temp_module(self.source_lines) as test_module: self.assertIsInstance(test_module.inc, dispatcher.Dispatcher) self.assertIsInstance(test_module.add, dispatcher.Dispatcher) self.assertIsInstance(test_module.inc_add, dispatcher.Dispatcher) self.assertTrue(test_module.mean is np.mean) self.assertTrue(inspect.isclass(test_module.Foo)) # Test output of jitted functions is as expected x, y = 1.7, 2.3 self.assertEqual(test_module.inc(x), test_module.inc.py_func(x)) self.assertEqual(test_module.add(x, y), test_module.add.py_func(x, y)) self.assertEqual(test_module.inc_add(x), test_module.inc_add.py_func(x)) def test_jit_module_jit_options(self): jit_options = {"nopython": True, "nogil": False, "error_model": "numpy", "boundscheck": False, } with create_temp_module(self.source_lines, **jit_options) as test_module: self.assertEqual(test_module.inc.targetoptions, jit_options) def test_jit_module_jit_options_override(self): source_lines = """ from 
numba import jit, jit_module @jit(nogil=True, forceobj=True) def inc(x): return x + 1 def add(x, y): return x + y jit_module({jit_options}) """ jit_options = {"nopython": True, "error_model": "numpy", "boundscheck": False, } with create_temp_module(source_lines=source_lines, **jit_options) as test_module: self.assertEqual(test_module.add.targetoptions, jit_options) # Test that manual jit-wrapping overrides jit_module options, # `forceobj` will automatically apply `nopython=False`. self.assertEqual(test_module.inc.targetoptions, {'nogil': True, 'forceobj': True, 'boundscheck': None, 'nopython': False}) def test_jit_module_logging_output(self): logger = logging.getLogger('numba.core.decorators') logger.setLevel(logging.DEBUG) jit_options = {"nopython": True, "error_model": "numpy", } with captured_logs(logger) as logs: with create_temp_module(self.source_lines, **jit_options) as test_module: logs = logs.getvalue() expected = ["Auto decorating function", "from module {}".format(test_module.__name__), "with jit and options: {}".format(jit_options)] self.assertTrue(all(i in logs for i in expected)) def test_jit_module_logging_level(self): logger = logging.getLogger('numba.core.decorators') # Test there's no logging for INFO level logger.setLevel(logging.INFO) with captured_logs(logger) as logs: with create_temp_module(self.source_lines): self.assertEqual(logs.getvalue(), '')
Foo
python
prabhupant__python-ds
data_structures/doubly_linked_list/doubly_linked_list.py
{ "start": 163, "end": 1975 }
class ____: def __init__(self): self.first = self.last = Node() def empty(self): return self.first.data == self.last.data def search(self, data): if self.empty(): return None auxiliar = self.first.next while auxiliar.next != None and auxiliar.data != data: auxiliar = auxiliar.next if auxiliar.data == data: return auxiliar.data return None def append(self, data): self.last.next = Node(data = data, next = None, previous = self.last) self.last = self.last.next def __str__(self): if self.empty(): return "" aux = self.first format_ = "" while aux.next != None: if aux.data!= None: format_ += str(aux.data) + " " aux = aux.next format_ += str(aux.data) + "" return format_ def remove(self, data): if self.empty(): return None auxiliar = self.first.next while auxiliar != None and auxiliar.data != data: auxiliar = auxiliar.next if auxiliar == None: return None else: item = auxiliar.data if auxiliar.previous != None: auxiliar.previous.next = auxiliar.next if auxiliar.next != None: auxiliar.next.previous = auxiliar.previous if self.empty(): self.last = self.first = Node() elif auxiliar.next == None: self.last = auxiliar.previous del auxiliar return item #the auxiliar variable is to help like a flag in the code, like aux too #If something isn't right, or in another language it is bc I am a native portuguese speaker, so I translated my code
Lista
python
xlwings__xlwings
xlwings/constants.py
{ "start": 75915, "end": 76075 }
class ____: xlLookForBlanks = 0 # from enum XlLookFor xlLookForErrors = 1 # from enum XlLookFor xlLookForFormulas = 2 # from enum XlLookFor
LookFor
python
graphql-python__graphene
graphene/relay/tests/test_mutation_async.py
{ "start": 1282, "end": 2262 }
class ____(ObjectType): say_promise = SaySomethingAsync.Field() other = OtherMutation.Field() schema = Schema(query=RootQuery, mutation=Mutation) client = Client(schema) @mark.asyncio async def test_node_query_promise(): executed = await client.execute_async( 'mutation a { sayPromise(input: {what:"hello", clientMutationId:"1"}) { phrase } }' ) assert isinstance(executed, dict) assert "errors" not in executed assert executed["data"] == {"sayPromise": {"phrase": "hello"}} @mark.asyncio async def test_edge_query(): executed = await client.execute_async( 'mutation a { other(input: {clientMutationId:"1"}) { clientMutationId, myNodeEdge { cursor node { name }} } }' ) assert isinstance(executed, dict) assert "errors" not in executed assert executed["data"] == { "other": { "clientMutationId": "1", "myNodeEdge": {"cursor": "1", "node": {"name": "name"}}, } }
Mutation
python
graphql-python__graphene
examples/simple_example.py
{ "start": 132, "end": 690 }
class ____(graphene.ObjectType): patron = graphene.Field(Patron) def resolve_patron(root, info): return Patron(id=1, name="Syrus", age=27) schema = graphene.Schema(query=Query) query = """ query something{ patron { id name age } } """ def test_query(): result = schema.execute(query) assert not result.errors assert result.data == {"patron": {"id": "1", "name": "Syrus", "age": 27}} if __name__ == "__main__": result = schema.execute(query) print(result.data["patron"])
Query
python
ray-project__ray
python/ray/experimental/tqdm_ray.py
{ "start": 1235, "end": 4646 }
class ____: """Experimental: Ray distributed tqdm implementation. This class lets you use tqdm from any Ray remote task or actor, and have the progress centrally reported from the driver. This avoids issues with overlapping / conflicting progress bars, as the driver centrally manages tqdm positions. Supports a limited subset of tqdm args. """ DEFAULT_FLUSH_INTERVAL_SECONDS = 1.0 def __init__( self, iterable: Optional[Iterable] = None, desc: Optional[str] = None, total: Optional[int] = None, unit: Optional[str] = None, position: Optional[int] = None, flush_interval_s: Optional[float] = None, ): import ray._private.services as services if total is None and iterable is not None: try: total = len(iterable) except (TypeError, AttributeError): total = None self._iterable = iterable self._desc = desc or "" self._total = total self._unit = unit or "it" self._ip = services.get_node_ip_address() self._pid = os.getpid() self._pos = position or 0 self._uuid = uuid.uuid4().hex self._x = 0 self._closed = False self._flush_interval_s = ( flush_interval_s if flush_interval_s is not None else self.DEFAULT_FLUSH_INTERVAL_SECONDS ) self._last_flush_time = 0.0 def set_description(self, desc): """Implements tqdm.tqdm.set_description.""" self._desc = desc self._dump_state() def update(self, n=1): """Implements tqdm.tqdm.update.""" self._x += n self._dump_state() def close(self): """Implements tqdm.tqdm.close.""" self._closed = True # Don't bother if ray is shutdown (in __del__ hook). 
if ray is not None: self._dump_state(force_flush=True) def refresh(self): """Implements tqdm.tqdm.refresh.""" self._dump_state() @property def total(self) -> Optional[int]: return self._total @total.setter def total(self, total: int): self._total = total def _dump_state(self, force_flush=False) -> None: now = time.time() if not force_flush and now - self._last_flush_time < self._flush_interval_s: return self._last_flush_time = now if ray._private.worker.global_worker.mode == ray.WORKER_MODE: # Include newline in payload to avoid split prints. # TODO(ekl) we should move this to events.json to avoid log corruption. print(json.dumps(self._get_state()) + "\n", end="") else: instance().process_state_update(copy.deepcopy(self._get_state())) def _get_state(self) -> ProgressBarState: return { "__magic_token__": RAY_TQDM_MAGIC, "x": self._x, "pos": self._pos, "desc": self._desc, "total": self._total, "unit": self._unit, "ip": self._ip, "pid": self._pid, "uuid": self._uuid, "closed": self._closed, } def __iter__(self): if self._iterable is None: raise ValueError("No iterable provided") for x in iter(self._iterable): self.update(1) yield x
tqdm
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/reduction_ops_test_big.py
{ "start": 1026, "end": 1154 }
class ____(test.TestCase): def _tf_reduce(self, x, reduction_axes, keepdims): raise NotImplementedError()
BaseReductionTest
python
huggingface__transformers
src/transformers/models/rt_detr/modeling_rt_detr.py
{ "start": 10830, "end": 16821 }
class ____(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~RTDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized (absolute) bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, config.num_labels)`): Stacked intermediate logits (logits of each layer of the decoder). 
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate predicted corners (predicted corners of each layer of the decoder). initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked initial reference points (initial reference points of each layer of the decoder). init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the encoder. enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the encoder. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. 
denoising_meta_values (`dict`): Extra dictionary for the denoising related values """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[dict] = None logits: Optional[torch.FloatTensor] = None pred_boxes: Optional[torch.FloatTensor] = None auxiliary_outputs: Optional[list[dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_logits: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None intermediate_predicted_corners: Optional[torch.FloatTensor] = None initial_reference_points: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[torch.FloatTensor]] = None init_reference_points: Optional[tuple[torch.FloatTensor]] = None enc_topk_logits: Optional[torch.FloatTensor] = None enc_topk_bboxes: Optional[torch.FloatTensor] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None denoising_meta_values: Optional[dict] = None def _get_clones(partial_module, N): return nn.ModuleList([partial_module() for i in range(N)]) # Copied from transformers.models.conditional_detr.modeling_conditional_detr.inverse_sigmoid def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->RTDetr
RTDetrObjectDetectionOutput
python
huggingface__transformers
tests/models/flava/test_processing_flava.py
{ "start": 1170, "end": 2980 }
class ____(ProcessorTesterMixin, unittest.TestCase): processor_class = FlavaProcessor @classmethod def _setup_image_processor(cls): image_processor_class = cls._get_component_class_from_processor("image_processor") image_processor_map = { "image_mean": FLAVA_IMAGE_MEAN, "image_std": FLAVA_IMAGE_STD, "do_normalize": True, "do_resize": True, "size": 224, "do_center_crop": True, "crop_size": 224, "input_size_patches": 14, "total_mask_patches": 75, "mask_group_max_patches": None, "mask_group_min_patches": 16, "mask_group_min_aspect_ratio": 0.3, "mask_group_max_aspect_ratio": None, "codebook_do_resize": True, "codebook_size": 112, "codebook_do_center_crop": True, "codebook_crop_size": 112, "codebook_do_map_pixels": True, "codebook_do_normalize": True, "codebook_image_mean": FLAVA_CODEBOOK_MEAN, "codebook_image_std": FLAVA_CODEBOOK_STD, } image_processor = image_processor_class(**image_processor_map) return image_processor @classmethod def _setup_tokenizer(cls): tokenizer_class = cls._get_component_class_from_processor("tokenizer") vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: skip vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(vocab_file, "w", encoding="utf-8") as fp: fp.write("".join([x + "\n" for x in vocab_tokens])) return tokenizer_class.from_pretrained(cls.tmpdirname)
FlavaProcessorTest
python
huggingface__transformers
src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py
{ "start": 47193, "end": 47861 }
class ____(nn.Module): def __init__(self, config, bias: bool = False): super().__init__() self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_state): return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
Qwen2_5OmniMLP
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/history.py
{ "start": 7648, "end": 8066 }
class ____(History): """ :class:`.History` object that doesn't remember anything. """ def load_history_strings(self) -> Iterable[str]: return [] def store_string(self, string: str) -> None: pass def append_string(self, string: str) -> None: # Don't remember this. pass _StrOrBytesPath = Union[str, bytes, "os.PathLike[str]", "os.PathLike[bytes]"]
DummyHistory
python
pallets__werkzeug
src/werkzeug/routing/exceptions.py
{ "start": 1225, "end": 1437 }
class ____(RoutingException): """Internal exception.""" __slots__ = ("path_info",) def __init__(self, path_info: str) -> None: super().__init__() self.path_info = path_info
RequestPath
python
django__django
tests/indexes/tests.py
{ "start": 4098, "end": 4693 }
class ____(TransactionTestCase): available_apps = ["indexes"] def test_create_index_ignores_opclasses(self): index = Index( name="test_ops_class", fields=["headline"], opclasses=["varchar_pattern_ops"], ) with connection.schema_editor() as editor: # This would error if opclasses weren't ignored. editor.add_index(IndexedArticle2, index) # The `condition` parameter is ignored by databases that don't support partial # indexes. @skipIfDBFeature("supports_partial_indexes")
SchemaIndexesNotPostgreSQLTests
python
pytorch__pytorch
.github/scripts/test_runner_determinator.py
{ "start": 165, "end": 4125 }
class ____(TestCase): def test_parse_settings(self) -> None: settings_text = """ experiments: lf: rollout_perc: 25 otherExp: rollout_perc: 0 default: false --- Users: @User1,lf @User2,lf,otherExp """ settings = rd.parse_settings(settings_text) self.assertTupleEqual( rd.Experiment(rollout_perc=25), settings.experiments["lf"], "lf settings not parsed correctly", ) self.assertTupleEqual( rd.Experiment(rollout_perc=0, default=False), settings.experiments["otherExp"], "otherExp settings not parsed correctly", ) def test_parse_settings_with_invalid_experiment_name_skips_experiment(self) -> None: settings_text = """ experiments: lf: rollout_perc: 25 -badExp: rollout_perc: 0 default: false --- Users: @User1,lf @User2,lf,-badExp """ settings = rd.parse_settings(settings_text) self.assertTupleEqual( rd.Experiment(rollout_perc=25), settings.experiments["lf"], "lf settings not parsed correctly", ) self.assertNotIn("-badExp", settings.experiments) def test_parse_settings_in_code_block(self) -> None: settings_text = """ ``` experiments: lf: rollout_perc: 25 otherExp: rollout_perc: 0 default: false ``` --- Users: @User1,lf @User2,lf,otherExp """ settings = rd.parse_settings(settings_text) self.assertTupleEqual( rd.Experiment(rollout_perc=25), settings.experiments["lf"], "lf settings not parsed correctly", ) self.assertTupleEqual( rd.Experiment(rollout_perc=0, default=False), settings.experiments["otherExp"], "otherExp settings not parsed correctly", ) def test_parse_all_branches_setting(self) -> None: settings_text = """ ``` experiments: lf: rollout_perc: 25 all_branches: true otherExp: all_branches: True rollout_perc: 0 ``` --- Users: @User1,lf @User2,lf,otherExp """ settings = rd.parse_settings(settings_text) self.assertTupleEqual( rd.Experiment(rollout_perc=25, all_branches=True), settings.experiments["lf"], "lf settings not parsed correctly", ) self.assertTrue(settings.experiments["otherExp"].all_branches) self.assertTupleEqual( rd.Experiment(rollout_perc=0, 
all_branches=True), settings.experiments["otherExp"], "otherExp settings not parsed correctly", ) def test_parse_users(self) -> None: settings_text = """ experiments: lf: rollout_perc: 0 otherExp: rollout_perc: 0 --- Users: @User1,lf @User2,lf,otherExp """ users = rd.parse_users(settings_text) self.assertDictEqual( {"User1": ["lf"], "User2": ["lf", "otherExp"]}, users, "Users not parsed correctly", ) def test_parse_users_without_settings(self) -> None: settings_text = """ @User1,lf @User2,lf,otherExp """ users = rd.parse_users(settings_text) self.assertDictEqual( {"User1": ["lf"], "User2": ["lf", "otherExp"]}, users, "Users not parsed correctly", )
TestRunnerDeterminatorIssueParser
python
PyCQA__pylint
tests/functional/c/class_members_py30.py
{ "start": 1277, "end": 1403 }
class ____(Missing): """Don't emit no-member if we don't know the bases of a class.""" NoKnownBases().lalala()
NoKnownBases
python
getsentry__sentry
src/sentry/releases/endpoints/project_release_file_details.py
{ "start": 7258, "end": 10886 }
class ____(ProjectEndpoint, ReleaseFileDetailsMixin): publish_status = { "DELETE": ApiPublishStatus.UNKNOWN, "GET": ApiPublishStatus.UNKNOWN, "PUT": ApiPublishStatus.UNKNOWN, } permission_classes = (ProjectReleasePermission,) def get(self, request: Request, project, version, file_id) -> Response: """ Retrieve a Project Release's File ````````````````````````````````` Return details on an individual file within a release. This does not actually return the contents of the file, just the associated metadata. :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. :pparam string project_id_or_slug: the id or slug of the project to retrieve the file of. :pparam string version: the version identifier of the release. :pparam string file_id: the ID of the file to retrieve. :auth: required """ try: release = Release.objects.get( organization_id=project.organization_id, projects=project, version=version ) except Release.DoesNotExist: raise ResourceDoesNotExist return self.get_releasefile( request, release, file_id, check_permission_fn=lambda: has_download_permission(request, project), ) def put(self, request: Request, project, version, file_id) -> Response: """ Update a File ````````````` Update metadata of an existing file. Currently only the name of the file can be changed. :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. :pparam string project_id_or_slug: the id or slug of the project to update the file of. :pparam string version: the version identifier of the release. :pparam string file_id: the ID of the file to update. :param string name: the new name of the file. 
:auth: required """ try: release = Release.objects.get( organization_id=project.organization_id, projects=project, version=version ) except Release.DoesNotExist: raise ResourceDoesNotExist return self.update_releasefile(request, release, file_id) def delete(self, request: Request, project, version, file_id) -> Response: """ Delete a File ````````````` Permanently remove a file from a release. This will also remove the physical file from storage, except if it is stored as part of an artifact bundle. :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. :pparam string project_id_or_slug: the id or slug of the project to delete the file of. :pparam string version: the version identifier of the release. :pparam string file_id: the ID of the file to delete. :auth: required """ try: release = Release.objects.get( organization_id=project.organization_id, projects=project, version=version ) except Release.DoesNotExist: raise ResourceDoesNotExist return self.delete_releasefile(release, file_id)
ProjectReleaseFileDetailsEndpoint
python
weaviate__weaviate-python-client
weaviate/collections/classes/aggregate.py
{ "start": 1229, "end": 1485 }
class ____: """The aggregation result for a boolean property.""" count: Optional[int] percentage_false: Optional[float] percentage_true: Optional[float] total_false: Optional[int] total_true: Optional[int] @dataclass
AggregateBoolean
python
huggingface__transformers
tests/models/instructblipvideo/test_video_processing_instructblipvideo.py
{ "start": 2965, "end": 4379 }
class ____(VideoProcessingTestMixin, unittest.TestCase): fast_video_processing_class = InstructBlipVideoVideoProcessor if is_torchvision_available() else None input_name = "pixel_values" def setUp(self): super().setUp() self.video_processor_tester = InstructBlipVideoVideoProcessingTester(self) @property def video_processor_dict(self): return self.video_processor_tester.prepare_video_processor_dict() def test_image_processor_properties(self): video_processing = self.fast_video_processing_class(**self.video_processor_dict) self.assertTrue(hasattr(video_processing, "do_resize")) self.assertTrue(hasattr(video_processing, "size")) self.assertTrue(hasattr(video_processing, "do_normalize")) self.assertTrue(hasattr(video_processing, "image_mean")) self.assertTrue(hasattr(video_processing, "image_std")) self.assertTrue(hasattr(video_processing, "do_convert_rgb")) def test_video_processor_from_dict_with_kwargs(self): video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict) self.assertEqual(video_processor.size, {"height": 18, "width": 18}) video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42) self.assertEqual(video_processor.size, {"height": 42, "width": 42})
InstructBlipVideoProcessingTest
python
getsentry__sentry
src/sentry/hybridcloud/rpc/resolvers.py
{ "start": 1037, "end": 1380 }
class ____(RegionResolutionStrategy): """Resolve from an `str` parameter representing a region's name""" parameter_name: str = "region_name" def resolve(self, arguments: ArgumentDict) -> Region: region_name = arguments[self.parameter_name] return get_region_by_name(region_name) @dataclass(frozen=True)
ByRegionName
python
jina-ai__jina
tests/unit/jaml/parsers/executors/test_legacy.py
{ "start": 84, "end": 149 }
class ____: def __init__(self, a00): self.a00 = a00
A00
python
tornadoweb__tornado
tornado/test/tcpclient_test.py
{ "start": 1880, "end": 5792 }
class ____(AsyncTestCase): def setUp(self): super().setUp() self.server = None self.client = TCPClient() def start_server(self, family): self.server = TestTCPServer(family) return self.server.port def stop_server(self): if self.server is not None: self.server.stop() self.server = None def tearDown(self): self.client.close() self.stop_server() super().tearDown() def skipIfLocalhostV4(self): # The port used here doesn't matter, but some systems require it # to be non-zero if we do not also pass AI_PASSIVE. addrinfo = self.io_loop.run_sync(lambda: Resolver().resolve("localhost", 80)) families = {addr[0] for addr in addrinfo} if socket.AF_INET6 not in families: self.skipTest("localhost does not resolve to ipv6") @gen_test def do_test_connect(self, family, host, source_ip=None, source_port=None): port = self.start_server(family) stream = yield self.client.connect( host, port, source_ip=source_ip, source_port=source_port, af=family, ) assert self.server is not None server_stream = yield self.server.queue.get() with closing(stream): stream.write(b"hello") data = yield server_stream.read_bytes(5) self.assertEqual(data, b"hello") def test_connect_ipv4_ipv4(self): self.do_test_connect(socket.AF_INET, "127.0.0.1") def test_connect_ipv4_dual(self): self.do_test_connect(socket.AF_INET, "localhost") @skipIfNoIPv6 def test_connect_ipv6_ipv6(self): self.skipIfLocalhostV4() self.do_test_connect(socket.AF_INET6, "::1") @skipIfNoIPv6 def test_connect_ipv6_dual(self): self.skipIfLocalhostV4() self.do_test_connect(socket.AF_INET6, "localhost") def test_connect_unspec_ipv4(self): self.do_test_connect(socket.AF_UNSPEC, "127.0.0.1") @skipIfNoIPv6 def test_connect_unspec_ipv6(self): self.skipIfLocalhostV4() self.do_test_connect(socket.AF_UNSPEC, "::1") def test_connect_unspec_dual(self): self.do_test_connect(socket.AF_UNSPEC, "localhost") @gen_test def test_refused_ipv4(self): cleanup_func, port = refusing_port() self.addCleanup(cleanup_func) with self.assertRaises(IOError): yield 
self.client.connect("127.0.0.1", port) def test_source_ip_fail(self): """Fail when trying to use the source IP Address '8.8.8.8'.""" self.assertRaises( socket.error, self.do_test_connect, socket.AF_INET, "127.0.0.1", source_ip="8.8.8.8", ) def test_source_ip_success(self): """Success when trying to use the source IP Address '127.0.0.1'.""" self.do_test_connect(socket.AF_INET, "127.0.0.1", source_ip="127.0.0.1") @skipIfNonUnix def test_source_port_fail(self): """Fail when trying to use source port 1.""" if getpass.getuser() == "root": # Root can use any port so we can't easily force this to fail. # This is mainly relevant for docker. self.skipTest("running as root") self.assertRaises( socket.error, self.do_test_connect, socket.AF_INET, "127.0.0.1", source_port=1, ) @gen_test def test_connect_timeout(self): timeout = 0.05 class TimeoutResolver(Resolver): def resolve(self, *args, **kwargs): return Future() # never completes with self.assertRaises(TimeoutError): yield TCPClient(resolver=TimeoutResolver()).connect( "1.2.3.4", 12345, timeout=timeout )
TCPClientTest
python
pytorch__pytorch
test/inductor/test_group_batch_fusion.py
{ "start": 25020, "end": 44782 }
class ____(TestCase): # Helper function to build a Graph from a data description. def build_graph(self, desc): # desc: { # "n1": ["n2", "n3"], # "n2": ["n3"], # "n3": [], # } # g = torch.fx.Graph() lookup = {} desc = collections.deque((k, v) for k, v in desc.items()) unsatisfied = 0 while desc: unsatisfied += 1 assert unsatisfied <= len(desc) # cycle or bad input? name, v = desc.popleft() args = tuple(lookup.get(n) for n in v) if None in args: desc.append((name, v)) continue node = g.create_node("placeholder", "target", name=name, args=args) lookup[name] = node unsatisfied = 0 return g, lookup def verify(self, tree, subnodes, min_fuse, max_fuse, expected): _, lookup = self.build_graph(tree) subnodes = [lookup[n] for n in subnodes] expected = [[lookup[n] for n in sub] for sub in expected] opts = { "min_fuse_set_size": min_fuse, "max_fuse_set_size": max_fuse, } result = list( torch._inductor.fx_passes.group_batch_fusion.find_independent_subset_greedy( subnodes, opts ) ) self.assertEqual(expected, result) def test_find_independent_subset_greedy(self): # First some randomly generated tests. 
self.verify({"n0": (), "n1": ()}, ["n0"], 0, 100, [["n0"]]) self.verify( {"n0": (), "n1": (), "n2": ("n0",)}, ["n1", "n2"], 0, 100, [["n1", "n2"]] ) self.verify( { "n0": (), "n1": (), "n2": ("n0",), "n3": (), "n4": ("n0", "n1", "n2"), "n5": ("n0", "n2", "n4"), "n6": ("n3",), "n7": ("n4", "n5", "n6", "n1", "n3"), "n8": ("n7", "n1", "n3", "n5", "n0"), "n9": ("n3", "n4", "n8", "n6", "n5", "n2", "n0", "n7"), "n10": ("n0",), "n11": ("n4", "n0", "n2", "n3", "n1", "n9"), "n12": ("n2", "n3", "n10", "n6", "n9"), }, ["n10", "n5", "n3", "n4", "n9"], 0, 100, [["n10", "n5", "n3"], ["n4"], ["n9"]], ) self.verify({"n0": (), "n1": (), "n2": ("n0",)}, ["n2"], 0, 100, [["n2"]]) self.verify( { "n0": (), "n1": (), "n2": (), "n3": (), "n4": ("n3", "n1", "n0"), "n5": ("n1", "n2", "n4", "n0"), "n6": ("n0", "n3", "n2"), "n7": ("n6", "n1", "n5", "n4", "n3", "n0"), "n8": ("n2", "n7", "n3"), "n9": ("n3", "n5", "n6", "n7", "n2", "n1"), "n10": ("n8", "n0", "n2", "n4", "n6", "n3"), "n11": ("n6", "n5", "n8", "n1", "n3", "n10", "n2"), "n12": ("n7", "n4"), }, ["n7"], 0, 100, [["n7"]], ) self.verify( { "n0": (), "n1": (), "n2": (), "n3": ("n1", "n2"), "n4": ("n1",), "n5": (), "n6": ("n5",), "n7": ("n1", "n6", "n5", "n2", "n3", "n0"), "n8": ("n5", "n7", "n2", "n6"), "n9": ("n1",), "n10": ("n9",), "n11": ("n3", "n4", "n0", "n2"), "n12": ("n8", "n9", "n5", "n1"), "n13": ("n11", "n4", "n12", "n1", "n9", "n3", "n0"), }, ["n9", "n2", "n8", "n10", "n5", "n6", "n13", "n7", "n3", "n0", "n4"], 0, 100, [ ["n9", "n2", "n5", "n0", "n4"], ["n8", "n10"], ["n6", "n3"], ["n13"], ["n7"], ], ) self.verify({"n0": ()}, ["n0"], 0, 100, [["n0"]]) self.verify( { "n0": (), "n1": (), "n2": (), "n3": (), "n4": ("n1", "n2"), "n5": ("n0", "n4", "n1"), "n6": ("n1", "n5"), "n7": (), "n8": ("n7", "n1", "n3", "n5", "n6"), "n9": ("n2", "n1", "n8", "n0", "n4", "n7", "n6", "n5"), "n10": ("n4", "n7", "n2", "n3", "n8"), "n11": (), "n12": ("n9", "n7", "n5", "n11", "n8"), "n13": ( "n5", "n6", "n12", "n3", "n9", "n8", "n4", "n11", "n2", 
"n10", "n1", ), "n14": ("n7", "n3", "n12", "n10", "n2", "n0", "n4", "n5"), "n15": ("n9", "n5", "n1", "n13", "n8", "n10", "n12", "n7", "n11", "n3"), "n16": ( "n2", "n4", "n15", "n5", "n0", "n6", "n3", "n8", "n14", "n12", "n9", "n10", "n7", "n13", ), }, ["n0", "n3", "n2", "n11", "n1", "n6", "n12", "n5", "n4", "n15", "n8"], 0, 100, [ ["n0", "n3", "n2", "n11", "n1"], ["n6"], ["n12"], ["n5"], ["n4"], ["n15"], ["n8"], ], ) self.verify( { "n0": (), "n1": (), "n2": (), "n3": ("n2", "n1"), "n4": ("n2", "n3", "n1"), "n5": ("n3", "n1"), "n6": ("n1",), "n7": ("n5", "n4"), "n8": ("n6", "n2"), }, ["n4", "n3", "n1", "n8", "n5", "n6", "n2"], 0, 100, [["n4", "n8", "n5"], ["n3", "n6"], ["n1", "n2"]], ) self.verify( { "n0": (), "n1": (), "n2": (), "n3": ("n1", "n0"), "n4": ("n0",), "n5": ("n1", "n4"), "n6": ("n2", "n1", "n4"), "n7": ("n0", "n3"), "n8": ("n5", "n0", "n6", "n1", "n4", "n2", "n3"), "n9": ("n1", "n4", "n8", "n7", "n5"), "n10": ("n9", "n8", "n0", "n2", "n7", "n1", "n3", "n5"), "n11": ("n9", "n2", "n6", "n0", "n3"), "n12": ("n1", "n4", "n7", "n10", "n5", "n2", "n11", "n6"), "n13": ("n9", "n2", "n3", "n0", "n7", "n5", "n10", "n11"), "n14": ( "n8", "n0", "n3", "n6", "n10", "n1", "n5", "n9", "n12", "n11", "n4", ), "n15": ( "n3", "n10", "n0", "n4", "n9", "n11", "n2", "n13", "n12", "n8", "n5", "n14", ), "n16": ("n6",), "n17": ( "n4", "n3", "n14", "n8", "n15", "n16", "n2", "n5", "n7", "n12", "n1", "n0", "n11", ), }, ["n17", "n16", "n10", "n4", "n8", "n12", "n6", "n1"], 0, 100, [["n17"], ["n16", "n10"], ["n4", "n1"], ["n8"], ["n12"], ["n6"]], ) self.verify( { "n0": (), "n1": (), "n2": ("n0",), "n3": ("n0", "n1"), "n4": ("n0",), "n5": ("n0",), "n6": ("n5", "n3", "n0", "n2"), "n7": (), "n8": ("n2", "n5", "n3", "n1", "n7", "n6", "n0"), "n9": ("n4",), "n10": ("n4", "n5", "n1", "n2", "n0", "n6", "n8", "n9", "n7"), "n11": ("n3", "n0", "n9", "n10", "n5", "n1", "n2", "n7", "n4", "n6"), "n12": ("n9", "n5"), }, ["n8", "n3", "n1", "n12", "n2", "n5", "n11", "n4", "n10", "n6", "n0"], 0, 100, 
[ ["n8", "n12"], ["n3", "n2", "n5", "n4"], ["n1", "n0"], ["n11"], ["n10"], ["n6"], ], ) self.verify( { "n0": (), "n1": (), "n2": (), "n3": (), "n4": ("n2", "n3"), "n5": ("n1", "n3", "n2", "n4"), "n6": ("n5", "n4", "n1", "n3"), "n7": ("n5",), "n8": ("n5", "n4", "n1"), "n9": ("n2", "n3", "n1", "n5", "n7", "n0", "n8"), "n10": ("n5", "n3", "n1", "n7", "n8", "n9"), "n11": ("n1", "n4", "n2", "n0", "n8", "n9"), "n12": ("n4", "n3", "n9"), "n13": ( "n6", "n10", "n4", "n8", "n0", "n11", "n12", "n7", "n3", "n2", "n1", ), "n14": ("n4", "n13", "n2"), "n15": ("n11", "n7", "n6", "n10", "n14"), "n16": ("n15", "n3"), "n17": ("n10", "n2", "n7", "n0", "n5", "n6", "n9"), "n18": ( "n16", "n8", "n6", "n9", "n11", "n12", "n14", "n5", "n13", "n4", "n1", ), }, [ "n1", "n0", "n16", "n6", "n15", "n9", "n7", "n4", "n3", "n11", "n13", "n17", "n12", "n18", ], 0, 100, [ ["n1", "n0", "n4"], ["n16", "n17"], ["n6", "n9"], ["n15"], ["n7"], ["n3"], ["n11", "n12"], ["n13"], ["n18"], ], ) self.verify( { "n0": (), "n1": (), "n2": (), "n3": ("n2",), "n4": ("n1",), "n5": (), "n6": ("n1", "n4"), "n7": ("n5", "n1"), "n8": ("n6",), "n9": ("n6", "n1", "n2", "n0"), "n10": ("n0", "n7"), "n11": ("n0", "n4", "n3", "n5"), "n12": ("n9", "n8", "n7", "n4", "n0"), }, ["n8", "n9", "n11", "n2", "n4", "n0", "n7", "n5", "n1"], 0, 100, [["n8", "n9", "n11", "n7"], ["n2", "n4", "n0", "n5"], ["n1"]], ) self.verify( {"n0": (), "n1": (), "n2": (), "n3": ("n0",), "n4": ("n3",)}, ["n1", "n2", "n4"], 0, 100, [["n1", "n2", "n4"]], ) self.verify( { "n0": (), "n1": (), "n2": ("n1",), "n3": ("n2", "n1"), "n4": ("n3",), "n5": (), "n6": ("n1", "n5"), "n7": (), "n8": ("n4", "n5"), "n9": ("n0", "n3", "n6", "n4", "n5", "n8", "n7", "n1"), "n10": ("n3", "n0", "n6", "n9", "n7"), "n11": (), "n12": ("n1", "n8", "n3", "n6", "n7", "n0", "n10", "n5", "n9", "n11"), "n13": ("n9", "n11", "n4"), "n14": (), "n15": ("n6", "n12"), "n16": ( "n1", "n7", "n10", "n3", "n9", "n0", "n2", "n5", "n8", "n13", "n14", "n15", "n4", "n6", ), }, [ "n11", "n16", "n5", 
"n12", "n7", "n2", "n0", "n6", "n3", "n9", "n8", "n15", "n14", "n4", "n13", "n1", ], 0, 100, [ ["n11", "n5", "n7", "n2", "n0", "n14"], ["n16"], ["n12", "n13"], ["n6", "n3"], ["n9"], ["n8"], ["n15"], ["n4"], ["n1"], ], ) self.verify({"n0": (), "n1": ()}, ["n1"], 0, 100, [["n1"]]) self.verify( { "n0": (), "n1": (), "n2": ("n1",), "n3": (), "n4": ("n0", "n2", "n3"), "n5": ("n2", "n3"), "n6": ("n3",), }, ["n6", "n2", "n3", "n1"], 0, 100, [["n6", "n2"], ["n3", "n1"]], ) self.verify( { "n0": (), "n1": (), "n2": (), "n3": ("n2",), "n4": ("n0",), "n5": ("n1", "n2"), "n6": ("n2", "n3", "n1", "n0", "n5"), "n7": ("n6", "n2", "n0", "n4", "n5", "n1"), "n8": ("n4",), "n9": ("n4", "n6", "n7", "n1", "n2"), }, ["n8", "n6", "n2", "n4", "n7", "n5", "n3", "n9"], 0, 100, [["n8", "n6"], ["n2", "n4"], ["n7"], ["n5", "n3"], ["n9"]], ) self.verify( { "n0": (), "n1": (), "n2": (), "n3": ("n1", "n2"), "n4": ("n0",), "n5": ("n2", "n3", "n0", "n1"), "n6": ("n4", "n1"), "n7": ("n5",), "n8": ("n7", "n1", "n5", "n6", "n3", "n4", "n0"), "n9": ("n2", "n8"), }, ["n1", "n7", "n4", "n2", "n0", "n8", "n3", "n5"], 0, 100, [["n1", "n4", "n2"], ["n7"], ["n0", "n3"], ["n8"], ["n5"]], ) self.verify( { "n0": (), "n1": (), "n2": ("n0",), "n3": ("n1",), "n4": ("n2", "n1"), "n5": (), "n6": ("n0",), "n7": ("n6", "n3", "n2", "n1", "n0"), "n8": ("n0", "n2"), "n9": ("n6", "n5", "n8", "n4", "n0"), "n10": ("n1", "n7", "n5", "n8", "n6", "n2", "n4", "n9"), }, ["n0"], 0, 100, [["n0"]], ) # trivial test of min_fuse self.verify( { "n0": (), "n1": (), "n2": (), "n3": ("n1", "n2"), "n4": ("n1",), "n5": (), "n6": ("n5",), "n7": ("n1", "n6", "n5", "n2", "n3", "n0"), "n8": ("n5", "n7", "n2", "n6"), "n9": ("n1",), "n10": ("n9",), "n11": ("n3", "n4", "n0", "n2"), "n12": ("n8", "n9", "n5", "n1"), "n13": ("n11", "n4", "n12", "n1", "n9", "n3", "n0"), }, ["n9", "n2", "n8", "n10", "n5", "n6", "n13", "n7", "n3", "n0", "n4"], 2, 10, [["n9", "n2", "n5", "n0", "n4"], ["n8", "n10"], ["n6", "n3"]], ) # trivial test of max_fuse self.verify( 
{ "n0": (), "n1": (), "n2": (), "n3": ("n1", "n2"), "n4": ("n1",), "n5": (), "n6": ("n5",), "n7": ("n1", "n6", "n5", "n2", "n3", "n0"), "n8": ("n5", "n7", "n2", "n6"), "n9": ("n1",), "n10": ("n9",), "n11": ("n3", "n4", "n0", "n2"), "n12": ("n8", "n9", "n5", "n1"), "n13": ("n11", "n4", "n12", "n1", "n9", "n3", "n0"), }, ["n9", "n2", "n8", "n10", "n5", "n6", "n13", "n7", "n3", "n0", "n4"], 0, 3, [ ["n9", "n2", "n5"], ["n8", "n10", "n4"], ["n6", "n3", "n0"], ["n13"], ["n7"], ], ) def test_find_independent_subset_greedy_fuse(self): # ensure that fusing the sets during iteration results in the correct # iteration results. In the example graph after we merge n2 and n3, # n4 is no longer independent from n1. g, lookup = self.build_graph( { "n0": (), "n1": (), "n2": ("n0",), "n3": ("n1",), "n4": ("n2",), "n5": (), } ) opts = { "min_fuse_set_size": 0, "max_fuse_set_size": 100, } subnodes = ["n2", "n3", "n4", "n0", "n1", "n5"] subnodes = [lookup[n] for n in subnodes] i = torch._inductor.fx_passes.group_batch_fusion.find_independent_subset_greedy( subnodes, opts ) self.assertEqual(next(i), [lookup[n] for n in ["n2", "n3", "n5"]]) # fuse n2 and n3 which makes n4 now dependent on n1. args = tuple(lookup[n] for n in ["n0", "n1"]) fused = g.create_node("placeholder", "target", name="n2+n3", args=args) lookup["n2"].replace_all_uses_with(fused) g.erase_node(lookup["n2"]) lookup["n3"].replace_all_uses_with(fused) g.erase_node(lookup["n3"]) self.assertEqual(next(i), [lookup[n] for n in ["n4"]]) self.assertEqual(next(i), [lookup[n] for n in ["n0", "n1"]]) self.assertRaises(StopIteration, lambda: next(i)) if __name__ == "__main__": run_tests()
TestFindIndependentSubsetGreedy
python
django__django
tests/decorators/tests.py
{ "start": 2191, "end": 4207 }
class ____(TestCase): def test_attributes(self): """ Built-in decorators set certain attributes of the wrapped function. """ self.assertEqual(fully_decorated.__name__, "fully_decorated") self.assertEqual(fully_decorated.__doc__, "Expected __doc__") self.assertEqual(fully_decorated.__dict__["anything"], "Expected __dict__") def test_user_passes_test_composition(self): """ The user_passes_test decorator can be applied multiple times (#9474). """ def test1(user): user.decorators_applied.append("test1") return True def test2(user): user.decorators_applied.append("test2") return True def callback(request): return request.user.decorators_applied callback = user_passes_test(test1)(callback) callback = user_passes_test(test2)(callback) class DummyUser: pass class DummyRequest: pass request = DummyRequest() request.user = DummyUser() request.user.decorators_applied = [] response = callback(request) self.assertEqual(response, ["test2", "test1"]) # For testing method_decorator, a decorator that assumes a single argument. # We will get type arguments if there is a mismatch in the number of arguments. def simple_dec(func): @wraps(func) def wrapper(arg): return func("test:" + arg) return wrapper simple_dec_m = method_decorator(simple_dec) # For testing method_decorator, two decorators that add an attribute to the # function def myattr_dec(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.myattr = True return wrapper myattr_dec_m = method_decorator(myattr_dec) def myattr2_dec(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.myattr2 = True return wrapper myattr2_dec_m = method_decorator(myattr2_dec)
DecoratorsTest
python
django__django
django/db/models/query_utils.py
{ "start": 11199, "end": 18222 }
class ____: def _get_lookup(self, lookup_name): return self.get_lookups().get(lookup_name, None) @functools.cache def get_class_lookups(cls): class_lookups = [ parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls) ] return cls.merge_dicts(class_lookups) def get_instance_lookups(self): class_lookups = self.get_class_lookups() if instance_lookups := getattr(self, "instance_lookups", None): return {**class_lookups, **instance_lookups} return class_lookups get_lookups = class_or_instance_method(get_class_lookups, get_instance_lookups) get_class_lookups = classmethod(get_class_lookups) def get_lookup(self, lookup_name): from django.db.models.lookups import Lookup found = self._get_lookup(lookup_name) if found is None and hasattr(self, "output_field"): return self.output_field.get_lookup(lookup_name) if found is not None and not issubclass(found, Lookup): return None return found def get_transform(self, lookup_name): from django.db.models.lookups import Transform found = self._get_lookup(lookup_name) if found is None and hasattr(self, "output_field"): return self.output_field.get_transform(lookup_name) if found is not None and not issubclass(found, Transform): return None return found @staticmethod def merge_dicts(dicts): """ Merge dicts in reverse to preference the order of the original list. e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'. 
""" merged = {} for d in reversed(dicts): merged.update(d) return merged @classmethod def _clear_cached_class_lookups(cls): for subclass in subclasses(cls): subclass.get_class_lookups.cache_clear() def register_class_lookup(cls, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if "class_lookups" not in cls.__dict__: cls.class_lookups = {} cls.class_lookups[lookup_name] = lookup cls._clear_cached_class_lookups() return lookup def register_instance_lookup(self, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if "instance_lookups" not in self.__dict__: self.instance_lookups = {} self.instance_lookups[lookup_name] = lookup return lookup register_lookup = class_or_instance_method( register_class_lookup, register_instance_lookup ) register_class_lookup = classmethod(register_class_lookup) def _unregister_class_lookup(cls, lookup, lookup_name=None): """ Remove given lookup from cls lookups. For use in tests only as it's not thread-safe. """ if lookup_name is None: lookup_name = lookup.lookup_name del cls.class_lookups[lookup_name] cls._clear_cached_class_lookups() def _unregister_instance_lookup(self, lookup, lookup_name=None): """ Remove given lookup from instance lookups. For use in tests only as it's not thread-safe. """ if lookup_name is None: lookup_name = lookup.lookup_name del self.instance_lookups[lookup_name] _unregister_lookup = class_or_instance_method( _unregister_class_lookup, _unregister_instance_lookup ) _unregister_class_lookup = classmethod(_unregister_class_lookup) def select_related_descend(field, restricted, requested, select_mask): """ Return whether `field` should be used to descend deeper for `select_related()` purposes. Arguments: * `field` - the field to be checked. Can be either a `Field` or `ForeignObjectRel` instance. * `restricted` - a boolean field, indicating if the field list has been manually restricted using a select_related() clause. 
* `requested` - the select_related() dictionary. * `select_mask` - the dictionary of selected fields. """ # Only relationships can be descended. if not field.remote_field: return False # Forward MTI parent links should not be explicitly descended as they are # always JOIN'ed against (unless excluded by `select_mask`). if getattr(field.remote_field, "parent_link", False): return False # When `select_related()` is used without a `*requested` mask all # relationships are descended unless they are nullable. if not restricted: return not field.null # When `select_related(*requested)` is used only fields that are part of # `requested` should be descended. if field.name not in requested: return False # Prevent invalid usages of `select_related()` and `only()`/`defer()` such # as `select_related("a").only("b")` and `select_related("a").defer("a")`. if select_mask and field not in select_mask: raise FieldError( f"Field {field.model._meta.object_name}.{field.name} cannot be both " "deferred and traversed using select_related at the same time." ) return True def refs_expression(lookup_parts, annotations): """ Check if the lookup_parts contains references to the given annotations set. Because the LOOKUP_SEP is contained in the default annotation names, check each prefix of the lookup_parts for a match. """ for n in range(1, len(lookup_parts) + 1): level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n]) if annotations.get(level_n_lookup): return level_n_lookup, lookup_parts[n:] return None, () def check_rel_lookup_compatibility(model, target_opts, field): """ Check that self.model is compatible with target_opts. 
Compatibility is OK if: 1) model and opts match (where proxy inheritance is removed) 2) model is parent of opts' model or the other way around """ def check(opts): return ( model._meta.concrete_model == opts.concrete_model or opts.concrete_model in model._meta.all_parents or model in opts.all_parents ) # If the field is a primary key, then doing a query against the field's # model is ok, too. Consider the case: # class Restaurant(models.Model): # place = OneToOneField(Place, primary_key=True): # Restaurant.objects.filter(pk__in=Restaurant.objects.all()). # If we didn't have the primary key check, then pk__in (== place__in) would # give Place's opts as the target opts, but Restaurant isn't compatible # with that. This logic applies only to primary keys, as when doing # __in=qs, we are going to turn this into __in=qs.values('pk') later on. return check(target_opts) or ( getattr(field, "primary_key", False) and check(field.model._meta) )
RegisterLookupMixin
python
airbytehq__airbyte
airbyte-ci/connectors/live-tests/src/live_tests/commons/backends/duckdb_backend.py
{ "start": 358, "end": 3277 }
class ____(FileBackend): SAMPLE_SIZE = -1 def __init__( self, output_directory: Path, duckdb_path: Path, schema: Optional[Iterable[str]] = None, ): super().__init__(output_directory) self.duckdb_path = duckdb_path self.schema = schema @property def jsonl_files_to_insert(self) -> Iterable[Path]: return [ self.jsonl_catalogs_path, self.jsonl_connection_status_path, self.jsonl_specs_path, self.jsonl_states_path, self.jsonl_traces_path, self.jsonl_logs_path, self.jsonl_controls_path, self.jsonl_records_path, ] @staticmethod def sanitize_table_name(table_name: str) -> str: sanitized = str(table_name).replace(" ", "_") sanitized = re.sub(r"[^\w\s]", "", sanitized) if sanitized and sanitized[0].isdigit(): sanitized = "_" + sanitized return sanitized def write(self, airbyte_messages: Iterable[AirbyteMessage]) -> None: # Use the FileBackend to write the messages to disk as jsonl files super().write(airbyte_messages) duck_db_conn = duckdb.connect(str(self.duckdb_path)) if self.schema: sanitized_schema_name = "_".join([self.sanitize_table_name(s) for s in self.schema]) duck_db_conn.sql(f"CREATE SCHEMA IF NOT EXISTS {sanitized_schema_name}") duck_db_conn.sql(f"USE {sanitized_schema_name}") logging.info(f"Using schema {sanitized_schema_name}") for json_file in self.jsonl_files_to_insert: if json_file.exists(): table_name = self.sanitize_table_name(json_file.stem) logging.info(f"Creating table {table_name} from {json_file} in schema {sanitized_schema_name}") duck_db_conn.sql( f"CREATE TABLE {table_name} AS SELECT * FROM read_json_auto('{json_file}', sample_size = {self.SAMPLE_SIZE}, format = 'newline_delimited')" ) logging.info(f"Table {table_name} created in schema {sanitized_schema_name}") for json_file in self.record_per_stream_paths_data_only.values(): if json_file.exists(): table_name = self.sanitize_table_name(f"records_{json_file.stem}") logging.info( f"Creating table {table_name} from {json_file} in schema {sanitized_schema_name} to store stream records with the data 
field only" ) duck_db_conn.sql( f"CREATE TABLE {self.sanitize_table_name(table_name)} AS SELECT * FROM read_json_auto('{json_file}', sample_size = {self.SAMPLE_SIZE}, format = 'newline_delimited')" ) logging.info(f"Table {table_name} created in schema {sanitized_schema_name}") duck_db_conn.close()
DuckDbBackend
python
django__django
tests/admin_inlines/admin.py
{ "start": 6020, "end": 6193 }
class ____(admin.TabularInline): model = Question readonly_fields = ["call_me"] def call_me(self, obj): return "Callable in QuestionInline"
QuestionInline
python
bokeh__bokeh
src/bokeh/core/property/color.py
{ "start": 2237, "end": 2587 }
class ____(Property[colors.RGB]): """ Accept colors.RGB values. """ def validate(self, value: Any, detail: bool = True) -> None: super().validate(value, detail) if isinstance(value, colors.RGB): return msg = "" if not detail else f"expected RGB value, got {value!r}" raise ValueError(msg)
RGB
python
doocs__leetcode
solution/2900-2999/2951.Find the Peaks/Solution.py
{ "start": 0, "end": 228 }
class ____: def findPeaks(self, mountain: List[int]) -> List[int]: return [ i for i in range(1, len(mountain) - 1) if mountain[i - 1] < mountain[i] > mountain[i + 1] ]
Solution
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 91825, "end": 91898 }
class ____(Binop): operation = operator.ge _operator_repr = ">="
GE
python
pytorch__pytorch
torch/_inductor/pattern_matcher.py
{ "start": 13788, "end": 15275 }
class ____: """ Internal state needed while running PatternExpr._match(). """ outputs: list[Optional[PatternExpr]] pattern_to_node: dict[PatternExpr, Optional[torch.fx.Node]] graph: torch.fx.Graph exclusive_node_set: list[NodeOrConstant] def __init__( self, outputs: list[Optional[PatternExpr]], pattern_to_node: Optional[dict[PatternExpr, torch.fx.Node]] = None, *, graph: torch.fx.Graph, ) -> None: self.outputs = outputs self.pattern_to_node = {} if pattern_to_node is None else dict(pattern_to_node) self.graph = graph self.exclusive_node_set = [] def match(self, pattern: PatternExpr, node: NodeOrConstant) -> MatchResult: """wrapper to check reused nodes in patterns""" if pattern in self.pattern_to_node: if self.pattern_to_node[pattern] == node: return Match(self, pattern) # already checked this node else: return FailedMatch("repeated pattern differs") m = pattern._match(node, self) assert pattern not in self.pattern_to_node self.pattern_to_node[pattern] = node if m else None return m def filter_multi_user_patterns(self) -> dict[PatternExpr, torch.fx.Node]: return { pattern: node for pattern, node in self.pattern_to_node.items() if pattern.has_multiple_users() and node is not None }
MatchContext
python
apache__airflow
airflow-core/src/airflow/utils/log/logging_mixin.py
{ "start": 4329, "end": 5404 }
class ____(metaclass=abc.ABCMeta): """Define a log handler based on an external service (e.g. ELK, StackDriver).""" @property @abc.abstractmethod def log_name(self) -> str: """Return log name.""" @abc.abstractmethod def get_external_log_url(self, task_instance, try_number) -> str: """Return the URL for log visualization in the external service.""" @property @abc.abstractmethod def supports_external_link(self) -> bool: """Return whether handler is able to support external links.""" # We have to ignore typing errors here because Python I/O classes are a mess, and they do not # have the same type hierarchy defined as the `typing.IO` - they violate Liskov Substitution Principle # While it is ok to make your class derive from TextIOBase (and its good thing to do as they provide # base implementation for IO-implementing classes, it's impossible to make them work with # IO generics (and apparently it has not even been intended) # See more: https://giters.com/python/typeshed/issues/6077
ExternalLoggingMixin
python
buildout__buildout
src/zc/buildout/testrecipes.py
{ "start": 41, "end": 405 }
class ____: def __init__(self, buildout, name, options): self.buildout = buildout self.name = name self.options = options def install(self): items = list(self.options.items()) items.sort() for option, value in items: print_(" %s=%r" % (option, value)) return () update = install
Debug
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/solids.py
{ "start": 11821, "end": 14083 }
class ____(graphene.ObjectType): resource_key = graphene.NonNull(graphene.String) class Meta: name = "ResourceRequirement" def __init__(self, resource_key): super().__init__() self.resource_key = resource_key def build_solids(represented_pipeline, current_dep_index): check.inst_param(represented_pipeline, "represented_pipeline", RepresentedJob) return sorted( [ GrapheneSolid(represented_pipeline, solid_name, current_dep_index) for solid_name in current_dep_index.node_invocation_names ], key=lambda solid: solid.name, ) def _build_solid_handles( represented_pipeline: RepresentedJob, current_dep_index: DependencyStructureIndex, parent: Optional["GrapheneSolidHandle"] = None, ) -> Sequence["GrapheneSolidHandle"]: check.inst_param(represented_pipeline, "represented_pipeline", RepresentedJob) check.opt_inst_param(parent, "parent", GrapheneSolidHandle) all_handle: list[GrapheneSolidHandle] = [] for solid_invocation in current_dep_index.node_invocations: solid_name, solid_def_name = solid_invocation.node_name, solid_invocation.node_def_name handle = GrapheneSolidHandle( solid=GrapheneSolid(represented_pipeline, solid_name, current_dep_index), handle=NodeHandle(solid_name, parent.handleID if parent else None), parent=parent if parent else None, ) solid_def_snap = represented_pipeline.get_node_def_snap(solid_def_name) if isinstance(solid_def_snap, GraphDefSnap): all_handle += _build_solid_handles( represented_pipeline, represented_pipeline.get_dep_structure_index(solid_def_name), handle, ) all_handle.append(handle) return all_handle @lru_cache(maxsize=32) def build_solid_handles( represented_pipeline: RepresentedJob, ) -> Mapping[str, "GrapheneSolidHandle"]: check.inst_param(represented_pipeline, "represented_pipeline", RepresentedJob) return { str(item.handleID): item for item in _build_solid_handles( represented_pipeline, represented_pipeline.dep_structure_index ) }
GrapheneResourceRequirement
python
openai__openai-python
src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py
{ "start": 1837, "end": 2538 }
class ____(BaseModel): content_index: int """The index of the content part containing the audio.""" event_id: str """The unique ID of the server event.""" item_id: str """The ID of the user message item containing the audio.""" transcript: str """The transcribed text.""" type: Literal["conversation.item.input_audio_transcription.completed"] """ The event type, must be `conversation.item.input_audio_transcription.completed`. """ usage: Usage """Usage statistics for the transcription.""" logprobs: Optional[List[Logprob]] = None """The log probabilities of the transcription."""
ConversationItemInputAudioTranscriptionCompletedEvent
python
PrefectHQ__prefect
src/integrations/prefect-aws/tests/test_batch.py
{ "start": 3188, "end": 3538 }
class ____: async def test_batch_submit_explicit_async( self, job_queue_arn, job_definition_arn, aws_credentials ): job_id = await abatch_submit( "batch_test_job", job_queue_arn, job_definition_arn, aws_credentials, ) assert_valid_job_id(job_id)
TestBatchSubmitAsync
python
getsentry__sentry
tests/sentry/issues/endpoints/test_group_hashes.py
{ "start": 351, "end": 8699 }
class ____(APITestCase, SnubaTestCase): def test_only_return_latest_event(self) -> None: self.login_as(user=self.user) min_ago = before_now(minutes=1).isoformat() two_min_ago = before_now(minutes=2).isoformat() new_event_id = "b" * 32 old_event = self.store_event( data={ "event_id": "a" * 32, "message": "message", "timestamp": two_min_ago, "fingerprint": ["group-1"], }, project_id=self.project.id, ) new_event = self.store_event( data={ "event_id": new_event_id, "message": "message", "timestamp": min_ago, "fingerprint": ["group-1"], }, project_id=self.project.id, ) assert new_event.group_id == old_event.group_id url = f"/api/0/issues/{new_event.group_id}/hashes/" response = self.client.get(url, format="json") assert response.status_code == 200, response.content assert len(response.data) == 1 assert response.data[0]["latestEvent"]["eventID"] == new_event_id def test_return_multiple_hashes(self) -> None: self.login_as(user=self.user) min_ago = before_now(minutes=1).isoformat() two_min_ago = before_now(minutes=2).isoformat() event1 = self.store_event( data={ "event_id": "a" * 32, "message": "message", "timestamp": two_min_ago, "fingerprint": ["group-1"], }, project_id=self.project.id, ) event2 = self.store_event( data={ "event_id": "b" * 32, "message": "message2", "timestamp": min_ago, "fingerprint": ["group-2"], }, project_id=self.project.id, ) # Merge the events eventstream = SnubaEventStream() state = eventstream.start_merge(self.project.id, [event2.group_id], event1.group_id) assert state is not None eventstream.end_merge(state) url = f"/api/0/issues/{event1.group_id}/hashes/" response = self.client.get(url, format="json") assert response.status_code == 200, response.content assert len(response.data) == 2 primary_hashes = [hash["id"] for hash in response.data] assert primary_hashes == [event2.get_primary_hash(), event1.get_primary_hash()] def test_return_multiple_hashes_with_seer_match(self) -> None: self.login_as(user=self.user) min_ago = 
before_now(minutes=1).isoformat() two_min_ago = before_now(minutes=2).isoformat() event1 = self.store_event( data={ "event_id": "a" * 32, "message": "message", "timestamp": two_min_ago, "fingerprint": ["group-1"], }, project_id=self.project.id, ) event2 = self.store_event( data={ "event_id": "b" * 32, "message": "message2", "timestamp": min_ago, "fingerprint": ["group-2"], }, project_id=self.project.id, ) # Merge the events eventstream = SnubaEventStream() state = eventstream.start_merge(self.project.id, [event2.group_id], event1.group_id) assert state is not None eventstream.end_merge(state) # Get the grouphashes for both events (refresh after merge) hash1 = event1.get_primary_hash() hash2 = event2.get_primary_hash() # Refresh the grouphashes after merge to get updated group assignments grouphash1 = GroupHash.objects.get(project=self.project, hash=hash1) grouphash2 = GroupHash.objects.get(project=self.project, hash=hash2) # Manually update grouphash2 to point to the merged group (event1.group_id) grouphash2.group = event1.group grouphash2.save() # Get or create metadata for both grouphashes metadata1, _ = GroupHashMetadata.objects.get_or_create( grouphash=grouphash1, defaults={"schema_version": "8"} ) metadata2, _ = GroupHashMetadata.objects.get_or_create( grouphash=grouphash2, defaults={ "schema_version": "8", "seer_matched_grouphash": grouphash1, # hash2 points to hash1 as its seer match }, ) # Update the seer match if metadata already existed metadata2.seer_matched_grouphash = grouphash1 metadata2.save() url = f"/api/0/issues/{event1.group_id}/hashes/" response = self.client.get(url, format="json") assert response.status_code == 200, response.content assert len(response.data) == 2 # Find the hash data for each hash hash1_data = next(h for h in response.data if h["id"] == hash1) hash2_data = next(h for h in response.data if h["id"] == hash2) # hash1 should not be matched by seer (it's the parent) assert hash1_data["mergedBySeer"] is False # hash2 should be 
matched by seer (it points to hash1) assert hash2_data["mergedBySeer"] is True def test_unmerge(self) -> None: self.login_as(user=self.user) group = self.create_group( platform="javascript", metadata={"sdk": {"name_normalized": "sentry.javascript.nextjs"}}, ) hashes = [ GroupHash.objects.create(project=group.project, group=group, hash=hash) for hash in ["a" * 32, "b" * 32] ] url = "?".join( [ f"/api/0/issues/{group.id}/hashes/", urlencode({"id": [h.hash for h in hashes]}, True), ] ) with patch("sentry.issues.endpoints.group_hashes.metrics.incr") as mock_metrics_incr: response = self.client.put(url, format="json") assert response.status_code == 202, response.content mock_metrics_incr.assert_any_call( "grouping.unmerge_issues", sample_rate=1.0, tags={"platform": "javascript", "sdk": "sentry.javascript.nextjs"}, ) def test_unmerge_put_member(self) -> None: member_user = self.create_user(is_superuser=False) member = self.create_member(organization=self.organization, user=member_user, role="member") self.login_as(user=member) group = self.create_group( platform="javascript", metadata={"sdk": {"name_normalized": "sentry.javascript.nextjs"}}, ) hashes = [ GroupHash.objects.create(project=group.project, group=group, hash=hash) for hash in ["a" * 32, "b" * 32] ] url = "?".join( [ f"/api/0/issues/{group.id}/hashes/", urlencode({"id": [h.hash for h in hashes]}, True), ] ) with patch("sentry.issues.endpoints.group_hashes.metrics.incr") as mock_metrics_incr: response = self.client.put(url, format="json") assert response.status_code == 202, response.content mock_metrics_incr.assert_any_call( "grouping.unmerge_issues", sample_rate=1.0, tags={"platform": "javascript", "sdk": "sentry.javascript.nextjs"}, ) def test_unmerge_conflict(self) -> None: self.login_as(user=self.user) group = self.create_group(platform="javascript") hashes = [ GroupHash.objects.create(project=group.project, group=group, hash=hash) for hash in ["a" * 32, "b" * 32] ] url = "?".join( [ 
f"/api/0/issues/{group.id}/hashes/", urlencode({"id": [h.hash for h in hashes]}, True), ] ) hashes[0].update(state=GroupHash.State.LOCKED_IN_MIGRATION) hashes[1].update(state=GroupHash.State.LOCKED_IN_MIGRATION) response = self.client.put(url, format="json") assert response.status_code == 409 assert response.data["detail"] == "Already being unmerged"
GroupHashesTest
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/model_query_parameters_where.py
{ "start": 238, "end": 1300 }
class ____: ... def test1_alarm1(a: int, b: str, c: C, d): _test_sink(a) def test1_alarm2(a: int, b: str, c: C, d): _test_sink(b) def test1_alarm3(a: int, b: str, c: C, d): _test_sink(c) def test1_alarm4(a: int, b: str, c: C, d): _test_sink(d) def test1_positional_arguments(a: int, /, b: str): _test_sink(a) def test1_args_kwargs(a: int, *args, **kwargs): _test_sink(a) def test2_noalarm1(foo_1, foo_2): _test_sink(foo_1) def test2_noalarm2(foo_1, foo_2): _test_sink(foo_2) def test3_noalarm1(a: int, b: str, c: C, d): _test_sink(a) def test3_noalarm2(a: int, b: str, c: C, d): _test_sink(b) def test3_alarm1(a: int, b: str, c: C, d): _test_sink(c) def test3_alarm2(a: int, b: str, c: C, d): _test_sink(d) def test4_alarm1(a: List[str], b: List[int], c: C, d): _test_sink(a) def test4_noalarm1(a: List[str], b: List[int], c: C, d): _test_sink(b) def test4_alarm2(a: int, b: str, c: C, d): _test_sink(c) def test4_noalarm2(a: int, b: str, c: C, d): _test_sink(d)
C
python
readthedocs__readthedocs.org
readthedocs/core/logs.py
{ "start": 6262, "end": 7274 }
class ____: def __call__(self, logger, method_name, event_dict): record = event_dict.get("_record", None) if record is None: return event_dict event_dict.update( { "process_id": record.process, "line_number": record.lineno, } ) return event_dict shared_processors = [ structlog.contextvars.merge_contextvars, structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, structlog.stdlib.PositionalArgumentsFormatter(), structlog.processors.StackInfoRenderer(), structlog.processors.UnicodeDecoder(), ] structlog.configure( processors=list( [ structlog.stdlib.filter_by_level, *shared_processors, structlog.stdlib.ProcessorFormatter.wrap_for_formatter, ] ), logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, )
SysLogProcessor
python
apache__airflow
providers/redis/tests/integration/redis/sensors/test_redis_key.py
{ "start": 1117, "end": 1770 }
class ____: def setup_method(self): args = {"owner": "airflow", "start_date": DEFAULT_DATE} self.dag = DAG("test_dag_id", default_args=args) self.sensor = RedisKeySensor( task_id="test_task", redis_conn_id="redis_default", dag=self.dag, key="test_key" ) def test_poke(self): hook = RedisHook(redis_conn_id="redis_default") redis = hook.get_conn() redis.set("test_key", "test_value") assert self.sensor.poke(None), "Key exists on first call." redis.delete("test_key") assert not self.sensor.poke(None), "Key does NOT exists on second call."
TestRedisSensor
python
getsentry__sentry
tests/sentry/api/endpoints/test_warmup.py
{ "start": 116, "end": 337 }
class ____(APITestCase): def test_warmup_endpoint(self) -> None: url = reverse("sentry-warmup") response = self.client.get(url) assert response.status_code == status.HTTP_200_OK
WarmupEndpointTest
python
Pylons__pyramid
tests/test_config/test_assets.py
{ "start": 33638, "end": 33976 }
class ____(AssetSourceIntegrationTests, unittest.TestCase): def _getTargetClass(self): from pyramid.config.assets import FSAssetSource return FSAssetSource def _makeOne(self, prefix, base_prefix=here): klass = self._getTargetClass() return klass(os.path.join(base_prefix, prefix))
TestFSAssetSource
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_datacatalog.py
{ "start": 22396, "end": 23819 }
class ____: @mock.patch("airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook") def test_assert_valid_hook_call(self, mock_hook) -> None: with pytest.warns(AirflowProviderDeprecationWarning): task = CloudDataCatalogDeleteTagTemplateFieldOperator( task_id="task_id", location=TEST_LOCATION, tag_template=TEST_TAG_TEMPLATE_ID, field=TEST_TAG_TEMPLATE_FIELD_ID, force=TEST_FORCE, project_id=TEST_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, gcp_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) task.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with( gcp_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) mock_hook.return_value.delete_tag_template_field.assert_called_once_with( location=TEST_LOCATION, tag_template=TEST_TAG_TEMPLATE_ID, field=TEST_TAG_TEMPLATE_FIELD_ID, force=TEST_FORCE, project_id=TEST_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, )
TestCloudDataCatalogDeleteTagTemplateFieldOperator
python
streamlit__streamlit
lib/streamlit/testing/v1/element_tree.py
{ "start": 18142, "end": 18320 }
class ____(HeadingBase): def __init__(self, proto: HeadingProto, root: ElementTree) -> None: super().__init__(proto, root, "subheader") @dataclass(repr=False)
Subheader
python
fsspec__filesystem_spec
fsspec/compression.py
{ "start": 3073, "end": 5086 }
class ____(AbstractBufferedFile): def __init__(self, infile, mode, **kwargs): import snappy super().__init__( fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs ) self.infile = infile if "r" in mode: self.codec = snappy.StreamDecompressor() else: self.codec = snappy.StreamCompressor() def _upload_chunk(self, final=False): self.buffer.seek(0) out = self.codec.add_chunk(self.buffer.read()) self.infile.write(out) return True def seek(self, loc, whence=0): raise NotImplementedError("SnappyFile is not seekable") def seekable(self): return False def _fetch_range(self, start, end): """Get the specified set of bytes from remote""" data = self.infile.read(end - start) return self.codec.decompress(data) try: import snappy snappy.compress(b"") # Snappy may use the .sz file extension, but this is not part of the # standard implementation. register_compression("snappy", SnappyFile, []) except (ImportError, NameError, AttributeError): pass try: import lz4.frame register_compression("lz4", lz4.frame.open, "lz4") except ImportError: pass try: # zstd in the standard library for python >= 3.14 from compression.zstd import ZstdFile register_compression("zstd", ZstdFile, "zst") except ImportError: try: import zstandard as zstd def zstandard_file(infile, mode="rb"): if "r" in mode: cctx = zstd.ZstdDecompressor() return cctx.stream_reader(infile) else: cctx = zstd.ZstdCompressor(level=10) return cctx.stream_writer(infile) register_compression("zstd", zstandard_file, "zst") except ImportError: pass def available_compressions(): """Return a list of the implemented compressions.""" return list(compr)
SnappyFile
python
tensorflow__tensorflow
third_party/xla/xla/hlo/tools/tests/generate_hlo_test_checks_test.py
{ "start": 775, "end": 7433 }
class ____(absltest.TestCase): def setUp(self): super().setUp() self._data_dir = os.path.dirname(__file__) parent_dir = os.path.abspath(os.path.join(self._data_dir, os.pardir)) self._optimizer_path = os.path.join(parent_dir, "hlo-opt") self._input_file_path = os.path.join( self._data_dir, "generate_hlo_test_checks_test_input.hlo" ) self._output_file_path = os.path.join( self._data_dir, "generate_hlo_test_checks_test_output.hlo" ) def test_parallel_mode_generates_expected_output_file(self): actual_output = io.StringIO() # The `worker_count` argument is normally inferred, but we specify it here # in order to ensure that we're testing the parallel code path even in the # unlikely event that `os.process_cpu_count() == 1` on the test machine. with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=[ "{}", "--passes=logistic-expander,reshape-mover", ], worker_count=8, ) as writer: writer.transform_and_print_file( self._input_file_path, output_stream=actual_output, ) actual_output.seek(0) with open(self._output_file_path, mode="r") as expected_output: self.assertEqual(actual_output.read(), expected_output.read()) def test_sequential_mode_generates_expected_output_file(self): actual_output = io.StringIO() writer = generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=[ "{}", "--passes=logistic-expander,reshape-mover", ], ) writer.transform_and_print_file( self._input_file_path, output_stream=actual_output, ) actual_output.seek(0) with open(self._output_file_path, mode="r") as expected_output: self.assertEqual(actual_output.read(), expected_output.read()) def test_custom_input_file_placeholder_string(self): actual_output = io.StringIO() with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=[ "%s", "--passes=logistic-expander,reshape-mover", ], expand_to_input="%s", ) as writer: writer.transform_and_print_file( self._input_file_path, 
output_stream=actual_output, ) actual_output.seek(0) with open(self._output_file_path, mode="r") as expected_output: self.assertEqual(actual_output.read(), expected_output.read()) def test_argument_parsing(self): args = generate_hlo_test_checks.parse_args( "/path/to/test_file.hlo -- /path/to/hlo-opt {} --passes=foo,bar".split() ) self.assertEqual(args.test_file, "/path/to/test_file.hlo") self.assertEqual(args.in_place, False) self.assertEqual(args.expand_to_input, "{}") self.assertEqual(args.opt_cmd, "/path/to/hlo-opt") self.assertEqual(args.opt_args, ["{}", "--passes=foo,bar"]) args = generate_hlo_test_checks.parse_args( "test_file.hlo -i -I%s -- hlo-opt %s --passes=foo,bar".split() ) self.assertEqual(args.test_file, "test_file.hlo") self.assertEqual(args.in_place, True) self.assertEqual(args.expand_to_input, "%s") self.assertEqual(args.opt_cmd, "hlo-opt") self.assertEqual(args.opt_args, ["%s", "--passes=foo,bar"]) def test_conditional_parallelism(self): # A writer with `worker_count > 1` should have a worker pool. with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=["{}"], worker_count=2, ) as parallel_writer: self.assertIsNotNone(parallel_writer._worker_pool) # The worker pool should be destroyed when exiting the context manager. self.assertIsNone(parallel_writer._worker_pool) # A writer with `worker_count == 1` should not have a worker pool. with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=["{}"], worker_count=1, ) as sequential_writer: self.assertIsNone(sequential_writer._worker_pool) # Attempting to construct a writer with `worker_count < 1` should result in # a `ValueError`. 
with self.assertRaises(ValueError): with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=["{}"], worker_count=0, ): pass def test_unscoped_writer(self): # A writer with no active context manager shouldn't have a worker pool even # if `worker_count > 1`. This is because the worker pool is RAII-managed by # the context manager (i.e. the `with` block). In this situation, the writer # will operate sequentially as though `worker_count` were 1. writer = generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=["{}"], worker_count=4, ) self.assertIsNone(writer._worker_pool) # However, entering a context manager for a previously constructed writer # with `worker_count > 1` should initialize a worker pool that lasts for the # duration of the context manager. with writer: self.assertIsNotNone(writer._worker_pool) # The worker pool should be destroyed when exiting the context manager. self.assertIsNone(writer._worker_pool) def test_prevent_conflicting_context_managers(self): with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=["{}"], ) as writer: # Creating multiple overlapping context managers for the same # `TestCheckWriter` instance should result in a `RuntimeError`. with self.assertRaises(RuntimeError): with writer: pass def test_allow_non_conflicting_context_managers(self): with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=["{}"], ) as writer: # Context managers for *separate* `TestCheckWriter` instances are allowed # to coexist since they're managing separate resources. with generate_hlo_test_checks.TestCheckWriter( optimizer_path=self._optimizer_path, optimizer_args=["{}"], ): pass # A given `TestCheckWriter` instance may be reused in multiple context # managers as long as their scopes never overlap with each other (i.e. the # resources are released before they're re-acquired). 
with writer: pass if __name__ == "__main__": absltest.main()
GenerateHloTestChecksTest
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 79815, "end": 82962 }
class ____(Operation): def __init__(self, offset=0, axis1=0, axis2=1, *, name=None): super().__init__(name=name) self.offset = offset self.axis1 = axis1 self.axis2 = axis2 def call(self, x): return backend.numpy.diagonal( x, offset=self.offset, axis1=self.axis1, axis2=self.axis2, ) def compute_output_spec(self, x): x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "`diagonal` requires an array of at least two dimensions, but " f"`x` is of shape {x.shape}." ) shape_2d = [x_shape[self.axis1], x_shape[self.axis2]] x_shape[self.axis1] = -1 x_shape[self.axis2] = -1 output_shape = list(filter((-1).__ne__, x_shape)) if None in shape_2d: diag_shape = [None] else: shorter_side = np.minimum(shape_2d[0], shape_2d[1]) if self.offset > 0: remaining = shape_2d[1] - self.offset else: remaining = shape_2d[0] + self.offset diag_shape = [ int(np.maximum(0, np.minimum(remaining, shorter_side))) ] output_shape = output_shape + diag_shape return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.diagonal", "keras.ops.numpy.diagonal"]) def diagonal(x, offset=0, axis1=0, axis2=1): """Return specified diagonals. If `x` is 2-D, returns the diagonal of `x` with the given offset, i.e., the collection of elements of the form `x[i, i+offset]`. If `x` has more than two dimensions, the axes specified by `axis1` and `axis2` are used to determine the 2-D sub-array whose diagonal is returned. The shape of the resulting array can be determined by removing `axis1` and `axis2` and appending an index to the right equal to the size of the resulting diagonals. Args: x: Input tensor. offset: Offset of the diagonal from the main diagonal. Can be positive or negative. Defaults to `0`.(main diagonal). axis1: Axis to be used as the first axis of the 2-D sub-arrays. Defaults to `0`.(first axis). axis2: Axis to be used as the second axis of the 2-D sub-arrays. Defaults to `1` (second axis). Returns: Tensor of diagonals. 
Examples: >>> from keras.src import ops >>> x = ops.arange(4).reshape((2, 2)) >>> x array([[0, 1], [2, 3]]) >>> x.diagonal() array([0, 3]) >>> x.diagonal(1) array([1]) >>> x = ops.arange(8).reshape((2, 2, 2)) >>> x array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> x.diagonal(0, 0, 1) array([[0, 6], [1, 7]]) """ if any_symbolic_tensors((x,)): return Diagonal( offset=offset, axis1=axis1, axis2=axis2, ).symbolic_call(x) return backend.numpy.diagonal( x, offset=offset, axis1=axis1, axis2=axis2, )
Diagonal
python
geekcomputers__Python
venv/Lib/site-packages/pip/_internal/index/package_finder.py
{ "start": 21555, "end": 37666 }
class ____: """This finds packages. This is meant to match easy_install's technique for looking for packages, by reading pages and looking for appropriate links. """ def __init__( self, link_collector: LinkCollector, target_python: TargetPython, allow_yanked: bool, format_control: Optional[FormatControl] = None, candidate_prefs: Optional[CandidatePreferences] = None, ignore_requires_python: Optional[bool] = None, ) -> None: """ This constructor is primarily meant to be used by the create() class method and from tests. :param format_control: A FormatControl object, used to control the selection of source packages / binary packages when consulting the index and links. :param candidate_prefs: Options to use when creating a CandidateEvaluator object. """ if candidate_prefs is None: candidate_prefs = CandidatePreferences() format_control = format_control or FormatControl(set(), set()) self._allow_yanked = allow_yanked self._candidate_prefs = candidate_prefs self._ignore_requires_python = ignore_requires_python self._link_collector = link_collector self._target_python = target_python self.format_control = format_control # These are boring links that have already been logged somehow. self._logged_links: Set[Tuple[Link, LinkType, str]] = set() # Don't include an allow_yanked default value to make sure each call # site considers whether yanked releases are allowed. This also causes # that decision to be made explicit in the calling code, which helps # people when reading the code. @classmethod def create( cls, link_collector: LinkCollector, selection_prefs: SelectionPreferences, target_python: Optional[TargetPython] = None, ) -> "PackageFinder": """Create a PackageFinder. :param selection_prefs: The candidate selection preferences, as a SelectionPreferences object. :param target_python: The target Python interpreter to use when checking compatibility. If None (the default), a TargetPython object will be constructed from the running Python. 
""" if target_python is None: target_python = TargetPython() candidate_prefs = CandidatePreferences( prefer_binary=selection_prefs.prefer_binary, allow_all_prereleases=selection_prefs.allow_all_prereleases, ) return cls( candidate_prefs=candidate_prefs, link_collector=link_collector, target_python=target_python, allow_yanked=selection_prefs.allow_yanked, format_control=selection_prefs.format_control, ignore_requires_python=selection_prefs.ignore_requires_python, ) @property def target_python(self) -> TargetPython: return self._target_python @property def search_scope(self) -> SearchScope: return self._link_collector.search_scope @search_scope.setter def search_scope(self, search_scope: SearchScope) -> None: self._link_collector.search_scope = search_scope @property def find_links(self) -> List[str]: return self._link_collector.find_links @property def index_urls(self) -> List[str]: return self.search_scope.index_urls @property def trusted_hosts(self) -> Iterable[str]: for host_port in self._link_collector.session.pip_trusted_origins: yield build_netloc(*host_port) @property def allow_all_prereleases(self) -> bool: return self._candidate_prefs.allow_all_prereleases def set_allow_all_prereleases(self) -> None: self._candidate_prefs.allow_all_prereleases = True @property def prefer_binary(self) -> bool: return self._candidate_prefs.prefer_binary def set_prefer_binary(self) -> None: self._candidate_prefs.prefer_binary = True def requires_python_skipped_reasons(self) -> List[str]: reasons = { detail for _, result, detail in self._logged_links if result == LinkType.requires_python_mismatch } return sorted(reasons) def make_link_evaluator(self, project_name: str) -> LinkEvaluator: canonical_name = canonicalize_name(project_name) formats = self.format_control.get_allowed_formats(canonical_name) return LinkEvaluator( project_name=project_name, canonical_name=canonical_name, formats=formats, target_python=self._target_python, allow_yanked=self._allow_yanked, 
ignore_requires_python=self._ignore_requires_python, ) def _sort_links(self, links: Iterable[Link]) -> List[Link]: """ Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates """ eggs, no_eggs = [], [] seen: Set[Link] = set() for link in links: if link not in seen: seen.add(link) if link.egg_fragment: eggs.append(link) else: no_eggs.append(link) return no_eggs + eggs def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None: entry = (link, result, detail) if entry not in self._logged_links: # Put the link at the end so the reason is more visible and because # the link string is usually very long. logger.debug("Skipping link: %s: %s", detail, link) self._logged_links.add(entry) def get_install_candidate( self, link_evaluator: LinkEvaluator, link: Link ) -> Optional[InstallationCandidate]: """ If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None. """ result, detail = link_evaluator.evaluate_link(link) if result != LinkType.candidate: self._log_skipped_link(link, result, detail) return None try: return InstallationCandidate( name=link_evaluator.project_name, link=link, version=detail, ) except InvalidVersion: return None def evaluate_links( self, link_evaluator: LinkEvaluator, links: Iterable[Link] ) -> List[InstallationCandidate]: """ Convert links that are candidates to InstallationCandidate objects. 
""" candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator, link) if candidate is not None: candidates.append(candidate) return candidates def process_project_url( self, project_url: Link, link_evaluator: LinkEvaluator ) -> List[InstallationCandidate]: logger.debug( "Fetching project page and analyzing links: %s", project_url, ) index_response = self._link_collector.fetch_response(project_url) if index_response is None: return [] page_links = list(parse_links(index_response)) with indent_log(): package_links = self.evaluate_links( link_evaluator, links=page_links, ) return package_links @functools.lru_cache(maxsize=None) def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]: """Find all available InstallationCandidate for project_name This checks index_urls and find_links. All versions found are returned as an InstallationCandidate list. See LinkEvaluator.evaluate_link() for details on which files are accepted. 
""" link_evaluator = self.make_link_evaluator(project_name) collected_sources = self._link_collector.collect_sources( project_name=project_name, candidates_from_page=functools.partial( self.process_project_url, link_evaluator=link_evaluator, ), ) page_candidates_it = itertools.chain.from_iterable( source.page_candidates() for sources in collected_sources for source in sources if source is not None ) page_candidates = list(page_candidates_it) file_links_it = itertools.chain.from_iterable( source.file_links() for sources in collected_sources for source in sources if source is not None ) file_candidates = self.evaluate_links( link_evaluator, sorted(file_links_it, reverse=True), ) if logger.isEnabledFor(logging.DEBUG) and file_candidates: paths = [] for candidate in file_candidates: assert candidate.link.url # we need to have a URL try: paths.append(candidate.link.file_path) except Exception: paths.append(candidate.link.url) # it's not a local file logger.debug("Local files found: %s", ", ".join(paths)) # This is an intentional priority ordering return file_candidates + page_candidates def make_candidate_evaluator( self, project_name: str, specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, ) -> CandidateEvaluator: """Create a CandidateEvaluator object to use.""" candidate_prefs = self._candidate_prefs return CandidateEvaluator.create( project_name=project_name, target_python=self._target_python, prefer_binary=candidate_prefs.prefer_binary, allow_all_prereleases=candidate_prefs.allow_all_prereleases, specifier=specifier, hashes=hashes, ) @functools.lru_cache(maxsize=None) def find_best_candidate( self, project_name: str, specifier: Optional[specifiers.BaseSpecifier] = None, hashes: Optional[Hashes] = None, ) -> BestCandidateResult: """Find matches for the given project and specifier. :param specifier: An optional object implementing `filter` (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable versions. 
:return: A `BestCandidateResult` instance. """ candidates = self.find_all_candidates(project_name) candidate_evaluator = self.make_candidate_evaluator( project_name=project_name, specifier=specifier, hashes=hashes, ) return candidate_evaluator.compute_best_candidate(candidates) def find_requirement( self, req: InstallRequirement, upgrade: bool ) -> Optional[InstallationCandidate]: """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a InstallationCandidate if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise """ hashes = req.hashes(trust_internet=False) best_candidate_result = self.find_best_candidate( req.name, specifier=req.specifier, hashes=hashes, ) best_candidate = best_candidate_result.best_candidate installed_version: Optional[_BaseVersion] = None if req.satisfied_by is not None: installed_version = req.satisfied_by.version def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str: # This repeated parse_version and str() conversion is needed to # handle different vendoring sources from pip and pkg_resources. # If we stop using the pkg_resources provided specifier and start # using our own, we can drop the cast to str(). 
return ( ", ".join( sorted( {str(c.version) for c in cand_iter}, key=parse_version, ) ) or "none" ) if installed_version is None and best_candidate is None: logger.critical( "Could not find a version that satisfies the requirement %s " "(from versions: %s)", req, _format_versions(best_candidate_result.iter_all()), ) raise DistributionNotFound(f"No matching distribution found for {req}") def _should_install_candidate( candidate: Optional[InstallationCandidate], ) -> "TypeGuard[InstallationCandidate]": if installed_version is None: return True if best_candidate is None: return False return best_candidate.version > installed_version if not upgrade and installed_version is not None: if _should_install_candidate(best_candidate): logger.debug( "Existing installed version (%s) satisfies requirement " "(most up-to-date version is %s)", installed_version, best_candidate.version, ) else: logger.debug( "Existing installed version (%s) is most up-to-date and " "satisfies requirement", installed_version, ) return None if _should_install_candidate(best_candidate): logger.debug( "Using version %s (newest of versions: %s)", best_candidate.version, _format_versions(best_candidate_result.iter_applicable()), ) return best_candidate # We have an existing version, and its the best version logger.debug( "Installed version (%s) is most up-to-date (past versions: %s)", installed_version, _format_versions(best_candidate_result.iter_applicable()), ) raise BestVersionAlreadyInstalled def _find_name_version_sep(fragment: str, canonical_name: str) -> int: """Find the separator's index based on the package's canonical name. :param fragment: A <package>+<version> filename "fragment" (stem) or egg fragment. :param canonical_name: The package's canonical name. This function is needed since the canonicalized name does not necessarily have the same length as the egg info's name part. 
An example:: >>> fragment = 'foo__bar-1.0' >>> canonical_name = 'foo-bar' >>> _find_name_version_sep(fragment, canonical_name) 8 """ # Project name and version must be separated by one single dash. Find all # occurrences of dashes; if the string in front of it matches the canonical # name, this is the one separating the name and version parts. for i, c in enumerate(fragment): if c != "-": continue if canonicalize_name(fragment[:i]) == canonical_name: return i raise ValueError(f"{fragment} does not match {canonical_name}") def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]: """Parse the version string from a <package>+<version> filename "fragment" (stem) or egg fragment. :param fragment: The string to parse. E.g. foo-2.1 :param canonical_name: The canonicalized name of the package this belongs to. """ try: version_start = _find_name_version_sep(fragment, canonical_name) + 1 except ValueError: return None version = fragment[version_start:] if not version: return None return version
PackageFinder
python
run-llama__llama_index
llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/events.py
{ "start": 978, "end": 1132 }
class ____(BaseVoiceAgentEvent): model_config = ConfigDict(extra="allow") tool_call_id: str tool_name: str parameters: Any
ClientToolCallEvent
python
pytorch__pytorch
torch/distributed/tensor/_ops/_view_ops.py
{ "start": 1218, "end": 1340 }
class ____(DimSpec): """Output dimension maps directly to an input dimension.""" input_dim: int @dataclass
InputDim
python
scipy__scipy
benchmarks/benchmarks/optimize.py
{ "start": 10364, "end": 15428 }
class ____(Benchmark): """Benchmark the optimizers with smooth, unbounded, functions""" params = [ ['rosenbrock_slow', 'rosenbrock_nograd', 'rosenbrock', 'rosenbrock_tight', 'simple_quadratic', 'asymmetric_quadratic', 'sin_1d', 'booth', 'beale', 'LJ'], ["COBYLA", 'COBYQA', 'Powell', 'nelder-mead', 'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP', "Newton-CG", 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr'], ["mean_nfev", "mean_time"] ] param_names = ["test function", "solver", "result type"] def setup(self, func_name, method_name, ret_val): b = getattr(self, 'run_' + func_name)(methods=[method_name]) r = b.average_results().get(method_name) if r is None: raise NotImplementedError() self.result = getattr(r, ret_val) def track_all(self, func_name, method_name, ret_val): return self.result # SlowRosen has a 50us delay on each function evaluation. By comparing to # rosenbrock_nograd it should be possible to figure out how much time a # minimizer uses internally, compared to the time required for function # evaluation. def run_rosenbrock_slow(self, methods=None): s = funcs.SlowRosen() b = _BenchOptimizers("Rosenbrock function", fun=s.fun) for i in range(10): b.bench_run(np.random.uniform(-3, 3, 3), methods=methods) return b # see what the performance of the solvers are if numerical differentiation # has to be used. 
def run_rosenbrock_nograd(self, methods=None): b = _BenchOptimizers("Rosenbrock function", fun=rosen) for i in range(10): b.bench_run(np.random.uniform(-3, 3, 3), methods=methods) return b def run_rosenbrock(self, methods=None): b = _BenchOptimizers("Rosenbrock function", fun=rosen, der=rosen_der, hess=rosen_hess) for i in range(10): b.bench_run(np.random.uniform(-3, 3, 3), methods=methods) return b def run_rosenbrock_tight(self, methods=None): b = _BenchOptimizers("Rosenbrock function", fun=rosen, der=rosen_der, hess=rosen_hess, tol=1e-8) for i in range(10): b.bench_run(np.random.uniform(-3, 3, 3), methods=methods) return b def run_simple_quadratic(self, methods=None): s = funcs.SimpleQuadratic() # print "checking gradient", # scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3])) b = _BenchOptimizers("simple quadratic function", fun=s.fun, der=s.der, hess=s.hess) for i in range(10): b.bench_run(np.random.uniform(-2, 2, 3), methods=methods) return b def run_asymmetric_quadratic(self, methods=None): s = funcs.AsymmetricQuadratic() # print "checking gradient", # scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3])) b = _BenchOptimizers("function sum(x**2) + x[0]", fun=s.fun, der=s.der, hess=s.hess) for i in range(10): b.bench_run(np.random.uniform(-2, 2, 3), methods=methods) return b def run_sin_1d(self, methods=None): def fun(x): return np.sin(x[0]) def der(x): return np.array([np.cos(x[0])]) b = _BenchOptimizers("1d sin function", fun=fun, der=der, hess=None) for i in range(10): b.bench_run(np.random.uniform(-2, 2, 1), methods=methods) return b def run_booth(self, methods=None): s = funcs.Booth() # print "checking gradient", # scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3])) b = _BenchOptimizers("Booth's function", fun=s.fun, der=s.der, hess=None) for i in range(10): b.bench_run(np.random.uniform(0, 10, 2), methods=methods) return b def run_beale(self, methods=None): s = funcs.Beale() # print "checking gradient", # 
scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3])) b = _BenchOptimizers("Beale's function", fun=s.fun, der=s.der, hess=None) for i in range(10): b.bench_run(np.random.uniform(0, 10, 2), methods=methods) return b def run_LJ(self, methods=None): s = funcs.LJ() # print "checking gradient", # scipy.optimize.check_grad(s.get_energy, s.get_gradient, # np.random.uniform(-2,2,3*4)) natoms = 4 b = _BenchOptimizers( f"{natoms} atom Lennard Jones potential", fun=s.fun, der=s.der, hess=None ) for _ in range(10): b.bench_run(np.random.uniform(-2, 2, natoms*3), methods=methods) return b
BenchSmoothUnbounded
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/guides/dagster/development_to_production/assets_v2.py
{ "start": 133, "end": 840 }
class ____(Config): base_item_id: int @asset( io_manager_key="snowflake_io_manager", ) def items(config: ItemsConfig, hn_client: HNAPIClient) -> pd.DataFrame: """Items from the Hacker News API: each is a story or a comment on a story.""" max_id = hn_client.fetch_max_item_id() rows = [] # Hacker News API is 1-indexed, so adjust range by 1 for item_id in range(max_id - config.base_item_id + 1, max_id + 1): rows.append(hn_client.fetch_item_by_id(item_id)) result = pd.DataFrame(rows, columns=hn_client.item_field_names).drop_duplicates( subset=["id"] ) result.rename(columns={"by": "user_id"}, inplace=True) return result # end_items
ItemsConfig
python
django__django
tests/generic_inline_admin/tests.py
{ "start": 630, "end": 899 }
class ____: @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser( username="super", password="secret", email="super@example.com" ) @override_settings(ROOT_URLCONF="generic_inline_admin.urls")
TestDataMixin
python
google__jax
jaxlib/mosaic/python/layout_defs.py
{ "start": 911, "end": 1155 }
class ____(enum.Enum): SUBLANES = "sublanes" LANES = "lanes" SUBELEMENTS = "subelements" def __repr__(self): return self.name.lower() SUBLANES = Direction.SUBLANES LANES = Direction.LANES SUBELEMENTS = Direction.SUBELEMENTS
Direction
python
bokeh__bokeh
src/bokeh/core/property/validation.py
{ "start": 1438, "end": 3364 }
class ____: """ Control validation of bokeh properties This can be used as a context manager, or as a normal callable Args: value (bool) : Whether validation should occur or not Example: .. code-block:: python with validate(False): # do no validate while within this block pass validate(False) # don't validate ever See Also: :func:`~bokeh.core.property.bases.validation_on`: check the state of validation :func:`~bokeh.core.properties.without_property_validation`: function decorator """ def __init__(self, value) -> None: self.old = Property._should_validate Property._should_validate = value def __enter__(self): pass def __exit__(self, typ, value, traceback): Property._should_validate = self.old def without_property_validation(input_function): """ Turn off property validation during update callbacks Example: .. code-block:: python @without_property_validation def update(attr, old, new): # do things without validation See Also: :class:`~bokeh.core.properties.validate`: context manager for more fine-grained control """ @wraps(input_function) def func(*args, **kwargs): with validate(False): return input_function(*args, **kwargs) return func #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
validate
python
fastapi__sqlmodel
docs_src/tutorial/where/tutorial008_py310.py
{ "start": 71, "end": 1564 }
class ____(SQLModel, table=True): id: int | None = Field(default=None, primary_key=True) name: str secret_name: str age: int | None = None sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48) hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32) hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35) hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36) hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93) with Session(engine) as session: session.add(hero_1) session.add(hero_2) session.add(hero_3) session.add(hero_4) session.add(hero_5) session.add(hero_6) session.add(hero_7) session.commit() def select_heroes(): with Session(engine) as session: statement = select(Hero).where(Hero.age >= 35, Hero.age < 40) results = session.exec(statement) for hero in results: print(hero) def main(): create_db_and_tables() create_heroes() select_heroes() if __name__ == "__main__": main()
Hero
python
apache__avro
lang/py/avro/test/test_io.py
{ "start": 17947, "end": 20076 }
class ____(unittest.TestCase): def test_deserialization_fails(self) -> None: reader_schema = avro.schema.parse( json.dumps( { "namespace": "example.avro", "type": "record", "name": "User", "fields": [ {"name": "name", "type": "string"}, {"name": "age", "type": "int"}, {"name": "location", "type": "string"}, ], } ) ) writer_schema = avro.schema.parse( json.dumps( { "namespace": "example.avro", "type": "record", "name": "IncompatibleUser", "fields": [ {"name": "name", "type": "int"}, {"name": "age", "type": "int"}, {"name": "location", "type": "string"}, ], } ) ) incompatibleUserRecord = {"name": 100, "age": 21, "location": "Woodford"} writer = avro.io.DatumWriter(writer_schema) with io.BytesIO() as writer_bio: enc = avro.io.BinaryEncoder(writer_bio) writer.write(incompatibleUserRecord, enc) enc_bytes = writer_bio.getvalue() reader = avro.io.DatumReader(reader_schema) with io.BytesIO(enc_bytes) as reader_bio: self.assertRaises(avro.errors.InvalidAvroBinaryEncoding, reader.read, avro.io.BinaryDecoder(reader_bio)) incompatibleUserRecord = {"name": -10, "age": 21, "location": "Woodford"} with io.BytesIO() as writer_bio: enc = avro.io.BinaryEncoder(writer_bio) writer.write(incompatibleUserRecord, enc) enc_bytes = writer_bio.getvalue() reader = avro.io.DatumReader(reader_schema) with io.BytesIO(enc_bytes) as reader_bio: self.assertRaises(avro.errors.InvalidAvroBinaryEncoding, reader.read, avro.io.BinaryDecoder(reader_bio))
TestIncompatibleSchemaReading
python
getsentry__sentry
src/sentry/rules/filters/age_comparison.py
{ "start": 1188, "end": 3117 }
class ____(EventFilter): id = "sentry.rules.filters.age_comparison.AgeComparisonFilter" form_fields = { "comparison_type": {"type": "choice", "choices": age_comparison_choices}, "value": {"type": "number", "placeholder": 10}, "time": {"type": "choice", "choices": get_timerange_choices()}, } # An issue is newer/older than X minutes/hours/days/weeks label = "The issue is {comparison_type} than {value} {time}" prompt = "The issue is older or newer than..." def _passes(self, first_seen: datetime, current_time: datetime) -> bool: comparison_type = self.get_option("comparison_type") time = self.get_option("time") try: value = int(self.get_option("value")) except (TypeError, ValueError): return False if ( not comparison_type or not time or time not in timeranges or ( comparison_type != AgeComparisonType.OLDER and comparison_type != AgeComparisonType.NEWER ) ): return False _, delta_time = timeranges[time] passes_: bool = age_comparison_map[AgeComparisonType(comparison_type)]( first_seen + (value * delta_time), current_time ) return passes_ def passes(self, event: GroupEvent, state: EventState) -> bool: return self._passes(event.group.first_seen, timezone.now()) def passes_activity( self, condition_activity: ConditionActivity, event_map: dict[str, Any] ) -> bool: try: group = Group.objects.get_from_cache(id=condition_activity.group_id) except Group.DoesNotExist: return False return self._passes(group.first_seen, condition_activity.timestamp) def get_form_instance(self) -> AgeComparisonForm: return AgeComparisonForm(self.data)
AgeComparisonFilter
python
PyCQA__pylint
tests/functional/u/undefined/undefined_variable_py30.py
{ "start": 2658, "end": 2825 }
class ____(metaclass=MetaClass, parameter=variable): # [undefined-variable] pass # Test for #4031 # https://github.com/pylint-dev/pylint/issues/4031
InheritingClass
python
airbytehq__airbyte
airbyte-integrations/connectors/source-zoho-crm/source_zoho_crm/source.py
{ "start": 565, "end": 1352 }
class ____(AbstractSource): def check_connection(self, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, any]: """ :param config: the user-input config object conforming to the connector's spec.json :param logger: logger object :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise. """ api = ZohoAPI(config) return api.check_connection() def streams(self, config: Mapping[str, Any]) -> List["Stream"]: """ :param config: A Mapping of the user input configuration as defined in the connector spec. """ stream_factory = ZohoStreamFactory(config) return stream_factory.produce()
SourceZohoCrm
python
kamyu104__LeetCode-Solutions
Python/web-crawler-multithreaded.py
{ "start": 325, "end": 1918 }
class ____(object): NUMBER_OF_WORKERS = 8 def __init__(self): self.__cv = threading.Condition() self.__q = Queue.Queue() def crawl(self, startUrl, htmlParser): """ :type startUrl: str :type htmlParser: HtmlParser :rtype: List[str] """ SCHEME = "http://" def hostname(url): pos = url.find('/', len(SCHEME)) if pos == -1: return url return url[:pos] def worker(htmlParser, lookup): while True: from_url = self.__q.get() if from_url is None: break name = hostname(from_url) for to_url in htmlParser.getUrls(from_url): if name != hostname(to_url): continue with self.__cv: if to_url not in lookup: lookup.add(to_url) self.__q.put(to_url) self.__q.task_done() workers = [] self.__q = Queue.Queue() self.__q.put(startUrl) lookup = set([startUrl]) for i in xrange(self.NUMBER_OF_WORKERS): t = threading.Thread(target=worker, args=(htmlParser, lookup)) t.start() workers.append(t) self.__q.join() for t in workers: self.__q.put(None) for t in workers: t.join() return list(lookup) # Time: O(|V| + |E|) # Space: O(|V|) import threading import collections
Solution
python
pytorch__pytorch
torch/_inductor/codegen/segmented_tree.py
{ "start": 203, "end": 8193 }
class ____(Generic[T]): def __init__( self, values: list[T], update_op: Callable[[T, T], T], summary_op: Callable[[T, T], T], identity_element: T, ): """ Initialize a segment tree with the given values and operations. Args: values: list of initial values update_op: Function to apply when updating a value (e.g., addition) summary_op: Function to summarize two values (e.g., min, max, sum) identity_element: Identity element for the summary_op (e.g., 0 for sum, float('inf') for min) Raises: ValueError: If the input values list is empty """ if not values: raise ValueError("Cannot create a segment tree with empty values list") self.n = len(values) self.update_op = update_op self.summary_op = summary_op self.identity = identity_element # Size of segment tree array (next power of 2 * 2) # The tree follows a standard heap layout where # node `n`'s children are at `2*n` and `2*n+1`. # Index 0 is unused. self.size = 1 while self.size < self.n: self.size *= 2 self.size *= 2 # Initialize tree and lazy arrays self.tree = [identity_element] * self.size # The lazy array contains updates to the given node # Upon update, we only push updates to the top-most # nodes that fully receive the update. We then # propagate the update down as required (i.e., when # we receive an interval query that neither fully # contains the node nor fully doesn't contain the # node self.lazy: list[Optional[T]] = [None] * self.size # Build the tree self._build(values, 1, 0, self.n - 1) def _build(self, values: list[T], node: int, start: int, end: int) -> None: """ Build the segment tree recursively. 
Args: values: Original array of values node: Current node index in the segment tree start: Start index of the segment end: End index of the segment """ if start == end: # Leaf node if start < len(values): self.tree[node] = values[start] return mid = (start + end) // 2 left_child = 2 * node right_child = 2 * node + 1 # Recursively build left and right subtrees self._build(values, left_child, start, mid) self._build(values, right_child, mid + 1, end) # Update current node with summary of children self.tree[node] = self.summary_op(self.tree[left_child], self.tree[right_child]) def _children(self, node: int) -> list[int]: return [2 * node, 2 * node + 1] def _push_lazy(self, node: int, start: int, end: int) -> None: """ Push lazy updates down to children. Args: node: Current node index start: Start index of the segment end: End index of the segment """ lazy_node = self.lazy[node] if lazy_node is None: return # Apply lazy update to current node self.tree[node] = self.update_op(self.tree[node], lazy_node) if start != end: # Not a leaf node # Propagate to children for child in self._children(node): self.lazy[child] = self.update_op( _value_or(self.lazy[child], self.identity), lazy_node ) # Clear the lazy value self.lazy[node] = None def _update_range_helper( self, node: int, start: int, end: int, left: int, right: int, value: T ) -> None: """ Helper method to update a range of values in the segment tree. 
Args: node: Current node index start: Start index of the current segment end: End index of the current segment left: Start index of the range to update right: End index of the range to update value: Value to apply to the range """ # Push lazy updates before processing this node self._push_lazy(node, start, end) # No overlap if start > right or end < left: return # Complete overlap if start >= left and end <= right: # Apply update to current node self.lazy[node] = value self._push_lazy(node, start, end) return # Partial overlap, recurse to children mid = (start + end) // 2 left_child = 2 * node right_child = 2 * node + 1 self._update_range_helper(left_child, start, mid, left, right, value) self._update_range_helper(right_child, mid + 1, end, left, right, value) # Update current node based on children self.tree[node] = self.summary_op(self.tree[left_child], self.tree[right_child]) def _query_range_helper( self, node: int, start: int, end: int, left: int, right: int ) -> T: """ Helper method to query a range of values in the segment tree. Args: node: Current node index start: Start index of the current segment end: End index of the current segment left: Start index of the range to query right: End index of the range to query Returns: Summary value for the range """ # No overlap if start > right or end < left: return self.identity # Push lazy updates before processing this node self._push_lazy(node, start, end) # Complete overlap if start >= left and end <= right: return self.tree[node] # Partial overlap, recurse to children mid = (start + end) // 2 left_child = 2 * node right_child = 2 * node + 1 left_result = self._query_range_helper(left_child, start, mid, left, right) right_result = self._query_range_helper(right_child, mid + 1, end, left, right) # Combine results from children return self.summary_op(left_result, right_result) def update_range(self, start: int, end: int, value: T) -> None: """ Update a range of values in the segment tree. 
Args: start: Start index of the range to update (inclusive) end: End index of the range to update (inclusive) value: Value to apply to the range Raises: ValueError: If start > end or indices are out of bounds """ if start > end: raise ValueError("Start index must be less than or equal to end index") if start < 0 or start >= self.n: raise ValueError(f"Start index {start} out of bounds [0, {self.n - 1}]") if end < 0 or end >= self.n: raise ValueError(f"End index {end} out of bounds [0, {self.n - 1}]") self._update_range_helper(1, 0, self.n - 1, start, end, value) def summarize_range(self, start: int, end: int) -> T: """ Query a range of values in the segment tree. Args: start: Start index of the range to query (inclusive) end: End index of the range to query (inclusive) Returns: Summary value for the range according to the summary operation Raises: ValueError: If start > end or indices are out of bounds """ if start > end: raise ValueError("Start index must be less than or equal to end index") if start < 0 or start >= self.n: raise ValueError(f"Start index {start} out of bounds [0, {self.n - 1}]") if end < 0 or end >= self.n: raise ValueError(f"End index {end} out of bounds [0, {self.n - 1}]") return self._query_range_helper(1, 0, self.n - 1, start, end)
SegmentedTree
python
huggingface__transformers
tests/utils/import_structures/import_structure_raw_register_with_versions.py
{ "start": 992, "end": 1132 }
class ____: def __init__(self): pass @requires(backends=("torch<=2.5",)) def d2(): pass @requires(backends=("torch<2.5",))
D2
python
davidhalter__jedi
test/refactor/extract_function.py
{ "start": 3466, "end": 3617 }
class ____(): a = 3 #? 11 text {'new_name': 'f'} c = a + 2 # ++++++++++++++++++++++++++++++++++++++++++++++++++ def f(a): return a + 2
Ya
python
huggingface__transformers
src/transformers/models/speecht5/modeling_speecht5.py
{ "start": 33150, "end": 34558 }
class ____(nn.Module, EmbeddingAccessMixin): def __init__(self, config): super().__init__() self.config = config self.dropout = nn.Dropout(config.positional_dropout) self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.embed_positions = SpeechT5SinusoidalPositionalEmbedding( config.max_text_positions + config.pad_token_id + 1, config.hidden_size, config.pad_token_id, ) def forward( self, input_ids: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, ): if input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) else: raise ValueError("You have to specify `decoder_input_ids`") past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length() positions = self.embed_positions(input_ids, past_key_values_length) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale inputs_embeds += positions inputs_embeds = self.dropout(inputs_embeds) return inputs_embeds, attention_mask
SpeechT5TextDecoderPrenet
python
ApeWorX__ape
src/ape/utils/trace.py
{ "start": 6601, "end": 14433 }
class ____: """ Colors to use when displaying a call trace. Each item in the class points to the part of the trace it colors. """ CONTRACTS = "#ff8c00" """Contract type names.""" METHODS = "bright_green" """Method names; not including arguments or return values.""" INPUTS = "bright_magenta" """Method arguments.""" OUTPUTS = "bright_blue" """Method return values.""" DELEGATE = "#d75f00" """The part '(delegate)' that appears before delegate calls.""" VALUE = "#00afd7" """The transaction value, when it's > 0.""" GAS_COST = "dim" """The gas used of the call.""" def parse_gas_table(report: "GasReport") -> list[Table]: tables: list[Table] = [] for contract_id, method_calls in report.items(): title = f"{contract_id} Gas" table = Table(title=title, box=SIMPLE) table.add_column("Method") table.add_column("Times called", justify="right") table.add_column("Min.", justify="right") table.add_column("Max.", justify="right") table.add_column("Mean", justify="right") table.add_column("Median", justify="right") has_at_least_1_row = False for method_call, gases in sorted(method_calls.items()): if not gases: continue if not method_call or is_zero_hex(method_call) or is_evm_precompile(method_call): continue elif method_call == "__new__": # Looks better in the gas report. 
method_call = "__init__" has_at_least_1_row = True table.add_row( method_call, f"{len(gases)}", f"{min(gases)}", f"{max(gases)}", f"{int(round(mean(gases)))}", f"{int(round(median(gases)))}", ) if has_at_least_1_row: tables.append(table) return tables def parse_coverage_tables( coverage: "CoverageReport", verbose: bool = False, statement: bool = True ) -> list[Table]: return ( _parse_verbose_coverage(coverage, statement=statement) if verbose else [_parse_coverage_table(coverage, statement=statement)] ) def _parse_coverage_table(coverage: "CoverageReport", statement: bool = True) -> Table: table = Table(title="Contract Coverage", box=SIMPLE) # NOTE: Purposely uses same column names as coveragepy table.add_column("Name") if statement: table.add_column("Stmts", justify="right") table.add_column("Miss", justify="right") table.add_column("Cover", justify="right") table.add_column("Funcs", justify="right") for project in coverage.projects: for src in sorted(project.sources, key=lambda x: x.source_id.lower()): row = ( ( src.source_id, f"{src.lines_valid}", f"{src.miss_count}", f"{round(src.line_rate * 100, 2)}%", f"{round(src.function_rate * 100, 2)}%", ) if statement else (src.source_id, f"{round(src.function_rate * 100, 2)}%") ) table.add_row(*row) return table def _parse_verbose_coverage(coverage: "CoverageReport", statement: bool = True) -> list[Table]: tables = [] row: tuple[str, ...] 
for project in coverage.projects: for src in project.sources: for contract in src.contracts: title = f"{contract.name} Coverage" line_rate = round(contract.line_rate * 100, 2) fn_rate = round(contract.function_rate * 100, 2) caption = f"line={line_rate}%, func={fn_rate}%" table = Table(title=title, box=SIMPLE, caption=caption) rows: list[tuple[str, ...]] = [] table.add_column("Func", justify="right") if statement: table.add_column("Stmts", justify="right") table.add_column("Miss", justify="right") table.add_column("Cover", justify="right") for fn in contract.functions: if fn.name == "__builtin__" and not statement: # Ignore builtins when statement coverage is not being asked for. # It is impossible to really track. continue if fn.name == "__builtin__": # Create a row per unique type. builtins = {x.tag for x in fn.statements if x.tag} for builtin in builtins: name_chars = [ c for c in builtin.lower().strip().replace(" ", "_") if c.isalpha() or c == "_" ] name = f"__{''.join(name_chars).replace('dev_', '')}__" miss = ( 0 if any(s.hit_count > 0 for s in fn.statements if s.tag == builtin) else 1 ) rows.append( tuple((name, name, "1", f"{miss}", "0.0%" if miss else "100.0%")) ) else: row = ( ( fn.name, fn.full_name, f"{fn.lines_valid}", f"{fn.miss_count}", f"{round(fn.line_rate * 100, 2)}%", ) if statement else (fn.name, fn.full_name, "✓" if fn.hit_count > 0 else "x") ) rows.append(row) # Handle cases where normal names are duplicated. # Use full names in this case. rows_corrected = [] while rows: row = rows.pop() if row[0] in [r[0] for r in rows]: # Use full-name for all with same name. rows_corrected.append(row[1:]) for subrow in rows: if subrow[0] != row[0]: continue rows_corrected.append(subrow[1:]) rows.remove(subrow) else: # Use smaller name (no duplicates). 
rows_corrected.append((row[0], *row[2:])) for tbl_row in sorted(rows_corrected): table.add_row(*tbl_row) tables.append(table) return tables def _exclude_gas( exclusions: Sequence["ContractFunctionPath"], contract_id: str, method_id: str ) -> bool: for exclusion in exclusions: if exclusion.method_name is None and fnmatch(contract_id, exclusion.contract_name): # Skip this whole contract. Search contracts from sub-calls. return True for excl in exclusions: if not excl.method_name: # Full contract skips handled above. continue elif not fnmatch(contract_id, excl.contract_name): # Method may match, but contract does not match, so continue. continue elif method_id and fnmatch(method_id, excl.method_name): # Skip this report because of the method name exclusion criteria. return True return False
TraceStyles
python
apache__airflow
providers/hashicorp/tests/unit/hashicorp/secrets/test_vault.py
{ "start": 984, "end": 15664 }
class ____: @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_connection(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae", "lease_id": "", "renewable": False, "lease_duration": 0, "data": { "data": { "conn_type": "postgresql", "login": "airflow", "password": "airflow", "host": "host", "port": "5432", "schema": "airflow", "extra": '{"foo":"bar","baz":"taz"}', }, "metadata": { "created_time": "2020-03-16T21:01:43.331126Z", "deletion_time": "", "destroyed": False, "version": 1, }, }, "wrap_info": None, "warnings": None, "auth": None, } kwargs = { "connections_path": "connections", "mount_point": "airflow", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS", } test_client = VaultBackend(**kwargs) connection = test_client.get_connection(conn_id="test_postgres") assert connection.get_uri() == "postgresql://airflow:airflow@host:5432/airflow?foo=bar&baz=taz" @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_connection_without_predefined_mount_point(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae", "lease_id": "", "renewable": False, "lease_duration": 0, "data": { "data": { "conn_type": "postgresql", "login": "airflow", "password": "airflow", "host": "host", "port": "5432", "schema": "airflow", "extra": '{"foo":"bar","baz":"taz"}', }, "metadata": { "created_time": "2020-03-16T21:01:43.331126Z", "deletion_time": "", "destroyed": False, "version": 1, }, }, "wrap_info": None, "warnings": None, "auth": None, } kwargs = { "connections_path": "connections", "mount_point": None, "auth_type": "token", "url": "http://127.0.0.1:8200", "token": 
"s.7AU0I51yv1Q1lxOIg1F3ZRAS", } test_client = VaultBackend(**kwargs) connection = test_client.get_connection(conn_id="airflow/test_postgres") assert connection.get_uri() == "postgresql://airflow:airflow@host:5432/airflow?foo=bar&baz=taz" @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_variable_value(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { "request_id": "2d48a2ad-6bcb-e5b6-429d-da35fdf31f56", "lease_id": "", "renewable": False, "lease_duration": 0, "data": { "data": {"value": "world"}, "metadata": { "created_time": "2020-03-28T02:10:54.301784Z", "deletion_time": "", "destroyed": False, "version": 1, }, }, "wrap_info": None, "warnings": None, "auth": None, } kwargs = { "variables_path": "variables", "mount_point": "airflow", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS", } test_client = VaultBackend(**kwargs) returned_uri = test_client.get_variable("hello") assert returned_uri == "world" @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_variable_value_without_predefined_mount_point(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { "request_id": "2d48a2ad-6bcb-e5b6-429d-da35fdf31f56", "lease_id": "", "renewable": False, "lease_duration": 0, "data": { "data": {"value": "world"}, "metadata": { "created_time": "2020-03-28T02:10:54.301784Z", "deletion_time": "", "destroyed": False, "version": 1, }, }, "wrap_info": None, "warnings": None, "auth": None, } kwargs = { "variables_path": "variables", "mount_point": None, "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS", } test_client = VaultBackend(**kwargs) returned_uri = test_client.get_variable("airflow/hello") assert returned_uri == 
"world" @pytest.mark.parametrize( ("mount_point", "variables_path", "variable_key", "expected_args"), [ ("airflow", "variables", "hello", {"mount_point": "airflow", "path": "variables/hello"}), ( "airflow", "", "path/to/variables/hello", {"mount_point": "airflow", "path": "path/to/variables/hello"}, ), (None, "variables", "airflow/hello", {"mount_point": "airflow", "path": "variables/hello"}), ( None, "", "airflow/path/to/variables/hello", {"mount_point": "airflow", "path": "path/to/variables/hello"}, ), ], ) @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_variable_value_engine_version_1( self, mock_hvac, mount_point, variables_path, variable_key, expected_args ): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v1.read_secret.return_value = { "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b", "lease_id": "", "renewable": False, "lease_duration": 2764800, "data": {"value": "world"}, "wrap_info": None, "warnings": None, "auth": None, } kwargs = { "variables_path": variables_path, "mount_point": mount_point, "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS", "kv_engine_version": 1, } test_client = VaultBackend(**kwargs) returned_uri = test_client.get_variable(variable_key) mock_client.secrets.kv.v1.read_secret.assert_called_once_with(**expected_args) assert returned_uri == "world" @mock.patch.dict( "os.environ", { "AIRFLOW_VAR_HELLO": "world", }, ) @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_variable_value_non_existent_key(self, mock_hvac): """ Test that if the key with connection ID is not present in Vault, _VaultClient.get_connection should return None """ mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client # Response does not contain the requested key mock_client.secrets.kv.v2.read_secret_version.side_effect = InvalidPath() kwargs = { "variables_path": 
"variables", "mount_point": "airflow", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.7AU0I51yv1Q1lxOIg1F3ZRAS", } test_client = VaultBackend(**kwargs) assert test_client.get_variable("hello") is None mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with( mount_point="airflow", path="variables/hello", version=None, raise_on_deleted_version=True ) assert test_client.get_variable("hello") is None @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_auth_failure_raises_error(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.is_authenticated.return_value = False kwargs = { "connections_path": "connections", "mount_point": "airflow", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "test_wrong_token", } with pytest.raises(VaultError, match="Vault Authentication Error!"): VaultBackend(**kwargs).get_connection(conn_id="test") def test_auth_type_kubernetes_with_unreadable_jwt_raises_error(self): path = "/var/tmp/this_does_not_exist/334e918ef11987d3ef2f9553458ea09f" kwargs = { "auth_type": "kubernetes", "kubernetes_role": "default", "kubernetes_jwt_path": path, "url": "http://127.0.0.1:8200", } with pytest.raises(FileNotFoundError, match=path): VaultBackend(**kwargs).get_connection(conn_id="test") @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_config_value(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { "request_id": "2d48a2ad-6bcb-e5b6-429d-da35fdf31f56", "lease_id": "", "renewable": False, "lease_duration": 0, "data": { "data": {"value": "sqlite:////Users/airflow/airflow/airflow.db"}, "metadata": { "created_time": "2020-03-28T02:10:54.301784Z", "deletion_time": "", "destroyed": False, "version": 1, }, }, "wrap_info": None, "warnings": None, "auth": None, } kwargs = { "configs_path": 
"configurations", "mount_point": "secret", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.FnL7qg0YnHZDpf4zKKuFy0UK", } test_client = VaultBackend(**kwargs) returned_uri = test_client.get_config("sql_alchemy_conn") assert returned_uri == "sqlite:////Users/airflow/airflow/airflow.db" @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_get_config_value_without_predefined_mount_point(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { "request_id": "2d48a2ad-6bcb-e5b6-429d-da35fdf31f56", "lease_id": "", "renewable": False, "lease_duration": 0, "data": { "data": {"value": "sqlite:////Users/airflow/airflow/airflow.db"}, "metadata": { "created_time": "2020-03-28T02:10:54.301784Z", "deletion_time": "", "destroyed": False, "version": 1, }, }, "wrap_info": None, "warnings": None, "auth": None, } kwargs = { "configs_path": "configurations", "mount_point": None, "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.FnL7qg0YnHZDpf4zKKuFy0UK", } test_client = VaultBackend(**kwargs) returned_uri = test_client.get_config("airflow/sql_alchemy_conn") assert returned_uri == "sqlite:////Users/airflow/airflow/airflow.db" @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_connections_path_none_value(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client kwargs = { "connections_path": None, "mount_point": "airflow", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.FnL7qg0YnHZDpf4zKKuFy0UK", } test_client = VaultBackend(**kwargs) assert test_client.get_connection(conn_id="test") is None mock_hvac.Client.assert_not_called() @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_variables_path_none_value(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client kwargs 
= { "variables_path": None, "mount_point": "airflow", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.FnL7qg0YnHZDpf4zKKuFy0UK", } test_client = VaultBackend(**kwargs) assert test_client.get_variable("hello") is None mock_hvac.Client.assert_not_called() @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") def test_config_path_none_value(self, mock_hvac): mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client kwargs = { "config_path": None, "mount_point": "airflow", "auth_type": "token", "url": "http://127.0.0.1:8200", "token": "s.FnL7qg0YnHZDpf4zKKuFy0UK", } test_client = VaultBackend(**kwargs) assert test_client.get_config("test") is None mock_hvac.Client.assert_not_called()
TestVaultSecrets
python
run-llama__llama_index
llama-index-integrations/tools/llama-index-tools-playgrounds/llama_index/tools/playgrounds/subgraph_inspector/base.py
{ "start": 122, "end": 10178 }
class ____(GraphQLToolSpec):
    """
    Connects to subgraphs on The Graph's decentralized network via the Playgrounds API
    and introspects the subgraph. Provides functionalities to process and summarize the
    introspected schema for easy comprehension.

    Attributes:
        spec_functions (list): List of functions that specify the tool's capabilities.
        url (str): The endpoint URL for the GraphQL requests.
        headers (dict): Headers used for the GraphQL requests.

    """

    spec_functions = ["introspect_and_summarize_subgraph"]

    def __init__(self, identifier: str, api_key: str, use_deployment_id: bool = False):
        """
        Initialize the connection to the specified subgraph on The Graph's network.

        Args:
            identifier (str): The subgraph's identifier or deployment ID.
            api_key (str): API key for the Playgrounds API.
            use_deployment_id (bool): If True, treats the identifier as a deployment ID.
                Default is False.

        """
        self.url = self._generate_url(identifier, use_deployment_id)
        self.headers = {
            "Content-Type": "application/json",
            "Playgrounds-Api-Key": api_key,
        }

    def _generate_url(self, identifier: str, use_deployment_id: bool) -> str:
        """
        Generate the appropriate URL based on the identifier and whether it's a
        deployment ID or not.

        Args:
            identifier (str): The subgraph's identifier or deployment ID.
            use_deployment_id (bool): If True, constructs the URL using the deployment ID.

        Returns:
            str: The constructed URL.

        """
        endpoint = "deployments" if use_deployment_id else "subgraphs"
        return f"https://api.playgrounds.network/v1/proxy/{endpoint}/id/{identifier}"

    def introspect_and_summarize_subgraph(self) -> str:
        """
        Introspects the subgraph and summarizes its schema into textual categories.

        Returns:
            str: A textual summary of the introspected subgraph schema.

        """
        introspection_query = """
        query {
            __schema {
                types {
                    kind
                    name
                    description
                    enumValues {
                        name
                    }
                    fields {
                        name
                        args {
                            name
                        }
                        type {
                            kind
                            name
                            ofType {
                                name
                            }
                        }
                    }
                }
            }
        }
        """
        response = self._graphql_request(introspection_query)
        if "data" in response:
            result = response["data"]
            processed_subgraph = self._process_subgraph(result)
            return self.subgraph_to_text(processed_subgraph)
        else:
            # NOTE(review): any error detail returned by _graphql_request is
            # discarded here; callers only see a generic failure message.
            return "Error during introspection."

    def _graphql_request(self, query: str) -> dict:
        """
        Execute a GraphQL query against the subgraph's endpoint.

        Args:
            query (str): The GraphQL query string.

        Returns:
            dict: Response from the GraphQL server, either containing the data or an error.

        """
        payload = {"query": query.strip()}
        try:
            response = requests.post(self.url, headers=self.headers, json=payload)
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            # Transport/HTTP failures are reported in-band instead of raised.
            return {"error": str(e)}

    def _process_subgraph(self, result: dict) -> dict:
        """
        Processes the introspected subgraph schema into categories based on naming
        conventions.

        Args:
            result (dict): Introspected schema result from the GraphQL query.

        Returns:
            dict: A processed representation of the introspected schema, categorized
                into specific entity queries, list entity queries, and other entities.

        """
        processed_subgraph = {
            "specific_entity_queries": {},
            "list_entity_queries": {},
            "other_entities": {},
        }
        for type_ in result["__schema"]["types"]:
            if type_["name"].startswith("__"):
                continue  # Skip meta entities
            entity_name = type_["name"]
            fields, args_required = self._get_fields(type_)
            if fields:
                # Determine category based on naming convention:
                # plural name + no required args -> list query;
                # singular name + required args -> specific-entity query.
                if entity_name.endswith("s") and not args_required:
                    processed_subgraph["list_entity_queries"][entity_name] = fields
                elif not entity_name.endswith("s") and args_required:
                    processed_subgraph["specific_entity_queries"][entity_name] = fields
                else:
                    processed_subgraph["other_entities"][entity_name] = fields
        return processed_subgraph

    def _get_fields(self, type_):
        """
        Extracts relevant fields and their details from a given type within the
        introspected schema.

        Args:
            type_ (dict): A type within the introspected schema.

        Returns:
            tuple: A tuple containing a list of relevant fields and a boolean
                indicating if arguments are required for the fields.

        """
        fields = []
        args_required = False
        for f in type_.get("fields") or []:
            # Skip meta fields and helper types (filters, orderings, all-lowercase names).
            if f["name"] != "__typename" and not (
                f["name"].endswith("_filter")
                or f["name"].endswith("_orderBy")
                or f["name"].islower()
            ):
                field_info = {"name": f["name"]}
                # Check for enum values
                # NOTE(review): the introspection query above requests kind/name/ofType
                # for a field's "type", so "enumValues" on f["type"] may never be
                # populated — confirm against live introspection output.
                if "enumValues" in f["type"] and f["type"]["enumValues"]:
                    field_info["enumValues"] = [
                        enum_val["name"] for enum_val in f["type"]["enumValues"]
                    ]
                fields.append(field_info)
            if f.get("args") and len(f["args"]) > 0:
                args_required = True
            if f.get("type") and f["type"].get("fields"):
                # Recurse into nested field definitions when present.
                subfields, sub_args_required = self._get_fields(f["type"])
                fields.extend(subfields)
                if sub_args_required:
                    args_required = True
        return fields, args_required

    def format_section(
        self, category: str, description: str, example: str, entities: dict
    ) -> str:
        """
        Formats a given section of the subgraph introspection result into a readable
        string format.

        Args:
            category (str): The category name of the entities.
            description (str): A description explaining the category.
            example (str): A generic GraphQL query example related to the category.
            entities (dict): Dictionary containing entities and their fields related
                to the category.

        Returns:
            str: A formatted string representation of the provided section data.

        """
        section = [
            f"Category: {category}",
            f"Description: {description}",
            "Generic Example:",
            example,
            "\nDetailed Breakdown:",
        ]
        for entity, fields in entities.items():
            section.append(f" Entity: {entity}")
            for field_info in fields:
                field_str = f" - {field_info['name']}"
                if "enumValues" in field_info:
                    field_str += (
                        f" (Enum values: {', '.join(field_info['enumValues'])})"
                    )
                section.append(field_str)
            section.append("")  # Add a blank line for separation
        section.append("")  # Add another blank line for separation between sections
        return "\n".join(section)

    def subgraph_to_text(self, subgraph: dict) -> str:
        """
        Converts a processed subgraph representation into a textual summary based on
        entity categories.

        Args:
            subgraph (dict): A processed representation of the introspected schema,
                categorized into specific entity queries, list entity queries, and
                other entities.

        Returns:
            str: A textual summary of the processed subgraph schema.

        """
        sections = [
            (
                "Specific Entity Queries (Requires Arguments)",
                "These queries target a singular entity and require specific arguments (like an ID) to fetch data.",
                """
                {
                    entityName(id: "specific_id") {
                        fieldName1
                        fieldName2
                        ...
                    }
                }
                """,
                subgraph["specific_entity_queries"],
            ),
            (
                "List Entity Queries (Optional Arguments)",
                "These queries fetch a list of entities. They don't strictly require arguments but often accept optional parameters for filtering, sorting, and pagination.",
                """
                {
                    entityNames(first: 10, orderBy: "someField", orderDirection: "asc") {
                        fieldName1
                        fieldName2
                        ...
                    }
                }
                """,
                subgraph["list_entity_queries"],
            ),
            (
                "Other Entities",
                "These are additional entities that may not fit the conventional singular/plural querying pattern of subgraphs.",
                "",
                subgraph["other_entities"],
            ),
        ]
        result_lines = []
        for category, desc, example, entities in sections:
            result_lines.append(self.format_section(category, desc, example, entities))
        return "\n".join(result_lines)
PlaygroundsSubgraphInspectorToolSpec
python
wandb__wandb
wandb/plot/viz.py
{ "start": 173, "end": 538 }
class ____: name: str key: str = "" @property def config_value(self) -> dict[str, Any]: return { "id": self.name, "historyFieldSettings": {"x-axis": "_step", "key": self.key}, } @property def config_key(self) -> tuple[str, str, str]: return ("_wandb", "viz", self.key) @dataclass
VisualizeSpec
python
nedbat__coveragepy
tests/test_plugins.py
{ "start": 38121, "end": 46174 }
class ____(CoverageTest):
    """Tests of plugins that implement `dynamic_context`."""

    def make_plugin_capitalized_testnames(self, filename: str) -> None:
        """Create a dynamic context plugin that capitalizes the part after 'test_'."""
        self.make_file(
            filename,
            """\
            from coverage import CoveragePlugin

            class Plugin(CoveragePlugin):
                def dynamic_context(self, frame):
                    name = frame.f_code.co_name
                    if name.startswith(("test_", "doctest_")):
                        parts = name.split("_", 1)
                        return "%s:%s" % (parts[0], parts[1].upper())
                    return None

            def coverage_init(reg, options):
                reg.add_dynamic_context(Plugin())
            """,
        )

    def make_plugin_track_render(self, filename: str) -> None:
        """Make a dynamic context plugin that tracks 'render_' functions."""
        self.make_file(
            filename,
            """\
            from coverage import CoveragePlugin

            class Plugin(CoveragePlugin):
                def dynamic_context(self, frame):
                    name = frame.f_code.co_name
                    if name.startswith("render_"):
                        return 'renderer:' + name[7:]
                    return None

            def coverage_init(reg, options):
                reg.add_dynamic_context(Plugin())
            """,
        )

    def make_test_files(self) -> None:
        """Make some files to use while testing dynamic context plugins."""
        self.make_file(
            "rendering.py",
            """\
            def html_tag(tag, content):
                return f'<{tag}>{content}</{tag}>'

            def render_paragraph(text):
                return html_tag('p', text)

            def render_span(text):
                return html_tag('span', text)

            def render_bold(text):
                return html_tag('b', text)
            """,
        )
        self.make_file(
            "testsuite.py",
            """\
            import rendering

            def test_html_tag() -> None:
                assert rendering.html_tag('b', 'hello') == '<b>hello</b>'

            def doctest_html_tag():
                assert eval('''
                    rendering.html_tag('i', 'text') == '<i>text</i>'
                '''.strip())

            def test_renderers() -> None:
                assert rendering.render_paragraph('hello') == '<p>hello</p>'
                assert rendering.render_bold('wide') == '<b>wide</b>'
                assert rendering.render_span('world') == '<span>world</span>'

            def build_full_html():
                html = '<html><body>%s</body></html>' % (
                    rendering.render_paragraph(
                        rendering.render_span('hello')))
                return html
            """,
        )

    def run_all_functions(self, cov: Coverage, suite_name: str) -> None:  # pragma: nested
        """Run all functions in `suite_name` under coverage."""
        cov.start()
        suite = import_local_file(suite_name)
        try:
            # Call all functions in this module
            for name in dir(suite):
                variable = getattr(suite, name)
                if inspect.isfunction(variable):
                    variable()
        finally:
            cov.stop()

    def test_plugin_standalone(self) -> None:
        self.make_plugin_capitalized_testnames("plugin_tests.py")
        self.make_test_files()
        # Enable dynamic context plugin
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["plugin_tests"])
        # Run the tests
        self.run_all_functions(cov, "testsuite")
        # Labeled coverage is collected
        data = cov.get_data()
        filenames = self.get_measured_filenames(data)
        expected = ["", "doctest:HTML_TAG", "test:HTML_TAG", "test:RENDERERS"]
        assert expected == sorted(data.measured_contexts())
        data.set_query_context("doctest:HTML_TAG")
        assert [2] == sorted_lines(data, filenames["rendering.py"])
        data.set_query_context("test:HTML_TAG")
        assert [2] == sorted_lines(data, filenames["rendering.py"])
        data.set_query_context("test:RENDERERS")
        assert [2, 5, 8, 11] == sorted_lines(data, filenames["rendering.py"])

    def test_static_context(self) -> None:
        self.make_plugin_capitalized_testnames("plugin_tests.py")
        self.make_test_files()
        # Enable dynamic context plugin for coverage with named context
        cov = coverage.Coverage(context="mytests")
        cov.set_option("run:plugins", ["plugin_tests"])
        # Run the tests
        self.run_all_functions(cov, "testsuite")
        # Static context prefix is preserved
        data = cov.get_data()
        expected = [
            "mytests",
            "mytests|doctest:HTML_TAG",
            "mytests|test:HTML_TAG",
            "mytests|test:RENDERERS",
        ]
        assert expected == sorted(data.measured_contexts())

    def test_plugin_with_test_function(self) -> None:
        self.make_plugin_capitalized_testnames("plugin_tests.py")
        self.make_test_files()
        # Enable both a plugin and test_function dynamic context
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["plugin_tests"])
        cov.set_option("run:dynamic_context", "test_function")
        # Run the tests
        self.run_all_functions(cov, "testsuite")
        # test_function takes precedence over plugins - only
        # functions that are not labeled by test_function are
        # labeled by plugin_tests.
        data = cov.get_data()
        filenames = self.get_measured_filenames(data)
        expected = [
            "",
            "doctest:HTML_TAG",
            "testsuite.test_html_tag",
            "testsuite.test_renderers",
        ]
        assert expected == sorted(data.measured_contexts())

        def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
            # Helper: query `data` for one context and check measured lines.
            data.set_query_context(context)
            assert lines == sorted_lines(data, filenames["rendering.py"])

        assert_context_lines("doctest:HTML_TAG", [2])
        assert_context_lines("testsuite.test_html_tag", [2])
        assert_context_lines("testsuite.test_renderers", [2, 5, 8, 11])

    def test_multiple_plugins(self) -> None:
        self.make_plugin_capitalized_testnames("plugin_tests.py")
        self.make_plugin_track_render("plugin_renderers.py")
        self.make_test_files()
        # Enable two plugins
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["plugin_renderers", "plugin_tests"])
        self.run_all_functions(cov, "testsuite")
        # It is important to note, that line 11 (render_bold function) is never
        # labeled as renderer:bold context, because it is only called from
        # test_renderers function - so it already falls under test:RENDERERS
        # context.
        #
        # render_paragraph and render_span (lines 5, 8) are directly called by
        # testsuite.build_full_html, so they get labeled by renderers plugin.
        data = cov.get_data()
        filenames = self.get_measured_filenames(data)
        expected = [
            "",
            "doctest:HTML_TAG",
            "renderer:paragraph",
            "renderer:span",
            "test:HTML_TAG",
            "test:RENDERERS",
        ]
        assert expected == sorted(data.measured_contexts())

        def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
            # Helper: query `data` for one context and check measured lines.
            data.set_query_context(context)
            assert lines == sorted_lines(data, filenames["rendering.py"])

        assert_context_lines("test:HTML_TAG", [2])
        assert_context_lines("test:RENDERERS", [2, 5, 8, 11])
        assert_context_lines("doctest:HTML_TAG", [2])
        assert_context_lines("renderer:paragraph", [2, 5])
        assert_context_lines("renderer:span", [2, 8])
DynamicContextPluginTest
python
dagster-io__dagster
python_modules/dagster/dagster/_grpc/types.py
{ "start": 17674, "end": 19034 }
class ____( NamedTuple( "_PartitionSetExecutionParamArgs", [ ("repository_origin", RemoteRepositoryOrigin), ("partition_set_name", str), ("partition_names", Sequence[str]), ("instance_ref", Optional[InstanceRef]), ], ) ): def __new__( cls, repository_origin: RemoteRepositoryOrigin, partition_set_name: str, partition_names: Sequence[str], instance_ref: Optional[InstanceRef] = None, ): return super().__new__( cls, repository_origin=check.inst_param( repository_origin, "repository_origin", RemoteRepositoryOrigin ), partition_set_name=check.str_param(partition_set_name, "partition_set_name"), partition_names=check.sequence_param(partition_names, "partition_names", of_type=str), instance_ref=check.opt_inst_param(instance_ref, "instance_ref", InstanceRef), ) @whitelist_for_serdes( storage_name="PipelineSubsetSnapshotArgs", storage_field_names={ "job_origin": "pipeline_origin", "op_selection": "solid_selection", }, # asset_selection previously was erroneously represented as a sequence field_serializers={"asset_selection": SetToSequenceFieldSerializer}, )
PartitionSetExecutionParamArgs
python
tiangolo__fastapi
tests/test_security_api_key_header_optional.py
{ "start": 264, "end": 2082 }
class ____(BaseModel): username: str def get_current_user(oauth_header: Optional[str] = Security(api_key)): if oauth_header is None: return None user = User(username=oauth_header) return user @app.get("/users/me") def read_current_user(current_user: Optional[User] = Depends(get_current_user)): if current_user is None: return {"msg": "Create an account first"} return current_user client = TestClient(app) def test_security_api_key(): response = client.get("/users/me", headers={"key": "secret"}) assert response.status_code == 200, response.text assert response.json() == {"username": "secret"} def test_security_api_key_no_key(): response = client.get("/users/me") assert response.status_code == 200, response.text assert response.json() == {"msg": "Create an account first"} def test_openapi_schema(): response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == { "openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": { "/users/me": { "get": { "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, "summary": "Read Current User", "operationId": "read_current_user_users_me_get", "security": [{"APIKeyHeader": []}], } } }, "components": { "securitySchemes": { "APIKeyHeader": {"type": "apiKey", "name": "key", "in": "header"} } }, }
User
python
ray-project__ray
python/ray/data/_internal/logical/operators/n_ary_operator.py
{ "start": 581, "end": 1086 }
class ____(NAry): """Logical operator for zip.""" def __init__( self, *input_ops: LogicalOperator, ): super().__init__(*input_ops) def estimated_num_outputs(self): total_num_outputs = 0 for input in self._input_dependencies: num_outputs = input.estimated_num_outputs() if num_outputs is None: return None total_num_outputs = max(total_num_outputs, num_outputs) return total_num_outputs
Zip