language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {
"start": 52060,
"end": 55797
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global t1, t2
t1 = Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(30), nullable=False),
Column("data", String(30)),
)
# note that the primary key column in t2 is named differently
t2 = Table(
"t2",
metadata,
Column("t2id", Integer, ForeignKey("t1.id"), primary_key=True),
Column("t2data", String(30)),
)
def test_custompk(self):
"""test that the primary_key attribute is propagated to the
polymorphic mapper"""
class T1:
pass
class T2(T1):
pass
# create a polymorphic union with the select against the base table
# first. with the join being second, the alias of the union will
# pick up two "primary key" columns. technically the alias should have
# a 2-col pk in any case but the leading select has a NULL for the
# "t2id" column
d = util.OrderedDict()
d["t1"] = t1.select().where(t1.c.type == "t1").subquery()
d["t2"] = t1.join(t2)
pjoin = polymorphic_union(d, None, "pjoin")
self.mapper_registry.map_imperatively(
T1,
t1,
polymorphic_on=t1.c.type,
polymorphic_identity="t1",
with_polymorphic=("*", pjoin),
primary_key=[pjoin.c.id],
)
self.mapper_registry.map_imperatively(
T2, t2, inherits=T1, polymorphic_identity="t2"
)
ot1 = T1()
ot2 = T2()
sess = fixture_session()
sess.add(ot1)
sess.add(ot2)
sess.flush()
sess.expunge_all()
# query using get(), using only one value.
# this requires the select_table mapper
# has the same single-col primary key.
assert sess.get(T1, ot1.id).id == ot1.id
ot1 = sess.get(T1, ot1.id)
ot1.data = "hi"
sess.flush()
def test_pk_collapses(self):
"""test that a composite primary key attribute formed by a join
is "collapsed" into its minimal columns"""
class T1:
pass
class T2(T1):
pass
# create a polymorphic union with the select against the base table
# first. with the join being second, the alias of the union will
# pick up two "primary key" columns. technically the alias should have
# a 2-col pk in any case but the leading select has a NULL for the
# "t2id" column
d = util.OrderedDict()
d["t1"] = t1.select().where(t1.c.type == "t1").subquery()
d["t2"] = t1.join(t2)
pjoin = polymorphic_union(d, None, "pjoin")
self.mapper_registry.map_imperatively(
T1,
t1,
polymorphic_on=t1.c.type,
polymorphic_identity="t1",
with_polymorphic=("*", pjoin),
)
self.mapper_registry.map_imperatively(
T2, t2, inherits=T1, polymorphic_identity="t2"
)
assert len(class_mapper(T1).primary_key) == 1
ot1 = T1()
ot2 = T2()
sess = fixture_session()
sess.add(ot1)
sess.add(ot2)
sess.flush()
sess.expunge_all()
# query using get(), using only one value. this requires the
# select_table mapper
# has the same single-col primary key.
assert sess.get(T1, ot1.id).id == ot1.id
ot1 = sess.get(T1, ot1.id)
ot1.data = "hi"
sess.flush()
| CustomPKTest |
python | django__django | tests/queries/models.py | {
"start": 11699,
"end": 11832
} | class ____(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ["id"]
| NullableName |
python | fsspec__filesystem_spec | fsspec/implementations/http_sync.py | {
"start": 26466,
"end": 30332
} | class ____(AbstractBufferedFile):
def __init__(self, fs, url, mode="rb", session=None, **kwargs):
self.url = url
self.session = session
if mode != "rb":
raise ValueError
self.details = {"name": url, "size": None}
super().__init__(fs=fs, path=url, mode=mode, cache_type="readahead", **kwargs)
r = self.session.get(self.fs.encode_url(url), stream=True, **kwargs)
self.fs._raise_not_found_for_status(r, url)
self.it = r.iter_content(1024, False)
self.leftover = b""
self.r = r
def seek(self, *args, **kwargs):
raise ValueError("Cannot seek streaming HTTP file")
def read(self, num=-1):
bufs = [self.leftover]
leng = len(self.leftover)
while leng < num or num < 0:
try:
out = self.it.__next__()
except StopIteration:
break
if out:
bufs.append(out)
else:
break
leng += len(out)
out = b"".join(bufs)
if num >= 0:
self.leftover = out[num:]
out = out[:num]
else:
self.leftover = b""
self.loc += len(out)
return out
def close(self):
self.r.close()
self.closed = True
def get_range(session, url, start, end, **kwargs):
# explicit get a range when we know it must be safe
kwargs = kwargs.copy()
headers = kwargs.pop("headers", {}).copy()
headers["Range"] = f"bytes={start}-{end - 1}"
r = session.get(url, headers=headers, **kwargs)
r.raise_for_status()
return r.content
def _file_info(url, session, size_policy="head", **kwargs):
"""Call HEAD on the server to get details about the file (size/checksum etc.)
Default operation is to explicitly allow redirects and use encoding
'identity' (no compression) to get the true size of the target.
"""
logger.debug("Retrieve file size for %s", url)
kwargs = kwargs.copy()
ar = kwargs.pop("allow_redirects", True)
head = kwargs.get("headers", {}).copy()
# TODO: not allowed in JS
# head["Accept-Encoding"] = "identity"
kwargs["headers"] = head
info = {}
if size_policy == "head":
r = session.head(url, allow_redirects=ar, **kwargs)
elif size_policy == "get":
r = session.get(url, allow_redirects=ar, **kwargs)
else:
raise TypeError(f'size_policy must be "head" or "get", got {size_policy}')
r.raise_for_status()
# TODO:
# recognise lack of 'Accept-Ranges',
# or 'Accept-Ranges': 'none' (not 'bytes')
# to mean streaming only, no random access => return None
if "Content-Length" in r.headers:
info["size"] = int(r.headers["Content-Length"])
elif "Content-Range" in r.headers:
info["size"] = int(r.headers["Content-Range"].split("/")[1])
elif "content-length" in r.headers:
info["size"] = int(r.headers["content-length"])
elif "content-range" in r.headers:
info["size"] = int(r.headers["content-range"].split("/")[1])
for checksum_field in ["ETag", "Content-MD5", "Digest"]:
if r.headers.get(checksum_field):
info[checksum_field] = r.headers[checksum_field]
return info
# importing this is enough to register it
def register():
register_implementation("http", HTTPFileSystem, clobber=True)
register_implementation("https", HTTPFileSystem, clobber=True)
register_implementation("sync-http", HTTPFileSystem, clobber=True)
register_implementation("sync-https", HTTPFileSystem, clobber=True)
register()
def unregister():
from fsspec.implementations.http import HTTPFileSystem
register_implementation("http", HTTPFileSystem, clobber=True)
register_implementation("https", HTTPFileSystem, clobber=True)
| HTTPStreamFile |
python | kamyu104__LeetCode-Solutions | Python/can-you-eat-your-favorite-candy-on-your-favorite-day.py | {
"start": 29,
"end": 429
} | class ____(object):
def canEat(self, candiesCount, queries):
"""
:type candiesCount: List[int]
:type queries: List[List[int]]
:rtype: List[bool]
"""
prefix = [0]*(len(candiesCount)+1)
for i, c in enumerate(candiesCount):
prefix[i+1] = prefix[i]+c
return [prefix[t]//c < d+1 <= prefix[t+1]//1 for t, d, c in queries]
| Solution |
python | gevent__gevent | src/gevent/tests/test__queue.py | {
"start": 13059,
"end": 15116
} | class ____(TestSimpleQueue):
queue = queue
def _getFUT(self):
return queue.Queue
def test_task_done(self):
channel = self._makeOne()
X = object()
gevent.spawn(channel.put, X)
result = channel.get()
self.assertIs(result, X)
self.assertEqual(1, channel.unfinished_tasks)
channel.task_done()
self.assertEqual(0, channel.unfinished_tasks)
def _shutdown_all_methods_in_one_thread(self, immediate):
q = self._makeOne()
q.put("L")
q.put_nowait("O")
q.shutdown(immediate)
with self.assertRaises(self.queue.ShutDown):
q.put("E")
with self.assertRaises(self.queue.ShutDown):
q.put_nowait("W")
if immediate:
with self.assertRaises(self.queue.ShutDown):
q.get()
with self.assertRaises(self.queue.ShutDown):
q.get_nowait()
with self.assertRaises(ValueError):
q.task_done()
q.join()
else:
self.assertIn(q.get(), "LO")
q.task_done()
self.assertIn(q.get(), "LO")
q.task_done()
q.join()
# on shutdown(immediate=False)
# when queue is empty, should raise ShutDown Exception
with self.assertRaises(self.queue.ShutDown):
q.get() # p.get(True)
with self.assertRaises(self.queue.ShutDown):
q.get_nowait() # p.get(False)
with self.assertRaises(self.queue.ShutDown):
q.get(True, 1.0)
def test_shutdown_all_methods_in_one_thread(self):
return self._shutdown_all_methods_in_one_thread(False)
def test_shutdown_immediate_all_methods_in_one_thread(self):
return self._shutdown_all_methods_in_one_thread(True)
def test_issue_45(self):
"""Test that join() exits immediately if not jobs were put into the queue"""
self.switch_expected = False
q = self._makeOne()
q.join()
| TestQueue |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 17369,
"end": 17528
} | class ____:
"""Details about controller fallback behavior."""
reason: FallbackReason
message: str
@dataclasses.dataclass(frozen=True)
| FallbackDetail |
python | ray-project__ray | rllib/env/external/rllink.py | {
"start": 199,
"end": 3514
} | class ____(Enum):
PROTOCOL_VERSION = Version("0.0.1")
# Requests: Client (external env) -> Server (RLlib).
# ----
# Ping command (initial handshake).
PING = "PING"
# List of episodes (similar to what an EnvRunner.sample() call would return).
EPISODES = "EPISODES"
# Request state (e.g. model weights).
GET_STATE = "GET_STATE"
# Request Algorithm config.
GET_CONFIG = "GET_CONFIG"
# Send episodes and request the next state update right after that.
# Clients sending this message should wait for a SET_STATE message as an immediate
# response. Useful for external samplers that must collect on-policy data.
EPISODES_AND_GET_STATE = "EPISODES_AND_GET_STATE"
# Responses: Server (RLlib) -> Client (external env).
# ----
# Pong response (initial handshake).
PONG = "PONG"
# Set state (e.g. model weights).
SET_STATE = "SET_STATE"
# Set Algorithm config.
SET_CONFIG = "SET_CONFIG"
# @OldAPIStack (to be deprecated soon).
ACTION_SPACE = "ACTION_SPACE"
OBSERVATION_SPACE = "OBSERVATION_SPACE"
GET_WORKER_ARGS = "GET_WORKER_ARGS"
GET_WEIGHTS = "GET_WEIGHTS"
REPORT_SAMPLES = "REPORT_SAMPLES"
START_EPISODE = "START_EPISODE"
GET_ACTION = "GET_ACTION"
LOG_ACTION = "LOG_ACTION"
LOG_RETURNS = "LOG_RETURNS"
END_EPISODE = "END_EPISODE"
def __str__(self):
return self.name
@DeveloperAPI
def send_rllink_message(sock_, message: dict):
"""Sends a message to the client with a length header."""
global msgpack
if msgpack is None:
msgpack = try_import_msgpack(error=True)
body = msgpack.packb(message, use_bin_type=True) # .encode("utf-8")
header = str(len(body)).zfill(8).encode("utf-8")
try:
sock_.sendall(header + body)
except Exception as e:
raise ConnectionError(
f"Error sending message {message} to server on socket {sock_}! "
f"Original error was: {e}"
)
@DeveloperAPI
def get_rllink_message(sock_):
"""Receives a message from the client following the length-header protocol."""
global msgpack
if msgpack is None:
msgpack = try_import_msgpack(error=True)
try:
# Read the length header (8 bytes)
header = _get_num_bytes(sock_, 8)
msg_length = int(header.decode("utf-8"))
# Read the message body
body = _get_num_bytes(sock_, msg_length)
# Decode JSON.
message = msgpack.unpackb(body, raw=False) # .loads(body.decode("utf-8"))
# Check for proper protocol.
if "type" not in message:
raise ConnectionError(
"Protocol Error! Message from peer does not contain `type` field."
)
return RLlink(message.pop("type")), message
except Exception as e:
raise ConnectionError(
f"Error receiving message from peer on socket {sock_}! "
f"Original error was: {e}"
)
def _get_num_bytes(sock_, num_bytes):
"""Helper function to receive a specific number of bytes."""
data = b""
while len(data) < num_bytes:
packet = sock_.recv(num_bytes - len(data))
if not packet:
raise ConnectionError(f"No data received from socket {sock_}!")
data += packet
return data
| RLlink |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk4agg.py | {
"start": 1186,
"end": 1262
} | class ____(_BackendGTK4):
FigureCanvas = FigureCanvasGTK4Agg
| _BackendGTK4Agg |
python | pytorch__pytorch | torch/testing/_internal/common_distributed.py | {
"start": 59359,
"end": 70608
} | class ____(TestCase):
# Class variables:
MAIN_PROCESS_RANK = -1
# number of test processes
world_size: int = -2 # unset state
# rank of the current process
rank: int = -2 # unset state
# Rendezvous file
rdvz_file: Optional[str] = None
# timeout configured per class
timeout: timedelta = timedelta(seconds=120)
# Poison pill for rest of tests if one of them fails
poison_pill: bool = False
@classmethod
def backend_str(cls) -> Optional[str]:
"""
ProcessGroup backend str.
To be customized by sub test classes, e.g. "nccl".
Otherwise we return None -- lazily decided by tensor.
"""
return None
# Please override if you intend to test on specific device type
@classmethod
def device_type(cls) -> str:
curr_device = torch.accelerator.current_accelerator()
if curr_device is None:
return "cpu"
return curr_device.type
@classmethod
def opts(cls, high_priority_stream=False):
"""
ProcessGroup init options.
To be customized by sub test classes, e.g. ProcessGroupNCCLOpTest
Here we return None.
"""
return None
@classmethod
def _init_pg(cls, rank, world_size, rdvz_file):
assert rdvz_file is not None
# rank should be local_rank for tests running on <= 8 gpus which is how all these tests are designed
# and we expect LOCAL_RANK set by torchrun. Setting it lets init_device_mesh set the device without
# issuing a warning
os.environ["LOCAL_RANK"] = str(rank)
store = c10d.FileStore(rdvz_file, world_size)
# create nccl processgroup with opts
c10d.init_process_group(
backend=cls.backend_str(),
world_size=world_size,
rank=rank,
store=store,
pg_options=cls.opts(),
timeout=cls.timeout,
)
cls.pg = c10d.distributed_c10d._get_default_group()
@classmethod
def _run_test_given_id(cls, test_id: str, **kwargs) -> None:
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
test_name = test_id.rsplit(".", maxsplit=1)[-1]
# Get the test function from the test class
self = cls(test_name)
self.rank = cls.rank
self.world_size = cls.world_size
test_fn = getattr(self, test_name)
# Ensure all the ranks use the same seed.
common_utils.set_rng_seed()
# Run the test function
test_fn(**kwargs)
@classmethod
def _worker_loop(cls, rank, world_size, rdvz_file, task_queue, completion_queue):
raised_exception = False
# Sub tests are going to access these values, check first
assert 0 <= rank < world_size
# set class variables for the test class
cls.rank = rank
cls.world_size = world_size
# Initialize the process group
cls._init_pg(rank, world_size, rdvz_file)
# End of bootstrap
logger.debug("Setup complete")
# Loop forever, waiting for a test name to run
while True:
test_id = task_queue.get()
logger.debug(f"Got test {test_id}") # noqa: G004
# None means exit
if test_id is None:
break
# Run the test
try:
cls._run_test_given_id(test_id)
completion_queue.put(test_id)
except BaseException as ex: # noqa: B036
if isinstance(ex, SystemExit):
# Get exit code from the process
exit_code = getattr(ex, "code", None)
# Look up exit code in TEST_SKIPS to see if it is a valid skip
skip_entry = next(
(v for v in TEST_SKIPS.values() if v.exit_code == exit_code),
None,
)
# If we found an entry, we want to skip the test and the object back to the main process
if skip_entry:
completion_queue.put(unittest.SkipTest(skip_entry.message))
# Skip exception handling below, move to main thread for processing the skip
continue
raised_exception = True
# Send the exception and stack trace back to the dispatcher
exc_info = sys.exc_info()
tb_str = "".join(traceback.format_exception(*exc_info))
# Create a new exception with the original exception and traceback
enhanced_ex = RuntimeError(f"Exception in worker process:\n{tb_str}")
enhanced_ex.__cause__ = ex
completion_queue.put(enhanced_ex)
# Termination
logger.debug("Terminating ...")
# Calling destroy_process_group when workers have exceptions
# while others are doing collectives will cause a deadlock since
# it waits for enqueued collectives to finish.
# Only call this on a clean exit path
if not raised_exception:
c10d.destroy_process_group()
@classmethod
def _spawn_processes(cls, world_size) -> None:
cls.processes = []
cls.task_queues = []
cls.completion_queues = []
# Need a rendezvous file for `init_process_group` purpose.
cls.rdvz_file = tempfile.NamedTemporaryFile(delete=False).name
# CUDA multiprocessing requires spawn instead of fork, to make sure
# child processes have their own memory space.
try:
torch.multiprocessing.set_start_method("spawn")
except RuntimeError:
# The start method has already been set
pass
for rank in range(int(world_size)):
task_queue = torch.multiprocessing.Queue()
completion_queue = torch.multiprocessing.Queue()
process = torch.multiprocessing.Process(
target=cls._worker_loop,
name="process " + str(rank),
daemon=True, # so that child processes will exit if parent decides to terminate
args=(rank, world_size, cls.rdvz_file, task_queue, completion_queue),
)
process.start()
cls.processes.append(process)
cls.task_queues.append(task_queue)
cls.completion_queues.append(completion_queue)
logger.debug("Started process %s with pid %s", rank, process.pid) # noqa: UP031
@classmethod
def setUpClass(cls):
"""
Class-scope test fixture. Run once for entire test class, before any test starts.
Set up the process group.
"""
super().setUpClass()
# Use device count as world size
device_type = cls.device_type()
# If world_size is not set, use device count
if cls.world_size == -2:
cls.world_size = torch.get_device_module(device_type).device_count()
if cls.world_size == 0:
raise unittest.SkipTest(f"No {device_type} devices available")
logger.info(
f"Testing class {cls.__name__} on {cls.world_size} {device_type}" # noqa: G004
)
cls._spawn_processes(cls.world_size)
@classmethod
def tearDownClass(cls):
"""
Class-scope test fixture. Run once for entire test class, after all tests finish.
Tear down the process group.
"""
logger.debug(f"Joining {cls.world_size} workers") # noqa: G004
# Enqueue "None" to all workers to tell them to exit
for task_queue in cls.task_queues:
task_queue.put(None)
# Wait for all workers to exit
for process in cls.processes:
process.join()
# Clear up the rendezvous file
try:
os.remove(cls.rdvz_file)
except OSError:
pass
logger.info(f"Class {cls.__name__} finished") # noqa: G004
super().tearDownClass()
def setUp(self) -> None:
"""
Test fixture. Run before each test.
"""
super().setUp()
# I am the dispatcher
self.rank = self.MAIN_PROCESS_RANK
# If this test class hits an exception in one test, skip the rest of tests
if self.__class__.poison_pill:
raise unittest.SkipTest(f"Previous test failed, skipping {self.id()}")
# Enqueue "current test" to all workers
for i, task_queue in enumerate(self.task_queues):
logger.debug(f"Sending Rank {i}: {self.id()}") # noqa: G004
task_queue.put(self.id())
def _worker_run_main_wait(self, fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MAIN_PROCESS_RANK:
logger.debug(f"Waiting for workers to finish {self.id()}") # noqa: G004
# Wait for the workers to finish the test
for i, completion_queue in enumerate(self.completion_queues):
rv = completion_queue.get()
if isinstance(rv, unittest.SkipTest):
raise rv
if isinstance(rv, BaseException):
# Hit an exception, re-raise it in the main process.
logger.warning(
f"Detected failure from Rank {i} in: {self.id()}, " # noqa: G004
f"skipping rest of tests in Test class: {self.__class__.__name__}" # noqa: G004
)
# Poison rest of tests (because ProcessGroup may be not
# reusable now)
self.__class__.poison_pill = True
raise rv
# Success
assert rv == self.id()
logger.debug(
f"Main proc detected rank {i} finished {self.id()}" # noqa: G004
)
else:
# Worker just runs the test
fn()
return types.MethodType(wrapper, self)
# The main process spawns N subprocesses that run the test.
# Constructor patches current instance test method to
# assume the role of the main process and join its subprocesses,
# or run the underlying test function.
def __init__(
self, method_name: str = "runTest", methodName: str = "runTest"
) -> None:
# methodName is the correct naming in unittest and testslide uses keyword arguments.
# So we need to use both to 1) not break BC and, 2) support testslide.
if methodName != "runTest":
method_name = methodName
super().__init__(method_name)
try:
fn = getattr(self, method_name)
setattr(self, method_name, self._worker_run_main_wait(fn))
except AttributeError as e:
if methodName != "runTest":
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError(
f"no such test method in {self.__class__}: {methodName}"
) from e
| MultiProcContinuousTest |
python | pytest-dev__pytest | doc/en/example/assertion/failure_demo.py | {
"start": 4226,
"end": 5138
} | class ____:
def test_complex_error(self):
def f():
return 44
def g():
return 43
somefunc(f(), g())
def test_z1_unpack_error(self):
items = []
a, b = items
def test_z2_type_error(self):
items = 3
a, b = items
def test_startswith(self):
s = "123"
g = "456"
assert s.startswith(g)
def test_startswith_nested(self):
def f():
return "123"
def g():
return "456"
assert f().startswith(g())
def test_global_func(self):
assert isinstance(globf(42), float)
def test_instance(self):
self.x = 6 * 7
assert self.x != 42
def test_compare(self):
assert globf(10) < 5
def test_try_finally(self):
x = 1
try:
assert x == 0
finally:
x = 0
| TestMoreErrors |
python | tensorflow__tensorflow | tensorflow/python/training/sync_replicas_optimizer.py | {
"start": 1956,
"end": 18742
} | class ____(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
This class is deprecated. For synchronous training, please use [Distribution
Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with a N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
averaging them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following accumulators/queue are created:
* N `gradient accumulators`, one per variable to train. Gradients are pushed
to them and the chief worker will wait until enough gradients are collected
and then average them before applying to variables. The accumulator will
drop all stale gradients (more details in the accumulator op).
* 1 `token` queue where the optimizer pushes the new global_step value after
all variables are updated.
The following local variable is created:
* `sync_rep_local_step`, one per replica. Compared against the global_step in
each accumulator to check for staleness of the gradients.
The optimizer adds nodes to the graph to collect gradients and pause the
trainers until variables are updated.
For the Parameter Server job:
1. An accumulator is created for each variable, and each replica pushes the
gradients into the accumulators instead of directly applying them to the
variables.
2. Each accumulator averages once enough gradients (replicas_to_aggregate)
have been accumulated.
3. Apply the averaged gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch the global step, use it to
update its local_step variable and start the next batch. Please note that
some workers can consume multiple minibatches, while some may not consume
even one. This is because each worker fetches minibatches as long as
a token exists. If one worker is stuck for some reason and does not
consume a token, another worker can use it.
For the replicas:
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into gradient
accumulators. Each accumulator will check the staleness and drop the stale.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
opt = tf.compat.v1.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
training_op = opt.minimize(total_loss, global_step=self.global_step)
# You can create the hook which handles initialization and queues.
sync_replicas_hook = opt.make_session_run_hook(is_chief)
```
In the training program, every worker will run the train_op as if not
synchronized.
```python
with training.MonitoredTrainingSession(
master=workers[worker_id].target, is_chief=is_chief,
hooks=[sync_replicas_hook]) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(training_op)
```
"""
@deprecation.deprecated(
None, "The `SyncReplicaOptimizer` class is deprecated. For synchronous "
"training, please use [Distribution Strategies](https://github.com/"
"tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).",
warn_once=True)
def __init__(self,
opt,
replicas_to_aggregate,
total_num_replicas=None,
variable_averages=None,
variables_to_average=None,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info(
"SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# The synchronization op will be executed in a queue runner which should
# only be executed by one of the replicas (usually the chief).
self._chief_queue_runner = None
# Remember which accumulator is on which device to set the initial step in
# the accumulator to be global step. This list contains list of the
# following format: (accumulator, device).
self._accumulator_list = []
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
ValueError: If the grads_and_vars is empty.
ValueError: If global step is not provided, the staleness cannot be
checked.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
var_list = []
# local_anchor op will be placed on this worker task by default.
local_anchor = control_flow_ops.no_op()
# Colocating local_step variable prevents it being placed on the PS.
distribution_strategy = distribute_lib.get_strategy()
with distribution_strategy.extended.colocate_vars_with(local_anchor):
self._local_step = variable_v1.VariableV1(
initial_value=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=global_step.dtype.base_dtype,
name="sync_rep_local_step")
self.local_step_init_op = state_ops.assign(self._local_step, global_step)
chief_init_ops = [self.local_step_init_op]
self.ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
with ops.name_scope(None, self._name):
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
# Dense gradients.
if grad is None:
aggregated_grad.append(None) # pass-through.
continue
elif isinstance(grad, tensor.Tensor):
grad_accum = data_flow_ops.ConditionalAccumulator(
grad.dtype,
shape=var.get_shape(),
shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_grad(
self._replicas_to_aggregate))
else:
if not isinstance(grad, indexed_slices.IndexedSlices):
raise ValueError("Unknown grad type!")
grad_accum = data_flow_ops.SparseConditionalAccumulator(
grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_indexed_slices_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_indexed_slices_grad(
self._replicas_to_aggregate))
self._accumulator_list.append((grad_accum, var.device))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
name="sync_token_q",
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies(train_ops):
token = sync_token_queue.dequeue()
train_op = state_ops.assign(self._local_step, token)
with ops.control_dependencies([update_op]):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
tokens = array_ops.fill([self._tokens_per_step], global_step)
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(
sync_token_queue, [sync_op])
for accum, dev in self._accumulator_list:
with ops.device(dev):
chief_init_ops.append(
accum.set_global_step(
global_step, name="SetGlobalStep"))
self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
self._gradients_applied = True
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def variables(self):
"""Fetches a list of optimizer variables in the default graph.
This wraps `variables()` from the actual optimizer. It does not include
the `SyncReplicasOptimizer`'s local step.
Returns:
A list of variables.
"""
return self._opt.variables()
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
ValueError: If num_tokens are smaller than replicas_to_aggregate -
total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens], self._global_step)
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
def make_session_run_hook(self, is_chief, num_tokens=-1):
"""Creates a hook to handle SyncReplicasHook ops such as initialization."""
return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)
| SyncReplicasOptimizer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position11.py | {
"start": 315,
"end": 999
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"object_position": 4})
worksheet.set_row(10, None, None, {"hidden": True})
worksheet.set_column("F:F", None, None, {"hidden": True})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-prestashop/components.py | {
"start": 555,
"end": 5523
} | class ____(RecordTransformation):
"""
Remove all "empty" (e.g. '0000-00-00', '0000-00-00 00:00:00') 'date' and 'date-time' fields from record
"""
config: Config
parameters: InitVar[Optional[Mapping[str, Any]]] = None
def __post_init__(self, parameters: Optional[Mapping[str, Any]] = None):
# Handle the case when parameters is None
parameters = parameters or {}
self.name = parameters.get("name")
# Skip schema loading if name is None
if self.name is None:
self._schema = {}
self._date_and_date_time_fields = []
else:
self._schema = self._get_schema_root_properties()
self._date_and_date_time_fields = self._get_fields_with_property_formats_from_schema(("date", "date-time"))
def _get_schema_root_properties(self):
# Only call this if self.name is not None
if not self.name:
return {}
schema_loader = JsonFileSchemaLoader(config=self.config, parameters={"name": self.name})
schema = schema_loader.get_json_schema()
return schema.get("properties", {})
def _get_fields_with_property_formats_from_schema(self, property_formats: Tuple[str, ...]) -> List[str]:
"""
Get all properties from schema within property_formats
"""
return [k for k, v in self._schema.items() if v.get("format") in property_formats]
def parse(self, text):
"""
Direct replacement for pendulum.parse functionality.
Handles various date formats including those with timezone information.
"""
# Reject dates with zeros like '0000-00-00' or '0000-00-00 00:00:00'
if re.match(r"^0+[-]0+[-]0+", text):
raise ParserError("Zero date not allowed")
# Comprehensive list of formats to try
formats = [
# Basic formats
"%Y-%m-%d",
"%Y/%m/%d",
"%d-%m-%Y",
"%d/%m/%Y",
# Date and time formats
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M:%S.%f",
"%Y/%m/%d %H:%M:%S",
"%Y/%m/%d %H:%M:%S.%f",
# ISO formats
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
# With timezone
"%Y-%m-%d %H:%M:%S%z",
"%Y-%m-%d %H:%M:%S.%f%z",
"%Y-%m-%dT%H:%M:%S%z",
"%Y-%m-%dT%H:%M:%S.%f%z",
# Using Z for UTC
"%Y-%m-%dT%H:%M:%SZ",
"%Y-%m-%dT%H:%M:%S.%fZ",
]
# Try parsing with different formats
for fmt in formats:
try:
# Handle 'Z' timezone indicator for UTC
text_to_parse = text
if fmt.endswith("Z") and not text.endswith("Z"):
continue
if not fmt.endswith("Z") and text.endswith("Z"):
text_to_parse = text[:-1] # Remove Z
fmt = fmt + "Z" if "Z" not in fmt else fmt
date_obj = dt.strptime(text_to_parse, fmt)
# In pendulum, dates with zero components are rejected
if date_obj.year == 0 or date_obj.month == 0 or date_obj.day == 0:
raise ParserError("Date with zero components")
return date_obj
except ValueError:
continue
# Try ISO format as a last resort
try:
# Replace Z with +00:00 for ISO format parsing
iso_text = text.replace("Z", "+00:00")
# For Python < 3.11 compatibility, remove microseconds if they have more than 6 digits
microseconds_match = re.search(r"\.(\d{7,})(?=[+-Z]|$)", iso_text)
if microseconds_match:
fixed_micro = microseconds_match.group(1)[:6]
iso_text = iso_text.replace(microseconds_match.group(0), f".{fixed_micro}")
date_obj = dt.fromisoformat(iso_text)
if date_obj.year == 0 or date_obj.month == 0 or date_obj.day == 0:
raise ParserError("Date with zero components")
return date_obj
except (ValueError, AttributeError):
pass
# If nothing worked, raise the error like pendulum would
raise ParserError(f"Unable to parse: {text}")
def transform(
self,
record: Record,
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> Record:
# If we don't have any fields to check, just return the record as is
if not self._date_and_date_time_fields:
return record
for item in record:
if item in self._date_and_date_time_fields and record.get(item):
try:
self.parse(record[item])
except ParserError:
record[item] = None
return record
| CustomFieldTransformation |
python | plotly__plotly.py | plotly/graph_objs/_choroplethmap.py | {
"start": 215,
"end": 64427
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "choroplethmap"
_valid_props = {
"autocolorscale",
"below",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"featureidkey",
"geojson",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"locations",
"locationssrc",
"marker",
"meta",
"metasrc",
"name",
"reversescale",
"selected",
"selectedpoints",
"showlegend",
"showscale",
"stream",
"subplot",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"z",
"zauto",
"zmax",
"zmid",
"zmin",
"zsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def below(self):
"""
Determines if the choropleth polygons will be inserted before
the layer with the specified ID. By default, choroplethmap
traces are placed above the water layers. If set to '', the
layer will be inserted above every existing layer.
The 'below' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["below"]
@below.setter
def below(self, val):
self["below"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.choroplethmap.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def featureidkey(self):
"""
Sets the key in GeoJSON features which is used as id to match
the items included in the `locations` array. Support nested
property, for example "properties.name".
The 'featureidkey' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["featureidkey"]
@featureidkey.setter
def featureidkey(self, val):
self["featureidkey"] = val
@property
def geojson(self):
"""
Sets the GeoJSON data associated with this trace. It can be set
as a valid GeoJSON object or as a URL string. Note that we only
accept GeoJSONs of type "FeatureCollection" or "Feature" with
geometries of type "Polygon" or "MultiPolygon".
The 'geojson' property accepts values of any type
Returns
-------
Any
"""
return self["geojson"]
@geojson.setter
def geojson(self, val):
self["geojson"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['location', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'location+z')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.choroplethmap.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variable
`properties` Anything contained in tag `<extra>` is displayed
in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.choroplethmap.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def locations(self):
"""
Sets which features found in "geojson" to plot using their
feature `id` field.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.choroplethmap.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Returns
-------
plotly.graph_objs.choroplethmap.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.choroplethmap.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
map subplot. If "map" (the default value), the data refer to
`layout.map`. If "map2", the data refer to `layout.map2`, and
so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'map', that may be specified as the string 'map'
optionally followed by an integer >= 1
(e.g. 'map', 'map1', 'map2', 'map3', etc.)
Returns
-------
str
"""
return self["subplot"]
@subplot.setter
def subplot(self, val):
self["subplot"] = val
@property
def text(self):
"""
Sets the text elements associated with each location.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Returns
-------
plotly.graph_objs.choroplethmap.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def z(self):
"""
Sets the color values.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
@property
def zmax(self):
    """
    Upper bound of the color domain.

    Expressed in the same units as `z`; if set, `zmin` must be set as
    well. Accepts an int or float.

    Returns
    -------
    int|float
    """
    return self["zmax"]

@zmax.setter
def zmax(self, val):
    self["zmax"] = val
@property
def zmid(self):
    """
    Mid-point of the color domain.

    `zmin` and/or `zmax` are scaled so they are equidistant to this
    value. Expressed in the same units as `z`; has no effect when
    `zauto` is False. Accepts an int or float.

    Returns
    -------
    int|float
    """
    return self["zmid"]

@zmid.setter
def zmid(self, val):
    self["zmid"] = val
@property
def zmin(self):
    """
    Lower bound of the color domain.

    Expressed in the same units as `z`; if set, `zmax` must be set as
    well. Accepts an int or float.

    Returns
    -------
    int|float
    """
    return self["zmin"]

@zmin.setter
def zmin(self, val):
    self["zmin"] = val
@property
def zsrc(self):
    """
    Chart Studio Cloud source reference for `z`.

    Must be specified as a string or as a plotly.grid_objs.Column
    object.

    Returns
    -------
    str
    """
    return self["zsrc"]

@zsrc.setter
def zsrc(self, val):
    self["zsrc"] = val
@property
def type(self):
    # Trace type tag; __init__ always stores "choroplethmap" here.
    # Read-only: no setter is defined for this property.
    return self._props["type"]
@property
def _prop_descriptions(self):
    # Plain-text catalogue of every property this trace accepts; the
    # constructor splices this text into its own docstring. Generated
    # content — do not edit by hand.
    return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `colorscale`. In case `colorscale` is unspecified or
            `autocolorscale` is true, the default palette will be
            chosen according to whether numbers in the `color`
            array are all positive, all negative or mixed.
        below
            Determines if the choropleth polygons will be inserted
            before the layer with the specified ID. By default,
            choroplethmap traces are placed above the water layers.
            If set to '', the layer will be inserted above every
            existing layer.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`plotly.graph_objects.choroplethmap.ColorBar`
            instance or dict with compatible properties
        colorscale
            Sets the colorscale. The colorscale must be an array
            containing arrays mapping a normalized value to an rgb,
            rgba, hex, hsl, hsv, or named color string. At minimum,
            a mapping for the lowest (0) and highest (1) values are
            required. For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `zmin` and `zmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody,Bluered,Blues,C
            ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
            and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        featureidkey
            Sets the key in GeoJSON features which is used as id to
            match the items included in the `locations` array.
            Support nested property, for example "properties.name".
        geojson
            Sets the GeoJSON data associated with this trace. It
            can be set as a valid GeoJSON object or as a URL
            string. Note that we only accept GeoJSONs of type
            "FeatureCollection" or "Feature" with geometries of
            type "Polygon" or "MultiPolygon".
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.choroplethmap.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variable `properties` Anything contained in tag
            `<extra>` is displayed in the secondary box, for
            example `<extra>%{fullData.name}</extra>`. To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        hovertemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.choroplethmap.Legendgroupt
            itle` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        locations
            Sets which features found in "geojson" to plot using
            their feature `id` field.
        locationssrc
            Sets the source reference on Chart Studio Cloud for
            `locations`.
        marker
            :class:`plotly.graph_objects.choroplethmap.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        reversescale
            Reverses the color mapping if true. If true, `zmin`
            will correspond to the last color in the array and
            `zmax` will correspond to the first color.
        selected
            :class:`plotly.graph_objects.choroplethmap.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace.
        stream
            :class:`plotly.graph_objects.choroplethmap.Stream`
            instance or dict with compatible properties
        subplot
            Sets a reference between this trace's data coordinates
            and a map subplot. If "map" (the default value), the
            data refer to `layout.map`. If "map2", the data refer
            to `layout.map2`, and so on.
        text
            Sets the text elements associated with each location.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.choroplethmap.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        z
            Sets the color values.
        zauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in `z`) or the
            bounds set in `zmin` and `zmax` Defaults to `false`
            when `zmin` and `zmax` are set by the user.
        zmax
            Sets the upper bound of the color domain. Value should
            have the same units as in `z` and if set, `zmin` must
            be set as well.
        zmid
            Sets the mid-point of the color domain by scaling
            `zmin` and/or `zmax` to be equidistant to this point.
            Value should have the same units as in `z`. Has no
            effect when `zauto` is `false`.
        zmin
            Sets the lower bound of the color domain. Value should
            have the same units as in `z` and if set, `zmax` must
            be set as well.
        zsrc
            Sets the source reference on Chart Studio Cloud for
            `z`.
        """
def __init__(
    self,
    arg=None,
    autocolorscale=None,
    below=None,
    coloraxis=None,
    colorbar=None,
    colorscale=None,
    customdata=None,
    customdatasrc=None,
    featureidkey=None,
    geojson=None,
    hoverinfo=None,
    hoverinfosrc=None,
    hoverlabel=None,
    hovertemplate=None,
    hovertemplatefallback=None,
    hovertemplatesrc=None,
    hovertext=None,
    hovertextsrc=None,
    ids=None,
    idssrc=None,
    legend=None,
    legendgroup=None,
    legendgrouptitle=None,
    legendrank=None,
    legendwidth=None,
    locations=None,
    locationssrc=None,
    marker=None,
    meta=None,
    metasrc=None,
    name=None,
    reversescale=None,
    selected=None,
    selectedpoints=None,
    showlegend=None,
    showscale=None,
    stream=None,
    subplot=None,
    text=None,
    textsrc=None,
    uid=None,
    uirevision=None,
    unselected=None,
    visible=None,
    z=None,
    zauto=None,
    zmax=None,
    zmid=None,
    zmin=None,
    zsrc=None,
    **kwargs,
):
    """
    Construct a new Choroplethmap object.

    GeoJSON features to be filled are set in `geojson`; the data that
    describes the choropleth value-to-color mapping is set in
    `locations` and `z`.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of :class:`plotly.graph_objs.Choroplethmap`.
    **other keyword arguments**
        Every remaining keyword corresponds to a trace property of
        the same name; see the matching property docstring on this
        class (or `_prop_descriptions`) for the accepted values.

    Returns
    -------
    Choroplethmap
    """
    super().__init__("choroplethmap")

    # Internal fast path: when constructed by a parent figure/trace,
    # just record the parent and skip normal property processing.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict that is safe to mutate.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.Choroplethmap
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Choroplethmap`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Route every explicit constructor argument through the standard
    # property machinery, preserving the original assignment order.
    for prop_name, prop_val in (
        ("autocolorscale", autocolorscale),
        ("below", below),
        ("coloraxis", coloraxis),
        ("colorbar", colorbar),
        ("colorscale", colorscale),
        ("customdata", customdata),
        ("customdatasrc", customdatasrc),
        ("featureidkey", featureidkey),
        ("geojson", geojson),
        ("hoverinfo", hoverinfo),
        ("hoverinfosrc", hoverinfosrc),
        ("hoverlabel", hoverlabel),
        ("hovertemplate", hovertemplate),
        ("hovertemplatefallback", hovertemplatefallback),
        ("hovertemplatesrc", hovertemplatesrc),
        ("hovertext", hovertext),
        ("hovertextsrc", hovertextsrc),
        ("ids", ids),
        ("idssrc", idssrc),
        ("legend", legend),
        ("legendgroup", legendgroup),
        ("legendgrouptitle", legendgrouptitle),
        ("legendrank", legendrank),
        ("legendwidth", legendwidth),
        ("locations", locations),
        ("locationssrc", locationssrc),
        ("marker", marker),
        ("meta", meta),
        ("metasrc", metasrc),
        ("name", name),
        ("reversescale", reversescale),
        ("selected", selected),
        ("selectedpoints", selectedpoints),
        ("showlegend", showlegend),
        ("showscale", showscale),
        ("stream", stream),
        ("subplot", subplot),
        ("text", text),
        ("textsrc", textsrc),
        ("uid", uid),
        ("uirevision", uirevision),
        ("unselected", unselected),
        ("visible", visible),
        ("z", z),
        ("zauto", zauto),
        ("zmax", zmax),
        ("zmid", zmid),
        ("zmin", zmin),
        ("zsrc", zsrc),
    ):
        self._set_property(prop_name, arg, prop_val)

    # The trace type is fixed; drop any "type" entry carried in `arg`.
    self._props["type"] = "choroplethmap"
    arg.pop("type", None)

    # Remaining entries in `arg` plus unconsumed kwargs are processed
    # together (unknown keys may warn/raise depending on _skip_invalid).
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
| Choroplethmap |
python | getsentry__sentry | fixtures/safe_migrations_apps/decimal_to_float_app/migrations/0001_initial.py | {
"start": 106,
"end": 831
} | class ____(CheckedMigration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Value",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"amount",
models.DecimalField(
blank=True, decimal_places=4, default=None, max_digits=12, null=True
),
),
],
options={
"abstract": False,
},
),
]
| Migration |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_solr_management_commands.py | {
"start": 896,
"end": 1080
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="name")
def get_model(self):
return MockTag
| SolrMockTagSearchIndex |
python | EpistasisLab__tpot | tpot/builtin_modules/nn.py | {
"start": 7131,
"end": 7626
} | class ____(nn.Module):
# pylint: disable=arguments-differ
def __init__(self, input_size, num_classes):
super(_MLP, self).__init__()
self.hidden_size = round((input_size+num_classes)/2)
self.fc1 = nn.Linear(input_size, self.hidden_size)
self.relu = nn.Tanh()
self.fc2 = nn.Linear(self.hidden_size, num_classes)
def forward(self, x):
hidden = self.fc1(x)
r1 = self.relu(hidden)
out = self.fc2(r1)
return out
| _MLP |
python | huggingface__transformers | tests/models/dpr/test_tokenization_dpr.py | {
"start": 1008,
"end": 1321
} | class ____(test_tokenization_bert.BertTokenizationTest):
tokenizer_class = DPRContextEncoderTokenizer
rust_tokenizer_class = DPRContextEncoderTokenizerFast
test_rust_tokenizer = True
from_pretrained_id = "facebook/dpr-ctx_encoder-single-nq-base"
@require_tokenizers
| DPRContextEncoderTokenizationTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-google-sheets/destination_google_sheets/helpers.py | {
"start": 1202,
"end": 2741
} | class ____:
"""
Performs connection test write operation to ensure the target spreadsheet is available for writing.
Initiating the class itself, performs the connection test and stores the result in ConnectionTest.result property.
"""
def __init__(self, spreadsheet: Spreadsheet):
self.spreadsheet = spreadsheet
self.wks_name: str = "_airbyte_conn_test"
self.test_data: List[str] = ["conn_test", "success"]
def add_test_wks(self) -> Worksheet:
self.spreadsheet.spreadsheet.add_worksheet(self.wks_name, rows=2, cols=1)
return self.spreadsheet.open_worksheet(self.wks_name)
def remove_test_wks(self):
wks = self.spreadsheet.open_worksheet(self.wks_name)
self.spreadsheet.spreadsheet.del_worksheet(wks)
def populate_test_wks(self, wks: Worksheet) -> Worksheet:
wks.append_table(self.test_data, dimension="COLUMNS")
return wks
def check_values(self, wks: Worksheet) -> bool:
value = wks.get_value("A2")
return True if value == self.test_data[1] else False
def perform_connection_test(self) -> bool:
try:
if self.spreadsheet.spreadsheet.worksheets("title", self.wks_name):
self.remove_test_wks()
result: bool = self.check_values(self.populate_test_wks(self.add_test_wks()))
except WorksheetNotFound:
result: bool = self.check_values(self.populate_test_wks(self.add_test_wks()))
self.remove_test_wks()
return result
| ConnectionTest |
python | getsentry__sentry | src/sentry/db/models/fields/uuid.py | {
"start": 5778,
"end": 6125
} | class ____:
def __init__(self, value):
if not isinstance(value, UUID):
raise TypeError("UUIDAdapter only understands UUID objects.")
self.value = value
def getquoted(self):
return ("'%s'" % self.value).encode("utf8")
# Register the UUID type with psycopg2.
register_adapter(UUID, UUIDAdapter)
| UUIDAdapter |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 4021,
"end": 4190
} | class ____(LeafResource):
def render_GET(self, request):
n = getarg(request, b"n", 200, type_=int)
request.setResponseCode(n)
return b""
| Status |
python | langchain-ai__langchain | libs/core/langchain_core/documents/transformers.py | {
"start": 314,
"end": 2543
} | class ____(ABC):
"""Abstract base class for document transformation.
A document transformation takes a sequence of `Document` objects and returns a
sequence of transformed `Document` objects.
Example:
```python
class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
embeddings: Embeddings
similarity_fn: Callable = cosine_similarity
similarity_threshold: float = 0.95
class Config:
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
included_idxs = _filter_similar_embeddings(
embedded_documents,
self.similarity_fn,
self.similarity_threshold,
)
return [stateful_documents[i] for i in sorted(included_idxs)]
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
```
"""
@abstractmethod
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform a list of documents.
Args:
documents: A sequence of `Document` objects to be transformed.
Returns:
A sequence of transformed `Document` objects.
"""
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a list of documents.
Args:
documents: A sequence of `Document` objects to be transformed.
Returns:
A sequence of transformed `Document` objects.
"""
return await run_in_executor(
None, self.transform_documents, documents, **kwargs
)
| BaseDocumentTransformer |
python | readthedocs__readthedocs.org | readthedocs/projects/models.py | {
"start": 55300,
"end": 56923
} | class ____(models.Model):
"""
Imported files model.
This tracks files that are output from documentation builds, useful for
things like CDN invalidation.
"""
id = models.BigAutoField(primary_key=True)
project = models.ForeignKey(
Project,
verbose_name=_("Project"),
related_name="imported_files",
on_delete=models.CASCADE,
)
version = models.ForeignKey(
"builds.Version",
verbose_name=_("Version"),
related_name="imported_files",
null=True,
on_delete=models.CASCADE,
)
name = models.CharField(_("Name"), max_length=255)
# max_length is set to 4096 because linux has a maximum path length
# of 4096 characters for most filesystems (including EXT4).
# https://github.com/rtfd/readthedocs.org/issues/5061
path = models.CharField(_("Path"), max_length=4096)
commit = models.CharField(_("Commit"), max_length=255)
build = models.IntegerField(_("Build id"), null=True)
modified_date = models.DateTimeField(_("Modified date"), auto_now=True)
rank = models.IntegerField(
_("Page search rank"),
default=0,
validators=[MinValueValidator(-10), MaxValueValidator(10)],
)
ignore = models.BooleanField(
_("Ignore this file from operations like indexing"),
# default=False,
# TODO: remove after migration
null=True,
)
def get_absolute_url(self):
return Resolver().resolve_version(
project=self.project,
version=self.version.slug,
filename=self.path,
)
| ImportedFile |
python | celery__celery | t/unit/concurrency/test_prefork.py | {
"start": 16274,
"end": 17152
} | class ____:
def setup_method(self):
pytest.importorskip('multiprocessing')
def test_process_result(self):
x = asynpool.ResultHandler(
Mock(), Mock(), {}, Mock(),
Mock(), Mock(), Mock(), Mock(),
fileno_to_outq={},
on_process_alive=Mock(),
on_job_ready=Mock(),
)
assert x
hub = Mock(name='hub')
recv = x._recv_message = Mock(name='recv_message')
recv.return_value = iter([])
x.on_state_change = Mock()
x.register_with_event_loop(hub)
proc = x.fileno_to_outq[3] = Mock()
reader = proc.outq._reader
reader.poll.return_value = False
x.handle_event(6) # KeyError
x.handle_event(3)
x._recv_message.assert_called_with(
hub.add_reader, 3, x.on_state_change,
)
| test_ResultHandler |
python | numba__numba | numba/core/typing/npydecl.py | {
"start": 14241,
"end": 21561
} | class ____(AbstractTemplate):
"""
A template redirecting a Numpy global function (e.g. np.sum) to an
array method of the same name (e.g. ndarray.sum).
"""
# Arguments like *axis* can specialize on literals but also support
# non-literals
prefer_literal = True
def generic(self, args, kws):
pysig = None
if kws:
if self.method_name == 'sum':
if 'axis' in kws and 'dtype' not in kws:
def sum_stub(arr, axis):
pass
pysig = utils.pysignature(sum_stub)
elif 'dtype' in kws and 'axis' not in kws:
def sum_stub(arr, dtype):
pass
pysig = utils.pysignature(sum_stub)
elif 'dtype' in kws and 'axis' in kws:
def sum_stub(arr, axis, dtype):
pass
pysig = utils.pysignature(sum_stub)
elif self.method_name == 'argsort':
def argsort_stub(arr, kind='quicksort'):
pass
pysig = utils.pysignature(argsort_stub)
else:
fmt = "numba doesn't support kwarg for {}"
raise TypingError(fmt.format(self.method_name))
arr = args[0]
# This will return a BoundFunction
meth_ty = self.context.resolve_getattr(arr, self.method_name)
# Resolve arguments on the bound function
meth_sig = self.context.resolve_function_type(meth_ty, args[1:], kws)
if meth_sig is not None:
return meth_sig.as_function().replace(pysig=pysig)
# Function to glue attributes onto the numpy-esque object
def _numpy_redirect(fname):
numpy_function = getattr(np, fname)
cls = type("Numpy_redirect_{0}".format(fname), (Numpy_method_redirection,),
dict(key=numpy_function, method_name=fname))
infer_global(numpy_function, types.Function(cls))
for func in ['sum', 'argsort', 'nonzero', 'ravel']:
_numpy_redirect(func)
# -----------------------------------------------------------------------------
# Numpy scalar constructors
if config.USE_LEGACY_TYPE_SYSTEM:
# Register np.int8, etc. as converters to the equivalent Numba types
np_types = set(getattr(np, str(nb_type)) for nb_type in types.number_domain)
np_types.add(np.bool_)
# Those may or may not be aliases (depending on the Numpy build / version)
np_types.add(np.intc)
np_types.add(np.intp)
np_types.add(np.uintc)
np_types.add(np.uintp)
def register_number_classes(register_global):
for np_type in np_types:
nb_type = getattr(types, np_type.__name__)
register_global(np_type, types.NumberClass(nb_type))
else:
# Register np.int8, etc. as converters to the equivalent Numba types
np_types = set(getattr(np, str(nb_type).split('np_')[-1]) for nb_type in types.np_number_domain)
np_types.add(np.bool_)
# Those may or may not be aliases (depending on the Numpy build / version)
np_types.add(np.intc)
np_types.add(np.intp)
np_types.add(np.uintc)
np_types.add(np.uintp)
def register_number_classes(register_global):
for np_type in np_types:
nb_type = getattr(types, f'np_{np_type.__name__}')
register_global(np_type, types.NumberClass(nb_type))
register_number_classes(infer_global)
# -----------------------------------------------------------------------------
# Numpy array constructors
def parse_shape(shape):
"""
Given a shape, return the number of dimensions.
"""
ndim = None
if isinstance(shape, types.Integer):
ndim = 1
elif isinstance(shape, (types.Tuple, types.UniTuple)):
int_tys = (types.Integer, types.IntEnumMember)
if all(isinstance(s, int_tys) for s in shape):
ndim = len(shape)
return ndim
def parse_dtype(dtype):
"""
Return the dtype of a type, if it is either a DtypeSpec (used for most
dtypes) or a TypeRef (used for record types).
"""
if isinstance(dtype, types.DTypeSpec):
return dtype.dtype
elif isinstance(dtype, types.TypeRef):
return dtype.instance_type
elif isinstance(dtype, types.StringLiteral):
dtstr = dtype.literal_value
try:
dt = np.dtype(dtstr)
except TypeError:
msg = f"Invalid NumPy dtype specified: '{dtstr}'"
raise TypingError(msg)
return from_dtype(dt)
def _parse_nested_sequence(context, typ):
"""
Parse a (possibly 0d) nested sequence type.
A (ndim, dtype) tuple is returned. Note the sequence may still be
heterogeneous, as long as it converts to the given dtype.
"""
if isinstance(typ, (types.Buffer,)):
raise TypingError("%s not allowed in a homogeneous sequence" % typ)
elif isinstance(typ, (types.Sequence,)):
n, dtype = _parse_nested_sequence(context, typ.dtype)
return n + 1, dtype
elif isinstance(typ, (types.BaseTuple,)):
if typ.count == 0:
# Mimic Numpy's behaviour
return 1, types.float64
n, dtype = _parse_nested_sequence(context, typ[0])
dtypes = [dtype]
for i in range(1, typ.count):
_n, dtype = _parse_nested_sequence(context, typ[i])
if _n != n:
raise TypingError("type %s does not have a regular shape"
% (typ,))
dtypes.append(dtype)
dtype = context.unify_types(*dtypes)
if dtype is None:
raise TypingError("cannot convert %s to a homogeneous type" % typ)
return n + 1, dtype
else:
# Scalar type => check it's valid as a Numpy array dtype
as_dtype(typ)
return 0, typ
def _infer_dtype_from_inputs(inputs):
return dtype
def _homogeneous_dims(context, func_name, arrays):
ndim = arrays[0].ndim
for a in arrays:
if a.ndim != ndim:
msg = (f"{func_name}(): all the input arrays must have same number "
"of dimensions")
raise NumbaTypeError(msg)
return ndim
def _sequence_of_arrays(context, func_name, arrays,
dim_chooser=_homogeneous_dims):
if (not isinstance(arrays, types.BaseTuple)
or not len(arrays)
or not all(isinstance(a, types.Array) for a in arrays)):
raise TypingError("%s(): expecting a non-empty tuple of arrays, "
"got %s" % (func_name, arrays))
ndim = dim_chooser(context, func_name, arrays)
dtype = context.unify_types(*(a.dtype for a in arrays))
if dtype is None:
raise TypingError("%s(): input arrays must have "
"compatible dtypes" % func_name)
return dtype, ndim
def _choose_concatenation_layout(arrays):
# Only create a F array if all input arrays have F layout.
# This is a simplified version of Numpy's behaviour,
# while Numpy's actually processes the input strides to
# decide on optimal output strides
# (see PyArray_CreateMultiSortedStridePerm()).
return 'F' if all(a.layout == 'F' for a in arrays) else 'C'
# -----------------------------------------------------------------------------
# Linear algebra
| Numpy_method_redirection |
python | kamyu104__LeetCode-Solutions | Python/search-suggestions-system.py | {
"start": 1508,
"end": 1937
} | class ____(object):
def __init__(self):
self.__TOP_COUNT = 3
self.leaves = collections.defaultdict(TrieNode2)
self.infos = []
def insert(self, words, i):
curr = self
for c in words[i]:
curr = curr.leaves[c]
curr.add_info(i)
def add_info(self, i):
if len(self.infos) == self.__TOP_COUNT:
return
self.infos.append(i)
| TrieNode2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 544982,
"end": 545508
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateTeamDiscussionComment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "team_discussion_comment")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
team_discussion_comment = sgqlc.types.Field("TeamDiscussionComment", graphql_name="teamDiscussionComment")
"""The new comment."""
| CreateTeamDiscussionCommentPayload |
python | walkccc__LeetCode | solutions/1210. Minimum Moves to Reach Target with Rotations/1210.py | {
"start": 27,
"end": 81
} | class ____(IntEnum):
HORIZONTAL = 0
VERTICAL = 1
| Pos |
python | kamyu104__LeetCode-Solutions | Python/count-asterisks.py | {
"start": 38,
"end": 375
} | class ____(object):
def countAsterisks(self, s):
"""
:type s: str
:rtype: int
"""
result = cnt = 0
for c in s:
if c == '|':
cnt = (cnt+1)%2
continue
if c == '*' and cnt == 0:
result += 1
return result
| Solution |
python | spack__spack | lib/spack/spack/build_environment.py | {
"start": 63186,
"end": 65197
} | class ____:
"""The function :meth:`spack.package_base.PackageBase.setup_dependent_package` receives
an instance of this class for the ``module`` argument. It's used to set global variables in the
module of a package, and propagate those globals to the modules of all classes in the
inheritance hierarchy of the package. It's reminiscent of
:class:`spack.util.environment.EnvironmentModifications`, but sets Python variables instead
of environment variables. This class should typically not be instantiated in packages directly.
"""
_PROTECTED_NAMES = ("package", "current_module", "modules_in_mro", "_set_attributes")
def __init__(self, package: spack.package_base.PackageBase) -> None:
self._set_self_attributes("package", package)
self._set_self_attributes("current_module", package.module)
#: Modules for the classes in the MRO up to PackageBase
modules_in_mro = []
for cls in package.__class__.__mro__:
module = getattr(cls, "module", None)
if module is None or module is spack.package_base:
break
if module is self.current_module:
continue
modules_in_mro.append(module)
self._set_self_attributes("modules_in_mro", modules_in_mro)
self._set_self_attributes("_set_attributes", {})
def _set_self_attributes(self, key, value):
super().__setattr__(key, value)
def __getattr__(self, item):
return getattr(self.current_module, item)
def __setattr__(self, key, value):
if key in ModuleChangePropagator._PROTECTED_NAMES:
msg = f'Cannot set attribute "{key}" in ModuleMonkeyPatcher'
return AttributeError(msg)
setattr(self.current_module, key, value)
self._set_attributes[key] = value
def propagate_changes_to_mro(self):
for module_in_mro in self.modules_in_mro:
module_in_mro.__dict__.update(self._set_attributes)
| ModuleChangePropagator |
python | getsentry__sentry | src/sentry/users/api/serializers/authenticator.py | {
"start": 1362,
"end": 2698
} | class ____(Serializer):
def serialize(
self,
obj: AuthenticatorInterface,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> AuthenticatorInterfaceSerializerResponse:
data: AuthenticatorInterfaceSerializerResponse = {
"id": str(obj.interface_id),
"name": obj.name,
"description": obj.description,
"rotationWarning": obj.rotation_warning,
"enrollButton": obj.enroll_button,
"configureButton": obj.configure_button,
"removeButton": obj.remove_button,
"isBackupInterface": obj.is_backup_interface,
"isEnrolled": obj.is_enrolled(),
"disallowNewEnrollment": obj.disallow_new_enrollment,
"status": str(obj.status.value),
"canValidateOtp": obj.can_validate_otp,
"allowMultiEnrollment": obj.allow_multi_enrollment,
"allowRotationInPlace": obj.allow_rotation_in_place,
}
# authenticator is enrolled
if obj.authenticator is not None:
data["authId"] = str(obj.authenticator.id)
data["createdAt"] = obj.authenticator.created_at
data["lastUsedAt"] = obj.authenticator.last_used_at
return data
| AuthenticatorInterfaceSerializer |
python | google__python-fire | fire/test_components.py | {
"start": 1857,
"end": 1975
} | class ____:
def double(self, count):
return 2 * count
def triple(self, count):
return 3 * count
| NoDefaults |
python | pypa__warehouse | tests/unit/metrics/test_services.py | {
"start": 281,
"end": 1530
} | class ____:
def test_verify_service(self):
assert verifyClass(IMetricsService, NullMetrics)
def test_create_service(self):
assert isinstance(
NullMetrics.create_service(pretend.stub(), pretend.stub()), NullMetrics
)
@pytest.mark.parametrize(
"method",
[
"gauge",
"increment",
"decrement",
"histogram",
"distribution",
"timing",
"set",
],
)
def test_noop(self, method):
metrics = NullMetrics()
getattr(metrics, method)("my metric", pretend.stub())
def test_timed(self):
metrics = NullMetrics()
@metrics.timed("my metric")
@pretend.call_recorder
def fn(inp):
return inp
result = pretend.stub()
assert fn(result) is result
assert fn.calls == [pretend.call(result)]
with metrics.timed("my metric"):
pass
def test_event(self):
metrics = NullMetrics()
metrics.event(pretend.stub(), pretend.stub(), pretend.stub())
def test_service_check(self):
metrics = NullMetrics()
metrics.service_check(pretend.stub(), pretend.stub())
| TestNullMetrics |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_add_column_with_notnull_default_app/migrations/0002_add_field_notnull_default.py | {
"start": 153,
"end": 468
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_add_column_with_notnull_default_app", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="testtable",
name="field",
field=models.IntegerField(default=0),
),
]
| Migration |
python | coleifer__peewee | tests/regressions.py | {
"start": 17156,
"end": 18644
} | class ____(TestModel):
version = IntegerField(default=1, index=True)
def save_optimistic(self):
if not self.id:
# This is a new record, so the default logic is to perform an
# INSERT. Ideally your model would also have a unique
# constraint that made it impossible for two INSERTs to happen
# at the same time.
return self.save()
# Update any data that has changed and bump the version counter.
field_data = dict(self.__data__)
current_version = field_data.pop('version', 1)
self._populate_unsaved_relations(field_data)
field_data = self._prune_fields(field_data, self.dirty_fields)
if not field_data:
raise ValueError('No changes have been made.')
ModelClass = type(self)
field_data['version'] = ModelClass.version + 1 # Atomic increment.
query = ModelClass.update(**field_data).where(
(ModelClass.version == current_version) &
(ModelClass.id == self.id))
if query.execute() == 0:
# No rows were updated, indicating another process has saved
# a new version. How you handle this situation is up to you,
# but for simplicity I'm just raising an exception.
raise ConflictDetectedException()
else:
# Increment local version to match what is now in the db.
self.version += 1
return True
| BaseVersionedModel |
python | django__django | django/contrib/gis/db/backends/mysql/base.py | {
"start": 251,
"end": 498
} | class ____(MySQLDatabaseWrapper):
SchemaEditorClass = MySQLGISSchemaEditor
# Classes instantiated in __init__().
features_class = DatabaseFeatures
introspection_class = MySQLIntrospection
ops_class = MySQLOperations
| DatabaseWrapper |
python | vyperlang__vyper | vyper/builtins/_signatures.py | {
"start": 2759,
"end": 6095
} | class ____(VyperType):
typeclass = "builtin_function"
_has_varargs = False
_inputs: list[tuple[str, Any]] = []
_kwargs: dict[str, KwargSettings] = {}
_modifiability: Modifiability = Modifiability.MODIFIABLE
_return_type: Optional[VyperType] = None
_equality_attrs = ("_id",)
_is_terminus = False
mutability: StateMutability = StateMutability.PURE
@property
def modifiability(self):
return self._modifiability
# helper function to deal with TYPE_Ts
def _validate_single(self, arg: vy_ast.VyperNode, expected_type: VyperType) -> None:
if TYPE_T.any().compare_type(expected_type):
# try to parse the type - call type_from_annotation
# for its side effects (will throw if is not a type)
type_from_annotation(arg)
else:
validate_expected_type(arg, expected_type)
def _validate_arg_types(self, node: vy_ast.Call) -> None:
num_args = len(self._inputs) # the number of args the signature indicates
expect_num_args: Any = num_args
if self._has_varargs:
# note special meaning for -1 in validate_call_args API
expect_num_args = (num_args, -1)
validate_call_args(node, expect_num_args, list(self._kwargs.keys()))
for arg, (_, expected) in zip(node.args, self._inputs):
self._validate_single(arg, expected)
for kwarg in node.keywords:
kwarg_settings = self._kwargs[kwarg.arg]
if kwarg_settings.require_literal and not check_modifiability(
kwarg.value, Modifiability.CONSTANT
):
raise TypeMismatch("Value must be literal", kwarg.value)
self._validate_single(kwarg.value, kwarg_settings.typ)
# typecheck varargs. we don't have type info from the signature,
# so ensure that the types of the args can be inferred exactly.
varargs = node.args[num_args:]
if len(varargs) > 0:
assert self._has_varargs # double check validate_call_args
for arg in varargs:
# call get_exact_type_from_node for its side effects -
# ensures the type can be inferred exactly.
get_exact_type_from_node(arg)
def check_modifiability_for_call(self, node: vy_ast.Call, modifiability: Modifiability) -> bool:
return self._modifiability <= modifiability
def fetch_call_return(self, node: vy_ast.Call) -> Optional[VyperType]:
self._validate_arg_types(node)
return self._return_type
def infer_arg_types(self, node: vy_ast.Call, expected_return_typ=None) -> list[VyperType]:
self._validate_arg_types(node)
ret = [expected for (_, expected) in self._inputs]
# handle varargs.
n_known_args = len(self._inputs)
varargs = node.args[n_known_args:]
if len(varargs) > 0:
assert self._has_varargs
ret.extend(get_exact_type_from_node(arg) for arg in varargs)
return ret
def infer_kwarg_types(self, node: vy_ast.Call) -> dict[str, VyperType]:
return {i.arg: self._kwargs[i.arg].typ for i in node.keywords}
def __repr__(self):
return f"(builtin) {self._id}"
def _try_fold(self, node):
raise UnfoldableNode(f"not foldable: {self}", node)
| BuiltinFunctionT |
python | kamyu104__LeetCode-Solutions | Python/transpose-matrix.py | {
"start": 406,
"end": 572
} | class ____(object):
def transpose(self, A):
"""
:type A: List[List[int]]
:rtype: List[List[int]]
"""
return zip(*A)
| Solution2 |
python | ionelmc__pytest-benchmark | tests/test_sample.py | {
"start": 266,
"end": 509
} | class ____:
def __init__(self, factory):
self.factory = factory
self.object = empty
def __str__(self):
if self.object is empty:
self.object = self.factory()
return str(self.object)
| SimpleProxy |
python | networkx__networkx | networkx/algorithms/bipartite/tests/test_project.py | {
"start": 6282,
"end": 15294
} | class ____:
@classmethod
def setup_class(cls):
# Tore Opsahl's example
# http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/
cls.G = nx.Graph()
cls.G.add_edge("A", 1)
cls.G.add_edge("A", 2)
cls.G.add_edge("B", 1)
cls.G.add_edge("B", 2)
cls.G.add_edge("B", 3)
cls.G.add_edge("B", 4)
cls.G.add_edge("B", 5)
cls.G.add_edge("C", 1)
cls.G.add_edge("D", 3)
cls.G.add_edge("E", 4)
cls.G.add_edge("E", 5)
cls.G.add_edge("E", 6)
cls.G.add_edge("F", 6)
# Graph based on figure 6 from Newman (2001)
cls.N = nx.Graph()
cls.N.add_edge("A", 1)
cls.N.add_edge("A", 2)
cls.N.add_edge("A", 3)
cls.N.add_edge("B", 1)
cls.N.add_edge("B", 2)
cls.N.add_edge("B", 3)
cls.N.add_edge("C", 1)
cls.N.add_edge("D", 1)
cls.N.add_edge("E", 3)
def test_project_weighted_shared(self):
edges = [
("A", "B", 2),
("A", "C", 1),
("B", "C", 1),
("B", "D", 1),
("B", "E", 2),
("E", "F", 1),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.weighted_projected_graph(self.G, "ABCDEF")
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
edges = [
("A", "B", 3),
("A", "E", 1),
("A", "C", 1),
("A", "D", 1),
("B", "E", 1),
("B", "C", 1),
("B", "D", 1),
("C", "D", 1),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.weighted_projected_graph(self.N, "ABCDE")
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
def test_project_weighted_newman(self):
edges = [
("A", "B", 1.5),
("A", "C", 0.5),
("B", "C", 0.5),
("B", "D", 1),
("B", "E", 2),
("E", "F", 1),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.collaboration_weighted_projected_graph(self.G, "ABCDEF")
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
edges = [
("A", "B", 11 / 6.0),
("A", "E", 1 / 2.0),
("A", "C", 1 / 3.0),
("A", "D", 1 / 3.0),
("B", "E", 1 / 2.0),
("B", "C", 1 / 3.0),
("B", "D", 1 / 3.0),
("C", "D", 1 / 3.0),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.collaboration_weighted_projected_graph(self.N, "ABCDE")
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
def test_project_weighted_ratio(self):
edges = [
("A", "B", 2 / 6.0),
("A", "C", 1 / 6.0),
("B", "C", 1 / 6.0),
("B", "D", 1 / 6.0),
("B", "E", 2 / 6.0),
("E", "F", 1 / 6.0),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.weighted_projected_graph(self.G, "ABCDEF", ratio=True)
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
edges = [
("A", "B", 3 / 3.0),
("A", "E", 1 / 3.0),
("A", "C", 1 / 3.0),
("A", "D", 1 / 3.0),
("B", "E", 1 / 3.0),
("B", "C", 1 / 3.0),
("B", "D", 1 / 3.0),
("C", "D", 1 / 3.0),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.weighted_projected_graph(self.N, "ABCDE", ratio=True)
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
def test_project_weighted_overlap(self):
edges = [
("A", "B", 2 / 2.0),
("A", "C", 1 / 1.0),
("B", "C", 1 / 1.0),
("B", "D", 1 / 1.0),
("B", "E", 2 / 3.0),
("E", "F", 1 / 1.0),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF", jaccard=False)
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
edges = [
("A", "B", 3 / 3.0),
("A", "E", 1 / 1.0),
("A", "C", 1 / 1.0),
("A", "D", 1 / 1.0),
("B", "E", 1 / 1.0),
("B", "C", 1 / 1.0),
("B", "D", 1 / 1.0),
("C", "D", 1 / 1.0),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE", jaccard=False)
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
def test_project_weighted_jaccard(self):
edges = [
("A", "B", 2 / 5.0),
("A", "C", 1 / 2.0),
("B", "C", 1 / 5.0),
("B", "D", 1 / 5.0),
("B", "E", 2 / 6.0),
("E", "F", 1 / 3.0),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF")
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in list(P.edges()):
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
edges = [
("A", "B", 3 / 3.0),
("A", "E", 1 / 3.0),
("A", "C", 1 / 3.0),
("A", "D", 1 / 3.0),
("B", "E", 1 / 3.0),
("B", "C", 1 / 3.0),
("B", "D", 1 / 3.0),
("C", "D", 1 / 1.0),
]
Panswer = nx.Graph()
Panswer.add_weighted_edges_from(edges)
P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE")
assert edges_equal(list(P.edges()), Panswer.edges())
for u, v in P.edges():
assert P[u][v]["weight"] == Panswer[u][v]["weight"]
def test_generic_weighted_projected_graph_simple(self):
def shared(G, u, v):
return len(set(G[u]) & set(G[v]))
B = nx.path_graph(5)
G = bipartite.generic_weighted_projected_graph(
B, [0, 2, 4], weight_function=shared
)
assert nodes_equal(list(G), [0, 2, 4])
assert edges_equal(
list(G.edges(data=True)),
[(0, 2, {"weight": 1}), (2, 4, {"weight": 1})],
)
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert nodes_equal(list(G), [0, 2, 4])
assert edges_equal(
list(G.edges(data=True)),
[(0, 2, {"weight": 1}), (2, 4, {"weight": 1})],
)
B = nx.DiGraph()
nx.add_path(B, range(5))
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert nodes_equal(list(G), [0, 2, 4])
assert edges_equal(
list(G.edges(data=True)),
[(0, 2, {"weight": 1}), (2, 4, {"weight": 1})],
directed=True,
)
def test_generic_weighted_projected_graph_custom(self):
def jaccard(G, u, v):
unbrs = set(G[u])
vnbrs = set(G[v])
return len(unbrs & vnbrs) / len(unbrs | vnbrs)
def my_weight(G, u, v, weight="weight"):
w = 0
for nbr in set(G[u]) & set(G[v]):
w += G.edges[u, nbr].get(weight, 1) + G.edges[v, nbr].get(weight, 1)
return w
B = nx.bipartite.complete_bipartite_graph(2, 2)
for i, (u, v) in enumerate(B.edges()):
B.edges[u, v]["weight"] = i + 1
G = bipartite.generic_weighted_projected_graph(
B, [0, 1], weight_function=jaccard
)
assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 1.0})])
G = bipartite.generic_weighted_projected_graph(
B, [0, 1], weight_function=my_weight
)
assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 10})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1])
assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 2})])
| TestBipartiteWeightedProjection |
python | tiangolo__fastapi | docs_src/extra_models/tutorial001.py | {
"start": 236,
"end": 341
} | class ____(BaseModel):
username: str
email: EmailStr
full_name: Union[str, None] = None
| UserOut |
python | ray-project__ray | python/ray/autoscaler/_private/fake_multi_node/test_utils.py | {
"start": 643,
"end": 11692
} | class ____:
"""Docker cluster wrapper.
Creates a directory for starting a fake multinode docker cluster.
Includes APIs to update the cluster config as needed in tests,
and to start and connect to the cluster.
"""
def __init__(self, config: Optional[Dict[str, Any]] = None):
self._base_config_file = os.path.join(
os.path.dirname(__file__), "example_docker.yaml"
)
self._tempdir = None
self._config_file = None
self._nodes_file = None
self._nodes = {}
self._status_file = None
self._status = {}
self._partial_config = config
self._cluster_config = None
self._docker_image = None
self._monitor_script = os.path.join(
os.path.dirname(__file__), "docker_monitor.py"
)
self._monitor_process = None
self._execution_thread = None
self._execution_event = threading.Event()
self._execution_queue = None
@property
def config_file(self):
return self._config_file
@property
def cluster_config(self):
return self._cluster_config
@property
def cluster_dir(self):
return self._tempdir
@property
def gcs_port(self):
return self._cluster_config.get("provider", {}).get(
"host_gcs_port", FAKE_DOCKER_DEFAULT_GCS_PORT
)
@property
def client_port(self):
return self._cluster_config.get("provider", {}).get(
"host_client_port", FAKE_DOCKER_DEFAULT_CLIENT_PORT
)
def connect(self, client: bool = True, timeout: int = 120, **init_kwargs):
"""Connect to the docker-compose Ray cluster.
Assumes the cluster is at RAY_TESTHOST (defaults to
``127.0.0.1``).
Args:
client: If True, uses Ray client to connect to the
cluster. If False, uses GCS to connect to the cluster.
timeout: Connection timeout in seconds.
**init_kwargs: kwargs to pass to ``ray.init()``.
"""
host = os.environ.get("RAY_TESTHOST", "127.0.0.1")
if client:
port = self.client_port
address = f"ray://{build_address(host, port)}"
else:
port = self.gcs_port
address = build_address(host, port)
timeout_at = time.monotonic() + timeout
while time.monotonic() < timeout_at:
try:
ray.init(address, **init_kwargs)
self.wait_for_resources({"CPU": 1})
except ResourcesNotReadyError:
time.sleep(1)
continue
else:
break
try:
ray.cluster_resources()
except Exception as e:
raise RuntimeError(f"Timed out connecting to Ray: {e}")
def remote_execution_api(self) -> "RemoteAPI":
"""Create an object to control cluster state from within the cluster."""
self._execution_queue = Queue(actor_options={"num_cpus": 0})
stop_event = self._execution_event
def entrypoint():
while not stop_event.is_set():
try:
cmd, kwargs = self._execution_queue.get(timeout=1)
except Empty:
continue
if cmd == "kill_node":
self.kill_node(**kwargs)
self._execution_thread = threading.Thread(target=entrypoint)
self._execution_thread.start()
return RemoteAPI(self._execution_queue)
@staticmethod
def wait_for_resources(resources: Dict[str, float], timeout: int = 60):
"""Wait until Ray cluster resources are available
Args:
resources: Minimum resources needed before
this function returns.
timeout: Timeout in seconds.
"""
timeout = time.monotonic() + timeout
available = ray.cluster_resources()
while any(available.get(k, 0.0) < v for k, v in resources.items()):
if time.monotonic() > timeout:
raise ResourcesNotReadyError(
f"Timed out waiting for resources: {resources}"
)
time.sleep(1)
available = ray.cluster_resources()
def update_config(self, config: Optional[Dict[str, Any]] = None):
"""Update autoscaling config.
Does a deep update of the base config with a new configuration.
This can change autoscaling behavior.
Args:
config: Partial config to update current
config with.
"""
assert self._tempdir, "Call setup() first"
config = config or {}
if config:
self._partial_config = config
if not config.get("provider", {}).get("image"):
# No image specified, trying to parse from buildkite
docker_image = os.environ.get("RAY_DOCKER_IMAGE", None)
if not docker_image:
# If still no docker image, use one according to Python version
mj = sys.version_info.major
mi = sys.version_info.minor
docker_image = DEFAULT_DOCKER_IMAGE.format(major=mj, minor=mi)
self._docker_image = docker_image
with open(self._base_config_file, "rt") as f:
cluster_config = yaml.safe_load(f)
if self._partial_config:
deep_update(cluster_config, self._partial_config, new_keys_allowed=True)
if self._docker_image:
cluster_config["provider"]["image"] = self._docker_image
cluster_config["provider"]["shared_volume_dir"] = self._tempdir
self._cluster_config = cluster_config
with open(self._config_file, "wt") as f:
yaml.safe_dump(self._cluster_config, f)
logging.info(f"Updated cluster config to: {self._cluster_config}")
def maybe_pull_image(self):
if self._docker_image:
try:
images_str = subprocess.check_output(
f"docker image inspect {self._docker_image}", shell=True
)
images = json.loads(images_str)
except Exception as e:
logger.error(f"Error inspecting image {self._docker_image}: {e}")
return
if not images:
try:
subprocess.check_call(
f"docker pull {self._docker_image}", shell=True
)
except Exception as e:
logger.error(f"Error pulling image {self._docker_image}: {e}")
def setup(self):
"""Setup docker compose cluster environment.
Creates the temporary directory, writes the initial config file,
and pulls the docker image, if required.
"""
self._tempdir = tempfile.mkdtemp(dir=os.environ.get("RAY_TEMPDIR", None))
os.chmod(self._tempdir, 0o777)
self._config_file = os.path.join(self._tempdir, "cluster.yaml")
self._nodes_file = os.path.join(self._tempdir, "nodes.json")
self._status_file = os.path.join(self._tempdir, "status.json")
self.update_config()
self.maybe_pull_image()
def teardown(self, keep_dir: bool = False):
"""Tear down docker compose cluster environment.
Args:
keep_dir: If True, cluster directory
will not be removed after termination.
"""
if not keep_dir:
shutil.rmtree(self._tempdir)
self._tempdir = None
self._config_file = None
def _start_monitor(self):
self._monitor_process = subprocess.Popen(
[sys.executable, self._monitor_script, self.config_file]
)
time.sleep(2)
def _stop_monitor(self):
if self._monitor_process:
self._monitor_process.wait(timeout=30)
if self._monitor_process.poll() is None:
self._monitor_process.terminate()
self._monitor_process = None
def start(self):
"""Start docker compose cluster.
Starts the monitor process and runs ``ray up``.
"""
self._start_monitor()
subprocess.check_call(
f"RAY_FAKE_CLUSTER=1 ray up -y {self.config_file}", shell=True
)
def stop(self):
"""Stop docker compose cluster.
Runs ``ray down`` and stops the monitor process.
"""
if ray.is_initialized:
ray.shutdown()
subprocess.check_call(
f"RAY_FAKE_CLUSTER=1 ray down -y {self.config_file}", shell=True
)
self._stop_monitor()
self._execution_event.set()
def _update_nodes(self):
with open(self._nodes_file, "rt") as f:
self._nodes = json.load(f)
def _update_status(self):
with open(self._status_file, "rt") as f:
self._status = json.load(f)
def _get_node(
self,
node_id: Optional[str] = None,
num: Optional[int] = None,
rand: Optional[str] = None,
) -> str:
self._update_nodes()
if node_id:
assert (
not num and not rand
), "Only provide either `node_id`, `num`, or `random`."
elif num:
assert (
not node_id and not rand
), "Only provide either `node_id`, `num`, or `random`."
base = "fffffffffffffffffffffffffffffffffffffffffffffffffff"
node_id = base + str(num).zfill(5)
elif rand:
assert (
not node_id and not num
), "Only provide either `node_id`, `num`, or `random`."
assert rand in [
"worker",
"any",
], "`random` must be one of ['worker', 'any']"
choices = list(self._nodes.keys())
if rand == "worker":
choices.remove(
"fffffffffffffffffffffffffffffffffffffffffffffffffff00000"
)
# Else: any
node_id = random.choice(choices)
assert node_id in self._nodes, f"Node with ID {node_id} is not in active nodes."
return node_id
def _get_docker_container(self, node_id: str) -> Optional[str]:
self._update_status()
node_status = self._status.get(node_id)
if not node_status:
return None
return node_status["Name"]
def kill_node(
self,
node_id: Optional[str] = None,
num: Optional[int] = None,
rand: Optional[str] = None,
):
"""Kill node.
If ``node_id`` is given, kill that node.
If ``num`` is given, construct node_id from this number, and kill
that node.
If ``rand`` is given (as either ``worker`` or ``any``), kill a random
node.
"""
node_id = self._get_node(node_id=node_id, num=num, rand=rand)
container = self._get_docker_container(node_id=node_id)
subprocess.check_call(f"docker kill {container}", shell=True)
| DockerCluster |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib_test.py | {
"start": 17407,
"end": 21988
} | class ____(test.TestCase, parameterized.TestCase):
def testMergeCall(self):
_assert_in_default_state(self)
def merge_fn(dist, s):
self.assertIs(distribute_lib._get_default_strategy(), dist)
self.assertIs(None, distribute_lib.get_replica_context())
self.assertIs(dist, distribute_lib.get_cross_replica_context())
self.assertTrue(distribute_lib.in_cross_replica_context())
self.assertIs(dist, distribute_lib.get_strategy())
self.assertFalse(distribute_lib.has_strategy())
return "foo_" + s
replica_ctx = distribute_lib.get_replica_context()
self.assertIs(distribute_lib._get_default_replica_context(), replica_ctx)
self.assertEqual("foo_bar", replica_ctx.merge_call(merge_fn, args=("bar",)))
_assert_in_default_state(self)
def testMergeCallAutoGraph(self):
_assert_in_default_state(self)
def merge_fn(_, s):
self.assertTrue(converter_testing.is_inside_generated_code())
return s
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
replica_ctx = distribute_lib.get_replica_context()
replica_ctx.merge_call(merge_fn, args=("bar",))
test_fn()
def testScopeMostlyNoOp(self):
_assert_in_default_state(self)
test_strategy = _TestStrategy2()
with test_strategy.scope():
variable_v1.VariableV1(1.0, name="before")
default_strategy = distribute_lib._get_default_strategy()
scope = default_strategy.scope()
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegex(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_v1.VariableV1(1.0, name="error")
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegex(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_v1.VariableV1(1.0, name="also_error")
_assert_in_default_state(self)
_assert_in_default_state(self)
with test_strategy.scope():
variable_v1.VariableV1(1.0, name="after")
def testExperimentalRunV2(self):
default_strategy = distribute_lib._get_default_strategy()
dataset = dataset_ops.Dataset.range(10).batch(2)
iterator = default_strategy.extended._make_dataset_iterator(dataset)
next_val = iterator.get_next()
def train_step(input_data):
return input_data
for _ in range(2):
default_strategy.run(train_step, args=(next_val,))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasets(self):
default_strategy = distribute_lib._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
next_val = next(iter(dist_dataset))
else:
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
iterator = dist_dataset.make_initializable_iterator()
self.evaluate(iterator.initializer)
next_val = iterator.get_next()
self.assertAllEqual([0, 1], self.evaluate(next_val))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasetsFromFunction(self):
default_strategy = distribute_lib._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.distribute_datasets_from_function(
dataset_fn)
next_val = next(iter(dist_dataset_from_func))
self.assertAllEqual([0, 1], self.evaluate(next_val))
else:
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.distribute_datasets_from_function(
dataset_fn)
dataset_ops.make_initializable_iterator(dist_dataset_from_func)
@combinations.generate(combinations.combine(tf_api_version=1))
def testV1(self):
self.assertIsInstance(
distribute_lib.get_strategy(), distribute_lib.StrategyV1)
@combinations.generate(combinations.combine(tf_api_version=2))
def testV2(self):
self.assertIsInstance(
distribute_lib.get_strategy(), distribute_lib.Strategy)
| DefaultDistributionStrategyTest |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 27823,
"end": 28127
} | class ____(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ("x", "y")
| W34 |
python | PyCQA__pydocstyle | src/pydocstyle/parser.py | {
"start": 9996,
"end": 11543
} | class ____:
# A logical newline is where a new expression or statement begins. When
# there is a physical new line, but not a logical one, for example:
# (x +
# y)
# The token will be tk.NL, not tk.NEWLINE.
LOGICAL_NEWLINES = {tk.NEWLINE, tk.INDENT, tk.DEDENT}
def __init__(self, filelike):
self._generator = tk.generate_tokens(filelike.readline)
self.current = Token(*next(self._generator, None))
self.line = self.current.start[0]
self.log = log
self.got_logical_newline = True
def move(self):
previous = self.current
current = self._next_from_generator()
self.current = None if current is None else Token(*current)
self.line = self.current.start[0] if self.current else self.line
is_logical_blank = previous.kind in (tk.NL, tk.COMMENT)
self.got_logical_newline = (
previous.kind in self.LOGICAL_NEWLINES
# Retain logical_newline status if last line was logically blank
or (self.got_logical_newline and is_logical_blank)
)
return previous
def _next_from_generator(self):
try:
return next(self._generator, None)
except (SyntaxError, tk.TokenError):
self.log.warning('error generating tokens', exc_info=True)
return None
def __iter__(self):
while True:
if self.current is not None:
yield self.current
else:
return
self.move()
| TokenStream |
python | apache__airflow | providers/slack/src/airflow/providers/slack/hooks/slack.py | {
"start": 2579,
"end": 18854
} | class ____(BaseHook):
"""
Creates a Slack API Connection to be used for calls.
This class provide a thin wrapper around the ``slack_sdk.WebClient``.
.. seealso::
- :ref:`Slack API connection <howto/connection:slack>`
- https://api.slack.com/messaging
- https://slack.dev/python-slack-sdk/web/index.html
.. warning::
This hook intend to use `Slack API` connection
and might not work correctly with `Slack Incoming Webhook` and `HTTP` connections.
Examples:
.. code-block:: python
# Create hook
slack_hook = SlackHook(slack_conn_id="slack_api_default")
# Call generic API with parameters (errors are handled by hook)
# For more details check https://api.slack.com/methods/chat.postMessage
slack_hook.call("chat.postMessage", json={"channel": "#random", "text": "Hello world!"})
# Call method from Slack SDK (you have to handle errors yourself)
# For more details check https://slack.dev/python-slack-sdk/web/index.html#messaging
slack_hook.client.chat_postMessage(channel="#random", text="Hello world!")
Additional arguments which are not listed into parameters exposed
into the rest of ``slack.WebClient`` constructor args.
:param slack_conn_id: :ref:`Slack connection id <howto/connection:slack>`
that has Slack API token in the password field.
:param timeout: The maximum number of seconds the client will wait to connect
and receive a response from Slack. If not set than default WebClient value will use.
:param base_url: A string representing the Slack API base URL.
If not set than default WebClient BASE_URL will use (``https://www.slack.com/api/``).
:param proxy: Proxy to make the Slack API call.
:param retry_handlers: List of handlers to customize retry logic in ``slack_sdk.WebClient``.
"""
conn_name_attr = "slack_conn_id"
default_conn_name = "slack_api_default"
conn_type = "slack"
hook_name = "Slack API"
def __init__(
self,
*,
slack_conn_id: str = default_conn_name,
base_url: str | None = None,
timeout: int | None = None,
proxy: str | None = None,
retry_handlers: list[RetryHandler] | None = None,
**extra_client_args: Any,
) -> None:
super().__init__()
self.slack_conn_id = slack_conn_id
self.base_url = base_url
self.timeout = timeout
self.proxy = proxy
self.retry_handlers = retry_handlers
if "token" in extra_client_args:
warnings.warn(
f"Provide `token` as part of {type(self).__name__!r} parameters is disallowed, "
f"please use Airflow Connection.",
UserWarning,
stacklevel=2,
)
extra_client_args.pop("token")
if "logger" not in extra_client_args:
extra_client_args["logger"] = self.log
self.extra_client_args = extra_client_args
# Use for caching channels result
self._channels_mapping: dict[str, str] = {}
@cached_property
def client(self) -> WebClient:
"""Get the underlying slack_sdk.WebClient (cached)."""
conn = self.get_connection(self.slack_conn_id)
return WebClient(**self._get_conn_params(conn=conn))
async def get_async_client(self) -> AsyncWebClient:
"""Get the underlying `slack_sdk.web.async_client.AsyncWebClient`."""
conn = await get_async_connection(self.slack_conn_id)
return AsyncWebClient(**self._get_conn_params(conn))
def get_conn(self) -> WebClient:
"""Get the underlying slack_sdk.WebClient (cached)."""
return self.client
def _get_conn_params(self, conn: Connection) -> dict[str, Any]:
"""Fetch connection params as a dict and merge it with hook parameters."""
if not conn.password:
raise AirflowNotFoundException(
f"Connection ID {self.slack_conn_id!r} does not contain password (Slack API Token)."
)
conn_params: dict[str, Any] = {"token": conn.password, "retry_handlers": self.retry_handlers}
extra_config = ConnectionExtraConfig(
conn_type=self.conn_type, conn_id=conn.conn_id, extra=conn.extra_dejson
)
# Merge Hook parameters with Connection config
conn_params.update(
{
"timeout": self.timeout or extra_config.getint("timeout", default=None),
"base_url": self.base_url or extra_config.get("base_url", default=None),
"proxy": self.proxy or extra_config.get("proxy", default=None),
}
)
# Add additional client args
conn_params.update(self.extra_client_args)
return {k: v for k, v in conn_params.items() if v is not None}
def call(self, api_method: str, **kwargs) -> SlackResponse:
"""
Call Slack WebClient `WebClient.api_call` with given arguments.
:param api_method: The target Slack API method. e.g. 'chat.postMessage'. Required.
:param http_verb: HTTP Verb. Optional (defaults to 'POST')
:param files: Files to multipart upload. e.g. {imageORfile: file_objectORfile_path}
:param data: The body to attach to the request. If a dictionary is provided,
form-encoding will take place. Optional.
:param params: The URL parameters to append to the URL. Optional.
:param json: JSON for the body to attach to the request. Optional.
:return: The server's response to an HTTP request. Data from the response can be
accessed like a dict. If the response included 'next_cursor' it can be
iterated on to execute subsequent requests.
"""
return self.client.api_call(api_method, **kwargs)
async def async_call(self, api_method: str, **kwargs) -> AsyncSlackResponse:
"""
Call Slack WebClient `AsyncWebClient.api_call` with given arguments.
:param api_method: The target Slack API method. e.g. 'chat.postMessage'. Required.
:param http_verb: HTTP Verb. Optional (defaults to 'POST')
:param files: Files to multipart upload. e.g. {imageORfile: file_objectORfile_path}
:param data: The body to attach to the request. If a dictionary is provided,
form-encoding will take place. Optional.
:param params: The URL parameters to append to the URL. Optional.
:param json: JSON for the body to attach to the request. Optional.
:return: The server's response to an HTTP request. Data from the response can be
accessed like a dict. If the response included 'next_cursor' it can be
iterated on to execute subsequent requests.
"""
client = await self.get_async_client()
return await client.api_call(api_method, **kwargs)
def send_file_v2(
self,
*,
channel_id: str | None = None,
file_uploads: FileUploadTypeDef | list[FileUploadTypeDef],
initial_comment: str | None = None,
) -> SlackResponse:
"""
Send one or more files to a Slack channel using the Slack SDK Client method `files_upload_v2`.
:param channel_id: The ID of the channel to send the file to.
If omitting this parameter, then file will send to workspace.
:param file_uploads: The file(s) specification to upload.
:param initial_comment: The message text introducing the file in specified ``channel``.
"""
if channel_id and channel_id.startswith("#"):
retried_channel_id = self.get_channel_id(channel_id[1:])
warnings.warn(
"The method `files_upload_v2` in the Slack SDK Client expects a Slack Channel ID, "
f"but received a Slack Channel Name. To resolve this, consider replacing {channel_id!r} "
f"with the corresponding Channel ID {retried_channel_id!r}.",
UserWarning,
stacklevel=2,
)
channel_id = retried_channel_id
if not isinstance(file_uploads, list):
file_uploads = [file_uploads]
for file_upload in file_uploads:
if not file_upload.get("filename"):
# Some of early version of Slack SDK (such as 3.19.0) raise an error if ``filename`` not set.
file_upload["filename"] = "Uploaded file"
return self.client.files_upload_v2(
channel=channel_id,
# mypy doesn't happy even if TypedDict used instead of dict[str, Any]
# see: https://github.com/python/mypy/issues/4976
file_uploads=file_uploads, # type: ignore[arg-type]
initial_comment=initial_comment,
)
def send_file_v1_to_v2(
self,
*,
channels: str | Sequence[str] | None = None,
file: str | Path | None = None,
content: str | None = None,
filename: str | None = None,
initial_comment: str | None = None,
title: str | None = None,
snippet_type: str | None = None,
) -> list[SlackResponse]:
"""
Smooth transition between ``send_file`` and ``send_file_v2`` methods.
:param channels: Comma-separated list of channel names or IDs where the file will be shared.
If omitting this parameter, then file will send to workspace.
File would be uploaded for each channel individually.
:param file: Path to file which need to be sent.
:param content: File contents. If omitting this parameter, you must provide a file.
:param filename: Displayed filename.
:param initial_comment: The message text introducing the file in specified ``channels``.
:param title: Title of the file.
:param snippet_type: Syntax type for the content being uploaded.
"""
if not exactly_one(file, content):
raise ValueError("Either `file` or `content` must be provided, not both.")
if file:
file = Path(file)
file_uploads: FileUploadTypeDef = {"file": file.__fspath__(), "filename": filename or file.name}
else:
file_uploads = {"content": content, "filename": filename}
file_uploads.update({"title": title, "snippet_type": snippet_type})
if channels:
if isinstance(channels, str):
channels = channels.split(",")
channels_to_share: list[str | None] = list(map(str.strip, channels))
else:
channels_to_share = [None]
responses = []
for channel in channels_to_share:
responses.append(
self.send_file_v2(
channel_id=channel, file_uploads=file_uploads, initial_comment=initial_comment
)
)
return responses
def get_channel_id(self, channel_name: str) -> str:
"""
Retrieve a Slack channel id by a channel name.
It continuously iterates over all Slack channels (public and private)
until it finds the desired channel name in addition cache results for further usage.
.. seealso::
https://api.slack.com/methods/conversations.list
:param channel_name: The name of the Slack channel for which ID has to be found.
"""
next_cursor = None
while not (channel_id := self._channels_mapping.get(channel_name)):
res = self._call_conversations_list(cursor=next_cursor)
if TYPE_CHECKING:
# Slack SDK response type too broad, this should make mypy happy
assert isinstance(res.data, dict)
for channel_data in res.data.get("channels", []):
self._channels_mapping[channel_data["name"]] = channel_data["id"]
if not (next_cursor := res.data.get("response_metadata", {}).get("next_cursor")):
channel_id = self._channels_mapping.get(channel_name)
break
if not channel_id:
msg = f"Unable to find slack channel with name: {channel_name!r}"
raise LookupError(msg)
return channel_id
def _call_conversations_list(self, cursor: str | None = None):
"""
Call ``conversations.list`` with automatic 429-retry.
.. versionchanged:: 3.0.0
Automatically retries on 429 responses (up to 5 times, honouring *Retry-After* header).
:param cursor: Pagination cursor returned by the previous ``conversations.list`` call.
Pass ``None`` (default) to start from the first page.
:raises AirflowException: If the method hits the rate-limit 5 times in a row.
:raises SlackApiError: Propagated when errors other than 429 occur.
:return: Slack SDK response for the page requested.
"""
max_retries = 5
for attempt in range(max_retries):
try:
return self.client.conversations_list(cursor=cursor, types="public_channel,private_channel")
except SlackApiError as e:
if e.response.status_code == 429 and attempt < max_retries:
retry_after = int(e.response.headers.get("Retry-After", 30))
self.log.warning(
"Rate limit hit. Retrying in %s seconds. Attempt %s/%s",
retry_after,
attempt + 1,
max_retries,
)
time.sleep(retry_after)
else:
raise
raise AirflowException("Max retries reached for conversations.list")
def test_connection(self):
"""
Tests the Slack API connection.
.. seealso::
https://api.slack.com/methods/auth.test
"""
try:
response = self.call("auth.test")
response.validate()
except SlackApiError as e:
return False, str(e)
except Exception as e:
return False, f"Unknown error occurred while testing connection: {e}"
if isinstance(response.data, bytes):
# If response data binary then return simple message
return True, f"Connection successfully tested (url: {response.api_url})."
try:
return True, json.dumps(response.data)
except TypeError:
return True, str(response)
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return dictionary of widgets to be added for the hook to handle extra values."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import IntegerField, StringField
from wtforms.validators import NumberRange, Optional
return {
"timeout": IntegerField(
lazy_gettext("Timeout"),
widget=BS3TextFieldWidget(),
validators=[Optional(strip_whitespace=True), NumberRange(min=1)],
description="Optional. The maximum number of seconds the client will wait to connect "
"and receive a response from Slack API.",
),
"base_url": StringField(
lazy_gettext("Base URL"),
widget=BS3TextFieldWidget(),
description="Optional. A string representing the Slack API base URL.",
),
"proxy": StringField(
lazy_gettext("Proxy"),
widget=BS3TextFieldWidget(),
description="Optional. Proxy to make the Slack API call.",
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["login", "port", "host", "schema", "extra"],
"relabeling": {
"password": "Slack API Token",
},
"placeholders": {
"password": "REPLACE ME WITH A SLACK ACCESS TOKEN",
"timeout": "30",
"base_url": "https://www.slack.com/api/",
"proxy": "http://localhost:9000",
},
}
| SlackHook |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 15292,
"end": 15615
} | class ____(AbstractTemplate):
key = cuda.atomic.compare_and_swap
def generic(self, args, kws):
assert not kws
ary, old, val = args
dty = ary.dtype
if dty in integer_numba_types and ary.ndim == 1:
return signature(dty, ary, dty, dty)
@register
| Cuda_atomic_compare_and_swap |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 592771,
"end": 596424
} | class ____(CoercionNode):
# This node is used to check that a generic Python
# object is an instance of a particular extension type.
# This node borrows the result of its argument node.
# Builtin subtypes or compatible types (e.g. int/float) are coerced
# to the base type automatically.
exact_builtin_type = True
def __init__(self, arg, dst_type, env, notnone=False):
# The arg is known to be a Python object,
# and the dst_type is known to be an extension type or builtin.
assert dst_type.is_extension_type or dst_type.is_builtin_type, \
"PyTypeTest for %s against non extension type %s" % (arg.type, dst_type)
if dst_type.is_builtin_type:
# Use a temp to allow builtin type conversions (which cannot replace literals).
arg = arg.coerce_to_temp(env)
CoercionNode.__init__(self, arg)
self.type = dst_type
self.result_ctype = arg.ctype()
self.notnone = notnone
nogil_check = Node.gil_error
gil_message = "Python type test"
def analyse_types(self, env):
return self
def may_be_none(self):
if self.notnone:
return False
return self.arg.may_be_none()
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def is_ephemeral(self):
return self.arg.is_ephemeral()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def coerce_to_temp(self, env):
self.arg = self.arg.coerce_to_temp(env)
return self
def reanalyse(self):
if self.type != self.arg.type or not self.arg.result_in_temp():
return self
if not self.type.typeobj_is_available():
return self
# The argument has the right type, known at compile time, so discard the type check.
if self.arg.may_be_none() and self.notnone:
return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
return self.arg
def calculate_constant_result(self):
# FIXME
pass
def calculate_result_code(self):
return self.arg.result()
def generate_result_code(self, code):
if not self.type.typeobj_is_available():
error(self.pos, "Cannot test type of extern C class without type object name specification")
return
allow_none = not self.notnone
is_builtin_type = self.type.is_builtin_type
if self.exact_builtin_type and is_builtin_type:
# Allow conversions instead of rejecting subtypes and compatible (number) types.
self.type.convert_to_basetype(code, self.pos, self.arg.py_result(), allow_none)
return
type_test = self.type.type_test_code(
code.funcstate.scope,
self.arg.py_result(),
allow_none,
exact=self.exact_builtin_type if is_builtin_type else False,
)
code.globalstate.use_utility_code(UtilityCode.load_cached(
"RaiseUnexpectedTypeError" if is_builtin_type else "ExtTypeTest",
"ObjectHandling.c"))
code.putln(f"if (!({type_test})) {code.error_goto(self.pos)}")
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
def free_temps(self, code):
self.arg.free_temps(code)
def free_subexpr_temps(self, code):
self.arg.free_subexpr_temps(code)
| PyTypeTestNode |
python | getsentry__sentry | src/sentry/api/serializers/models/project.py | {
"start": 23792,
"end": 25157
} | class ____(ProjectSerializer):
def get_attrs(
self, item_list: Sequence[Project], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> dict[Project, dict[str, Any]]:
attrs = super().get_attrs(item_list, user)
project_teams = list(
ProjectTeam.objects.filter(project__in=item_list).select_related("team")
)
teams = {
pt.team_id: {
"id": str(pt.team.id),
"slug": pt.team.slug,
"name": pt.team.name,
}
for pt in project_teams
}
teams_by_project_id = defaultdict(list)
for pt in project_teams:
teams_by_project_id[pt.project_id].append(teams[pt.team_id])
for item in item_list:
attrs[item]["teams"] = teams_by_project_id[item.id]
return attrs
def serialize(
self,
obj: Project,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> ProjectWithTeamResponseDict:
base = super().serialize(obj, attrs, user)
# TODO(jess): remove this when this is deprecated
try:
extra: _MaybeTeam = {"team": attrs["teams"][0]}
except IndexError:
extra = {}
return {**base, **extra, "teams": attrs["teams"]}
| ProjectWithTeamSerializer |
python | dask__distributed | distributed/diagnostics/plugin.py | {
"start": 23846,
"end": 25336
} | class ____:
INSTALLER = "pip"
packages: list[str]
pip_options: list[str]
def __init__(self, packages: list[str], pip_options: list[str] | None = None):
self.packages = packages
self.pip_options = pip_options or []
def __call__(self) -> None:
logger.info(
"%s installing the following packages: %s",
self.INSTALLER,
self.packages,
)
# Use a requirements file under the hood to support
# environment variables
# See https://pip.pypa.io/en/stable/reference/requirements-file-format/#using-environment-variables
with tempfile.NamedTemporaryFile(mode="w+") as f:
f.write("\n".join(self.packages))
f.flush()
proc = subprocess.Popen(
[
sys.executable,
"-m",
"pip",
"install",
*self.pip_options,
"-r",
f.name,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, stderr = proc.communicate()
returncode = proc.wait()
if returncode != 0:
msg = f"pip install failed with '{stderr.decode().strip()}'"
logger.error(msg)
raise RuntimeError(msg)
# Adapted from https://github.com/dask/distributed/issues/3560#issuecomment-596138522
| _PipInstaller |
python | tensorflow__tensorflow | tensorflow/examples/adding_an_op/cuda_op_test.py | {
"start": 814,
"end": 1051
} | class ____(tf.test.TestCase):
def test(self):
if tf.test.is_built_with_cuda():
result = cuda_op.add_one([5, 4, 3, 2, 1])
self.assertAllEqual(result, [6, 5, 4, 3, 2])
if __name__ == '__main__':
tf.test.main()
| AddOneTest |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 71094,
"end": 73433
} | class ____(BaseScaledMMConfigMixin):
"""Mixing for scaled mm with the regular mm template"""
def get_extra_kwargs(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> dict[str, Any]:
kwargs = super().get_extra_kwargs(kernel_inputs, op_name)
from ..kernel.mm_common import scale_mm_epilogue
return {
**kwargs,
"suffix_args": kernel_inputs.count - 2,
"epilogue_fn": scale_mm_epilogue(),
"epilogue_fn_hash": "scale_mm_epilogue",
}
def _valid(self, kernel_inputs: KernelInputs) -> bool:
assert isinstance(kernel_inputs, MMKernelInputs), (
"Expect MMKernelInputs for ScaledMMConfigMixin"
)
_, _, k = kernel_inputs.mnk_symbolic()
if V.graph.sizevars.guard_or_false(sympy.Le(k, 16)):
# Triton crashes however uncommon for real workloads
return False
# On NVIDIA B200 GPUs, K dim must be >= 32 for tcgen05.mma.kind::f8f6f4.* PTX instruction to be valid
# source: https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-matrix-shape
if using_b200() and V.graph.sizevars.guard_or_false(sympy.Lt(k, 32)):
return False
return True
# pyrefly: ignore [bad-override]
def _filter_configs(self, configs: list[BaseConfig]) -> list[BaseConfig]:
"""
Filter out bad configs for specific hardware.
On AMD MI350X (GFX 9.5+), skip configs with BLOCK_K<=64 due to lack of corresponding MFMA instructions.
"""
def should_skip_mi350x_config(config: BaseConfig) -> bool:
"""Skip config if BLOCK_K<=64 on MI350X (GFX 9.5+)"""
try:
return (
config.block_k <= 64
and torch.version.hip is not None
and torch.cuda.get_device_capability() >= (9, 5)
)
except RuntimeError:
# If no HIP GPUs are available, we can't check device capability
# so we don't skip any configs
return False
filtered_configs = [c for c in configs if not should_skip_mi350x_config(c)]
return super()._filter_configs(filtered_configs)
# Scaled TMA-specific mixin for scaled MM templates with TMA
| ScaledMMConfigMixin |
python | getsentry__sentry | src/sentry/quotas/base.py | {
"start": 7734,
"end": 8724
} | class ____:
"""
Return value of ``quotas.is_rate_limited``.
"""
__slots__ = ["is_limited", "retry_after", "reason", "reason_code"]
def __init__(self, is_limited, retry_after=None, reason=None, reason_code=None):
self.is_limited = is_limited
# delta of seconds in the future to retry
self.retry_after = retry_after
# human readable description
self.reason = reason
# machine readable description
self.reason_code = reason_code
def to_dict(self):
"""
Converts the object into a plain dictionary
:return: a dict containing the non None elm of the RateLimit
>>> x = RateLimit(is_limited = False, retry_after = 33)
>>> x.to_dict() == {'is_limited': False, 'retry_after': 33}
True
"""
return {
name: getattr(self, name, None)
for name in self.__slots__
if getattr(self, name, None) is not None
}
| RateLimit |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/default_types_test.py | {
"start": 1681,
"end": 2121
} | class ____(MockSupertypes2With3):
def is_subtype_of(self, other):
return other._object == 2
def most_specific_common_supertype(self, others):
if not all(isinstance(other, Mock2AsTopType) for other in others):
return None
return (
self
if all(self._object == other._object for other in others)
else Mock2AsTopType(2)
)
def __repr__(self) -> str:
return 'Mock2AsTopType'
| Mock2AsTopType |
python | run-llama__llama_index | llama-index-core/llama_index/core/response_synthesizers/tree_summarize.py | {
"start": 642,
"end": 8745
} | class ____(BaseSynthesizer):
"""
Tree summarize response builder.
This response builder recursively merges text chunks and summarizes them
in a bottom-up fashion (i.e. building a tree from leaves to root).
More concretely, at each recursively step:
1. we repack the text chunks so that each chunk fills the context window of the LLM
2. if there is only one chunk, we give the final response
3. otherwise, we summarize each chunk and recursively summarize the summaries.
"""
def __init__(
self,
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
prompt_helper: Optional[PromptHelper] = None,
summary_template: Optional[BasePromptTemplate] = None,
output_cls: Optional[Type[BaseModel]] = None,
streaming: bool = False,
use_async: bool = False,
verbose: bool = False,
) -> None:
super().__init__(
llm=llm,
callback_manager=callback_manager,
prompt_helper=prompt_helper,
streaming=streaming,
output_cls=output_cls,
)
self._summary_template = summary_template or DEFAULT_TREE_SUMMARIZE_PROMPT_SEL
self._use_async = use_async
self._verbose = verbose
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"summary_template": self._summary_template}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "summary_template" in prompts:
self._summary_template = prompts["summary_template"]
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Get tree summarize response."""
summary_template = self._summary_template.partial_format(query_str=query_str)
# repack text_chunks so that each chunk fills the context window
text_chunks = self._prompt_helper.repack(
summary_template, text_chunks=text_chunks, llm=self._llm
)
if self._verbose:
print(f"{len(text_chunks)} text chunks after repacking")
# give final response if there is only one chunk
if len(text_chunks) == 1:
response: RESPONSE_TEXT_TYPE
if self._streaming:
response = await self._llm.astream(
summary_template, context_str=text_chunks[0], **response_kwargs
)
else:
if self._output_cls is None:
response = await self._llm.apredict(
summary_template,
context_str=text_chunks[0],
**response_kwargs,
)
else:
response = await self._llm.astructured_predict(
self._output_cls,
summary_template,
context_str=text_chunks[0],
**response_kwargs,
)
# return pydantic object if output_cls is specified
return response
else:
# summarize each chunk
if self._output_cls is None:
str_tasks = [
self._llm.apredict(
summary_template,
context_str=text_chunk,
**response_kwargs,
)
for text_chunk in text_chunks
]
summaries = await asyncio.gather(*str_tasks)
else:
model_tasks = [
self._llm.astructured_predict(
self._output_cls,
summary_template,
context_str=text_chunk,
**response_kwargs,
)
for text_chunk in text_chunks
]
summary_models = await asyncio.gather(*model_tasks)
summaries = [summary.model_dump_json() for summary in summary_models]
# recursively summarize the summaries
return await self.aget_response(
query_str=query_str,
text_chunks=summaries,
**response_kwargs,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Get tree summarize response."""
summary_template = self._summary_template.partial_format(query_str=query_str)
# repack text_chunks so that each chunk fills the context window
text_chunks = self._prompt_helper.repack(
summary_template, text_chunks=text_chunks, llm=self._llm
)
if self._verbose:
print(f"{len(text_chunks)} text chunks after repacking")
# give final response if there is only one chunk
if len(text_chunks) == 1:
response: RESPONSE_TEXT_TYPE
if self._streaming:
response = self._llm.stream(
summary_template, context_str=text_chunks[0], **response_kwargs
)
else:
if self._output_cls is None:
response = self._llm.predict(
summary_template,
context_str=text_chunks[0],
**response_kwargs,
)
else:
response = self._llm.structured_predict(
self._output_cls,
summary_template,
context_str=text_chunks[0],
**response_kwargs,
)
return response
else:
# summarize each chunk
if self._use_async:
if self._output_cls is None:
tasks = [
self._llm.apredict(
summary_template,
context_str=text_chunk,
**response_kwargs,
)
for text_chunk in text_chunks
]
else:
tasks = [
self._llm.astructured_predict(
self._output_cls,
summary_template,
context_str=text_chunk,
**response_kwargs,
)
for text_chunk in text_chunks
]
summary_responses = run_async_tasks(tasks)
if self._output_cls is not None:
summaries = [
summary.model_dump_json() for summary in summary_responses
]
else:
summaries = summary_responses
else:
if self._output_cls is None:
summaries = [
self._llm.predict(
summary_template,
context_str=text_chunk,
**response_kwargs,
)
for text_chunk in text_chunks
]
else:
summaries = [
self._llm.structured_predict(
self._output_cls,
summary_template,
context_str=text_chunk,
**response_kwargs,
)
for text_chunk in text_chunks
]
summaries = [summary.model_dump_json() for summary in summaries]
# recursively summarize the summaries
return self.get_response(
query_str=query_str, text_chunks=summaries, **response_kwargs
)
| TreeSummarize |
python | PrefectHQ__prefect | src/prefect/server/utilities/database.py | {
"start": 11743,
"end": 12268
} | class ____(functions.GenericFunction[datetime.timedelta]):
"""Platform-independent way to add two intervals."""
type: sa.Interval = sa.Interval()
inherit_cache: bool = True
def __init__(
self,
i1: _SQLExpressionOrLiteral[datetime.timedelta],
i2: _SQLExpressionOrLiteral[datetime.timedelta],
**kwargs: Any,
):
super().__init__(
sa.type_coerce(i1, sa.Interval()),
sa.type_coerce(i2, sa.Interval()),
**kwargs,
)
| interval_add |
python | PrefectHQ__prefect | src/prefect/server/events/schemas/events.py | {
"start": 3214,
"end": 6671
} | class ____(PrefectBaseModel):
"""The client-side view of an event that has happened to a Resource"""
occurred: prefect.types._datetime.DateTime = Field(
description="When the event happened from the sender's perspective",
)
event: Annotated[str, AfterValidator(_validate_event_name_length)] = Field(
description="The name of the event that happened",
)
resource: Resource = Field(
description="The primary Resource this event concerns",
)
related: list[RelatedResource] = Field(
default_factory=list,
description="A list of additional Resources involved in this event",
)
payload: dict[str, Any] = Field(
default_factory=dict,
description="An open-ended set of data describing what happened",
)
id: UUID = Field(
description="The client-provided identifier of this event",
)
follows: Optional[UUID] = Field(
default=None,
description=(
"The ID of an event that is known to have occurred prior to this one. "
"If set, this may be used to establish a more precise ordering of causally-"
"related events when they occur close enough together in time that the "
"system may receive them out-of-order."
),
)
@property
def size_bytes(self) -> int:
return len(self.model_dump_json().encode())
@property
def involved_resources(self) -> Sequence[Resource]:
return [self.resource] + list(self.related)
@property
def resource_in_role(self) -> Mapping[str, RelatedResource]:
"""Returns a mapping of roles to the first related resource in that role"""
return {related.role: related for related in reversed(self.related)}
@property
def resources_in_role(self) -> Mapping[str, Sequence[RelatedResource]]:
"""Returns a mapping of roles to related resources in that role"""
resources: Dict[str, List[RelatedResource]] = defaultdict(list)
for related in self.related:
resources[related.role].append(related)
return resources
@field_validator("related")
@classmethod
def enforce_maximum_related_resources(
cls, value: List[RelatedResource]
) -> List[RelatedResource]:
if len(value) > PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES.value():
raise ValueError(
"The maximum number of related resources "
f"is {PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES.value()}"
)
return value
def receive(
self, received: Optional[prefect.types._datetime.DateTime] = None
) -> "ReceivedEvent":
kwargs = self.model_dump()
if received is not None:
kwargs["received"] = received
return ReceivedEvent(**kwargs)
def find_resource_label(self, label: str) -> Optional[str]:
"""Finds the value of the given label in this event's resource or one of its
related resources. If the label starts with `related:<role>:`, search for the
first matching label in a related resource with that role."""
directive, _, related_label = label.rpartition(":")
directive, _, role = directive.partition(":")
if directive == "related":
for related in self.related:
if related.role == role:
return related.get(related_label)
return self.resource.get(label)
| Event |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 117176,
"end": 117471
} | class ____(sgqlc.types.Enum):
"""Properties by which team discussion connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Allows chronological ordering of team discussions.
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT",)
| TeamDiscussionOrderField |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance2.py | {
"start": 277,
"end": 375
} | class ____(ClassA):
def calculate(self) -> int:
return 2 * 2
TC = TypeVar("TC")
| ChildB |
python | pytorch__pytorch | torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py | {
"start": 2252,
"end": 41803
} | class ____(NamedTuple):
# fix List[str] with List[List[Union[nn.Module, FunctionType, BuiltinFunctionType]]]
# Basically we are mapping a quantization config to some list of patterns.
# a pattern is defined as a list of nn module, function or builtin function names
# e.g. [nn.Conv2d, torch.relu, torch.add]
# We have not resolved whether fusion can be considered internal details of the
# quantizer hence it does not need communication to user.
# Note this pattern is not really informative since it does not really
# tell us the graph structure resulting from the list of ops.
config: QuantizationConfig
operators: list[OperatorPatternType]
def _is_annotated(nodes: list[Node]):
"""
Given a list of nodes (that represents an operator pattern),
check if any of the node is annotated, return True if any of the node
is annotated, otherwise return False
"""
annotated = False
for node in nodes:
annotated = annotated or (
"quantization_annotation" in node.meta
and node.meta["quantization_annotation"]._annotated
)
return annotated
def _mark_nodes_as_annotated(nodes: list[Node]):
for node in nodes:
if node is not None:
if "quantization_annotation" not in node.meta:
node.meta["quantization_annotation"] = QuantizationAnnotation()
node.meta["quantization_annotation"]._annotated = True
def get_input_act_qspec(quantization_config: QuantizationConfig | None):
if quantization_config is None:
return None
if quantization_config.input_activation is None:
return None
quantization_spec: QuantizationSpec = quantization_config.input_activation
if quantization_spec.qscheme not in [
torch.per_tensor_affine,
torch.per_tensor_symmetric,
]:
raise AssertionError(
f"Unsupported activation qscheme: {quantization_spec.qscheme}"
)
return quantization_spec
def get_output_act_qspec(quantization_config: QuantizationConfig | None):
if quantization_config is None:
return None
if quantization_config.output_activation is None:
return None
quantization_spec: QuantizationSpec = quantization_config.output_activation
if quantization_spec.qscheme not in [
torch.per_tensor_affine,
torch.per_tensor_symmetric,
]:
raise AssertionError(
f"Unsupported activation qscheme: {quantization_spec.qscheme}"
)
return quantization_spec
def get_weight_qspec(quantization_config: QuantizationConfig | None):
if quantization_config is None:
return None
if quantization_config is None:
raise AssertionError("quantization_config must not be None")
if quantization_config.weight is None:
return None
quantization_spec: QuantizationSpec = quantization_config.weight
if quantization_spec.qscheme not in [
torch.per_tensor_symmetric,
torch.per_channel_symmetric,
None,
]:
raise ValueError(
f"Unsupported quantization_spec {quantization_spec} for weight"
)
return quantization_spec
def get_bias_qspec(quantization_config: QuantizationConfig | None):
if quantization_config is None:
return None
if quantization_config is None:
raise AssertionError("quantization_config must not be None")
if quantization_config.bias is None:
return None
quantization_spec: QuantizationSpec = quantization_config.bias
if quantization_spec.dtype != torch.float:
raise AssertionError(
"Only float dtype for bias is supported for bias right now"
)
return quantization_spec
@register_annotator("linear")
def _annotate_linear(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
annotated_partitions = []
input_act_qspec = get_input_act_qspec(quantization_config)
output_act_qspec = get_output_act_qspec(quantization_config)
weight_qspec = get_weight_qspec(quantization_config)
bias_qspec = get_bias_qspec(quantization_config)
for node in gm.graph.nodes:
if node.op != "call_function" or node.target != torch.ops.aten.linear.default:
continue
if filter_fn and not filter_fn(node):
continue
act_node = node.args[0]
weight_node = node.args[1]
bias_node = None
if len(node.args) > 2:
bias_node = node.args[2]
if _is_annotated([node]) is False: # type: ignore[list-item]
_annotate_input_qspec_map(
node,
act_node,
input_act_qspec,
)
_annotate_input_qspec_map(
node,
weight_node,
weight_qspec,
)
nodes_to_mark_annotated = [node, weight_node]
if bias_node:
_annotate_input_qspec_map(
node,
bias_node,
bias_qspec,
)
nodes_to_mark_annotated.append(bias_node)
_annotate_output_qspec(node, output_act_qspec)
_mark_nodes_as_annotated(nodes_to_mark_annotated)
annotated_partitions.append(nodes_to_mark_annotated)
return annotated_partitions
@register_annotator("linear_relu")
def _annotate_linear_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
annotated_partitions = []
input_act_qspec = get_input_act_qspec(quantization_config)
output_act_qspec = get_output_act_qspec(quantization_config)
weight_qspec = get_weight_qspec(quantization_config)
bias_qspec = get_bias_qspec(quantization_config)
for node in gm.graph.nodes:
if node.op != "call_function" or node.target not in [
torch.ops.aten.relu.default,
torch.ops.aten.relu_.default,
]:
continue
relu_node = node
maybe_linear_node = node.args[0]
if (
not isinstance(maybe_linear_node, Node)
or maybe_linear_node.op != "call_function"
or maybe_linear_node.target != torch.ops.aten.linear.default
):
continue
linear_node = maybe_linear_node
if len(linear_node.users) > 1:
# if linear node has multiple users, then it can't be fused with relu
continue
input_qspec_map = {}
input_act = linear_node.args[0]
if not isinstance(input_act, Node):
raise AssertionError("input activation must be a FX Node")
input_qspec_map[input_act] = input_act_qspec
weight = linear_node.args[1]
if not isinstance(weight, Node):
raise AssertionError("weight must be a FX Node")
input_qspec_map[weight] = weight_qspec
# adding weight node to the partition as well
partition = [relu_node, linear_node, weight]
bias = linear_node.args[2] if len(linear_node.args) > 2 else None
if isinstance(bias, Node):
input_qspec_map[bias] = bias_qspec
partition.append(bias)
if _is_annotated(partition):
continue
if filter_fn and any(not filter_fn(n) for n in partition):
continue
linear_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
_annotated=True,
)
relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
output_qspec=output_act_qspec,
_annotated=True,
)
_mark_nodes_as_annotated(partition)
annotated_partitions.append(partition)
return annotated_partitions
@register_annotator("conv")
def _annotate_conv(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
annotated_partitions = []
for n in gm.graph.nodes:
if n.op != "call_function" or n.target not in [
torch.ops.aten.conv1d.default,
torch.ops.aten.conv2d.default,
]:
continue
conv_node = n
input_qspec_map = {}
input_act = conv_node.args[0]
if not isinstance(input_act, Node):
raise AssertionError("input activation must be a FX Node")
input_qspec_map[input_act] = get_input_act_qspec(quantization_config)
weight = conv_node.args[1]
if not isinstance(weight, Node):
raise AssertionError("weight must be a FX Node")
input_qspec_map[weight] = get_weight_qspec(quantization_config)
# adding weight node to the partition as well
partition = [conv_node, conv_node.args[1]]
bias = conv_node.args[2] if len(conv_node.args) > 2 else None
if isinstance(bias, Node):
input_qspec_map[bias] = get_bias_qspec(quantization_config)
partition.append(bias)
if _is_annotated(partition):
continue
if filter_fn and any(not filter_fn(n) for n in partition):
continue
conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
output_qspec=get_output_act_qspec(quantization_config),
_annotated=True,
)
_mark_nodes_as_annotated(partition)
annotated_partitions.append(partition)
return annotated_partitions
def _do_annotate_conv_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
is_conv_transpose: bool = False,
):
annotated_partitions = []
for n in gm.graph.nodes:
if n.op != "call_function" or n.target not in [
torch.ops.aten.relu.default,
torch.ops.aten.relu_.default,
]:
continue
relu_node = n
maybe_conv_node = n.args[0]
is_conv_node = _is_conv_transpose_node if is_conv_transpose else _is_conv_node
if not isinstance(maybe_conv_node, Node) or not is_conv_node(maybe_conv_node):
continue
conv_node = maybe_conv_node
if len(conv_node.users) > 1:
# relu shouldn't be fuseable to conv if there are other users
# of convolution
continue
input_qspec_map = {}
input_act = conv_node.args[0]
if not isinstance(input_act, Node):
raise AssertionError("input activation must be a FX Node")
input_qspec_map[input_act] = get_input_act_qspec(quantization_config)
weight = conv_node.args[1]
if not isinstance(weight, Node):
raise AssertionError("weight must be a FX Node")
input_qspec_map[weight] = get_weight_qspec(quantization_config)
# adding weight node to the partition as well
partition = [relu_node, conv_node, conv_node.args[1]]
bias = conv_node.args[2] if len(conv_node.args) > 2 else None
if isinstance(bias, Node):
input_qspec_map[bias] = get_bias_qspec(quantization_config)
partition.append(bias)
# pyrefly: ignore [bad-argument-type]
if _is_annotated(partition):
continue
# pyrefly: ignore [bad-argument-type]
if filter_fn and any(not filter_fn(n) for n in partition):
continue
conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map, _annotated=True
)
relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
_annotated=True,
)
# pyrefly: ignore [bad-argument-type]
_mark_nodes_as_annotated(partition)
annotated_partitions.append(partition)
return annotated_partitions
@register_annotator("conv_relu")
def _annotate_conv_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
return _do_annotate_conv_relu(
gm, quantization_config, filter_fn, is_conv_transpose=False
)
@register_annotator("conv_transpose_relu")
def _annotate_conv_transpose_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
return _do_annotate_conv_relu(
gm, quantization_config, filter_fn, is_conv_transpose=True
)
@register_annotator("conv_bn")
def _annotate_conv_bn(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
"""
Find conv + batchnorm partitions
Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.
"""
return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=False)
@register_annotator("conv_bn_relu")
def _annotate_conv_bn_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
"""
Find conv + batchnorm + relu partitions
Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.
"""
return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=True)
@register_annotator("conv_transpose_bn")
def _annotate_conv_transpose_bn(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
"""
Find conv_transpose + batchnorm partitions
Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.
"""
return _do_annotate_conv_bn(
gm, quantization_config, filter_fn, has_relu=False, is_conv_transpose=True
)
@register_annotator("conv_transpose_bn_relu")
def _annotate_conv_transpose_bn_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
"""
Find conv_transpose + batchnorm + relu partitions
Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.
"""
return _do_annotate_conv_bn(
gm, quantization_config, filter_fn, has_relu=True, is_conv_transpose=True
)
def _do_annotate_conv_bn(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None,
has_relu: bool,
is_conv_transpose: bool = False,
) -> list[list[Node]]:
"""
Given a function that takes in a `conv_fn` and returns a conv-bn[-relu] pattern,
return a list of annotated partitions.
The output of the pattern must include a dictionary from string name to node
for the following names: "input", "conv", "weight", "bias", and "output".
"""
# Example inputs for conv-bn1d patterns
_conv1d_bn_example_inputs = (
torch.randn(1, 1, 3), # x
torch.randn(1, 1, 1), # conv_weight
torch.randn(1), # conv_bias
torch.randn(1), # bn_weight
torch.randn(1), # bn_bias
torch.randn(1), # bn_running_mean
torch.randn(1), # bn_running_var
)
# Example inputs for conv-bn2d patterns
_conv2d_bn_example_inputs = (
torch.randn(1, 1, 3, 3), # x
torch.randn(1, 1, 1, 1), # conv_weight
torch.randn(1), # conv_bias
torch.randn(1), # bn_weight
torch.randn(1), # bn_bias
torch.randn(1), # bn_running_mean
torch.randn(1), # bn_running_var
)
def get_pattern(conv_fn: Callable, relu_is_inplace: bool):
def _conv_bn(x, conv_weight, conv_bias, bn_weight, bn_bias, bn_rm, bn_rv):
conv = conv_fn(x, conv_weight, conv_bias)
bn = F.batch_norm(conv, bn_rm, bn_rv, bn_weight, bn_bias, training=True)
if has_relu:
output = F.relu_(bn) if relu_is_inplace else F.relu(bn)
else:
output = bn
return output, {
"input": x,
"conv": conv,
"weight": conv_weight,
"bias": conv_bias,
"output": output,
}
return _WrapperModule(_conv_bn)
# Needed for matching, otherwise the matches gets filtered out due to unused
# nodes returned by batch norm
gm.graph.eliminate_dead_code()
gm.recompile()
matches = []
if is_conv_transpose:
combinations = [
(F.conv_transpose1d, _conv1d_bn_example_inputs),
(F.conv_transpose2d, _conv2d_bn_example_inputs),
]
else:
combinations = [
(F.conv1d, _conv1d_bn_example_inputs), # type: ignore[list-item]
(F.conv2d, _conv2d_bn_example_inputs), # type: ignore[list-item]
]
# Add `is_cuda` and `relu_is_inplace` dimensions
combinations = itertools.product( # type: ignore[assignment]
combinations,
[True, False] if torch.cuda.is_available() else [False], # is_cuda
[True, False] if has_relu else [False], # relu_is_inplace
)
# Match against all conv dimensions and cuda variants
for (conv_fn, example_inputs), is_cuda, relu_is_inplace in combinations: # type: ignore[misc]
pattern = get_pattern(conv_fn, relu_is_inplace) # type: ignore[has-type]
pattern = _get_aten_graph_module_for_pattern(pattern, example_inputs, is_cuda) # type: ignore[has-type]
pattern.graph.eliminate_dead_code()
pattern.recompile()
matcher = SubgraphMatcherWithNameNodeMap(pattern, ignore_literals=True)
matches.extend(matcher.match(gm.graph))
# Annotate nodes returned in the matches
annotated_partitions = []
for match in matches:
name_node_map = match.name_node_map
input_node = name_node_map["input"]
conv_node = name_node_map["conv"]
weight_node = name_node_map["weight"]
bias_node = name_node_map["bias"]
output_node = name_node_map["output"]
# TODO: annotate the uses of input, weight, and bias separately instead
# of assuming they come from a single conv node. This is not possible today
# because input may have multiple users, and we can't rely on the conv node
# always being the first user. This was the case in models with skip
# connections like resnet18
# Validate conv args
if conv_node.args[0] is not input_node:
raise ValueError("Conv arg did not contain input node ", input_node)
if conv_node.args[1] is not weight_node:
raise ValueError("Conv arg did not contain weight node ", weight_node)
if len(conv_node.args) > 2 and conv_node.args[2] is not bias_node:
raise ValueError("Conv arg did not contain bias node ", bias_node)
# Skip if the partition is already annotated or is filtered out by the user
partition = [conv_node, weight_node]
if bias_node is not None:
partition.append(bias_node)
if _is_annotated(partition):
continue
if filter_fn and any(not filter_fn(n) for n in partition):
continue
# Annotate conv inputs and pattern output
input_qspec_map = {}
input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
input_qspec_map[weight_node] = get_weight_qspec(quantization_config)
if bias_node is not None:
input_qspec_map[bias_node] = get_bias_qspec(quantization_config)
conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
_annotated=True,
)
output_node.meta["quantization_annotation"] = QuantizationAnnotation(
output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
_annotated=True,
)
_mark_nodes_as_annotated(partition)
annotated_partitions.append(partition)
return annotated_partitions
@register_annotator("gru_io_only")
def _annotate_gru_io_only(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
gru_partitions = get_source_partitions(gm.graph, [torch.nn.GRU], filter_fn)
gru_partitions = list(itertools.chain.from_iterable(gru_partitions.values()))
annotated_partitions = []
for gru_partition in gru_partitions:
annotated_partitions.append(gru_partition.nodes)
output_nodes = gru_partition.output_nodes
input_nodes = gru_partition.input_nodes
# skip annotation if it is already annotated
if _is_annotated(input_nodes + output_nodes):
continue
# inside each GRU partition, we should be able to annotate each linear
# subgraph
input_act = input_nodes[0]
input_act_user = next(iter(input_act.users.keys()))
if not isinstance(input_act, Node):
raise AssertionError("input activation must be a FX Node")
if not isinstance(input_act_user, Node):
raise AssertionError("input activation user must be a FX Node")
input_act_user.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
input_act: get_input_act_qspec(quantization_config),
},
_annotated=True,
)
hidden_state = input_nodes[1]
hidden_state_user = next(iter(hidden_state.users.keys()))
if not isinstance(hidden_state, Node):
raise AssertionError("hidden state must be a FX Node")
if not isinstance(hidden_state_user, Node):
raise AssertionError("hidden state user must be a FX Node")
hidden_state_user.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
hidden_state: get_input_act_qspec(quantization_config),
},
_annotated=True,
)
if len(output_nodes) != 2:
raise AssertionError("expecting GRU to have two outputs")
for output in output_nodes:
output.meta["quantization_annotation"] = QuantizationAnnotation(
output_qspec=get_output_act_qspec(quantization_config),
_annotated=True,
)
nodes_to_mark_annotated = list(gru_partition.nodes)
_mark_nodes_as_annotated(nodes_to_mark_annotated)
return annotated_partitions
@register_annotator("adaptive_avg_pool2d")
def _annotate_adaptive_avg_pool2d(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
"""Always annotate adaptive_avg_pool2d op"""
module_partitions = get_source_partitions(
gm.graph, [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d], filter_fn
)
partitions = list(itertools.chain.from_iterable(module_partitions.values()))
annotated_partitions = []
for partition in partitions:
pool_node = partition.output_nodes[0]
if (
pool_node.op != "call_function"
or pool_node.target != torch.ops.aten.adaptive_avg_pool2d.default
):
raise ValueError(f"{pool_node} is not an aten adaptive_avg_pool2d operator")
if _is_annotated([pool_node]):
continue
annotated_partitions.append(partition.nodes)
input_act = pool_node.args[0]
if not isinstance(input_act, Node):
raise AssertionError("input activation must be a FX Node")
# only annotate input output sharing operator
# when the output of the input node is annotated
if (
"quantization_annotation" not in input_act.meta
or not input_act.meta["quantization_annotation"]._annotated
or input_act.meta["quantization_annotation"].output_qspec is None
):
input_act_qspec = get_input_act_qspec(quantization_config)
else:
input_act_qspec = SharedQuantizationSpec(input_act)
# output sharing with input
output_act_qspec = SharedQuantizationSpec((input_act, pool_node))
pool_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
input_act: input_act_qspec,
},
output_qspec=output_act_qspec,
_annotated=True,
)
return annotated_partitions
def _is_input_large_scalar(node: Node, gm: torch.fx.GraphModule):
"""Check if input is a large scalar value. So that we can skip quantization for the node
since histc op (in HistogramObserver) only works for values up to certain upper bound
"""
if node.op == "get_attr":
qualified_name = str(node.target)
module_path, _, name = qualified_name.rpartition(".")
submod = gm.get_submodule(module_path)
tensor = getattr(submod, name)
# torch.histc works until this upper bound
HISTC_UPPER_BOUND = 3.4028235e15
return tensor.numel() == 1 and abs(tensor.item()) > HISTC_UPPER_BOUND
return False
def _is_input_non_float_tensor(node: Node):
"""Check if the input is not a float tensor, so that we can skip quantization for the node
since observers only works with float Tensors
"""
if "val" not in node.meta or not isinstance(node.meta["val"], FakeTensor):
return True
return node.meta["val"].dtype != torch.float32
@register_annotator("add_relu")
def _annotate_add_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
annotated_partitions = []
for node in gm.graph.nodes:
if node.op != "call_function" or node.target not in [
torch.ops.aten.relu.default,
torch.ops.aten.relu_.default,
]:
continue
relu_node = node
maybe_add = node.args[0]
if (
not isinstance(maybe_add, Node)
or maybe_add.op != "call_function"
or maybe_add.target
not in [
torch.ops.aten.add.Tensor,
torch.ops.aten.add_.Tensor,
]
):
continue
add_node = maybe_add
if len(add_node.users) > 1:
# add can't be fused with ReLU if the result of add is being used
# else where in the graph
continue
partition = [relu_node, add_node]
if _is_annotated(partition):
continue
if filter_fn and any(not filter_fn(n) for n in partition):
continue
input_act_qspec = get_input_act_qspec(quantization_config)
output_act_qspec = get_output_act_qspec(quantization_config)
input_qspec_map = {}
input_act0 = add_node.args[0]
if isinstance(input_act0, Node):
if _is_input_large_scalar(input_act0, gm):
continue
if _is_input_non_float_tensor(input_act0):
continue
partition.append(input_act0)
input_qspec_map[input_act0] = input_act_qspec
input_act1 = add_node.args[1]
if isinstance(input_act1, Node):
if _is_input_large_scalar(input_act1, gm):
continue
if _is_input_non_float_tensor(input_act1):
continue
partition.append(input_act1)
input_qspec_map[input_act1] = input_act_qspec
add_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
_annotated=True,
)
relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
output_qspec=output_act_qspec,
_annotated=True,
)
annotated_partitions.append(partition)
return annotated_partitions
@register_annotator("add")
def _annotate_add(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
annotated_partitions = []
for node in gm.graph.nodes:
if node.op != "call_function" or node.target not in [
torch.ops.aten.add.Tensor,
torch.ops.aten.add_.Tensor,
]:
continue
add_node = node
partition = [add_node]
if _is_annotated(partition):
continue
if filter_fn and any(not filter_fn(n) for n in partition):
continue
input_act_qspec = get_input_act_qspec(quantization_config)
output_act_qspec = get_output_act_qspec(quantization_config)
input_qspec_map = {}
input_act0 = add_node.args[0]
if isinstance(input_act0, Node):
if _is_input_large_scalar(input_act0, gm):
continue
if _is_input_non_float_tensor(input_act0):
continue
input_qspec_map[input_act0] = input_act_qspec
partition.append(input_act0)
input_act1 = add_node.args[1]
if isinstance(input_act1, Node):
if _is_input_large_scalar(input_act1, gm):
continue
if _is_input_non_float_tensor(input_act1):
continue
input_qspec_map[input_act1] = input_act_qspec
partition.append(input_act1)
add_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
output_qspec=output_act_qspec,
_annotated=True,
)
annotated_partitions.append(partition)
return annotated_partitions
@register_annotator("mul_relu")
def _annotate_mul_relu(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
annotated_partitions = []
for node in gm.graph.nodes:
if node.op != "call_function" or node.target not in [
torch.ops.aten.relu.default,
torch.ops.aten.relu_.default,
]:
continue
relu_node = node
maybe_mul = node.args[0]
if (
not isinstance(maybe_mul, Node)
or maybe_mul.op != "call_function"
or maybe_mul.target
not in [
torch.ops.aten.mul.Tensor,
torch.ops.aten.mul_.Tensor,
]
):
continue
mul_node = maybe_mul
if len(mul_node.users) > 1:
# mul can't be fused with ReLU if the result of mul is being used
# else where in the graph
continue
partition = [relu_node, mul_node]
if _is_annotated(partition):
continue
if filter_fn and any(not filter_fn(n) for n in partition):
continue
input_act_qspec = get_input_act_qspec(quantization_config)
output_act_qspec = get_output_act_qspec(quantization_config)
input_qspec_map = {}
input_act0 = mul_node.args[0]
if isinstance(input_act0, Node):
if _is_input_large_scalar(input_act0, gm):
continue
if _is_input_non_float_tensor(input_act0):
continue
partition.append(input_act0)
input_qspec_map[input_act0] = input_act_qspec
input_act1 = mul_node.args[1]
if isinstance(input_act1, Node):
if _is_input_large_scalar(input_act1, gm):
continue
if _is_input_non_float_tensor(input_act1):
continue
partition.append(input_act1)
input_qspec_map[input_act1] = input_act_qspec
mul_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
_annotated=True,
)
relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
output_qspec=output_act_qspec,
_annotated=True,
)
annotated_partitions.append(partition)
return annotated_partitions
@register_annotator("mul")
def _annotate_mul(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
annotated_partitions = []
for node in gm.graph.nodes:
if node.op != "call_function" or node.target not in [
torch.ops.aten.mul.Tensor,
torch.ops.aten.mul_.Tensor,
]:
continue
mul_node = node
partition = [mul_node]
if _is_annotated(partition):
continue
if filter_fn and any(not filter_fn(n) for n in partition):
continue
input_act_qspec = get_input_act_qspec(quantization_config)
output_act_qspec = get_output_act_qspec(quantization_config)
input_qspec_map = {}
input_act0 = mul_node.args[0]
if isinstance(input_act0, Node):
if _is_input_large_scalar(input_act0, gm):
continue
if _is_input_non_float_tensor(input_act0):
continue
input_qspec_map[input_act0] = input_act_qspec
partition.append(input_act0)
input_act1 = mul_node.args[1]
if isinstance(input_act1, Node):
if _is_input_large_scalar(input_act1, gm):
continue
if _is_input_non_float_tensor(input_act1):
continue
input_qspec_map[input_act1] = input_act_qspec
partition.append(input_act0)
mul_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
output_qspec=output_act_qspec,
_annotated=True,
)
annotated_partitions.append(partition)
return annotated_partitions
# TODO: remove Optional in return type, fix annotated_partitions logic
@register_annotator("cat")
def _annotate_cat(
gm: torch.fx.GraphModule,
quantization_config: QuantizationConfig | None,
filter_fn: Callable[[Node], bool] | None = None,
) -> list[list[Node]] | None:
cat_partitions = get_source_partitions(gm.graph, [torch.cat], filter_fn)
cat_partitions = list(itertools.chain.from_iterable(cat_partitions.values()))
annotated_partitions = []
for cat_partition in cat_partitions:
cat_node = cat_partition.output_nodes[0]
if _is_annotated([cat_node]):
continue
if cat_node.target != torch.ops.aten.cat.default:
# TODO: change this to AnnotationException
raise Exception( # noqa: TRY002
f"Expected cat node: torch.ops.aten.cat.default, but found {cat_node.target}"
" please check if you are calling the correct capture API"
)
annotated_partitions.append(cat_partition.nodes)
input_act_qspec = get_input_act_qspec(quantization_config)
inputs = cat_node.args[0]
input_qspec_map = {}
input_act0 = inputs[0] # type: ignore[index]
if isinstance(input_act0, Node):
input_qspec_map[input_act0] = input_act_qspec
shared_with_input0_qspec = SharedQuantizationSpec((input_act0, cat_node)) # type: ignore[arg-type]
for input_act in inputs[1:]: # type: ignore[index, union-attr]
if input_act not in input_qspec_map:
input_qspec_map[input_act] = shared_with_input0_qspec # type: ignore[index]
output_act_qspec = shared_with_input0_qspec
cat_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
output_qspec=output_act_qspec,
_annotated=True,
)
return annotated_partitions
def _is_share_obs_or_fq_op(op: Callable) -> bool:
return op in [
torch.ops.aten.relu.default,
torch.ops.aten.hardtanh.default,
torch.ops.aten.hardtanh_.default,
torch.ops.aten.max_pool2d.default,
torch.ops.aten.mean.default,
torch.ops.aten.mean.dim,
torch.ops.aten.permute.default,
torch.ops.aten.permute_copy.default,
torch.ops.aten.squeeze.dim,
torch.ops.aten.squeeze_copy.dim,
# TODO: remove?
torch.ops.aten.adaptive_avg_pool2d.default,
torch.ops.aten.view_copy.default,
torch.ops.aten.view.default,
torch.ops.aten.slice_copy.Tensor,
torch.ops.aten.flatten.using_ints,
]
def propagate_annotation(model: torch.fx.GraphModule) -> None:
for n in model.graph.nodes:
if n.op != "call_function" or not _is_share_obs_or_fq_op(n.target):
continue
prev_node = n.args[0]
if not isinstance(prev_node, Node):
continue
quantization_annotation = prev_node.meta.get("quantization_annotation", None)
if not quantization_annotation:
continue
output_qspec = quantization_annotation.output_qspec
if not output_qspec:
continue
# make sure current node is not annotated
if (
"quantization_annotation" in n.meta
and n.meta["quantization_annotation"]._annotated
):
continue
shared_qspec = SharedQuantizationSpec(prev_node)
# propagate the previous output_qspec to the current node
n.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
prev_node: shared_qspec,
},
output_qspec=shared_qspec,
_annotated=True,
)
# TODO: make the list of ops customizable
def _convert_scalars_to_attrs(model: torch.fx.GraphModule) -> torch.fx.GraphModule:
for n in model.graph.nodes:
if n.op != "call_function" or n.target not in [
torch.ops.aten.add.Tensor,
torch.ops.aten.mul.Tensor,
]:
continue
args = list(n.args)
new_args = []
for i in range(len(args)):
if isinstance(args[i], torch.fx.Node):
new_args.append(args[i])
continue
prefix = "_tensor_constant_"
get_new_attr_name = get_new_attr_name_with_prefix(prefix)
tensor_constant_name = get_new_attr_name(model)
float_tensor = torch.tensor(float(args[i]))
model.register_buffer(tensor_constant_name, float_tensor)
fake_mode = n.meta["val"].fake_mode
with model.graph.inserting_before(n):
get_attr_node = model.graph.create_node(
"get_attr", tensor_constant_name, (), {}
)
get_attr_node.meta["val"] = fake_mode.from_tensor(
float_tensor, static_shapes=True
)
new_args.append(get_attr_node)
n.args = tuple(new_args)
model.recompile()
return model
| OperatorConfig |
python | scipy__scipy | scipy/stats/tests/test_morestats.py | {
"start": 34702,
"end": 37383
} | class ____:
def test_data(self, xp):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
args = [xp.asarray(arg) for arg in args]
W, pval = stats.levene(*args)
xp_assert_close(W, xp.asarray(1.7059176930008939))
xp_assert_close(pval, xp.asarray(0.0990829755522))
def test_mean(self, xp):
# numbers from R: leveneTest in package car
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
args = [xp.asarray(arg) for arg in args]
W, pval = stats.levene(*args, center="mean")
xp_assert_close(W, xp.asarray(2.15945985647285))
xp_assert_close(pval, xp.asarray(0.032236826559783))
def test_trimmed1(self, xp):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
args = (xp.asarray(g1), xp.asarray(g2), xp.asarray(g3))
W1, pval1 = stats.levene(*args, center='mean')
W2, pval2 = stats.levene(*args, center='trimmed', proportiontocut=0.0)
xp_assert_close(W1, W2)
xp_assert_close(pval1, pval2)
def test_trimmed2(self, xp):
# numbers from R: leveneTest in package car
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
args = [xp.asarray(arg) for arg in args]
W, pval = stats.levene(*args, center="trimmed", proportiontocut=0.25)
xp_assert_close(W, xp.asarray(2.07712845686874))
xp_assert_close(pval, xp.asarray(0.0397269688035377))
def test_equal_mean_median(self, xp):
x = np.linspace(-1, 1, 21)
rng = np.random.default_rng(4058827756)
x2 = rng.permutation(x)
y = x**3
x, x2, y = xp.asarray(x), xp.asarray(x2), xp.asarray(y)
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
xp_assert_close(W1, W2)
xp_assert_close(pval1, pval2)
def test_bad_center_value(self, xp):
x = xp.linspace(-1, 1, 21)
message = "center must be 'mean', 'median' or 'trimmed'."
with pytest.raises(ValueError, match=message):
stats.levene(x, x, center='trim')
def test_too_few_args(self, xp):
message = "Must provide at least two samples."
with pytest.raises(ValueError, match=message):
stats.levene(xp.asarray([1]))
def test_result_attributes(self, xp):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
args = [xp.asarray(arg) for arg in args]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, xp=xp)
| TestLevene |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/question_answering/chain.py | {
"start": 1126,
"end": 9509
} | class ____(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(
self,
llm: BaseLanguageModel,
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_map_rerank_chain(
llm: BaseLanguageModel,
*,
prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
verbose: bool = False,
document_variable_name: str = "context",
rank_key: str = "score",
answer_key: str = "answer",
callback_manager: BaseCallbackManager | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> MapRerankDocumentsChain:
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
)
return MapRerankDocumentsChain(
llm_chain=llm_chain,
rank_key=rank_key,
answer_key=answer_key,
document_variable_name=document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
**kwargs,
)
def _load_stuff_chain(
llm: BaseLanguageModel,
*,
prompt: BasePromptTemplate | None = None,
document_variable_name: str = "context",
verbose: bool | None = None,
callback_manager: BaseCallbackManager | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> StuffDocumentsChain:
_prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
llm_chain = LLMChain(
llm=llm,
prompt=_prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
)
# TODO: document prompt
return StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name=document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
**kwargs,
)
def _load_map_reduce_chain(
llm: BaseLanguageModel,
*,
question_prompt: BasePromptTemplate | None = None,
combine_prompt: BasePromptTemplate | None = None,
combine_document_variable_name: str = "summaries",
map_reduce_document_variable_name: str = "context",
collapse_prompt: BasePromptTemplate | None = None,
reduce_llm: BaseLanguageModel | None = None,
collapse_llm: BaseLanguageModel | None = None,
verbose: bool | None = None,
callback_manager: BaseCallbackManager | None = None,
callbacks: Callbacks = None,
token_max: int = 3000,
**kwargs: Any,
) -> MapReduceDocumentsChain:
_question_prompt = (
question_prompt or map_reduce_prompt.QUESTION_PROMPT_SELECTOR.get_prompt(llm)
)
_combine_prompt = (
combine_prompt or map_reduce_prompt.COMBINE_PROMPT_SELECTOR.get_prompt(llm)
)
map_chain = LLMChain(
llm=llm,
prompt=_question_prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(
llm=_reduce_llm,
prompt=_combine_prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
)
# TODO: document prompt
combine_documents_chain = StuffDocumentsChain(
llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
msg = (
"collapse_llm provided, but collapse_prompt was not: please "
"provide one or stop providing collapse_llm."
)
raise ValueError(msg)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(
llm_chain=LLMChain(
llm=_collapse_llm,
prompt=collapse_prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
),
document_variable_name=combine_document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_chain,
token_max=token_max,
verbose=verbose,
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
document_variable_name=map_reduce_document_variable_name,
reduce_documents_chain=reduce_documents_chain,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
**kwargs,
)
def _load_refine_chain(
llm: BaseLanguageModel,
*,
question_prompt: BasePromptTemplate | None = None,
refine_prompt: BasePromptTemplate | None = None,
document_variable_name: str = "context_str",
initial_response_name: str = "existing_answer",
refine_llm: BaseLanguageModel | None = None,
verbose: bool | None = None,
callback_manager: BaseCallbackManager | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> RefineDocumentsChain:
_question_prompt = (
question_prompt or refine_prompts.QUESTION_PROMPT_SELECTOR.get_prompt(llm)
)
_refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt(
llm,
)
initial_chain = LLMChain(
llm=llm,
prompt=_question_prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(
llm=_refine_llm,
prompt=_refine_prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
)
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
**kwargs,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. See the following migration guides for replacements "
"based on `chain_type`:\n"
"stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n"
"map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n"
"refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n"
"map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n"
"\nSee also guides on retrieval and question-answering here: "
"https://python.langchain.com/docs/how_to/#qa-with-rag"
),
)
def load_qa_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
verbose: bool | None = None, # noqa: FBT001
callback_manager: BaseCallbackManager | None = None,
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Load question answering chain.
Args:
llm: Language Model to use in the chain.
chain_type: Type of document combining chain to use. Should be one of "stuff",
"map_reduce", "map_rerank", and "refine".
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
callback_manager: Callback manager to use for the chain.
**kwargs: Additional keyword arguments.
Returns:
A chain to use for question answering.
"""
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
"map_rerank": _load_map_rerank_chain,
}
if chain_type not in loader_mapping:
msg = (
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
raise ValueError(msg)
return loader_mapping[chain_type](
llm,
verbose=verbose,
callback_manager=callback_manager,
**kwargs,
)
| LoadingCallable |
python | optuna__optuna | optuna/visualization/_slice.py | {
"start": 988,
"end": 1235
} | class ____(NamedTuple):
param_name: str
x: list[Any]
y: list[float]
trial_numbers: list[int]
is_log: bool
is_numerical: bool
constraints: list[bool]
x_labels: tuple[CategoricalChoiceType, ...] | None
| _SliceSubplotInfo |
python | pyinstaller__pyinstaller | PyInstaller/building/splash.py | {
"start": 969,
"end": 23950
} | class ____(Target):
"""
Bundles the required resources for the splash screen into a file, which will be included in the CArchive.
A Splash has two outputs, one is itself and one is stored in splash.binaries. Both need to be passed to other
build targets in order to enable the splash screen.
"""
def __init__(self, image_file, binaries, datas, **kwargs):
"""
:param str image_file:
A path-like object to the image to be used. Only the PNG file format is supported.
.. note:: If a different file format is supplied and PIL (Pillow) is installed, the file will be converted
automatically.
.. note:: *Windows*: The color ``'magenta'`` / ``'#ff00ff'`` must not be used in the image or text, as it is
used by splash screen to indicate transparent areas. Use a similar color (e.g., ``'#ff00fe'``) instead.
.. note:: If PIL (Pillow) is installed and the image is bigger than max_img_size, the image will be resized
to fit into the specified area.
:param list binaries:
The TOC list of binaries the Analysis build target found. This TOC includes all extension modules and their
binary dependencies. This is required to determine whether the user's program uses `tkinter`.
:param list datas:
The TOC list of data the Analysis build target found. This TOC includes all data-file dependencies of the
modules. This is required to check if all splash screen requirements can be bundled.
:keyword text_pos:
An optional two-integer tuple that represents the origin of the text on the splash screen image. The
origin of the text is its lower left corner. A unit in the respective coordinate system is a pixel of the
image, its origin lies in the top left corner of the image. This parameter also acts like a switch for
the text feature. If omitted, no text will be displayed on the splash screen. This text will be used to
show textual progress in onefile mode.
:type text_pos: Tuple[int, int]
:keyword text_size:
The desired size of the font. If the size argument is a positive number, it is interpreted as a size in
points. If size is a negative number, its absolute value is interpreted as a size in pixels. Default: ``12``
:type text_size: int
:keyword text_font:
An optional name of a font for the text. This font must be installed on the user system, otherwise the
system default font is used. If this parameter is omitted, the default font is also used.
:keyword text_color:
An optional color for the text. HTML color codes (``'#40e0d0'``) and color names (``'turquoise'``) are
supported. Default: ``'black'``
(Windows: the color ``'magenta'`` / ``'#ff00ff'`` is used to indicate transparency, and should not be used)
:type text_color: str
:keyword text_default:
The default text which will be displayed before the extraction starts. Default: ``"Initializing"``
:type text_default: str
:keyword full_tk:
By default Splash bundles only the necessary files for the splash screen (some tk components). This
options enables adding full tk and making it a requirement, meaning all tk files will be unpacked before
the splash screen can be started. This is useful during development of the splash screen script.
Default: ``False``
:type full_tk: bool
:keyword minify_script:
The splash screen is created by executing an Tcl/Tk script. This option enables minimizing the script,
meaning removing all non essential parts from the script. Default: ``True``
:keyword name:
An optional alternative filename for the .res file. If not specified, a name is generated.
:type name: str
:keyword script_name:
An optional alternative filename for the Tcl script, that will be generated. If not specified, a name is
generated.
:type script_name: str
:keyword max_img_size:
Maximum size of the splash screen image as a tuple. If the supplied image exceeds this limit, it will be
resized to fit the maximum width (to keep the original aspect ratio). This option can be disabled by
setting it to None. Default: ``(760, 480)``
:type max_img_size: Tuple[int, int]
:keyword always_on_top:
Force the splashscreen to be always on top of other windows. If disabled, other windows (e.g., from other
applications) can cover the splash screen by user bringing them to front. This might be useful for
frozen applications with long startup times. Default: ``True``
:type always_on_top: bool
"""
from PyInstaller.config import CONF
from PyInstaller.utils.hooks.tcl_tk import tcltk_info
Target.__init__(self)
# Splash screen is not supported on macOS. It operates in a secondary thread and macOS disallows UI operations
# in any thread other than main.
if is_darwin:
raise SystemExit("ERROR: Splash screen is not supported on macOS.")
# Ensure tkinter (and thus Tcl/Tk) is available.
if not tcltk_info.available:
raise SystemExit(
"ERROR: Your platform does not support the splash screen feature, since tkinter is not installed. "
"Please install tkinter and try again."
)
# Check if the Tcl/Tk version is supported.
logger.info("Verifying Tcl/Tk compatibility with splash screen requirements")
self._check_tcl_tk_compatibility(tcltk_info)
# Make image path relative to .spec file
if not os.path.isabs(image_file):
image_file = os.path.join(CONF['specpath'], image_file)
image_file = os.path.normpath(image_file)
if not os.path.exists(image_file):
raise ValueError("Image file '%s' not found" % image_file)
# Copy all arguments
self.image_file = image_file
self.full_tk = kwargs.get("full_tk", False)
self.name = kwargs.get("name", None)
self.script_name = kwargs.get("script_name", None)
self.minify_script = kwargs.get("minify_script", True)
self.max_img_size = kwargs.get("max_img_size", (760, 480))
# text options
self.text_pos = kwargs.get("text_pos", None)
self.text_size = kwargs.get("text_size", 12)
self.text_font = kwargs.get("text_font", "TkDefaultFont")
self.text_color = kwargs.get("text_color", "black")
self.text_default = kwargs.get("text_default", "Initializing")
# always-on-top behavior
self.always_on_top = kwargs.get("always_on_top", True)
# Save the generated file separately so that it is not necessary to generate the data again and again
root = os.path.splitext(self.tocfilename)[0]
if self.name is None:
self.name = root + '.res'
if self.script_name is None:
self.script_name = root + '_script.tcl'
# Internal variables
# Store path to _tkinter extension module, so that guts check can detect if the path changed for some reason.
self._tkinter_file = tcltk_info.tkinter_extension_file
# Calculated / analysed values
self.uses_tkinter = self._uses_tkinter(self._tkinter_file, binaries)
logger.debug("Program uses tkinter: %r", self.uses_tkinter)
self.script = self.generate_script()
self.tcl_lib = tcltk_info.tcl_shared_library # full path to shared library
self.tk_lib = tcltk_info.tk_shared_library
assert self.tcl_lib is not None
assert self.tk_lib is not None
logger.debug("Using Tcl shared library: %r", self.tcl_lib)
logger.debug("Using Tk shared library: %r", self.tk_lib)
self.splash_requirements = set([
# NOTE: the implicit assumption here is that Tcl and Tk shared library are collected into top-level
# application directory, which, at tme moment, is true in practically all cases.
os.path.basename(self.tcl_lib),
os.path.basename(self.tk_lib),
# The list of requirements below is based on the current implementation of splash screen script. If you want
# to extend the splash screen functionality and run into Tcl/Tk errors, chances are that additional Tk
# components need to be added here.
#
# NOTE: these paths use the *destination* layout for Tcl/Tk scripts, which uses unversioned tcl and tk
# directories (see `PyInstaller.utils.hooks.tcl_tk.collect_tcl_tk_files`).
os.path.join(tcltk_info.TCL_ROOTNAME, "init.tcl"),
# Core Tk
os.path.join(tcltk_info.TK_ROOTNAME, "license.terms"),
os.path.join(tcltk_info.TK_ROOTNAME, "text.tcl"),
os.path.join(tcltk_info.TK_ROOTNAME, "tk.tcl"),
# Used for customizable font
os.path.join(tcltk_info.TK_ROOTNAME, "ttk", "ttk.tcl"),
os.path.join(tcltk_info.TK_ROOTNAME, "ttk", "fonts.tcl"),
os.path.join(tcltk_info.TK_ROOTNAME, "ttk", "cursors.tcl"),
os.path.join(tcltk_info.TK_ROOTNAME, "ttk", "utils.tcl"),
])
if tcltk_info.tk_version >= (9, 0):
self.splash_requirements.update([
os.path.join(tcltk_info.TK_ROOTNAME, "scaling.tcl"),
os.path.join(tcltk_info.TK_ROOTNAME, "tclIndex"), # required for auto-load of scaling.tcl
])
logger.info("Collect Tcl/Tk data files for the splash screen")
tcltk_tree = tcltk_info.data_files # 3-element tuple TOC
if self.full_tk:
# The user wants a full copy of Tk, so make all Tk files a requirement.
self.splash_requirements.update(entry[0] for entry in tcltk_tree)
# Scan for binary dependencies of the Tcl/Tk shared libraries, and add them to `binaries` TOC list (which
# should really be called `dependencies` as it is not limited to binaries. But it is too late now, and
# existing spec files depend on this naming). We specify these binary dependencies (which include the
# Tcl and Tk shared libraries themselves) even if the user's program uses tkinter and they would be collected
# anyway; let the collection mechanism deal with potential duplicates.
tcltk_libs = [(os.path.basename(src_name), src_name, 'BINARY') for src_name in (self.tcl_lib, self.tk_lib)]
self.binaries = bindepend.binary_dependency_analysis(tcltk_libs)
# Put all shared library dependencies in `splash_requirements`, so they are made available in onefile mode.
self.splash_requirements.update(entry[0] for entry in self.binaries)
# If the user's program does not use tkinter, add resources from Tcl/Tk tree to the dependencies list.
# Do so only for the resources that are part of splash requirements.
if not self.uses_tkinter:
self.binaries.extend(entry for entry in tcltk_tree if entry[0] in self.splash_requirements)
# Check if all requirements were found.
collected_files = set(entry[0] for entry in (binaries + datas + self.binaries))
def _filter_requirement(filename):
if filename not in collected_files:
# Item is not bundled, so warn the user about it. This actually may happen on some tkinter installations
# that are missing the license.terms file - as this file has no effect on operation of splash screen,
# suppress the warning for it.
if os.path.basename(filename) == 'license.terms':
return False
logger.warning(
"The local Tcl/Tk installation is missing the file %s. The behavior of the splash screen is "
"therefore undefined and may be unsupported.", filename
)
return False
return True
# Remove all files which were not found.
self.splash_requirements = set(filter(_filter_requirement, self.splash_requirements))
logger.debug("Splash Requirements: %s", self.splash_requirements)
# On AIX, the Tcl and Tk shared libraries might in fact be ar archives with shared object inside it, and need to
# be `dlopen`'ed with full name (for example, `libtcl.a(libtcl.so.8.6)` and `libtk.a(libtk.so.8.6)`. So if the
# library's suffix is .a, adjust the name accordingly, assuming fixed format for the shared object name.
# Adjust the names at the end of this method, because preceding steps use `self.tcl_lib` and `self.tk_lib` for
# filesystem-based operations and need the original filenames.
if is_aix:
_, ext = os.path.splitext(self.tcl_lib)
if ext == '.a':
tcl_major, tcl_minor = tcltk_info.tcl_version
self.tcl_lib += f"(libtcl.so.{tcl_major}.{tcl_minor})"
_, ext = os.path.splitext(self.tk_lib)
if ext == '.a':
tk_major, tk_minor = tcltk_info.tk_version
self.tk_lib += f"(libtk.so.{tk_major}.{tk_minor})"
self.__postinit__()
_GUTS = (
# input parameters
('image_file', _check_guts_eq),
('name', _check_guts_eq),
('script_name', _check_guts_eq),
('text_pos', _check_guts_eq),
('text_size', _check_guts_eq),
('text_font', _check_guts_eq),
('text_color', _check_guts_eq),
('text_default', _check_guts_eq),
('always_on_top', _check_guts_eq),
('full_tk', _check_guts_eq),
('minify_script', _check_guts_eq),
('max_img_size', _check_guts_eq),
# calculated/analysed values
('uses_tkinter', _check_guts_eq),
('script', _check_guts_eq),
('tcl_lib', _check_guts_eq),
('tk_lib', _check_guts_eq),
('splash_requirements', _check_guts_eq),
('binaries', _check_guts_toc),
# internal value
# Check if the tkinter installation changed. This is theoretically possible if someone uses two different python
# installations of the same version.
('_tkinter_file', _check_guts_eq),
)
def _check_guts(self, data, last_build):
if Target._check_guts(self, data, last_build):
return True
# Check if the image has been modified.
if misc.mtime(self.image_file) > last_build:
logger.info("Building %s because file %s changed", self.tocbasename, self.image_file)
return True
return False
def assemble(self):
logger.info("Building Splash %s", self.name)
# Check if PIL/pillow is available.
try:
from PIL import Image as PILImage
except ImportError:
PILImage = None
# Required to pass tcltk_info.TK_ROOTNAME to SplashWriter
from PyInstaller.utils.hooks.tcl_tk import tcltk_info
# Function to resize a given image to fit into the area defined by max_img_size.
def _resize_image(_image, _orig_size):
if PILImage:
_w, _h = _orig_size
_ratio_w = self.max_img_size[0] / _w
if _ratio_w < 1:
# Image width exceeds limit
_h = int(_h * _ratio_w)
_w = self.max_img_size[0]
_ratio_h = self.max_img_size[1] / _h
if _ratio_h < 1:
# Image height exceeds limit
_w = int(_w * _ratio_h)
_h = self.max_img_size[1]
# If a file is given it will be open
if isinstance(_image, PILImage.Image):
_img = _image
else:
_img = PILImage.open(_image)
_img_resized = _img.resize((_w, _h))
# Save image into a stream
_image_stream = io.BytesIO()
_img_resized.save(_image_stream, format='PNG')
_img.close()
_img_resized.close()
_image_data = _image_stream.getvalue()
logger.info("Resized image %s from dimensions %r to (%d, %d)", self.image_file, _orig_size, _w, _h)
return _image_data
else:
raise ValueError(
"The splash image dimensions (w: %d, h: %d) exceed max_img_size (w: %d, h:%d), but the image "
"cannot be resized due to missing PIL.Image! Either install the Pillow package, adjust the "
"max_img_size, or use an image of compatible dimensions." %
(_orig_size[0], _orig_size[1], self.max_img_size[0], self.max_img_size[1])
)
# Open image file
image_file = open(self.image_file, 'rb')
# Check header of the file to identify it
if image_file.read(8) == b'\x89PNG\r\n\x1a\n':
# self.image_file is a PNG file
image_file.seek(16)
img_size = (struct.unpack("!I", image_file.read(4))[0], struct.unpack("!I", image_file.read(4))[0])
if img_size > self.max_img_size:
# The image exceeds the maximum image size, so resize it
image = _resize_image(self.image_file, img_size)
else:
image = os.path.abspath(self.image_file)
elif PILImage:
# Pillow is installed, meaning the image can be converted automatically
img = PILImage.open(self.image_file, mode='r')
if img.size > self.max_img_size:
image = _resize_image(img, img.size)
else:
image_data = io.BytesIO()
img.save(image_data, format='PNG')
img.close()
image = image_data.getvalue()
logger.info("Converted image %s to PNG format", self.image_file)
else:
raise ValueError(
"The image %s needs to be converted to a PNG file, but PIL.Image is not available! Either install the "
"Pillow package, or use a PNG image for you splash screen." % (self.image_file,)
)
image_file.close()
SplashWriter(
self.name,
self.splash_requirements,
os.path.basename(self.tcl_lib), # tcl86t.dll
os.path.basename(self.tk_lib), # tk86t.dll
tcltk_info.TCL_ROOTNAME,
tcltk_info.TK_ROOTNAME,
image,
self.script
)
@staticmethod
def _check_tcl_tk_compatibility(tcltk_info):
tcl_version = tcltk_info.tcl_version # (major, minor) tuple
tk_version = tcltk_info.tk_version
if is_darwin and tcltk_info.is_macos_system_framework:
# Outdated Tcl/Tk 8.5 system framework is not supported.
raise SystemExit(
"ERROR: The splash screen feature does not support macOS system framework version of Tcl/Tk."
)
# Test if tcl/tk version is supported
if tcl_version < (8, 6) or tk_version < (8, 6):
logger.warning(
"The installed Tcl/Tk (%d.%d / %d.%d) version might not work with the splash screen feature of the "
"bootloader, which was tested against Tcl/Tk 8.6", *tcl_version, *tk_version
)
# This should be impossible, since tcl/tk is released together with the same version number, but just in case
if tcl_version != tk_version:
logger.warning(
"The installed version of Tcl (%d.%d) and Tk (%d.%d) do not match. PyInstaller is tested against "
"matching versions", *tcl_version, *tk_version
)
# Ensure that Tcl is built with multi-threading support.
if not tcltk_info.tcl_threaded:
# This is a feature breaking problem, so exit.
raise SystemExit(
"ERROR: The installed Tcl version is not threaded. PyInstaller only supports the splash screen "
"using threaded Tcl."
)
# Ensure that Tcl and Tk shared libraries are available
if tcltk_info.tcl_shared_library is None or tcltk_info.tk_shared_library is None:
message = \
"ERROR: Could not determine the path to Tcl and/or Tk shared library, " \
"which are required for splash screen."
if not tcltk_info.tkinter_extension_file:
message += (
" The _tkinter module appears to be a built-in, which likely means that python was built with "
"statically-linked Tcl/Tk libraries and is incompatible with splash screen."
)
raise SystemExit(message)
def generate_script(self):
"""
Generate the script for the splash screen.
If minify_script is True, all unnecessary parts will be removed.
"""
d = {}
if self.text_pos is not None:
logger.debug("Add text support to splash screen")
d.update({
'pad_x': self.text_pos[0],
'pad_y': self.text_pos[1],
'color': self.text_color,
'font': self.text_font,
'font_size': self.text_size,
'default_text': self.text_default,
})
script = splash_templates.build_script(text_options=d, always_on_top=self.always_on_top)
if self.minify_script:
# Remove any documentation, empty lines and unnecessary spaces
script = '\n'.join(
line for line in map(lambda line: line.strip(), script.splitlines())
if not line.startswith('#') # documentation
and line # empty lines
)
# Remove unnecessary spaces
script = re.sub(' +', ' ', script)
# Write script to disk, so that it is transparent to the user what script is executed.
with open(self.script_name, "w", encoding="utf-8") as script_file:
script_file.write(script)
return script
@staticmethod
def _uses_tkinter(tkinter_file, binaries):
# Test for _tkinter extension instead of tkinter module, because user might use a different wrapping library for
# Tk. Use `pathlib.PurePath` in comparisons to account for case normalization and separator normalization.
tkinter_file = pathlib.PurePath(tkinter_file)
for dest_name, src_name, typecode in binaries:
if pathlib.PurePath(src_name) == tkinter_file:
return True
return False
| Splash |
python | ipython__ipython | IPython/utils/syspathcontext.py | {
"start": 131,
"end": 963
} | class ____:
"""A context for prepending a directory to sys.path for a second."""
dir: str
added: bool
def __init__(self, dir: str) -> None:
self.dir = dir
self.added = False
def __enter__(self) -> Self:
if self.dir not in sys.path:
sys.path.insert(0, self.dir)
self.added = True
else:
self.added = False
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> Literal[False]:
if self.added:
try:
sys.path.remove(self.dir)
except ValueError:
pass
# Returning False causes any exceptions to be re-raised.
return False
| prepended_to_syspath |
python | realpython__materials | directory-tree-generator-python/source_code_step_2/rptree/rptree.py | {
"start": 386,
"end": 1830
} | class ____:
def __init__(self, root_dir):
self._root_dir = pathlib.Path(root_dir)
self._tree = []
def build_tree(self):
self._tree_head()
self._tree_body(self._root_dir)
return self._tree
def _tree_head(self):
self._tree.append(f"{self._root_dir}{os.sep}")
self._tree.append(PIPE)
def _tree_body(self, directory, prefix=""):
entries = directory.iterdir()
entries = sorted(entries, key=lambda entry: entry.is_file())
entries_count = len(entries)
for index, entry in enumerate(entries):
connector = ELBOW if index == entries_count - 1 else TEE
if entry.is_dir():
self._add_directory(
entry, index, entries_count, prefix, connector
)
else:
self._add_file(entry, prefix, connector)
def _add_directory(
self, directory, index, entries_count, prefix, connector
):
self._tree.append(f"{prefix}{connector} {directory.name}{os.sep}")
if index != entries_count - 1:
prefix += PIPE_PREFIX
else:
prefix += SPACE_PREFIX
self._tree_body(
directory=directory,
prefix=prefix,
)
self._tree.append(prefix.rstrip())
def _add_file(self, file, prefix, connector):
self._tree.append(f"{prefix}{connector} {file.name}")
| _TreeGenerator |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 124389,
"end": 127654
} | class ____(nn.Module):
"""This class implements an attentive statistic pooling layer for each channel.
It returns the concatenated mean and std of the input tensor.
"""
def __init__(self, channels, attention_channels=128):
super().__init__()
self.eps = 1e-12
self.tdnn = TimeDelayNetBlock(channels * 3, attention_channels, 1, 1)
self.tanh = nn.Tanh()
self.conv = nn.Conv1d(
in_channels=attention_channels,
out_channels=channels,
kernel_size=1,
padding="same",
padding_mode="reflect",
)
def _length_to_mask(self, length, max_len=None, dtype=None, device=None):
"""Creates a binary mask for each sequence.
Reference: https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3
Arguments
---------
length : torch.LongTensor
Containing the length of each sequence in the batch. Must be 1D.
max_len : int
Max length for the mask, also the size of the second dimension.
dtype : torch.dtype, default: None
The dtype of the generated mask.
device: torch.device, default: None
The device to put the mask variable.
Returns
-------
mask : tensor
The binary mask.
"""
if max_len is None:
max_len = length.max().long().item() # using arange to generate mask
mask = torch.arange(max_len, device=length.device, dtype=length.dtype).expand(
len(length), max_len
) < length.unsqueeze(1)
mask = torch.as_tensor(mask, dtype=dtype, device=device)
return mask
def _compute_statistics(self, x, m, dim=2):
mean = (m * x).sum(dim)
std = torch.sqrt((m * (x - mean.unsqueeze(dim)).pow(2)).sum(dim).clamp(self.eps))
return mean, std
def forward(self, hidden_states):
seq_length = hidden_states.shape[-1]
lengths = torch.ones(hidden_states.shape[0], device=hidden_states.device)
# Make binary mask of shape [N, 1, L]
mask = self._length_to_mask(
lengths * seq_length, max_len=seq_length, dtype=hidden_states.dtype, device=hidden_states.device
)
mask = mask.unsqueeze(1)
# Expand the temporal context of the pooling layer by allowing the
# self-attention to look at global properties of the utterance.
total = mask.sum(dim=2, keepdim=True)
mean, std = self._compute_statistics(hidden_states, mask / total)
mean = mean.unsqueeze(2).repeat(1, 1, seq_length)
std = std.unsqueeze(2).repeat(1, 1, seq_length)
attention = torch.cat([hidden_states, mean, std], dim=1)
# Apply layers
attention = self.conv(self.tanh(self.tdnn(attention)))
# Filter out zero-paddings
attention = attention.masked_fill(mask == 0, float("-inf"))
attention = F.softmax(attention, dim=2)
mean, std = self._compute_statistics(hidden_states, attention)
# Append mean and std of the batch
pooled_stats = torch.cat((mean, std), dim=1)
pooled_stats = pooled_stats.unsqueeze(2)
return pooled_stats
| AttentiveStatisticsPooling |
python | keon__algorithms | tests/test_dfs.py | {
"start": 201,
"end": 805
} | class ____(unittest.TestCase):
def test_get_factors(self):
self.assertEqual([[2, 16], [2, 2, 8], [2, 2, 2, 4], [2, 2, 2, 2, 2],
[2, 4, 4], [4, 8]], get_factors(32))
def test_get_factors_iterative1(self):
self.assertEqual([[2, 16], [4, 8], [2, 2, 8], [2, 4, 4], [2, 2, 2, 4],
[2, 2, 2, 2, 2]], get_factors_iterative1(32))
def test_get_factors_iterative2(self):
self.assertEqual([[2, 2, 2, 2, 2], [2, 2, 2, 4], [2, 2, 8], [2, 4, 4],
[2, 16], [4, 8]], get_factors_iterative2(32))
| TestAllFactors |
python | mlflow__mlflow | mlflow/telemetry/events.py | {
"start": 10148,
"end": 10234
} | class ____(Event):
name: str = "traces_received_by_server"
| TracesReceivedByServerEvent |
python | numba__llvmlite | llvmlite/binding/ffi.py | {
"start": 8854,
"end": 8953
} | class ____(object):
"""
Dummy class to make error messages more helpful.
"""
| _DeadPointer |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_misc.py | {
"start": 4646,
"end": 11380
} | class ____(fixtures.RemoveORMEventsGlobally, fixtures.TestBase):
__requires__ = ("cpython", "python_profiling_backend")
@testing.fixture
def t1(self, metadata):
return Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
Column("x1", Integer),
Column("x2", Integer),
Column("x3", Integer),
Column("x4", Integer),
Column("x5", Integer),
Column("x6", Integer),
Column("x7", Integer),
Column("x8", Integer),
Column("x9", Integer),
Column("x10", Integer),
)
@testing.fixture
def inheritance_model(self, decl_base):
class Employee(ConcreteBase, decl_base):
__tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(50))
x1 = Column(Integer)
x2 = Column(Integer)
x3 = Column(Integer)
x4 = Column(Integer)
x5 = Column(Integer)
x6 = Column(Integer)
x7 = Column(Integer)
x8 = Column(Integer)
x9 = Column(Integer)
x10 = Column(Integer)
x11 = Column(Integer)
x12 = Column(Integer)
x13 = Column(Integer)
x14 = Column(Integer)
x15 = Column(Integer)
x16 = Column(Integer)
__mapper_args__ = {
"polymorphic_identity": "employee",
"concrete": True,
}
class Manager(Employee):
__tablename__ = "manager"
id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
m1 = Column(Integer)
m2 = Column(Integer)
m3 = Column(Integer)
m4 = Column(Integer)
m5 = Column(Integer)
m6 = Column(Integer)
m7 = Column(Integer)
m8 = Column(Integer)
m9 = Column(Integer)
m10 = Column(Integer)
m11 = Column(Integer)
m12 = Column(Integer)
m13 = Column(Integer)
m14 = Column(Integer)
m15 = Column(Integer)
m16 = Column(Integer)
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
class Engineer(Employee):
__tablename__ = "engineer"
id = Column(Integer, primary_key=True)
name = Column(String(50))
engineer_info = Column(String(40))
e1 = Column(Integer)
e2 = Column(Integer)
e3 = Column(Integer)
e4 = Column(Integer)
e5 = Column(Integer)
e6 = Column(Integer)
e7 = Column(Integer)
e8 = Column(Integer)
e9 = Column(Integer)
e10 = Column(Integer)
e11 = Column(Integer)
e12 = Column(Integer)
e13 = Column(Integer)
e14 = Column(Integer)
e15 = Column(Integer)
e16 = Column(Integer)
__mapper_args__ = {
"polymorphic_identity": "engineer",
"concrete": True,
}
decl_base.registry.configure()
return Employee
@testing.combinations(
("require_embedded",), ("no_embedded",), argnames="require_embedded"
)
def test_corresponding_column_isolated(self, t1, require_embedded):
subq = select(t1).union_all(select(t1)).subquery()
target = subq.c.x7
src = t1.c.x7
subq.c
require_embedded = require_embedded == "require_embedded"
@profiling.function_call_count(variance=0.15, warmup=1)
def go():
assert (
subq.corresponding_column(
src, require_embedded=require_embedded
)
is target
)
go()
@testing.combinations(
("require_embedded",), ("no_embedded",), argnames="require_embedded"
)
def test_gen_subq_to_table_single_corresponding_column(
self, t1, require_embedded
):
src = t1.c.x7
require_embedded = require_embedded == "require_embedded"
@profiling.function_call_count(variance=0.15, warmup=1)
def go():
subq = select(t1).union_all(select(t1)).subquery()
target = subq.c.x7
assert (
subq.corresponding_column(
src, require_embedded=require_embedded
)
is target
)
go()
@testing.combinations(
("require_embedded",), ("no_embedded",), argnames="require_embedded"
)
def test_gen_subq_to_table_many_corresponding_column(
self, t1, require_embedded
):
require_embedded = require_embedded == "require_embedded"
@profiling.function_call_count(variance=0.15, warmup=1)
def go():
subq = select(t1).union_all(select(t1)).subquery()
for name in ("x%d" % i for i in range(1, 10)):
target = subq.c[name]
src = t1.c[name]
assert (
subq.corresponding_column(
src, require_embedded=require_embedded
)
is target
)
go()
@testing.combinations(
("require_embedded",), ("no_embedded",), argnames="require_embedded"
)
def test_gen_subq_aliased_class_select(
self, t1, require_embedded, inheritance_model
):
A = inheritance_model
require_embedded = require_embedded == "require_embedded"
@profiling.function_call_count(variance=0.15, warmup=1)
def go():
a1a1 = aliased(A)
a1a2 = aliased(A)
subq = select(a1a1).union_all(select(a1a2)).subquery()
a1 = aliased(A, subq)
inspect(a1).__clause_element__()
go()
@testing.combinations(
("require_embedded",), ("no_embedded",), argnames="require_embedded"
)
def test_gen_subq_aliased_class_select_cols(
self, t1, require_embedded, inheritance_model
):
A = inheritance_model
require_embedded = require_embedded == "require_embedded"
@profiling.function_call_count(variance=0.15, warmup=1)
def go():
a1a1 = aliased(A)
a1a2 = aliased(A)
subq = select(a1a1).union_all(select(a1a2)).subquery()
a1 = aliased(A, subq)
select(a1.x1, a1.x2, a1.x3, a1.x4)
go()
| CCLookupTest |
python | Pylons__pyramid | src/pyramid/config/tweens.py | {
"start": 6979,
"end": 7758
} | class ____:
def __init__(self):
self.sorter = TopologicalSorter(
default_before=None,
default_after=INGRESS,
first=INGRESS,
last=MAIN,
)
self.explicit = []
def add_explicit(self, name, factory):
self.explicit.append((name, factory))
def add_implicit(self, name, factory, under=None, over=None):
self.sorter.add(name, factory, after=under, before=over)
def implicit(self):
return self.sorter.sorted()
def __call__(self, handler, registry):
if self.explicit:
use = self.explicit
else:
use = self.implicit()
for name, factory in use[::-1]:
handler = factory(handler, registry)
return handler
| Tweens |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_default_format15.py | {
"start": 315,
"end": 1644
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("default_format15.xlsx")
def test_create_file(self):
"""Test the creation of a file with user defined default format"""
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {"font_name": "Arial", "font_size": 10},
"default_row_height": 17,
"default_column_width": 64,
},
)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "https://www.cpan.org/")
workbook.close()
self.assertExcelEqual()
def test_create_file_with_character_units(self):
"""Test the creation of a file with user defined default format"""
# Same as
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {"font_name": "Arial", "font_size": 10},
"default_row_height": 17,
"default_column_width": 64,
},
)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "https://www.cpan.org/")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | davidhalter__jedi | jedi/inference/imports.py | {
"start": 5225,
"end": 23087
} | class ____:
def __init__(self, inference_state, import_path, module_context, level=0):
"""
An implementation similar to ``__import__``. Use `follow`
to actually follow the imports.
*level* specifies whether to use absolute or relative imports. 0 (the
default) means only perform absolute imports. Positive values for level
indicate the number of parent directories to search relative to the
directory of the module calling ``__import__()`` (see PEP 328 for the
details).
:param import_path: List of namespaces (strings or Names).
"""
debug.speed('import %s %s' % (import_path, module_context))
self._inference_state = inference_state
self.level = level
self._module_context = module_context
self._fixed_sys_path = None
self._infer_possible = True
if level:
base = module_context.get_value().py__package__()
# We need to care for two cases, the first one is if it's a valid
# Python import. This import has a properly defined module name
# chain like `foo.bar.baz` and an import in baz is made for
# `..lala.` It can then resolve to `foo.bar.lala`.
# The else here is a heuristic for all other cases, if for example
# in `foo` you search for `...bar`, it's obviously out of scope.
# However since Jedi tries to just do it's best, we help the user
# here, because he might have specified something wrong in his
# project.
if level <= len(base):
# Here we basically rewrite the level to 0.
base = tuple(base)
if level > 1:
base = base[:-level + 1]
import_path = base + tuple(import_path)
else:
path = module_context.py__file__()
project_path = self._inference_state.project.path
import_path = list(import_path)
if path is None:
# If no path is defined, our best guess is that the current
# file is edited by a user on the current working
# directory. We need to add an initial path, because it
# will get removed as the name of the current file.
directory = project_path
else:
directory = os.path.dirname(path)
base_import_path, base_directory = _level_to_base_import_path(
project_path, directory, level,
)
if base_directory is None:
# Everything is lost, the relative import does point
# somewhere out of the filesystem.
self._infer_possible = False
else:
self._fixed_sys_path = [base_directory]
if base_import_path is None:
if import_path:
_add_error(
module_context, import_path[0],
message='Attempted relative import beyond top-level package.'
)
else:
import_path = base_import_path + import_path
self.import_path = import_path
@property
def _str_import_path(self):
"""Returns the import path as pure strings instead of `Name`."""
return tuple(
name.value if isinstance(name, tree.Name) else name
for name in self.import_path
)
def _sys_path_with_modifications(self, is_completion):
if self._fixed_sys_path is not None:
return self._fixed_sys_path
return (
# For import completions we don't want to see init paths, but for
# inference we want to show the user as much as possible.
# See GH #1446.
self._inference_state.get_sys_path(add_init_paths=not is_completion)
+ [
str(p) for p
in sys_path.check_sys_path_modifications(self._module_context)
]
)
def follow(self):
if not self.import_path:
if self._fixed_sys_path:
# This is a bit of a special case, that maybe should be
# revisited. If the project path is wrong or the user uses
# relative imports the wrong way, we might end up here, where
# the `fixed_sys_path == project.path` in that case we kind of
# use the project.path.parent directory as our path. This is
# usually not a problem, except if imports in other places are
# using the same names. Example:
#
# foo/ < #1
# - setup.py
# - foo/ < #2
# - __init__.py
# - foo.py < #3
#
# If the top foo is our project folder and somebody uses
# `from . import foo` in `setup.py`, it will resolve to foo #2,
# which means that the import for foo.foo is cached as
# `__init__.py` (#2) and not as `foo.py` (#3). This is usually
# not an issue, because this case is probably pretty rare, but
# might be an issue for some people.
#
# However for most normal cases where we work with different
# file names, this code path hits where we basically change the
# project path to an ancestor of project path.
from jedi.inference.value.namespace import ImplicitNamespaceValue
import_path = (os.path.basename(self._fixed_sys_path[0]),)
ns = ImplicitNamespaceValue(
self._inference_state,
string_names=import_path,
paths=self._fixed_sys_path,
)
return ValueSet({ns})
return NO_VALUES
if not self._infer_possible:
return NO_VALUES
# Check caches first
from_cache = self._inference_state.stub_module_cache.get(self._str_import_path)
if from_cache is not None:
return ValueSet({from_cache})
from_cache = self._inference_state.module_cache.get(self._str_import_path)
if from_cache is not None:
return from_cache
sys_path = self._sys_path_with_modifications(is_completion=False)
return import_module_by_names(
self._inference_state, self.import_path, sys_path, self._module_context
)
def _get_module_names(self, search_path=None, in_module=None):
"""
Get the names of all modules in the search_path. This means file names
and not names defined in the files.
"""
if search_path is None:
sys_path = self._sys_path_with_modifications(is_completion=True)
else:
sys_path = search_path
return list(iter_module_names(
self._inference_state, self._module_context, sys_path,
module_cls=ImportName if in_module is None else SubModuleName,
add_builtin_modules=search_path is None and in_module is None,
))
def completion_names(self, inference_state, only_modules=False):
"""
:param only_modules: Indicates wheter it's possible to import a
definition that is not defined in a module.
"""
if not self._infer_possible:
return []
names = []
if self.import_path:
# flask
if self._str_import_path == ('flask', 'ext'):
# List Flask extensions like ``flask_foo``
for mod in self._get_module_names():
modname = mod.string_name
if modname.startswith('flask_'):
extname = modname[len('flask_'):]
names.append(ImportName(self._module_context, extname))
# Now the old style: ``flaskext.foo``
for dir in self._sys_path_with_modifications(is_completion=True):
flaskext = os.path.join(dir, 'flaskext')
if os.path.isdir(flaskext):
names += self._get_module_names([flaskext])
values = self.follow()
for value in values:
# Non-modules are not completable.
if value.api_type not in ('module', 'namespace'): # not a module
continue
if not value.is_compiled():
# sub_modules_dict is not implemented for compiled modules.
names += value.sub_modules_dict().values()
if not only_modules:
from jedi.inference.gradual.conversion import convert_values
both_values = values | convert_values(values)
for c in both_values:
for filter in c.get_filters():
names += filter.values()
else:
if self.level:
# We only get here if the level cannot be properly calculated.
names += self._get_module_names(self._fixed_sys_path)
else:
# This is just the list of global imports.
names += self._get_module_names()
return names
def import_module_by_names(inference_state, import_names, sys_path=None,
module_context=None, prefer_stubs=True):
if sys_path is None:
sys_path = inference_state.get_sys_path()
str_import_names = tuple(
i.value if isinstance(i, tree.Name) else i
for i in import_names
)
value_set = [None]
for i, name in enumerate(import_names):
value_set = ValueSet.from_sets([
import_module(
inference_state,
str_import_names[:i+1],
parent_module_value,
sys_path,
prefer_stubs=prefer_stubs,
) for parent_module_value in value_set
])
if not value_set:
message = 'No module named ' + '.'.join(str_import_names)
if module_context is not None:
_add_error(module_context, name, message)
else:
debug.warning(message)
return NO_VALUES
return value_set
@plugin_manager.decorate()
@import_module_decorator
def import_module(inference_state, import_names, parent_module_value, sys_path):
"""
This method is very similar to importlib's `_gcd_import`.
"""
if import_names[0] in settings.auto_import_modules:
module = _load_builtin_module(inference_state, import_names, sys_path)
if module is None:
return NO_VALUES
return ValueSet([module])
module_name = '.'.join(import_names)
if parent_module_value is None:
# Override the sys.path. It works only good that way.
# Injecting the path directly into `find_module` did not work.
file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
string=import_names[-1],
full_name=module_name,
sys_path=sys_path,
is_global_search=True,
)
if is_pkg is None:
return NO_VALUES
else:
paths = parent_module_value.py__path__()
if paths is None:
# The module might not be a package.
return NO_VALUES
file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
string=import_names[-1],
path=paths,
full_name=module_name,
is_global_search=False,
)
if is_pkg is None:
return NO_VALUES
if isinstance(file_io_or_ns, ImplicitNSInfo):
from jedi.inference.value.namespace import ImplicitNamespaceValue
module = ImplicitNamespaceValue(
inference_state,
string_names=tuple(file_io_or_ns.name.split('.')),
paths=file_io_or_ns.paths,
)
elif file_io_or_ns is None:
module = _load_builtin_module(inference_state, import_names, sys_path)
if module is None:
return NO_VALUES
else:
module = _load_python_module(
inference_state, file_io_or_ns,
import_names=import_names,
is_package=is_pkg,
)
if parent_module_value is None:
debug.dbg('global search_module %s: %s', import_names[-1], module)
else:
debug.dbg('search_module %s in paths %s: %s', module_name, paths, module)
return ValueSet([module])
def _load_python_module(inference_state, file_io,
import_names=None, is_package=False):
module_node = inference_state.parse(
file_io=file_io,
cache=True,
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
)
from jedi.inference.value import ModuleValue
return ModuleValue(
inference_state, module_node,
file_io=file_io,
string_names=import_names,
code_lines=get_cached_code_lines(inference_state.grammar, file_io.path),
is_package=is_package,
)
def _load_builtin_module(inference_state, import_names=None, sys_path=None):
project = inference_state.project
if sys_path is None:
sys_path = inference_state.get_sys_path()
if not project._load_unsafe_extensions:
safe_paths = set(project._get_base_sys_path(inference_state))
sys_path = [p for p in sys_path if p in safe_paths]
dotted_name = '.'.join(import_names)
assert dotted_name is not None
module = compiled.load_module(inference_state, dotted_name=dotted_name, sys_path=sys_path)
if module is None:
# The file might raise an ImportError e.g. and therefore not be
# importable.
return None
return module
def load_module_from_path(inference_state, file_io, import_names=None, is_package=None):
"""
This should pretty much only be used for get_modules_containing_name. It's
here to ensure that a random path is still properly loaded into the Jedi
module structure.
"""
path = Path(file_io.path)
if import_names is None:
e_sys_path = inference_state.get_sys_path()
import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path)
else:
assert isinstance(is_package, bool)
is_stub = path.suffix == '.pyi'
if is_stub:
folder_io = file_io.get_parent_folder()
if folder_io.path.endswith('-stubs'):
folder_io = FolderIO(folder_io.path[:-6])
if path.name == '__init__.pyi':
python_file_io = folder_io.get_file_io('__init__.py')
else:
python_file_io = folder_io.get_file_io(import_names[-1] + '.py')
try:
v = load_module_from_path(
inference_state, python_file_io,
import_names, is_package=is_package
)
values = ValueSet([v])
except FileNotFoundError:
values = NO_VALUES
return create_stub_module(
inference_state, inference_state.latest_grammar, values,
parse_stub_module(inference_state, file_io), file_io, import_names
)
else:
module = _load_python_module(
inference_state, file_io,
import_names=import_names,
is_package=is_package,
)
inference_state.module_cache.add(import_names, ValueSet([module]))
return module
def load_namespace_from_path(inference_state, folder_io):
import_names, is_package = sys_path.transform_path_to_dotted(
inference_state.get_sys_path(),
Path(folder_io.path)
)
from jedi.inference.value.namespace import ImplicitNamespaceValue
return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path])
def follow_error_node_imports_if_possible(context, name):
error_node = tree.search_ancestor(name, 'error_node')
if error_node is not None:
# Get the first command start of a started simple_stmt. The error
# node is sometimes a small_stmt and sometimes a simple_stmt. Check
# for ; leaves that start a new statements.
start_index = 0
for index, n in enumerate(error_node.children):
if n.start_pos > name.start_pos:
break
if n == ';':
start_index = index + 1
nodes = error_node.children[start_index:]
first_name = nodes[0].get_first_leaf().value
# Make it possible to infer stuff like `import foo.` or
# `from foo.bar`.
if first_name in ('from', 'import'):
is_import_from = first_name == 'from'
level, names = helpers.parse_dotted_names(
nodes,
is_import_from=is_import_from,
until_node=name,
)
return Importer(
context.inference_state, names, context.get_root_context(), level).follow()
return None
def iter_module_names(inference_state, module_context, search_path,
module_cls=ImportName, add_builtin_modules=True):
"""
Get the names of all modules in the search_path. This means file names
and not names defined in the files.
"""
# add builtin module names
if add_builtin_modules:
for name in inference_state.compiled_subprocess.get_builtin_module_names():
yield module_cls(module_context, name)
for name in inference_state.compiled_subprocess.iter_module_names(search_path):
yield module_cls(module_context, name)
| Importer |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/relationships/tutorial001_py310.py | {
"start": 795,
"end": 953
} | class ____(HeroBase, table=True):
id: int | None = Field(default=None, primary_key=True)
team: Team | None = Relationship(back_populates="heroes")
| Hero |
python | getsentry__sentry | src/sentry/api/endpoints/organization_recent_searches.py | {
"start": 581,
"end": 737
} | class ____(serializers.Serializer):
type = serializers.IntegerField(required=True)
query = serializers.CharField(required=True)
| RecentSearchSerializer |
python | Netflix__metaflow | test/unit/inheritance/flows/comprehensive_multi_hierarchy_base.py | {
"start": 354,
"end": 1288
} | class ____(FlowMutator):
"""Simple mutator that logs flow information"""
def pre_mutate(self, mutable_flow):
print("LoggingMutator: Analyzing flow structure")
param_count = sum(1 for _ in mutable_flow.parameters)
config_count = sum(1 for _ in mutable_flow.configs)
print(f" Found {param_count} parameters, {config_count} configs")
mutable_flow.add_parameter(
"logging_param_count",
Parameter(
"logging_param_count",
help="Parameter to store the result count",
default=param_count,
),
)
mutable_flow.add_parameter(
"logging_config_count",
Parameter(
"logging_config_count",
help="Parameter to store the result count",
default=config_count,
),
)
# First hierarchy
@LoggingMutator()
| LoggingMutator |
python | huggingface__transformers | src/transformers/models/starcoder2/modular_starcoder2.py | {
"start": 6102,
"end": 9148
} | class ____(MistralModel):
def __init__(self, config: Starcoder2Config):
super().__init__(config)
self.layers = nn.ModuleList(
[Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
self.embedding_dropout = config.embedding_dropout
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(
hidden_states, p=self.embedding_dropout, training=self.training
) # main diff with Llama
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| Starcoder2Model |
python | ZoranPandovski__al-go-rithms | sort/bubble_sort_optimized/python/bubble_sort_optimized.py | {
"start": 0,
"end": 408
} | class ____:
def bubbleSortOptimized(self, nums):
if len(nums) == 1:
return nums
else:
swapped = False
while not swapped:
swapped = True
for i in range(0, len(nums)-1):
if nums[i] > nums[i+1]:
swapped = False
nums[i], nums[i+1] = nums[i+1], nums[i]
| Solution |
python | encode__starlette | starlette/routing.py | {
"start": 5813,
"end": 7076
} | class ____:
def matches(self, scope: Scope) -> tuple[Match, Scope]:
raise NotImplementedError() # pragma: no cover
def url_path_for(self, name: str, /, **path_params: Any) -> URLPath:
raise NotImplementedError() # pragma: no cover
async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:
raise NotImplementedError() # pragma: no cover
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
"""
A route may be used in isolation as a stand-alone ASGI app.
This is a somewhat contrived case, as they'll almost always be used
within a Router, but could be useful for some tooling and minimal apps.
"""
match, child_scope = self.matches(scope)
if match == Match.NONE:
if scope["type"] == "http":
response = PlainTextResponse("Not Found", status_code=404)
await response(scope, receive, send)
elif scope["type"] == "websocket": # pragma: no branch
websocket_close = WebSocketClose()
await websocket_close(scope, receive, send)
return
scope.update(child_scope)
await self.handle(scope, receive, send)
| BaseRoute |
python | django__django | django/apps/registry.py | {
"start": 247,
"end": 17707
} | class ____:
"""
A registry that stores the configuration of installed applications.
It also keeps track of models, e.g. to provide reverse relations.
"""
def __init__(self, installed_apps=()):
# installed_apps is set to None when creating the main registry
# because it cannot be populated at that point. Other registries must
# provide a list of installed apps and are populated immediately.
if installed_apps is None and hasattr(sys.modules[__name__], "apps"):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code)
# all_models is never overridden or reset.
self.all_models = defaultdict(dict)
# Mapping of labels to AppConfig instances for installed apps.
self.app_configs = {}
# Stack of app_configs. Used to store the current state in
# set_available_apps and set_installed_apps.
self.stored_app_configs = []
# Whether the registry is populated.
self.apps_ready = self.models_ready = self.ready = False
# For the autoreloader.
self.ready_event = threading.Event()
# Lock for thread-safe population.
self._lock = threading.RLock()
self.loading = False
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
# Populate apps and models, unless it's the main registry.
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
Load application configurations and models.
Import each application module and then each model module.
It is thread-safe and idempotent, but not reentrant.
"""
if self.ready:
return
# populate() might be called by two threads in parallel on servers
# that create threads before initializing the WSGI callable.
with self._lock:
if self.ready:
return
# An RLock prevents other threads from entering this section. The
# compare and set operation below is atomic.
if self.loading:
# Prevent reentrant calls to avoid running AppConfig.ready()
# methods twice.
raise RuntimeError("populate() isn't reentrant")
self.loading = True
# Phase 1: initialize app configs and import app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label
)
self.app_configs[app_config.label] = app_config
app_config.apps = self
# Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values()
)
duplicates = [name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates)
)
self.apps_ready = True
# Phase 2: import models modules.
for app_config in self.app_configs.values():
app_config.import_models()
self.clear_cache()
self.models_ready = True
# Phase 3: run ready() methods of app configs.
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
self.ready_event.set()
def check_apps_ready(self):
"""Raise an exception if all apps haven't been imported yet."""
if not self.apps_ready:
from django.conf import settings
# If "not ready" is due to unconfigured settings, accessing
# INSTALLED_APPS raises a more helpful ImproperlyConfigured
# exception.
settings.INSTALLED_APPS
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""Raise an exception if all models haven't been imported yet."""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""Import applications and return an iterable of app configs."""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
Import applications and returns an app config for the given label.
Raise LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@functools.cache
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Return a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(app_config.get_models(include_auto_created, include_swapped))
return result
def get_model(self, app_label, model_name=None, require_ready=True):
"""
Return the model matching the given app_label and model_name.
As a shortcut, app_label may be in the form <app_label>.<model_name>.
model_name is case-insensitive.
Raise LookupError if no application exists with this label, or no
model exists with this name in the application. Raise ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
if require_ready:
self.check_models_ready()
else:
self.check_apps_ready()
if model_name is None:
app_label, model_name = app_label.split(".")
app_config = self.get_app_config(app_label)
if not require_ready and app_config.models is None:
app_config.import_models()
return app_config.get_model(model_name, require_ready=require_ready)
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (
model.__name__ == app_models[model_name].__name__
and model.__module__ == app_models[model_name].__module__
):
warnings.warn(
"Model '%s.%s' was already registered. Reloading models is not "
"advised as it can lead to inconsistencies, most notably with "
"related models." % (app_label, model_name),
RuntimeWarning,
stacklevel=2,
)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s."
% (model_name, app_label, app_models[model_name], model)
)
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Check whether an application with this name exists in the registry.
app_name is the full name of the app e.g. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Return the app config for the inner application in case of nesting.
Return None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name.removeprefix(app_config.name)
if subpath == "" or subpath[0] == ".":
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError("Model '%s.%s' not registered." % (app_label, model_name))
return model
@functools.cache
def get_swappable_settings_name(self, to_string):
"""
For a given model string (e.g. "auth.User"), return the name of the
corresponding settings name if it refers to a swappable model. If the
referred model is not swappable, return None.
This method is decorated with @functools.cache because it's performance
critical when it comes to migrations. Since the swappable settings
don't change after Django has loaded the settings, there is no reason
to get the respective settings attribute over and over again.
"""
to_string = to_string.lower()
for model in self.get_models(include_swapped=True):
swapped = model._meta.swapped
# Is this model swapped out for the model given by to_string?
if swapped and swapped.lower() == to_string:
return model._meta.swappable
# Is this model swappable and the one given by to_string?
if model._meta.swappable and model._meta.label_lower == to_string:
return model._meta.swappable
return None
def set_available_apps(self, available):
"""
Restrict the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
This method is safe in the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = {app_config.name for app_config in self.get_app_configs()}
if not available.issubset(installed):
raise ValueError(
"Available apps isn't a subset of installed apps, extra apps: %s"
% ", ".join(available - installed)
)
self.stored_app_configs.append(self.app_configs)
self.app_configs = {
label: app_config
for label, app_config in self.app_configs.items()
if app_config.name in available
}
self.clear_cache()
def unset_available_apps(self):
"""Cancel a previous call to set_available_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enable a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (e.g. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = {}
self.apps_ready = self.models_ready = self.loading = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""Cancel a previous call to set_installed_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clear all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
self.get_swappable_settings_name.cache_clear()
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
# Circumvent self.get_models() to prevent that the cache is
# refilled. This particularly prevents that an empty value is
# cached while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
def lazy_model_operation(self, function, *model_keys):
"""
Take a function and a number of ("app_label", "modelname") tuples, and
when all the corresponding models have been imported and registered,
call the function with the model classes as its arguments.
The function passed to this method must accept exactly n models as
arguments, where n=len(model_keys).
"""
# Base case: no arguments, just execute the function.
if not model_keys:
function()
# Recursive case: take the head of model_keys, wait for the
# corresponding model class to be imported and registered, then apply
# that argument to the supplied function. Pass the resulting partial
# to lazy_model_operation() along with the remaining model args and
# repeat until all models are loaded and all arguments are applied.
else:
next_model, *more_models = model_keys
# This will be executed after the class corresponding to next_model
# has been imported and registered. The `func` attribute provides
# duck-type compatibility with partials.
def apply_next_model(model):
next_function = partial(apply_next_model.func, model)
self.lazy_model_operation(next_function, *more_models)
apply_next_model.func = function
# If the model has already been imported and registered, partially
# apply it to the function now. If not, add it to the list of
# pending operations for the model, where it will be executed with
# the model class as its sole argument once the model is ready.
try:
model_class = self.get_registered_model(*next_model)
except LookupError:
self._pending_operations[next_model].append(apply_next_model)
else:
apply_next_model(model_class)
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of Apps.register_model().
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
apps = Apps(installed_apps=None)
| Apps |
python | walkccc__LeetCode | solutions/3524. Find X Value of Array I/3524.py | {
"start": 0,
"end": 590
} | class ____:
def resultArray(self, nums: list[int], k: int) -> list[int]:
ans = [0] * k
# dp[r] := the number of subarrays ending at current position with
# product % k == r
dp = [0] * k
for num in nums:
newDp = [0] * k
numMod = num % k
# Start new subarray with only `num`
newDp[numMod] = 1
# Extend all previous subarrays
for i in range(k):
newMod = (i * numMod) % k
newDp[newMod] += dp[i]
# Accumulate counts into ans
for i in range(k):
ans[i] += newDp[i]
dp = newDp
return ans
| Solution |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_group.py | {
"start": 84,
"end": 3040
} | class ____:
def test_group_betweenness_single_node(self):
"""
Group betweenness centrality for single node group
"""
G = nx.path_graph(5)
C = [1]
b = nx.group_betweenness_centrality(
G, C, weight=None, normalized=False, endpoints=False
)
b_answer = 3.0
assert b == b_answer
def test_group_betweenness_with_endpoints(self):
"""
Group betweenness centrality for single node group
"""
G = nx.path_graph(5)
C = [1]
b = nx.group_betweenness_centrality(
G, C, weight=None, normalized=False, endpoints=True
)
b_answer = 7.0
assert b == b_answer
def test_group_betweenness_normalized(self):
"""
Group betweenness centrality for group with more than
1 node and normalized
"""
G = nx.path_graph(5)
C = [1, 3]
b = nx.group_betweenness_centrality(
G, C, weight=None, normalized=True, endpoints=False
)
b_answer = 1.0
assert b == b_answer
def test_two_group_betweenness_value_zero(self):
"""
Group betweenness centrality value of 0
"""
G = nx.cycle_graph(7)
C = [[0, 1, 6], [0, 1, 5]]
b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False)
b_answer = [0.0, 3.0]
assert b == b_answer
def test_group_betweenness_value_zero(self):
"""
Group betweenness centrality value of 0
"""
G = nx.cycle_graph(6)
C = [0, 1, 5]
b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False)
b_answer = 0.0
assert b == b_answer
def test_group_betweenness_disconnected_graph(self):
"""
Group betweenness centrality in a disconnected graph
"""
G = nx.path_graph(5)
G.remove_edge(0, 1)
C = [1]
b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False)
b_answer = 0.0
assert b == b_answer
def test_group_betweenness_node_not_in_graph(self):
"""
Node(s) in C not in graph, raises NodeNotFound exception
"""
with pytest.raises(nx.NodeNotFound):
nx.group_betweenness_centrality(nx.path_graph(5), [4, 7, 8])
def test_group_betweenness_directed_weighted(self):
"""
Group betweenness centrality in a directed and weighted graph
"""
G = nx.DiGraph()
G.add_edge(1, 0, weight=1)
G.add_edge(0, 2, weight=2)
G.add_edge(1, 2, weight=3)
G.add_edge(3, 1, weight=4)
G.add_edge(2, 3, weight=1)
G.add_edge(4, 3, weight=6)
G.add_edge(2, 4, weight=7)
C = [1, 2]
b = nx.group_betweenness_centrality(G, C, weight="weight", normalized=False)
b_answer = 5.0
assert b == b_answer
| TestGroupBetweennessCentrality |
python | PyCQA__pylint | pylint/checkers/variables.py | {
"start": 47319,
"end": 140475
} | class ____(BaseChecker):
"""BaseChecker for variables.
Checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope or except handler
* use of variable before assignment
* __all__ consistency
* self/cls assignment
"""
name = "variables"
msgs = MSGS
options = (
(
"init-import",
{
"default": False,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether we should check for unused import in "
"__init__ files.",
},
),
(
"dummy-variables-rgx",
{
"default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
"type": "regexp",
"metavar": "<regexp>",
"help": "A regular expression matching the name of dummy "
"variables (i.e. expected to not be used).",
},
),
(
"additional-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of additional names supposed to be defined in "
"builtins. Remember that you should avoid defining new builtins "
"when possible.",
},
),
(
"callbacks",
{
"default": ("cb_", "_cb"),
"type": "csv",
"metavar": "<callbacks>",
"help": "List of strings which can identify a callback "
"function by name. A callback name must start or "
"end with one of those strings.",
},
),
(
"redefining-builtins-modules",
{
"default": (
"six.moves",
"past.builtins",
"future.builtins",
"builtins",
"io",
),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of qualified module names which can have objects "
"that can redefine builtins.",
},
),
(
"ignored-argument-names",
{
"default": IGNORED_ARGUMENT_NAMES,
"type": "regexp",
"metavar": "<regexp>",
"help": "Argument names that match this expression will be ignored.",
},
),
(
"allow-global-unused-variables",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether unused global variables should be treated as a violation.",
},
),
(
"allowed-redefined-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of names allowed to shadow builtins",
},
),
)
def __init__(self, linter: PyLinter) -> None:
super().__init__(linter)
self._to_consume: list[NamesConsumer] = []
self._type_annotation_names: list[str] = []
self._except_handler_names_queue: list[
tuple[nodes.ExceptHandler, nodes.AssignName]
] = []
"""This is a queue, last in first out."""
self._reported_type_checking_usage_scopes: dict[
str, list[nodes.LocalsDictNodeNG]
] = {}
self._postponed_evaluation_enabled = False
def open(self) -> None:
py_version = self.linter.config.py_version
self._py314_plus = py_version >= (3, 14)
@utils.only_required_for_messages(
"unbalanced-dict-unpacking",
)
def visit_for(self, node: nodes.For) -> None:
if not isinstance(node.target, nodes.Tuple):
return
targets = node.target.elts
inferred = utils.safe_infer(node.iter)
if not isinstance(inferred, DICT_TYPES):
return
values = self._nodes_to_unpack(inferred)
if not values:
# no dict items returned
return
if isinstance(inferred, objects.DictItems):
# dict.items() is a bit special because values will be a tuple
# So as long as there are always 2 targets and values each are
# a tuple with two items, this will unpack correctly.
# Example: `for key, val in {1: 2, 3: 4}.items()`
if len(targets) == 2 and all(len(x.elts) == 2 for x in values):
return
# Starred nodes indicate ambiguous unpacking
# if `dict.items()` is used so we won't flag them.
if any(isinstance(target, nodes.Starred) for target in targets):
return
if isinstance(inferred, nodes.Dict):
if isinstance(node.iter, nodes.Name):
# If this a case of 'dict-items-missing-iter', we don't want to
# report it as an 'unbalanced-dict-unpacking' as well
# TODO (performance), merging both checks would streamline this
if len(targets) == 2:
return
else:
is_starred_targets = any(
isinstance(target, nodes.Starred) for target in targets
)
for value in values:
value_length = self._get_value_length(value)
is_valid_star_unpack = is_starred_targets and value_length >= len(
targets
)
if len(targets) != value_length and not is_valid_star_unpack:
details = _get_unpacking_extra_info(node, inferred)
self._report_unbalanced_unpacking(
node, inferred, targets, value_length, details
)
break
def leave_for(self, node: nodes.For) -> None:
self._store_type_annotation_names(node)
def visit_module(self, node: nodes.Module) -> None:
"""Visit module : update consumption analysis variable
checks globals doesn't overrides builtins.
"""
self._to_consume = [NamesConsumer(node, "module")]
self._postponed_evaluation_enabled = (
self._py314_plus or is_postponed_evaluation_enabled(node)
)
for name, stmts in node.locals.items():
if utils.is_builtin(name):
if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
continue
self.add_message("redefined-builtin", args=name, node=stmts[0])
@utils.only_required_for_messages(
"unused-import",
"unused-wildcard-import",
"redefined-builtin",
"undefined-all-variable",
"invalid-all-object",
"invalid-all-format",
"unused-variable",
"undefined-variable",
)
def leave_module(self, node: nodes.Module) -> None:
"""Leave module: check globals."""
assert len(self._to_consume) == 1
self._check_metaclasses(node)
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if "__all__" in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.linter.config.init_import and node.package:
return
self._check_imports(not_consumed)
self._type_annotation_names = []
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""Visit class: update consumption analysis variable."""
self._to_consume.append(NamesConsumer(node, "class"))
def leave_classdef(self, node: nodes.ClassDef) -> None:
"""Leave class: update consumption analysis variable."""
# Check for hidden ancestor names
# e.g. "six" in: Class X(six.with_metaclass(ABCMeta, object)):
for name_node in node.nodes_of_class(nodes.Name):
match name_node.parent:
case nodes.Call(func=nodes.Attribute(expr=nodes.Name(name=name))):
for consumer in self._to_consume:
if name in consumer.to_consume:
consumer.mark_as_consumed(name, consumer.to_consume[name])
break
self._to_consume.pop()
def visit_lambda(self, node: nodes.Lambda) -> None:
"""Visit lambda: update consumption analysis variable."""
self._to_consume.append(NamesConsumer(node, "lambda"))
def leave_lambda(self, _: nodes.Lambda) -> None:
"""Leave lambda: update consumption analysis variable."""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node: nodes.GeneratorExp) -> None:
"""Visit genexpr: update consumption analysis variable."""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_generatorexp(self, _: nodes.GeneratorExp) -> None:
"""Leave genexpr: update consumption analysis variable."""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node: nodes.DictComp) -> None:
"""Visit dictcomp: update consumption analysis variable."""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_dictcomp(self, _: nodes.DictComp) -> None:
"""Leave dictcomp: update consumption analysis variable."""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node: nodes.SetComp) -> None:
"""Visit setcomp: update consumption analysis variable."""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_setcomp(self, _: nodes.SetComp) -> None:
"""Leave setcomp: update consumption analysis variable."""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""Visit function: update consumption analysis variable and check locals."""
self._to_consume.append(NamesConsumer(node, "function"))
if not (
self.linter.is_message_enabled("redefined-outer-name")
or self.linter.is_message_enabled("redefined-builtin")
):
return
globs = node.root().globals
for name, stmt in node.items():
if name in globs and not isinstance(stmt, nodes.Global):
definition = globs[name][0]
if (
isinstance(definition, nodes.ImportFrom)
and definition.modname == FUTURE
):
# It is a __future__ directive, not a symbol.
continue
# Do not take in account redefined names for the purpose
# of type checking.:
if any(
in_type_checking_block(definition) for definition in globs[name]
):
continue
# Suppress emitting the message if the outer name is in the
# scope of an exception assignment.
# For example: the `e` in `except ValueError as e`
match globs[name][0]:
case nodes.AssignName(parent=nodes.ExceptHandler()):
continue
line = definition.fromlineno
if not self._is_name_ignored(stmt, name):
self.add_message(
"redefined-outer-name", args=(name, line), node=stmt
)
elif (
utils.is_builtin(name)
and not self._allowed_redefined_builtin(name)
and not self._should_ignore_redefined_builtin(stmt)
):
# do not print Redefining builtin for additional builtins
self.add_message("redefined-builtin", args=name, node=stmt)
    def leave_functiondef(self, node: nodes.FunctionDef) -> None:
        """Leave function: check function's locals are consumed."""
        self._check_metaclasses(node)
        # `# type:` comments also "use" names, record them before popping.
        if node.type_comment_returns:
            self._store_type_annotation_node(node.type_comment_returns)
        if node.type_comment_args:
            for argument_annotation in node.type_comment_args:
                self._store_type_annotation_node(argument_annotation)
        # Pop unconditionally so the consumer stack stays balanced even when
        # the unused-* messages below are disabled.
        not_consumed = self._to_consume.pop().to_consume
        if not (
            self.linter.is_message_enabled("unused-variable")
            or self.linter.is_message_enabled("possibly-unused-variable")
            or self.linter.is_message_enabled("unused-argument")
        ):
            return
        # Don't check arguments of function which are only raising an exception.
        if utils.is_error(node):
            return
        # Don't check arguments of abstract methods or within an interface.
        is_method = node.is_method()
        if is_method and node.is_abstract():
            return
        global_names = _flattened_scope_names(node.nodes_of_class(nodes.Global))
        nonlocal_names = _flattened_scope_names(node.nodes_of_class(nodes.Nonlocal))
        # Comprehension targets live in the comprehension's own scope and must
        # not be reported as unused locals of the enclosing function.
        comprehension_target_names: set[str] = set()
        for comprehension_scope in node.nodes_of_class(nodes.ComprehensionScope):
            for generator in comprehension_scope.generators:
                for name in utils.find_assigned_names_recursive(generator.target):
                    comprehension_target_names.add(name)
        for name, stmts in not_consumed.items():
            self._check_is_unused(
                name,
                node,
                stmts[0],
                global_names,
                nonlocal_names,
                comprehension_target_names,
            )
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
    @utils.only_required_for_messages(
        "global-variable-undefined",
        "global-variable-not-assigned",
        "global-statement",
        "global-at-module-level",
        "redefined-builtin",
    )
    def visit_global(self, node: nodes.Global) -> None:
        """Check names imported exists in the global scope.

        Emits global-at-module-level, global-variable-not-assigned,
        global-variable-undefined, redefined-builtin, or the generic
        global-statement fallback depending on how each declared name is
        actually defined and used.
        """
        frame = node.frame()
        if isinstance(frame, nodes.Module):
            # `global` at module level is meaningless.
            self.add_message("global-at-module-level", node=node, confidence=HIGH)
            return
        module = frame.root()
        default_message = True
        module_locals = node.root().locals
        for name in node.names:
            try:
                assign_nodes = module.getattr(name)
            except astroid.NotFoundError:
                # unassigned global, skip
                assign_nodes = []
            not_defined_locally_by_import = not any(
                isinstance(local, (nodes.Import, nodes.ImportFrom))
                for local in module_locals.get(name, ())
            )
            # Declared global but never reassigned or deleted here: the
            # `global` statement is pointless.
            if (
                not utils.is_reassigned_after_current(node, name)
                and not utils.is_deleted_after_current(node, name)
                and not_defined_locally_by_import
            ):
                self.add_message(
                    "global-variable-not-assigned",
                    args=name,
                    node=node,
                    confidence=HIGH,
                )
                default_message = False
                continue
            # Look for a module-level definition; `break` means one exists,
            # the for/else below fires only when none was found.
            for anode in assign_nodes:
                if (
                    isinstance(anode, nodes.AssignName)
                    and anode.name in module.special_attributes
                ):
                    self.add_message("redefined-builtin", args=name, node=node)
                    break
                if anode.frame() is module:
                    # module level assignment
                    break
                if (
                    isinstance(anode, (nodes.ClassDef, nodes.FunctionDef))
                    and anode.parent is module
                ):
                    # module level function assignment
                    break
            else:
                if not_defined_locally_by_import:
                    # global undefined at the module scope
                    self.add_message(
                        "global-variable-undefined",
                        args=name,
                        node=node,
                        confidence=HIGH,
                    )
                    default_message = False
        if default_message:
            self.add_message("global-statement", node=node, confidence=HIGH)
def visit_assignname(self, node: nodes.AssignName) -> None:
if isinstance(node.assign_type(), nodes.AugAssign):
self.visit_name(node)
    def visit_delname(self, node: nodes.DelName) -> None:
        # `del x` counts as a use of `x`: it must be defined at this point,
        # so run the full name-consumption checks on the target.
        self.visit_name(node)
def visit_name(self, node: nodes.Name | nodes.AssignName | nodes.DelName) -> None:
"""Don't add the 'utils.only_required_for_messages' decorator here!
It's important that all 'Name' nodes are visited, otherwise the
'NamesConsumers' won't be correct.
"""
stmt = node.statement()
if stmt.fromlineno is None:
# name node from an astroid built from live code, skip
assert not stmt.root().file.endswith(".py")
return
self._undefined_and_used_before_checker(node, stmt)
self._loopvar_name(node)
@utils.only_required_for_messages("redefined-outer-name")
def visit_excepthandler(self, node: nodes.ExceptHandler) -> None:
if not isinstance(node.name, nodes.AssignName):
return
for outer_except, outer_except_assign_name in self._except_handler_names_queue:
if node.name.name == outer_except_assign_name.name:
self.add_message(
"redefined-outer-name",
args=(outer_except_assign_name.name, outer_except.fromlineno),
node=node,
)
break
self._except_handler_names_queue.append((node, node.name))
@utils.only_required_for_messages("redefined-outer-name")
def leave_excepthandler(self, node: nodes.ExceptHandler) -> None:
if not (node.name and isinstance(node.name, nodes.AssignName)):
return
self._except_handler_names_queue.pop()
    def _undefined_and_used_before_checker(
        self,
        node: nodes.Name,
        stmt: nodes.NodeNG,
    ) -> None:
        """Resolve `node` against the consumer stack and emit
        undefined-variable / used-before-assignment messages.

        Consumers are walked from the innermost scope outwards; the first
        consumer that resolves the name decides the outcome.  If no consumer
        resolves it and it is not a builtin, undefined-variable is emitted.
        """
        frame = stmt.scope()
        start_index = len(self._to_consume) - 1
        # iterates through parent scopes, from the inner to the outer
        base_scope_type = self._to_consume[start_index].scope_type
        for i in range(start_index, -1, -1):
            current_consumer = self._to_consume[i]
            # Certain nodes shouldn't be checked as they get checked another time
            if self._should_node_be_skipped(node, current_consumer, i == start_index):
                continue
            action, nodes_to_consume = self._check_consumer(
                node, stmt, frame, current_consumer, base_scope_type
            )
            if nodes_to_consume:
                # Any nodes added to consumed_uncertain by get_next_to_consume()
                # should be added back so that they are marked as used.
                # They will have already had a chance to emit used-before-assignment.
                # We check here instead of before every single return in _check_consumer()
                nodes_to_consume += current_consumer.consumed_uncertain[node.name]
                current_consumer.mark_as_consumed(node.name, nodes_to_consume)
            if action is VariableVisitConsumerAction.CONTINUE:
                continue
            if action is VariableVisitConsumerAction.RETURN:
                return
        # we have not found the name, if it isn't a builtin, that's an
        # undefined name !
        if not (
            node.name in nodes.Module.scope_attrs
            or utils.is_builtin(node.name)
            or node.name in self.linter.config.additional_builtins
            or (
                # `__class__` is implicitly available inside methods.
                node.name == "__class__"
                and any(
                    i.is_method()
                    for i in node.node_ancestors()
                    if isinstance(i, nodes.FunctionDef)
                )
            )
        ) and not utils.node_ignores_exception(node, NameError):
            self.add_message("undefined-variable", args=node.name, node=node)
    def _should_node_be_skipped(
        self,
        node: nodes.Name,
        consumer: NamesConsumer,
        is_start_index: bool,
    ) -> bool:
        """Tests a consumer and node for various conditions in which the node shouldn't
        be checked for the undefined-variable and used-before-assignment checks.

        Returns True when this consumer must be skipped for `node`.
        """
        if consumer.scope_type == "class":
            # The list of base classes in the class definition is not part
            # of the class body.
            # If the current scope is a class scope but it's not the inner
            # scope, ignore it. This prevents to access this scope instead of
            # the globals one in function members when there are some common
            # names.
            if utils.is_ancestor_name(consumer.node, node) or (
                not is_start_index and self._ignore_class_scope(node)
            ):
                # Type parameters of the class remain visible, do not skip.
                if any(
                    node.name == param.name.name for param in consumer.node.type_params
                ):
                    return False
                return True
            match node.parent:
                case nodes.Keyword(parent=nodes.ClassDef()):
                    # Ignore inner class scope for keywords in class definition
                    return True
        elif consumer.scope_type == "function" and self._defined_in_function_definition(
            node, consumer.node
        ):
            # Type parameters of the function remain visible, do not skip.
            if any(node.name == param.name.name for param in consumer.node.type_params):
                return False
            # If the name node is used as a function default argument's value or as
            # a decorator, then start from the parent frame of the function instead
            # of the function frame - and thus open an inner class scope
            return True
        elif consumer.scope_type == "lambda" and utils.is_default_argument(
            node, consumer.node
        ):
            return True
        return False
    # pylint: disable = too-many-return-statements, too-many-branches
    def _check_consumer(
        self,
        node: nodes.Name,
        stmt: nodes.NodeNG,
        frame: nodes.LocalsDictNodeNG,
        current_consumer: NamesConsumer,
        base_scope_type: str,
    ) -> tuple[VariableVisitConsumerAction, list[nodes.NodeNG] | None]:
        """Checks a consumer for conditions that should trigger messages.

        Returns an action telling the caller whether to keep walking outer
        consumers (CONTINUE) or stop (RETURN), together with the nodes that
        should be marked as consumed (or None).
        """
        # If the name has already been consumed, only check it's not a loop
        # variable used outside the loop.
        if node.name in current_consumer.consumed:
            # Avoid the case where there are homonyms inside function scope and
            # comprehension current scope (avoid bug #1731)
            # NOTE(review): `node` is a Name node and can never be a
            # ComprehensionScope, so this isinstance test looks always False
            # (making the whole condition always True) — confirm intent.
            if utils.is_func_decorator(current_consumer.node) or not isinstance(
                node, nodes.ComprehensionScope
            ):
                self._check_late_binding_closure(node)
                return (VariableVisitConsumerAction.RETURN, None)
        found_nodes = current_consumer.get_next_to_consume(node)
        if found_nodes is None:
            # This consumer does not know the name at all: try the next one.
            return (VariableVisitConsumerAction.CONTINUE, None)
        if not found_nodes:
            # The name exists but every definition was filtered out.
            is_reported = self._report_unfound_name_definition(node, current_consumer)
            # Mark for consumption any nodes added to consumed_uncertain by
            # get_next_to_consume() because they might not have executed.
            nodes_to_consume = current_consumer.consumed_uncertain[node.name]
            nodes_to_consume = self._filter_type_checking_definitions_from_consumption(
                node, nodes_to_consume, is_reported
            )
            return (
                VariableVisitConsumerAction.RETURN,
                nodes_to_consume,
            )
        self._check_late_binding_closure(node)
        defnode = utils.assign_parent(found_nodes[0])
        defstmt = defnode.statement()
        defframe = defstmt.frame()
        # The class reuses itself in the class scope.
        is_recursive_klass: bool = (
            frame is defframe
            and defframe.parent_of(node)
            and isinstance(defframe, nodes.ClassDef)
            and node.name == defframe.name
        )
        if (
            is_recursive_klass
            and utils.get_node_first_ancestor_of_type(node, nodes.Lambda)
            and not (
                utils.is_default_argument(node)
                and node.scope().parent.scope() is defframe
            )
        ):
            # Self-referential class references are fine in lambda's --
            # As long as they are not part of the default argument directly
            # under the scope of the parent self-referring class.
            # Example of valid default argument:
            # class MyName3:
            #     myattr = 1
            #     mylambda3 = lambda: lambda a=MyName3: a
            # Example of invalid default argument:
            # class MyName4:
            #     myattr = 1
            #     mylambda4 = lambda a=MyName4: lambda: a
            # If the above conditional is True,
            # there is no possibility of undefined-variable
            # Also do not consume class name
            # (since consuming blocks subsequent checks)
            # -- quit
            return (VariableVisitConsumerAction.RETURN, None)
        (
            maybe_before_assign,
            annotation_return,
            use_outer_definition,
        ) = self._is_variable_violation(
            node,
            defnode,
            stmt,
            defstmt,
            frame,
            defframe,
            base_scope_type,
            is_recursive_klass,
        )
        if use_outer_definition:
            return (VariableVisitConsumerAction.CONTINUE, None)
        if (
            maybe_before_assign
            and not utils.is_defined_before(node)
            and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
        ):
            # Used and defined in the same place, e.g `x += 1` and `del x`
            defined_by_stmt = defstmt is stmt and isinstance(
                node, (nodes.DelName, nodes.AssignName)
            )
            if (
                is_recursive_klass
                or defined_by_stmt
                or annotation_return
                or isinstance(defstmt, nodes.Delete)
            ):
                if not utils.node_ignores_exception(node, NameError):
                    # Handle postponed evaluation of annotations
                    if not (
                        self._postponed_evaluation_enabled
                        and isinstance(
                            stmt,
                            (
                                nodes.AnnAssign,
                                nodes.FunctionDef,
                                nodes.Arguments,
                            ),
                        )
                        and node.name in node.root().locals
                    ):
                        if defined_by_stmt:
                            return (VariableVisitConsumerAction.CONTINUE, [node])
                        return (VariableVisitConsumerAction.CONTINUE, None)
            elif base_scope_type != "lambda":
                # E0601 may *not* occurs in lambda scope.
                # Skip postponed evaluation of annotations
                # and unevaluated annotations inside a function body
                # as well as TypeAlias nodes.
                if not (
                    self._postponed_evaluation_enabled  # noqa: RUF021
                    and (
                        isinstance(stmt, nodes.AnnAssign)
                        or isinstance(stmt, nodes.FunctionDef)  # noqa: RUF021
                        and node
                        not in {
                            *(stmt.args.defaults or ()),
                            *(stmt.args.kw_defaults or ()),
                        }
                    )
                    or isinstance(stmt, nodes.AnnAssign)  # noqa: RUF021
                    and utils.get_node_first_ancestor_of_type(stmt, nodes.FunctionDef)
                    or isinstance(stmt, nodes.TypeAlias)
                ):
                    self.add_message(
                        "used-before-assignment",
                        args=node.name,
                        node=node,
                        confidence=HIGH,
                    )
                    return (VariableVisitConsumerAction.RETURN, found_nodes)
            elif base_scope_type == "lambda":
                # E0601 can occur in class-level scope in lambdas, as in
                # the following example:
                # class A:
                #     x = lambda attr: f + attr
                #     f = 42
                # We check lineno because doing the following is fine:
                # class A:
                #     x = 42
                #     y = lambda attr: x + attr
                if (
                    isinstance(frame, nodes.ClassDef)
                    and node.name in frame.locals
                    and stmt.fromlineno <= defstmt.fromlineno
                ):
                    self.add_message(
                        "used-before-assignment",
                        args=node.name,
                        node=node,
                        confidence=HIGH,
                    )
        elif not self._is_builtin(node.name) and self._is_only_type_assignment(
            node, defstmt
        ):
            # The name only ever received an annotation (`x: int`), never a value.
            if node.scope().locals.get(node.name):
                self.add_message(
                    "used-before-assignment", args=node.name, node=node, confidence=HIGH
                )
            else:
                self.add_message(
                    "undefined-variable", args=node.name, node=node, confidence=HIGH
                )
            return (VariableVisitConsumerAction.RETURN, found_nodes)
        elif (
            isinstance(defstmt, nodes.ClassDef) and defnode not in defframe.type_params
        ):
            return self._is_first_level_self_reference(node, defstmt, found_nodes)
        elif isinstance(defnode, nodes.NamedExpr):
            if isinstance(defnode.parent, nodes.IfExp):
                if self._is_never_evaluated(defnode, defnode.parent):
                    self.add_message(
                        "undefined-variable",
                        args=node.name,
                        node=node,
                        confidence=INFERENCE,
                    )
                    return (VariableVisitConsumerAction.RETURN, found_nodes)
        return (VariableVisitConsumerAction.RETURN, found_nodes)
def _report_unfound_name_definition(
self,
node: nodes.Name,
current_consumer: NamesConsumer,
) -> bool:
"""Reports used-before-assignment error when all name definition nodes
are filtered out by NamesConsumer.
Returns True if an error is reported; otherwise, returns False.
"""
if (
self._postponed_evaluation_enabled
and utils.is_node_in_type_annotation_context(node)
) or utils.is_node_in_pep695_type_context(node):
return False
if self._is_builtin(node.name):
return False
if self._is_variable_annotation_in_function(node):
return False
if self._has_nonlocal_in_enclosing_frame(
node, current_consumer.consumed_uncertain.get(node.name, [])
):
return False
if (
node.name in self._reported_type_checking_usage_scopes
and node.scope() in self._reported_type_checking_usage_scopes[node.name]
):
return False
confidence = HIGH
if node.name in current_consumer.names_under_always_false_test:
confidence = INFERENCE
elif node.name in current_consumer.consumed_uncertain:
confidence = CONTROL_FLOW
if node.name in current_consumer.names_defined_under_one_branch_only:
msg = "possibly-used-before-assignment"
else:
msg = "used-before-assignment"
self.add_message(
msg,
args=node.name,
node=node,
confidence=confidence,
)
return True
def _filter_type_checking_definitions_from_consumption(
self,
node: nodes.NodeNG,
nodes_to_consume: list[nodes.NodeNG],
is_reported: bool,
) -> list[nodes.NodeNG]:
"""Filters out type-checking definition nodes (e.g. imports, class definitions)
from consumption, as used-before-assignment may invoke in a different context.
If used-before-assignment is reported for the usage of a type-checking definition,
track the scope of that usage for future evaluation.
"""
type_checking_definitions = {
n
for n in nodes_to_consume
if isinstance(n, (nodes.Import, nodes.ImportFrom, nodes.ClassDef))
and in_type_checking_block(n)
}
if type_checking_definitions and is_reported:
self._reported_type_checking_usage_scopes.setdefault(node.name, []).append(
node.scope()
)
return [n for n in nodes_to_consume if n not in type_checking_definitions]
@utils.only_required_for_messages("no-name-in-module")
def visit_import(self, node: nodes.Import) -> None:
"""Check modules attribute accesses."""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
# Don't verify import if part of guarded import block
if in_type_checking_block(node):
return
if isinstance(node.parent, nodes.If) and is_sys_guard(node.parent):
return
for name, _ in node.names:
parts = name.split(".")
try:
module = next(_infer_name_module(node, parts[0]))
except astroid.ResolveError:
continue
if not isinstance(module, nodes.Module):
continue
self._check_module_attrs(node, module, parts[1:])
@utils.only_required_for_messages("no-name-in-module")
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
"""Check modules attribute accesses."""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
# Don't verify import if part of guarded import block
# I.e. `sys.version_info` or `typing.TYPE_CHECKING`
if in_type_checking_block(node):
return
if isinstance(node.parent, nodes.If) and is_sys_guard(node.parent):
return
name_parts = node.modname.split(".")
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingError:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == "*":
continue
self._check_module_attrs(node, module, name.split("."))
@utils.only_required_for_messages(
"unbalanced-tuple-unpacking",
"unpacking-non-sequence",
"self-cls-assignment",
"unbalanced_dict_unpacking",
)
def visit_assign(self, node: nodes.Assign) -> None:
"""Check unbalanced tuple unpacking for assignments and unpacking
non-sequences as well as in case self/cls get assigned.
"""
self._check_self_cls_assign(node)
if not isinstance(node.targets[0], (nodes.Tuple, nodes.List)):
return
targets = node.targets[0].itered()
# Check if we have starred nodes.
if any(isinstance(target, nodes.Starred) for target in targets):
return
try:
inferred = node.value.inferred()
if inferred is not None and len(inferred) == 1:
self._check_unpacking(inferred[0], node, targets)
except astroid.InferenceError:
return
# listcomp have now also their scope
def visit_listcomp(self, node: nodes.ListComp) -> None:
"""Visit listcomp: update consumption analysis variable."""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_listcomp(self, _: nodes.ListComp) -> None:
"""Leave listcomp: update consumption analysis variable."""
# do not check for not used locals here
self._to_consume.pop()
    def leave_assign(self, node: nodes.Assign) -> None:
        # Record names used in `# type:` comment annotations on the assignment.
        self._store_type_annotation_names(node)
    def leave_with(self, node: nodes.With) -> None:
        # Record names used in `# type:` comment annotations on the with block.
        self._store_type_annotation_names(node)
def visit_arguments(self, node: nodes.Arguments) -> None:
for annotation in node.type_comment_args:
self._store_type_annotation_node(annotation)
# Relying on other checker's options, which might not have been initialized yet.
@cached_property
def _analyse_fallback_blocks(self) -> bool:
return bool(self.linter.config.analyse_fallback_blocks)
@cached_property
def _ignored_modules(self) -> Iterable[str]:
return self.linter.config.ignored_modules # type: ignore[no-any-return]
@cached_property
def _allow_global_unused_variables(self) -> bool:
return bool(self.linter.config.allow_global_unused_variables)
@staticmethod
def _defined_in_function_definition(
node: nodes.NodeNG,
frame: nodes.NodeNG,
) -> bool:
in_annotation_or_default_or_decorator = False
if isinstance(frame, nodes.FunctionDef) and node.statement() is frame:
in_annotation_or_default_or_decorator = (
(
node in frame.args.annotations
or node in frame.args.posonlyargs_annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation
)
or frame.args.parent_of(node)
or (frame.decorators and frame.decorators.parent_of(node))
or (
frame.returns
and (node is frame.returns or frame.returns.parent_of(node))
)
)
return in_annotation_or_default_or_decorator
@staticmethod
def _in_lambda_or_comprehension_body(
node: nodes.NodeNG,
frame: nodes.NodeNG,
) -> bool:
"""Return True if node within a lambda/comprehension body (or similar) and thus
should not have access to class attributes in frame.
"""
child = node
parent = node.parent
while parent is not None:
if parent is frame:
return False
match parent:
case nodes.Lambda() if child is not parent.args:
# Body of lambda should not have access to class attributes.
return True
case nodes.Comprehension() if child is not parent.iter:
# Only iter of list/set/dict/generator comprehension should have access.
return True
case nodes.ComprehensionScope() if not (
parent.generators and child is parent.generators[0]
):
# Body of list/set/dict/generator comprehension should not have access to class attributes.
# Furthermore, only the first generator (if multiple) in comprehension should have access.
return True
child = parent
parent = parent.parent
return False
    @staticmethod
    def _is_variable_violation(
        node: nodes.Name,
        defnode: nodes.NodeNG,
        stmt: _base_nodes.Statement,
        defstmt: _base_nodes.Statement,
        frame: nodes.LocalsDictNodeNG,  # scope of statement of node
        defframe: nodes.LocalsDictNodeNG,
        base_scope_type: str,
        is_recursive_klass: bool,
    ) -> tuple[bool, bool, bool]:
        """Decide whether using `node` relative to its definition `defnode`
        may be a used-before-assignment violation.

        Returns (maybe_before_assign, annotation_return, use_outer_definition):
        - maybe_before_assign: the use may precede the assignment;
        - annotation_return: the use is a function return annotation resolved
          against the enclosing class/module;
        - use_outer_definition: the caller should keep walking outer scopes.
        """
        maybe_before_assign = True
        annotation_return = False
        use_outer_definition = False
        if frame is not defframe:
            maybe_before_assign = _detect_global_scope(node, frame, defframe)
        elif defframe.parent is None:
            # we are at the module level, check the name is not
            # defined in builtins
            if (
                node.name in defframe.scope_attrs
                or astroid.builtin_lookup(node.name)[1]
            ):
                maybe_before_assign = False
        else:
            # we are in a local scope, check the name is not
            # defined in global or builtin scope
            # skip this lookup if name is assigned later in function scope/lambda
            # Note: the node.frame() is not the same as the `frame` argument which is
            # equivalent to frame.statement().scope()
            forbid_lookup = (
                isinstance(frame, nodes.FunctionDef)
                or isinstance(node.frame(), nodes.Lambda)
            ) and _assigned_locally(node)
            if not forbid_lookup and defframe.root().lookup(node.name)[1]:
                maybe_before_assign = False
                use_outer_definition = stmt == defstmt and not isinstance(
                    defnode, nodes.Comprehension
                )
            # check if we have a nonlocal
            elif node.name in defframe.locals:
                maybe_before_assign = not _is_nonlocal_name(node, defframe)
        if (
            base_scope_type == "lambda"
            and isinstance(frame, nodes.ClassDef)
            and node.name in frame.locals
        ):
            # This rule verifies that if the definition node of the
            # checked name is an Arguments node and if the name
            # is used a default value in the arguments defaults
            # and the actual definition of the variable label
            # is happening before the Arguments definition.
            #
            # bar = None
            # foo = lambda bar=bar: bar
            #
            # In this case, maybe_before_assign should be False, otherwise
            # it should be True.
            maybe_before_assign = not (
                isinstance(defnode, nodes.Arguments)
                and node in defnode.defaults
                and frame.locals[node.name][0].fromlineno < defstmt.fromlineno
            )
        elif isinstance(defframe, nodes.ClassDef) and isinstance(
            frame, nodes.FunctionDef
        ):
            # Special rules for function return annotations.
            if node is frame.returns:
                # Using a name defined earlier in the class containing the function.
                if defframe.parent_of(frame.returns):
                    annotation_return = True
                    if frame.returns.name in defframe.locals:
                        definition = defframe.locals[node.name][0]
                        # no warning raised if a name was defined earlier in the class
                        maybe_before_assign = (
                            definition.lineno is not None
                            and definition.lineno >= frame.lineno
                        )
                    else:
                        maybe_before_assign = True
                # Using a name defined in the module if this is a nested function.
                elif (
                    # defframe is the class containing the function.
                    # It shouldn't be nested: expect its parent to be a module.
                    (defframe_parent := next(defframe.node_ancestors()))
                    and isinstance(defframe_parent, nodes.Module)
                    # frame is the function inside the class.
                    and (frame_ancestors := tuple(frame.node_ancestors()))
                    # Does that function have any functions as ancestors?
                    and any(
                        isinstance(ancestor, nodes.FunctionDef)
                        for ancestor in frame_ancestors
                    )
                    # And is its last ancestor the same module as the class's?
                    and frame_ancestors[-1] is defframe_parent
                ):
                    annotation_return = True
                    maybe_before_assign = False
            if isinstance(node.parent, nodes.Arguments):
                maybe_before_assign = stmt.fromlineno <= defstmt.fromlineno
        elif is_recursive_klass:
            maybe_before_assign = True
        else:
            maybe_before_assign = (
                maybe_before_assign and stmt.fromlineno <= defstmt.fromlineno
            )
            if maybe_before_assign and stmt.fromlineno == defstmt.fromlineno:
                if (
                    isinstance(defframe, nodes.FunctionDef)
                    and frame is defframe
                    and defframe.parent_of(node)
                    and (
                        defnode in defframe.type_params
                        # Single statement function, with the statement on the
                        # same line as the function definition
                        or stmt is not defstmt
                    )
                ):
                    maybe_before_assign = False
                elif (
                    isinstance(defstmt, NODES_WITH_VALUE_ATTR)
                    and VariablesChecker._maybe_used_and_assigned_at_once(defstmt)
                    and frame is defframe
                    and defframe.parent_of(node)
                    and stmt is defstmt
                ):
                    # Single statement if, with assignment expression on same
                    # line as assignment
                    # x = b if (b := True) else False
                    maybe_before_assign = False
                elif (
                    isinstance(defnode, nodes.NamedExpr)
                    and frame is defframe
                    and defframe.parent_of(stmt)
                    and stmt is defstmt
                    and _is_before(defnode, node)
                ):
                    # Relation of a name to the same name in a named expression
                    # Could be used before assignment if self-referencing:
                    # (b := b)
                    # Otherwise, safe if used after assignment:
                    # (b := 2) and b
                    maybe_before_assign = defnode.value is node or any(
                        a is defnode.value for a in node.node_ancestors()
                    )
                elif (
                    isinstance(defframe, nodes.ClassDef)
                    and defnode in defframe.type_params
                ):
                    # Generic on parent class:
                    # class Child[_T](Parent[_T])
                    maybe_before_assign = False
        return maybe_before_assign, annotation_return, use_outer_definition
@staticmethod
def _maybe_used_and_assigned_at_once(defstmt: _base_nodes.Statement) -> bool:
"""Check if `defstmt` has the potential to use and assign a name in the
same statement.
"""
if isinstance(defstmt, nodes.Match):
return any(case.guard for case in defstmt.cases)
if isinstance(defstmt, nodes.IfExp):
return True
if isinstance(defstmt, nodes.TypeAlias):
return True
if isinstance(defstmt.value, nodes.BaseContainer):
return any(
VariablesChecker._maybe_used_and_assigned_at_once(elt)
for elt in defstmt.value.elts
if isinstance(elt, (*NODES_WITH_VALUE_ATTR, nodes.IfExp, nodes.Match))
)
match value := defstmt.value:
case nodes.IfExp():
return True
case nodes.Lambda(body=nodes.IfExp()):
return True
case nodes.Dict() if any(
isinstance(item[0], nodes.IfExp) or isinstance(item[1], nodes.IfExp)
for item in value.items
):
return True
case nodes.Call():
pass
case _:
return False
return any(
any(isinstance(kwarg.value, nodes.IfExp) for kwarg in call.keywords)
or any(isinstance(arg, nodes.IfExp) for arg in call.args)
or (
isinstance(call.func, nodes.Attribute)
and isinstance(call.func.expr, nodes.IfExp)
)
for call in value.nodes_of_class(klass=nodes.Call)
)
def _is_builtin(self, name: str) -> bool:
return name in self.linter.config.additional_builtins or utils.is_builtin(name)
def _has_nonlocal_in_enclosing_frame(
self, node: nodes.Name, uncertain_definitions: list[nodes.NodeNG]
) -> bool:
"""Check if there is a nonlocal declaration in the nearest frame that encloses
both usage and definitions.
"""
defining_frames = {definition.frame() for definition in uncertain_definitions}
frame = node.frame()
is_enclosing_frame = False
while frame and not is_enclosing_frame:
is_enclosing_frame = all(
(frame is defining_frame) or frame.parent_of(defining_frame)
for defining_frame in defining_frames
)
if is_enclosing_frame and _is_nonlocal_name(node, frame):
return True
frame = frame.parent.frame() if frame.parent else None
return False
    @staticmethod
    def _is_only_type_assignment(
        node: nodes.Name,
        defstmt: _base_nodes.Statement,
    ) -> bool:
        """Check if variable only gets assigned a type and never a value.

        True means `defstmt` is a bare annotation (``x: int``) and no scope
        between the use and the annotation's frame ever gives the name a
        value.
        """
        if not (isinstance(defstmt, nodes.AnnAssign) and defstmt.value is None):
            return False
        defstmt_frame = defstmt.frame()
        node_frame = node.frame()
        # Walk outwards from the use up to (and including) the annotation's frame.
        parent = node
        while parent not in {defstmt_frame.parent, None}:
            parent_scope = parent.scope()
            # Find out if any nonlocals receive values in nested functions
            for inner_func in parent_scope.nodes_of_class(nodes.FunctionDef):
                if inner_func is parent_scope:
                    continue
                if any(
                    node.name in nl.names
                    for nl in inner_func.nodes_of_class(nodes.Nonlocal)
                ) and any(
                    node.name == an.name
                    for an in inner_func.nodes_of_class(nodes.AssignName)
                ):
                    return False
            local_refs = parent_scope.locals.get(node.name, [])
            for ref_node in local_refs:
                # If local ref is in the same frame as our node, but on a later lineno
                # we don't actually care about this local ref.
                # Local refs are ordered, so we break.
                #     print(var)
                #     var = 1  # <- irrelevant
                if defstmt_frame == node_frame and ref_node.lineno > node.lineno:
                    break
                # If the parent of the local reference is anything but an AnnAssign
                # Or if the AnnAssign adds a value the variable will now have a value
                #     var = 1  # OR
                #     var: int = 1
                if (
                    not isinstance(ref_node.parent, nodes.AnnAssign)
                    or ref_node.parent.value
                ) and not (
                    # EXCEPTION: will not have a value if a self-referencing named expression
                    #     var: int
                    #     if (var := var * var)  <-- "var" still undefined
                    isinstance(ref_node.parent, nodes.NamedExpr)
                    and any(a is ref_node.parent.value for a in node.node_ancestors())
                ):
                    return False
            parent = parent_scope.parent
        return True
def _is_first_level_self_reference(
self,
node: nodes.Name,
defstmt: nodes.ClassDef,
found_nodes: list[nodes.NodeNG],
) -> tuple[VariableVisitConsumerAction, list[nodes.NodeNG] | None]:
"""Check if a first level method's annotation or default values
refers to its own class, and return a consumer action.
"""
if node.frame().parent == defstmt and node.statement() == node.frame():
# Check if used as type annotation
# Break if postponed evaluation is enabled
if utils.is_node_in_type_annotation_context(node):
if not self._postponed_evaluation_enabled:
return (VariableVisitConsumerAction.CONTINUE, None)
return (VariableVisitConsumerAction.RETURN, None)
# Check if used as default value by calling the class
match node.parent:
case nodes.Call(parent=nodes.Arguments()):
return (VariableVisitConsumerAction.CONTINUE, None)
return (VariableVisitConsumerAction.RETURN, found_nodes)
@staticmethod
def _is_never_evaluated(
defnode: nodes.NamedExpr,
defnode_parent: nodes.IfExp,
) -> bool:
"""Check if a NamedExpr is inside a side of if ... else that never
gets evaluated.
"""
match utils.safe_infer(defnode_parent.test):
case nodes.Const(value=True) if defnode == defnode_parent.orelse:
return True
case nodes.Const(value=False) if defnode == defnode_parent.body:
return True
case _:
return False
@staticmethod
def _is_variable_annotation_in_function(node: nodes.Name) -> bool:
ann_assign = utils.get_node_first_ancestor_of_type(node, nodes.AnnAssign)
return (
ann_assign
and (node is ann_assign.annotation or ann_assign.annotation.parent_of(node))
and utils.get_node_first_ancestor_of_type( # type: ignore[return-value]
ann_assign, nodes.FunctionDef
)
)
def _ignore_class_scope(self, node: nodes.NodeNG) -> bool:
"""Return True if the node is in a local class scope, as an assignment.
Detect if we are in a local class scope, as an assignment.
For example, the following is fair game.
class A:
b = 1
c = lambda b=b: b * b
class B:
tp = 1
def func(self, arg: tp):
...
class C:
tp = 2
def func(self, arg=tp):
...
class C:
class Tp:
pass
class D(Tp):
...
"""
name = node.name
frame = node.statement().scope()
in_annotation_or_default_or_decorator = self._defined_in_function_definition(
node, frame
)
in_ancestor_list = utils.is_ancestor_name(frame, node)
if in_annotation_or_default_or_decorator or in_ancestor_list:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not (
(isinstance(frame, nodes.ClassDef) or in_annotation_or_default_or_decorator)
and not self._in_lambda_or_comprehension_body(node, frame)
and name in frame_locals
)
    # pylint: disable-next=too-many-branches,too-many-statements
    def _loopvar_name(self, node: nodes.Name) -> None:
        """Emit undefined-loop-variable when *node* reads a name whose only
        binding is a for-loop / comprehension target that may never have been
        assigned (e.g. the iterable could be empty).
        """
        # filter variables according to node's scope
        astmts = [s for s in node.lookup(node.name)[1] if hasattr(s, "assign_type")]
        # If this variable usage exists inside a function definition
        # that exists in the same loop,
        # the usage is safe because the function will not be defined either if
        # the variable is not defined.
        scope = node.scope()
        if isinstance(scope, (nodes.Lambda, nodes.FunctionDef)) and any(
            asmt.scope().parent_of(scope) for asmt in astmts
        ):
            return
        # Filter variables according to their respective scope. Test parent
        # and statement to avoid #74747. This is not a total fix, which would
        # introduce a mechanism similar to special attribute lookup in
        # modules. Also, in order to get correct inference in this case, the
        # scope lookup rules would need to be changed to return the initial
        # assignment (which does not exist in code per se) as well as any later
        # modifications.
        if (
            not astmts  # pylint: disable=too-many-boolean-expressions
            or (
                astmts[0].parent == astmts[0].root()
                and astmts[0].parent.parent_of(node)
            )
            or (
                astmts[0].is_statement
                or (
                    not isinstance(astmts[0].parent, nodes.Module)
                    and astmts[0].statement().parent_of(node)
                )
            )
        ):
            _astmts = []
        else:
            _astmts = astmts[:1]
        # Drop assignments nested inside a *previous* assignment's statement,
        # except when separated by a for/else boundary.
        for i, stmt in enumerate(astmts[1:]):
            try:
                astmt_statement = astmts[i].statement()
            except astroid.exceptions.ParentMissingError:
                continue
            if astmt_statement.parent_of(stmt) and not utils.in_for_else_branch(
                astmt_statement, stmt
            ):
                continue
            _astmts.append(stmt)
        astmts = _astmts
        # Only a single surviving candidate can be reasoned about reliably.
        if len(astmts) != 1:
            return
        assign = astmts[0].assign_type()
        if not (
            isinstance(assign, (nodes.For, nodes.Comprehension, nodes.GeneratorExp))
            and assign.statement() is not node.statement()
        ):
            return
        if not isinstance(assign, nodes.For):
            self.add_message("undefined-loop-variable", args=node.name, node=node)
            return
        # A for/else whose else branch always leaves the enclosing code path
        # guarantees the loop target was bound whenever *node* is reached.
        for else_stmt in assign.orelse:
            if isinstance(
                else_stmt, (nodes.Return, nodes.Raise, nodes.Break, nodes.Continue)
            ):
                return
            # TODO: 4.0: Consider using utils.is_terminating_func
            # after merging it with RefactoringChecker._is_function_def_never_returning
            if isinstance(else_stmt, nodes.Expr) and isinstance(
                else_stmt.value, nodes.Call
            ):
                inferred_func = utils.safe_infer(else_stmt.value.func)
                if (
                    isinstance(inferred_func, nodes.FunctionDef)
                    and inferred_func.returns
                ):
                    inferred_return = utils.safe_infer(inferred_func.returns)
                    if isinstance(
                        inferred_return, nodes.FunctionDef
                    ) and inferred_return.qname() in {
                        *TYPING_NORETURN,
                        *TYPING_NEVER,
                        "typing._SpecialForm",
                    }:
                        return
                    # typing_extensions.NoReturn returns a _SpecialForm
                    if (
                        isinstance(inferred_return, bases.Instance)
                        and inferred_return.qname() == "typing._SpecialForm"
                    ):
                        return
        # A walrus inside a comprehension binds the name in the scope that
        # encloses the comprehension, so the read may be safe even if the loop
        # target itself is not.
        maybe_walrus = utils.get_node_first_ancestor_of_type(node, nodes.NamedExpr)
        if maybe_walrus:
            maybe_comprehension = utils.get_node_first_ancestor_of_type(
                maybe_walrus, nodes.Comprehension
            )
            if maybe_comprehension:
                comprehension_scope = utils.get_node_first_ancestor_of_type(
                    maybe_comprehension, nodes.ComprehensionScope
                )
                if comprehension_scope is None:
                    # Should not be possible.
                    pass
                elif (
                    comprehension_scope.parent.scope() is scope
                    and node.name in comprehension_scope.locals
                ):
                    return
        # For functions we can do more by inferring the length of the itered object
        try:
            inferred = next(assign.iter.infer())
            # Prefer the target of enumerate() rather than the enumerate object itself
            if (
                isinstance(inferred, astroid.Instance)
                and inferred.qname() == "builtins.enumerate"
            ):
                likely_call = assign.iter
                if isinstance(assign.iter, nodes.IfExp):
                    likely_call = assign.iter.body
                if isinstance(likely_call, nodes.Call) and likely_call.args:
                    inferred = next(likely_call.args[0].infer())
        except astroid.InferenceError:
            self.add_message("undefined-loop-variable", args=node.name, node=node)
        else:
            if (
                isinstance(inferred, astroid.Instance)
                and inferred.qname() == BUILTIN_RANGE
            ):
                # Consider range() objects safe, even if they might not yield any results.
                return

            # Consider sequences.
            sequences = (
                nodes.List,
                nodes.Tuple,
                nodes.Dict,
                nodes.Set,
                objects.FrozenSet,
            )
            if not isinstance(inferred, sequences):
                self.add_message("undefined-loop-variable", args=node.name, node=node)
                return

            # An empty literal can never bind the loop target.
            elements = getattr(inferred, "elts", getattr(inferred, "items", []))
            if not elements:
                self.add_message("undefined-loop-variable", args=node.name, node=node)
    # pylint: disable = too-many-branches
    def _check_is_unused(
        self,
        name: str,
        node: nodes.FunctionDef,
        stmt: nodes.NodeNG,
        global_names: set[str],
        nonlocal_names: Iterable[str],
        comprehension_target_names: Iterable[str],
    ) -> None:
        """Report *name* (bound by *stmt* inside function *node*) as unused,
        unless one of the many exemptions applies.

        Arguments are delegated to ``_check_unused_arguments``; other bindings
        yield unused-variable / possibly-unused-variable / unused-import.
        """
        # Ignore some special names specified by user configuration.
        if self._is_name_ignored(stmt, name):
            return
        # Ignore names that were added dynamically to the Function scope
        match node:
            case nodes.FunctionDef(locals={"__class__": [nodes.ClassDef()]}) if (
                name == "__class__"
            ):
                return

        # Ignore names imported by the global statement.
        if isinstance(stmt, (nodes.Global, nodes.Import, nodes.ImportFrom)):
            # Detect imports, assigned to global statements.
            if global_names and _import_name_is_global(stmt, global_names):
                return

        # Ignore names in comprehension targets
        if name in comprehension_target_names:
            return

        # Ignore names in string literal type annotation.
        if name in self._type_annotation_names:
            return

        argnames = node.argnames()
        # Care about functions with unknown argument (builtins)
        if name in argnames:
            if node.name == "__new__":
                is_init_def = False
                # Look for the `__init__` method in all the methods of the same class.
                for n in node.parent.get_children():
                    is_init_def = hasattr(n, "name") and (n.name == "__init__")
                    if is_init_def:
                        break
                # Ignore unused arguments check for `__new__` if `__init__` is defined.
                if is_init_def:
                    return
            self._check_unused_arguments(name, node, stmt, argnames, nonlocal_names)
        else:
            if stmt.parent and isinstance(
                stmt.parent, (nodes.Assign, nodes.AnnAssign, nodes.Tuple, nodes.For)
            ):
                if name in nonlocal_names:
                    return

            qname = asname = None
            if isinstance(stmt, (nodes.Import, nodes.ImportFrom)):
                # Need the complete name, which we don't have in .locals.
                if len(stmt.names) > 1:
                    import_names = next(
                        (names for names in stmt.names if name in names), None
                    )
                else:
                    import_names = stmt.names[0]
                if import_names:
                    qname, asname = import_names
                # NOTE: `name` is rebound here to the display name used in the
                # messages below.
                name = asname or qname

            # A later locals() call may consume any local, so only "possibly" unused.
            if _has_locals_call_after_node(stmt, node.scope()):
                message_name = "possibly-unused-variable"
            else:
                match stmt:
                    case nodes.Import():
                        if asname is not None:
                            msg = f"{qname} imported as {asname}"
                        else:
                            msg = f"import {name}"
                        self.add_message("unused-import", args=msg, node=stmt)
                        return
                    case nodes.ImportFrom():
                        if asname is not None:
                            msg = f"{qname} imported from {stmt.modname} as {asname}"
                        else:
                            msg = f"{name} imported from {stmt.modname}"
                        self.add_message("unused-import", args=msg, node=stmt)
                        return
                message_name = "unused-variable"

            # Decorated functions are assumed used (registration decorators etc.).
            if isinstance(stmt, nodes.FunctionDef) and stmt.decorators:
                return

            # Don't check function stubs created only for type information
            if utils.is_overload_stub(node):
                return

            # Special case for exception variable
            if self._is_exception_binding_used_in_handler(stmt, name):
                return

            self.add_message(message_name, args=name, node=stmt)
def _is_name_ignored(
self,
stmt: nodes.NodeNG,
name: str,
) -> re.Pattern[str] | re.Match[str] | None:
authorized_rgx = self.linter.config.dummy_variables_rgx
match stmt:
case nodes.AssignName(parent=nodes.Arguments()) | nodes.Arguments():
regex: re.Pattern[str] = self.linter.config.ignored_argument_names
case _:
regex = authorized_rgx
# See https://stackoverflow.com/a/47007761/2519059 to
# understand what this function return. Please do NOT use
# this elsewhere, this is confusing for no benefit
return regex and regex.match(name)
    def _check_unused_arguments(
        self,
        name: str,
        node: nodes.FunctionDef,
        stmt: nodes.NodeNG,
        argnames: list[str],
        nonlocal_names: Iterable[str],
    ) -> None:
        """Emit unused-argument for *name* unless an exemption applies.

        Exemptions: self/cls, arguments of overridden methods, dunder methods
        (other than __init__/__new__), configured callback names,
        singledispatch registrations, overload stubs, protocol classes, and
        nonlocal names.
        """
        is_method = node.is_method()
        klass = node.parent.frame()
        # Lower the confidence when the class's bases cannot all be resolved,
        # since an overriding relationship may then be missed.
        if is_method and isinstance(klass, nodes.ClassDef):
            confidence = (
                INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
            )
        else:
            confidence = HIGH

        if is_method:
            # Don't warn for the first argument of a (non static) method
            if node.type != "staticmethod" and name == argnames[0]:
                return
            # Don't warn for argument of an overridden method
            overridden = overridden_method(klass, node.name)
            if overridden is not None and name in overridden.argnames():
                return
            if node.name in utils.PYMETHODS and node.name not in (
                "__init__",
                "__new__",
            ):
                return
        # Don't check callback arguments
        if any(
            node.name.startswith(cb) or node.name.endswith(cb)
            for cb in self.linter.config.callbacks
        ):
            return
        # Don't check arguments of singledispatch.register function.
        if utils.is_registered_in_singledispatch_function(node):
            return

        # Don't check function stubs created only for type information
        if utils.is_overload_stub(node):
            return

        # Don't check protocol classes
        if utils.is_protocol_class(klass):
            return

        if name in nonlocal_names:
            return

        self.add_message("unused-argument", args=name, node=stmt, confidence=confidence)
def _is_exception_binding_used_in_handler(
self, stmt: nodes.NodeNG, name: str
) -> bool:
return (
isinstance(stmt.parent, nodes.ExceptHandler)
and stmt is stmt.parent.name
and any(n.name == name for n in stmt.parent.nodes_of_class(nodes.Name))
)
    def _check_late_binding_closure(self, node: nodes.Name) -> None:
        """Check whether node is a cell var that is assigned within a containing loop.

        Special cases where we don't care about the error:
        1. When the node's function is immediately called, e.g. (lambda: i)()
        2. When the node's function is returned from within the loop, e.g. return lambda: i
        """
        if not self.linter.is_message_enabled("cell-var-from-loop"):
            return

        node_scope = node.frame()

        # If node appears in a default argument expression,
        # look at the next enclosing frame instead
        if utils.is_default_argument(node, node_scope):
            node_scope = node_scope.parent.frame()

        # Check if node is a cell var
        if (
            not isinstance(node_scope, (nodes.Lambda, nodes.FunctionDef))
            or node.name in node_scope.locals
        ):
            return

        assign_scope, stmts = node.lookup(node.name)
        if not (stmts and assign_scope.parent_of(node_scope)):
            return

        if utils.is_comprehension(assign_scope):
            self.add_message("cell-var-from-loop", node=node, args=node.name)
        else:
            # Look for an enclosing For loop.
            # Currently, we only consider the first assignment
            assignment_node = stmts[0]

            maybe_for = assignment_node
            # NOTE: the ``else`` below belongs to the ``while`` -- it runs only
            # when the walk ends without ``break`` (i.e. a For node was found
            # or we ran out of parents), not when we hit assign_scope first.
            while maybe_for and not isinstance(maybe_for, nodes.For):
                if maybe_for is assign_scope:
                    break
                maybe_for = maybe_for.parent
            else:
                if (
                    maybe_for
                    and maybe_for.parent_of(node_scope)
                    and not utils.is_being_called(node_scope)
                    and node_scope.parent
                    and not isinstance(node_scope.statement(), nodes.Return)
                ):
                    self.add_message("cell-var-from-loop", node=node, args=node.name)
def _should_ignore_redefined_builtin(self, stmt: nodes.NodeNG) -> bool:
if not isinstance(stmt, nodes.ImportFrom):
return False
return stmt.modname in self.linter.config.redefining_builtins_modules
def _allowed_redefined_builtin(self, name: str) -> bool:
return name in self.linter.config.allowed_redefined_builtins
@staticmethod
def _comprehension_between_frame_and_node(node: nodes.Name) -> bool:
"""Return True if a ComprehensionScope intervenes between `node` and its
frame.
"""
closest_comprehension_scope = utils.get_node_first_ancestor_of_type(
node, nodes.ComprehensionScope
)
return closest_comprehension_scope is not None and node.frame().parent_of(
closest_comprehension_scope
)
def _store_type_annotation_node(self, type_annotation: nodes.NodeNG) -> None:
"""Given a type annotation, store all the name nodes it refers to."""
match type_annotation:
case nodes.Name():
self._type_annotation_names.append(type_annotation.name)
return
case nodes.Attribute():
self._store_type_annotation_node(type_annotation.expr)
return
case nodes.Subscript():
pass
case _:
return
match type_annotation.value:
case nodes.Attribute(expr=nodes.Name(name=name)) if name == TYPING_MODULE:
self._type_annotation_names.append(TYPING_MODULE)
return
self._type_annotation_names.extend(
annotation.name for annotation in type_annotation.nodes_of_class(nodes.Name)
)
def _store_type_annotation_names(
self,
node: nodes.For | nodes.Assign | nodes.With,
) -> None:
type_annotation = node.type_annotation
if not type_annotation:
return
self._store_type_annotation_node(node.type_annotation)
def _check_self_cls_assign(self, node: nodes.Assign) -> None:
"""Check that self/cls don't get assigned."""
assign_names: set[str | None] = set()
for target in node.targets:
match target:
case nodes.AssignName():
assign_names.add(target.name)
case nodes.Tuple():
assign_names.update(
elt.name
for elt in target.elts
if isinstance(elt, nodes.AssignName)
)
scope = node.scope()
nonlocals_with_same_name = node.scope().parent and any(
child for child in scope.body if isinstance(child, nodes.Nonlocal)
)
if nonlocals_with_same_name:
scope = node.scope().parent.scope()
if not (
isinstance(scope, nodes.FunctionDef)
and scope.is_method()
and "builtins.staticmethod" not in scope.decoratornames()
):
return
argument_names = scope.argnames()
if not argument_names:
return
self_cls_name = argument_names[0]
if self_cls_name in assign_names:
self.add_message("self-cls-assignment", node=node, args=(self_cls_name,))
    def _check_unpacking(
        self,
        inferred: InferenceResult,
        node: nodes.Assign,
        targets: list[nodes.NodeNG],
    ) -> None:
        """Check for unbalanced tuple unpacking
        and unpacking non sequences.

        *inferred* is the inferred value of ``node.value``; *targets* are the
        left-hand side names being unpacked into.
        """
        if utils.is_inside_abstract_class(node):
            return
        if utils.is_comprehension(node):
            return
        if isinstance(inferred, util.UninferableBase):
            return
        if (
            isinstance(inferred.parent, nodes.Arguments)
            and isinstance(node.value, nodes.Name)
            and node.value.name == inferred.parent.vararg
        ):
            # Variable-length argument, we can't determine the length.
            return

        # Attempt to check unpacking is properly balanced
        values = self._nodes_to_unpack(inferred)
        details = _get_unpacking_extra_info(node, inferred)

        if values is not None:
            if len(targets) != len(values):
                self._report_unbalanced_unpacking(
                    node, inferred, targets, len(values), details
                )
        # attempt to check unpacking may be possible (i.e. RHS is iterable)
        elif not utils.is_iterable(inferred):
            self._report_unpacking_non_sequence(node, details)
@staticmethod
def _get_value_length(value_node: nodes.NodeNG) -> int:
value_subnodes = VariablesChecker._nodes_to_unpack(value_node)
if value_subnodes is not None:
return len(value_subnodes)
match value_node:
case nodes.Const(value=str() | bytes()):
return len(value_node.value)
case nodes.Subscript():
step = value_node.slice.step or 1
splice_range = (
value_node.slice.upper.value - value_node.slice.lower.value
)
# RUF046 says the return of 'math.ceil' is always an int, mypy doesn't see it
return math.ceil(splice_range / step) # type: ignore[no-any-return]
return 1
@staticmethod
def _nodes_to_unpack(node: nodes.NodeNG) -> list[nodes.NodeNG] | None:
"""Return the list of values of the `Assign` node."""
if isinstance(node, (nodes.Tuple, nodes.List, nodes.Set, *DICT_TYPES)):
return node.itered() # type: ignore[no-any-return]
if isinstance(node, astroid.Instance) and any(
ancestor.qname() == "typing.NamedTuple" for ancestor in node.ancestors()
):
return [i for i in node.values() if isinstance(i, nodes.AssignName)]
return None
def _report_unbalanced_unpacking(
self,
node: nodes.NodeNG,
inferred: InferenceResult,
targets: list[nodes.NodeNG],
values_count: int,
details: str,
) -> None:
args = (
details,
len(targets),
"" if len(targets) == 1 else "s",
values_count,
"" if values_count == 1 else "s",
)
symbol = (
"unbalanced-dict-unpacking"
if isinstance(inferred, DICT_TYPES)
else "unbalanced-tuple-unpacking"
)
self.add_message(symbol, node=node, args=args, confidence=INFERENCE)
def _report_unpacking_non_sequence(self, node: nodes.NodeNG, details: str) -> None:
if details and not details.startswith(" "):
details = f" {details}"
self.add_message("unpacking-non-sequence", node=node, args=details)
    def _check_module_attrs(
        self,
        node: _base_nodes.ImportNode,
        module: nodes.Module,
        module_names: list[str],
    ) -> nodes.Module | None:
        """Check that module_names (list of string) are accessible through the
        given module, if the latest access name corresponds to a module, return it.

        Emits no-name-in-module for unresolvable components.
        NOTE: *module_names* is consumed (popped from the front) by this method.
        """
        while module_names:
            name = module_names.pop(0)
            if name == "__dict__":
                # ``__dict__`` can hold anything; stop resolving here.
                module = None
                break
            try:
                module = module.getattr(name)[0]
                if not isinstance(module, nodes.Module):
                    module = next(module.infer())
                if not isinstance(module, nodes.Module):
                    return None
            except astroid.NotFoundError:
                # Unable to import `name` from `module`. Since `name` may itself be a
                # module, we first check if it matches the ignored modules.
                if is_module_ignored(f"{module.qname()}.{name}", self._ignored_modules):
                    return None
                self.add_message(
                    "no-name-in-module", args=(name, module.name), node=node
                )
                return None
            except astroid.InferenceError:
                return None
        if module_names:
            # Components left over after breaking on __dict__ are unverifiable.
            modname = module.name if module else "__dict__"
            self.add_message(
                "no-name-in-module", node=node, args=(".".join(module_names), modname)
            )
            return None
        if isinstance(module, nodes.Module):
            return module
        return None
    def _check_all(
        self,
        node: nodes.Module,
        not_consumed: Consumption,
    ) -> None:
        """Validate the module's ``__all__``: its container type, that every
        entry is a string, and that each named object exists in the module
        (or, for packages, is an importable submodule).

        Names listed in ``__all__`` are removed from *not_consumed* so they
        are not later reported as unused.
        """
        try:
            assigned = next(node.igetattr("__all__"))
        except astroid.InferenceError:
            return
        if isinstance(assigned, util.UninferableBase):
            return
        # __all__ must be a list or tuple literal.
        if assigned.pytype() not in {"builtins.list", "builtins.tuple"}:
            line, col = assigned.tolineno, assigned.col_offset
            self.add_message("invalid-all-format", line=line, col_offset=col, node=node)
            return
        for elt in getattr(assigned, "elts", ()):
            try:
                elt_name = next(elt.infer())
            except astroid.InferenceError:
                continue
            if isinstance(elt_name, util.UninferableBase):
                continue
            if not elt_name.parent:
                continue

            if not (
                isinstance(elt_name, nodes.Const) and isinstance(elt_name.value, str)
            ):
                self.add_message("invalid-all-object", args=elt.as_string(), node=elt)
                continue

            elt_name = elt_name.value
            # If elt is in not_consumed, remove it from not_consumed
            if elt_name in not_consumed:
                del not_consumed[elt_name]
                continue

            if elt_name not in node.locals:
                if not node.package:
                    self.add_message(
                        "undefined-all-variable", args=(elt_name,), node=elt
                    )
                else:
                    # For a package __init__, the name may be a submodule on disk.
                    basename = os.path.splitext(node.file)[0]
                    if os.path.basename(basename) == "__init__":
                        name = node.name + "." + elt_name
                        try:
                            astroid.modutils.file_from_modpath(name.split("."))
                        except ImportError:
                            self.add_message(
                                "undefined-all-variable", args=(elt_name,), node=elt
                            )
                        except SyntaxError:
                            # don't yield a syntax-error warning,
                            # because it will be later yielded
                            # when the file will be checked
                            pass
def _check_globals(self, not_consumed: Consumption) -> None:
if self._allow_global_unused_variables:
return
for name, node_lst in not_consumed.items():
for node in node_lst:
if in_type_checking_block(node):
continue
if self._is_exception_binding_used_in_handler(node, name):
continue
if isinstance(node, nodes.AssignName) and node.name == "__all__":
continue
if (
isinstance(node, nodes.ImportFrom)
and name == "annotations"
and node.modname == "__future__"
):
continue
self.add_message("unused-variable", args=(name,), node=node)
    # pylint: disable = too-many-branches
    def _check_imports(self, not_consumed: Consumption) -> None:
        """Emit unused-import / unused-wildcard-import for imported names in
        *not_consumed* that survive every exemption (typing-only imports,
        dummy names, __future__ imports, special dunders, TYPE_CHECKING
        blocks).

        NOTE: this runs at the end of the module walk and deletes
        ``self._to_consume`` on exit.
        """
        local_names = _fix_dot_imports(not_consumed)
        checked = set()
        # Unused wildcard names are grouped per (modname, stmt) and reported
        # as a single message below.
        unused_wildcard_imports: defaultdict[
            tuple[str, nodes.ImportFrom],
            list[str],
        ] = defaultdict(list)
        for name, stmt in local_names:
            for imports in stmt.names:
                real_name = imported_name = imports[0]
                if imported_name == "*":
                    real_name = name
                as_name = imports[1]
                if real_name in checked:
                    continue
                if name not in (real_name, as_name):
                    continue
                checked.add(real_name)

                is_type_annotation_import = (
                    imported_name in self._type_annotation_names
                    or as_name in self._type_annotation_names
                )

                is_dummy_import = (
                    as_name
                    and self.linter.config.dummy_variables_rgx
                    and self.linter.config.dummy_variables_rgx.match(as_name)
                )

                if isinstance(stmt, nodes.Import) or (
                    isinstance(stmt, nodes.ImportFrom) and not stmt.modname
                ):
                    if isinstance(stmt, nodes.ImportFrom) and SPECIAL_OBJ.search(
                        imported_name
                    ):
                        # Filter special objects (__doc__, __all__) etc.,
                        # because they can be imported for exporting.
                        continue

                    if is_type_annotation_import or is_dummy_import:
                        # Most likely a typing import if it wasn't used so far.
                        # Also filter dummy variables.
                        continue

                    if as_name is None:
                        msg = f"import {imported_name}"
                    else:
                        msg = f"{imported_name} imported as {as_name}"
                    if not in_type_checking_block(stmt):
                        self.add_message("unused-import", args=msg, node=stmt)
                elif isinstance(stmt, nodes.ImportFrom) and stmt.modname != FUTURE:
                    if SPECIAL_OBJ.search(imported_name):
                        # Filter special objects (__doc__, __all__) etc.,
                        # because they can be imported for exporting.
                        continue

                    if _is_from_future_import(stmt, name):
                        # Check if the name is in fact loaded from a
                        # __future__ import in another module.
                        continue

                    if is_type_annotation_import or is_dummy_import:
                        # Most likely a typing import if it wasn't used so far.
                        # Also filter dummy variables.
                        continue

                    if imported_name == "*":
                        unused_wildcard_imports[(stmt.modname, stmt)].append(name)
                    else:
                        if as_name is None:
                            msg = f"{imported_name} imported from {stmt.modname}"
                        else:
                            msg = f"{imported_name} imported from {stmt.modname} as {as_name}"
                        if not in_type_checking_block(stmt):
                            self.add_message("unused-import", args=msg, node=stmt)

        # Construct string for unused-wildcard-import message
        for module, unused_list in unused_wildcard_imports.items():
            if len(unused_list) == 1:
                arg_string = unused_list[0]
            else:
                arg_string = (
                    f"{', '.join(i for i in unused_list[:-1])} and {unused_list[-1]}"
                )
            self.add_message(
                "unused-wildcard-import", args=(arg_string, module[0]), node=module[1]
            )
        del self._to_consume
def _check_metaclasses(self, node: nodes.Module | nodes.FunctionDef) -> None:
"""Update consumption analysis for metaclasses."""
consumed: list[tuple[Consumption, str]] = []
for child_node in node.get_children():
if isinstance(child_node, nodes.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
    def _check_classdef_metaclasses(
        self,
        klass: nodes.ClassDef,
        parent_node: nodes.Module | nodes.FunctionDef,
    ) -> list[tuple[Consumption, str]]:
        """Mark the name used as *klass*'s explicit metaclass as consumed.

        Returns (scope_locals, name) pairs for the caller to pop from the
        consumption maps, and emits undefined-variable when the metaclass
        name cannot be resolved anywhere.
        """
        if not klass._metaclass:
            # Skip if this class doesn't use explicitly a metaclass, but inherits it from ancestors
            return []

        consumed: list[tuple[Consumption, str]] = []
        metaclass = klass.metaclass()
        name = ""
        # Extract the bare name the ``metaclass=`` keyword refers to.
        match klass._metaclass:
            case nodes.Name(name=name):
                # bind name
                pass
            case nodes.Attribute(expr=attr):
                # Walk a dotted expression down to its leftmost Name.
                while not isinstance(attr, nodes.Name):
                    attr = attr.expr
                name = attr.name
            case nodes.Call(func=nodes.Name(name=name)):
                # bind name
                pass
            case _ if metaclass:
                name = metaclass.root().name

        found = False
        name = METACLASS_NAME_TRANSFORMS.get(name, name)
        if name:
            # check enclosing scopes starting from most local
            # NOTE(review): the ``break`` only leaves the inner loop, so outer
            # scopes keep being visited and may each contribute an entry --
            # presumably intentional so the name is consumed everywhere it
            # appears; confirm.
            for to_consume in self._to_consume[::-1]:
                scope_locals = to_consume.to_consume
                found_nodes = scope_locals.get(name, [])
                for found_node in found_nodes:
                    if found_node.lineno <= klass.lineno:
                        consumed.append((scope_locals, name))
                        found = True
                        break
            # Check parent scope
            nodes_in_parent_scope = parent_node.locals.get(name, [])
            for found_node_parent in nodes_in_parent_scope:
                if found_node_parent.lineno <= klass.lineno:
                    found = True
                    break
        if (
            not found
            and not metaclass
            and not (
                name in nodes.Module.scope_attrs
                or utils.is_builtin(name)
                or name in self.linter.config.additional_builtins
            )
        ):
            self.add_message("undefined-variable", node=klass, args=(name,))

        return consumed
def visit_subscript(self, node: nodes.Subscript) -> None:
inferred_slice = utils.safe_infer(node.slice)
self._check_potential_index_error(node, inferred_slice)
def _inferred_iterable_length(self, iterable: nodes.Tuple | nodes.List) -> int:
length = 0
for elt in iterable.elts:
if not isinstance(elt, nodes.Starred):
length += 1
continue
unpacked = utils.safe_infer(elt.value)
if isinstance(unpacked, nodes.BaseContainer):
length += len(unpacked.elts)
else:
length += 1
return length
def _check_potential_index_error(
self,
node: nodes.Subscript,
inferred_slice: nodes.NodeNG | None,
) -> None:
"""Check for the potential-index-error message."""
# Currently we only check simple slices of a single integer
if not (
isinstance(inferred_slice, nodes.Const)
and isinstance(inferred_slice.value, int)
):
return
# If the node.value is a Tuple or List without inference it is defined in place
if isinstance(node.value, (nodes.Tuple, nodes.List)):
# Add 1 because iterables are 0-indexed
if self._inferred_iterable_length(node.value) < inferred_slice.value + 1:
self.add_message(
"potential-index-error", node=node, confidence=INFERENCE
)
return
    @utils.only_required_for_messages(
        "unused-import",
        "unused-variable",
    )
    def visit_const(self, node: nodes.Const) -> None:
        """Take note of names that appear inside string literal type annotations
        unless the string is a parameter to `typing.Literal` or `typing.Annotation`.
        """
        if node.pytype() != "builtins.str":
            return
        if not utils.is_node_in_type_annotation_context(node):
            return
        # Check if parent's or grandparent's first child is typing.Literal
        parent = node.parent
        if isinstance(parent, nodes.Tuple):
            parent = parent.parent
        if isinstance(parent, nodes.Subscript):
            origin = next(parent.get_children(), None)
            if origin is not None and utils.is_typing_member(
                origin, ("Annotated", "Literal")
            ):
                return
        try:
            # Parse the string as code so its Name nodes can be recorded.
            annotation = extract_node(node.value)
            self._store_type_annotation_node(annotation)
        except ValueError:
            # e.g. node.value is white space
            pass
        except astroid.AstroidSyntaxError:
            # e.g. "?" or ":" in typing.Literal["?", ":"]
            pass
def register(linter: PyLinter) -> None:
    """Register this checker with *linter* (pylint's plugin entry point)."""
    linter.register_checker(VariablesChecker(linter))
| VariablesChecker |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 17278,
"end": 17479
} | class ____(ProviderError):
"""
Raised when not connected to a provider.
"""
def __init__(self):
super().__init__("Not connected to a network provider.")
| ProviderNotConnectedError |
python | altair-viz__altair | altair/utils/plugin_registry.py | {
"start": 1064,
"end": 1291
} | class ____(Exception):
def __init__(self, group, name):
self.group = group
self.name = name
def __str__(self):
return f"No {self.name!r} entry point found in group {self.group!r}"
| NoSuchEntryPoint |
python | joblib__joblib | joblib/memory.py | {
"start": 34897,
"end": 45404
} | class ____(Logger):
"""A context object for caching a function's return value each time it
is called with the same input arguments.
All values are cached on the filesystem, in a deep directory
structure.
Read more in the :ref:`User Guide <memory>`.
Parameters
----------
location: str, pathlib.Path or None
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent. This option
replaces cachedir since version 0.12.
backend: str, optional, default='local'
Type of store backend for reading/writing cache files.
The 'local' backend is using regular filesystem operations to
manipulate data (open, mv, etc) in the backend.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments.
compress: boolean, or integer, optional
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose: int, optional
Verbosity flag, controls the debug messages that are issued
as functions are evaluated.
backend_options: dict, optional
Contains a dictionary of named parameters used to configure
the store backend.
"""
# ------------------------------------------------------------------------
# Public interface
# ------------------------------------------------------------------------
def __init__(
self,
location=None,
backend="local",
mmap_mode=None,
compress=False,
verbose=1,
backend_options=None,
):
Logger.__init__(self)
self._verbose = verbose
self.mmap_mode = mmap_mode
self.timestamp = time.time()
self.backend = backend
self.compress = compress
if backend_options is None:
backend_options = {}
self.backend_options = backend_options
if compress and mmap_mode is not None:
warnings.warn("Compressed results cannot be memmapped", stacklevel=2)
self.location = location
if isinstance(location, str):
location = os.path.join(location, "joblib")
self.store_backend = _store_backend_factory(
backend,
location,
verbose=self._verbose,
backend_options=dict(
compress=compress, mmap_mode=mmap_mode, **backend_options
),
)
def cache(
self,
func=None,
ignore=None,
verbose=None,
mmap_mode=False,
cache_validation_callback=None,
):
"""Decorates the given function func to only compute its return
value for input arguments not cached on disk.
Parameters
----------
func: callable, optional
The function to be decorated
ignore: list of strings
A list of arguments name to ignore in the hashing
verbose: integer, optional
The verbosity mode of the function. By default that
of the memory object is used.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments. By default that of the memory object is used.
cache_validation_callback: callable, optional
Callable to validate whether or not the cache is valid. When
the cached function is called with arguments for which a cache
exists, this callable is called with the metadata of the cached
result as its sole argument. If it returns True, then the
cached result is returned, else the cache for these arguments
is cleared and recomputed.
Returns
-------
decorated_func: MemorizedFunc object
The returned object is a MemorizedFunc object, that is
callable (behaves like a function), but offers extra
methods for cache lookup and management. See the
documentation for :class:`joblib.memory.MemorizedFunc`.
"""
if cache_validation_callback is not None and not callable(
cache_validation_callback
):
raise ValueError(
"cache_validation_callback needs to be callable. "
f"Got {cache_validation_callback}."
)
if func is None:
# Partial application, to be able to specify extra keyword
# arguments in decorators
return functools.partial(
self.cache,
ignore=ignore,
mmap_mode=mmap_mode,
verbose=verbose,
cache_validation_callback=cache_validation_callback,
)
if self.store_backend is None:
cls = (
AsyncNotMemorizedFunc
if inspect.iscoroutinefunction(func)
else NotMemorizedFunc
)
return cls(func)
if verbose is None:
verbose = self._verbose
if mmap_mode is False:
mmap_mode = self.mmap_mode
if isinstance(func, MemorizedFunc):
func = func.func
cls = AsyncMemorizedFunc if inspect.iscoroutinefunction(func) else MemorizedFunc
return cls(
func,
location=self.store_backend,
backend=self.backend,
ignore=ignore,
mmap_mode=mmap_mode,
compress=self.compress,
verbose=verbose,
timestamp=self.timestamp,
cache_validation_callback=cache_validation_callback,
)
def clear(self, warn=True):
"""Erase the complete cache directory."""
if warn:
self.warn("Flushing completely the cache")
if self.store_backend is not None:
self.store_backend.clear()
# As the cache is completely clear, make sure the _FUNCTION_HASHES
# cache is also reset. Else, for a function that is present in this
# table, results cached after this clear will be have cache miss
# as the function code is not re-written.
_FUNCTION_HASHES.clear()
def reduce_size(self, bytes_limit=None, items_limit=None, age_limit=None):
"""Remove cache elements to make the cache fit its limits.
The limitation can impose that the cache size fits in ``bytes_limit``,
that the number of cache items is no more than ``items_limit``, and
that all files in cache are not older than ``age_limit``.
Parameters
----------
bytes_limit: int | str, optional
Limit in bytes of the size of the cache. By default, the size of
the cache is unlimited. When reducing the size of the cache,
``joblib`` keeps the most recently accessed items first. If a
str is passed, it is converted to a number of bytes using units
{ K | M | G} for kilo, mega, giga.
items_limit: int, optional
Number of items to limit the cache to. By default, the number of
items in the cache is unlimited. When reducing the size of the
cache, ``joblib`` keeps the most recently accessed items first.
age_limit: datetime.timedelta, optional
Maximum age of items to limit the cache to. When reducing the size
of the cache, any items last accessed more than the given length of
time ago are deleted. Example: to remove files older than 5 days,
use datetime.timedelta(days=5). Negative timedelta are not
accepted.
"""
if self.store_backend is None:
# No cached results, this function does nothing.
return
if bytes_limit is None and items_limit is None and age_limit is None:
# No limitation to impose, returning
return
# Defers the actual limits enforcing to the store backend.
self.store_backend.enforce_store_limits(bytes_limit, items_limit, age_limit)
def eval(self, func, *args, **kwargs):
"""Eval function func with arguments `*args` and `**kwargs`,
in the context of the memory.
This method works similarly to the builtin `apply`, except
that the function is called only if the cache is not
up to date.
"""
if self.store_backend is None:
return func(*args, **kwargs)
return self.cache(func)(*args, **kwargs)
# ------------------------------------------------------------------------
# Private `object` interface
# ------------------------------------------------------------------------
def __repr__(self):
return "{class_name}(location={location})".format(
class_name=self.__class__.__name__,
location=(
None if self.store_backend is None else self.store_backend.location
),
)
def __getstate__(self):
"""We don't store the timestamp when pickling, to avoid the hash
depending from it.
"""
state = self.__dict__.copy()
state["timestamp"] = None
return state
###############################################################################
# cache_validation_callback helpers
###############################################################################


def expires_after(
    days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0
):
    """Helper cache_validation_callback to force recompute after a duration.

    Parameters
    ----------
    days, seconds, microseconds, milliseconds, minutes, hours, weeks: numbers
        argument passed to a timedelta.
    """
    # Collapse the duration to a single float once, outside the callback.
    max_age_seconds = datetime.timedelta(
        days=days,
        seconds=seconds,
        microseconds=microseconds,
        milliseconds=milliseconds,
        minutes=minutes,
        hours=hours,
        weeks=weeks,
    ).total_seconds()

    def cache_validation_callback(metadata):
        # The cached result stays valid while younger than the duration.
        return (time.time() - metadata["time"]) < max_age_seconds

    return cache_validation_callback
| Memory |
python | getsentry__sentry | src/sentry/release_health/metrics_sessions_v2.py | {
"start": 6998,
"end": 9230
} | class ____(Field):
"""Base class for sum(sessions) and count_unique(user)"""
status_to_metric_field: Mapping[SessionStatus | None, MetricField] = {}
def get_all_field(self) -> MetricField:
return self.status_to_metric_field[None]
def _get_metric_fields(
self, raw_groupby: Sequence[str], status_filter: StatusFilter
) -> Sequence[MetricField]:
if status_filter:
# Restrict fields to the included ones
metric_fields = [self.status_to_metric_field[status] for status in status_filter]
if UNSORTABLE & status_filter:
self._hidden_fields.add(self.get_all_field())
# We always order the results by one of the selected fields,
# even if no orderBy is specified (see _primary_field).
metric_fields = [self.get_all_field()] + metric_fields
return metric_fields
if "session.status" in raw_groupby:
self._hidden_fields.add(self.get_all_field())
return [
# Always also get ALL, because this is what we sort by
# in the sessions implementation, with which we want to be consistent
self.get_all_field(),
# These are the fields we actually need:
self.status_to_metric_field[SessionStatus.HEALTHY],
self.status_to_metric_field[SessionStatus.ABNORMAL],
self.status_to_metric_field[SessionStatus.CRASHED],
self.status_to_metric_field[SessionStatus.ERRORED],
self.status_to_metric_field[SessionStatus.UNHANDLED],
]
return [self.get_all_field()]
def _get_session_status(self, metric_field: MetricField) -> SessionStatus | None:
if "session.status" in self._raw_groupby:
reverse_lookup = {v: k for k, v in self.status_to_metric_field.items()}
return reverse_lookup[metric_field]
return None
def normalize(self, value: Scalar) -> Scalar:
value = super().normalize(value)
# In the sessions API, sum() and count_unique() return integers
if isinstance(value, float):
return int(value)
return value
| CountField |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/pools.py | {
"start": 1910,
"end": 2063
} | class ____(BaseModel):
"""Pool Collection serializer for responses."""
pools: Iterable[PoolResponse]
total_entries: int
| PoolCollectionResponse |
python | huggingface__transformers | src/transformers/models/llava_next_video/modeling_llava_next_video.py | {
"start": 3445,
"end": 5267
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size (batch_size * num_patches, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
video_hidden_states: Optional[torch.FloatTensor] = None
| LlavaNextVideoCausalLMOutputWithPast |
python | huggingface__transformers | src/transformers/models/esm/modeling_esm.py | {
"start": 15201,
"end": 15664
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
| EsmSelfOutput |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 6022,
"end": 6304
} | class ____(BaseModel):
"""Represents a GitHub pull request with its metadata and interactions."""
number: int
labels: Labels
author: Author | None = None
title: str
createdAt: datetime
state: str
comments: Comments
reviews: Reviews
| PullRequestNode |
python | pytorch__pytorch | test/distributed/checkpoint/test_consolidate_hf_safetensors.py | {
"start": 876,
"end": 14312
} | class ____(DTensorTestBase):
def _create_d_tensors(self) -> None:
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
mesh_shape = (self.world_size,)
mesh_1d = init_device_mesh(self.device_type, mesh_shape)
# Create local tensor with row-wise sharding
rows_per_rank = global_tensor.shape[0] // self.world_size
start_row = self.rank * rows_per_rank
end_row = start_row + rows_per_rank
local_tensor = global_tensor[start_row:end_row].clone()
# Create DTensor with row-wise sharding
dtensor = DTensor.from_local(
local_tensor,
device_mesh=mesh_1d,
placements=[Shard(0)],
shape=global_tensor.shape,
stride=(4, 1),
)
# Create local tensor with column-wise sharding
cols_per_rank = global_tensor.shape[1] // self.world_size
start_col = self.rank * cols_per_rank
end_col = start_col + cols_per_rank
local_tensor_col = global_tensor[:, start_col:end_col].clone()
# Create DTensor with column-wise sharding
dtensor_col = DTensor.from_local(
local_tensor_col,
device_mesh=mesh_1d,
placements=[Shard(1)], # Column-wise sharding
shape=global_tensor.shape,
stride=(4, 1),
)
state_dict_to_save = {"dtensor": dtensor, "dtensor_col": dtensor_col}
dist_cp.save(
state_dict=state_dict_to_save,
storage_writer=dist_cp.HuggingFaceStorageWriter(
path=self.temp_dir, save_distributed=True
),
)
dist.barrier()
os.sync()
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(2)
def test_consolidate_to_one_file(self) -> None:
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
import safetensors
checkpoint_dir = self.temp_dir
output_dir = os.path.join(checkpoint_dir, "consolidated")
os.makedirs(output_dir, exist_ok=True)
self._create_d_tensors()
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
if self.rank == 0:
consolidate_safetensors_files(
checkpoint_dir,
output_dir,
fqn_to_index_mapping={"dtensor": 1, "dtensor_col": 1},
)
file_path = os.path.join(output_dir, "model-00001-of-00001.safetensors")
loaded_dict = safetensors.torch.load_file(file_path)
self.assertEqual(loaded_dict.keys(), {"dtensor", "dtensor_col"})
self.assertTrue(torch.equal(loaded_dict["dtensor"], global_tensor))
self.assertTrue(torch.equal(loaded_dict["dtensor_col"], global_tensor))
with open(os.path.join(output_dir, _metadata_fn)) as f:
metadata = json.load(f)
self.assertEqual(metadata["metadata"]["total_size"], 16 * 4 * 2)
self.assertEqual(
metadata["weight_map"],
{
"dtensor": "model-00001-of-00001.safetensors",
"dtensor_col": "model-00001-of-00001.safetensors",
},
)
dist.barrier()
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(2)
def test_consolidate_to_two_files(self):
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
import safetensors
checkpoint_dir = self.temp_dir
output_dir = os.path.join(checkpoint_dir, "consolidated")
os.makedirs(output_dir, exist_ok=True)
self._create_d_tensors()
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
if self.rank == 0:
fqn_to_index_mapping = {"dtensor": 1, "dtensor_col": 2}
consolidate_safetensors_files(
checkpoint_dir, output_dir, fqn_to_index_mapping=fqn_to_index_mapping
)
file1_path = os.path.join(output_dir, "model-00001-of-00002.safetensors")
file2_path = os.path.join(output_dir, "model-00002-of-00002.safetensors")
loaded_dict = safetensors.torch.load_file(file1_path)
self.assertEqual(loaded_dict.keys(), {"dtensor"})
self.assertTrue(torch.equal(loaded_dict["dtensor"], global_tensor))
loaded_dict_col = safetensors.torch.load_file(file2_path)
self.assertEqual(loaded_dict_col.keys(), {"dtensor_col"})
self.assertTrue(torch.equal(loaded_dict_col["dtensor_col"], global_tensor))
with open(os.path.join(output_dir, _metadata_fn)) as f:
metadata = json.load(f)
self.assertEqual(metadata["metadata"]["total_size"], 16 * 4 * 2)
self.assertEqual(
metadata["weight_map"],
{
"dtensor": "model-00001-of-00002.safetensors",
"dtensor_col": "model-00002-of-00002.safetensors",
},
)
dist.barrier()
def test_calculate_max_contiguous_elements_validations(self) -> None:
"""Test validation logic in _calculate_max_contiguous_elements function."""
# Test empty lists validation
with self.assertRaisesRegex(ValueError, "Input lists cannot be empty"):
_calculate_max_contiguous_elements([], [2, 3], [4, 5])
# Test mismatched list lengths validation
with self.assertRaisesRegex(
ValueError, "All input lists must have the same length"
):
_calculate_max_contiguous_elements([1], [2, 3], [4, 5])
# Test indices out of bounds validation
with self.assertRaisesRegex(
ValueError, "Index .* at dimension .* is out of bounds for sub-tensor shape"
):
_calculate_max_contiguous_elements(
[2, 1], [2, 3], [4, 5]
) # indices[0] >= sub_tensor_shape[0]
# Test sub-tensor dimensions exceeding tensor dimensions validation
with self.assertRaisesRegex(
ValueError,
"Sub-tensor dimension .* at position .* exceeds tensor dimension",
):
_calculate_max_contiguous_elements(
[1, 2], [2, 6], [4, 5]
) # sub_tensor_shape[1] > tensor_shape[1]
def test_calculate_max_contiguous_elements_valid_cases(self) -> None:
"""Test valid cases for _calculate_max_contiguous_elements function."""
# Test 1D case - simple remaining elements
result = _calculate_max_contiguous_elements([2], [5], [10])
self.assertEqual(result, 3) # 5 - 2 = 3 elements remaining
# Test 2D case - at start of row, can write complete rows
result = _calculate_max_contiguous_elements([1, 0], [3, 4], [6, 4])
self.assertEqual(result, 8) # 2 rows * 4 columns = 8 elements
# Test 2D case - middle of row, only remaining in current row
result = _calculate_max_contiguous_elements([1, 2], [3, 4], [6, 8])
self.assertEqual(result, 2) # 4 - 2 = 2 elements remaining in row
# Test 3D case - at start of 2D slice, can write complete slices
result = _calculate_max_contiguous_elements([1, 0, 0], [3, 2, 4], [5, 2, 4])
self.assertEqual(result, 16) # 2 slices * 2 rows * 4 columns = 16 elements
# Test edge case - at last position
result = _calculate_max_contiguous_elements([2, 3], [3, 4], [6, 8])
self.assertEqual(result, 1) # Only 1 element remaining
# Test case where sub-tensor spans full width
result = _calculate_max_contiguous_elements([0, 0], [2, 5], [4, 5])
self.assertEqual(result, 10) # 2 rows * 5 columns = 10 elements
# Test column-wise sharded case - sub-tensor doesn't span full width
# Even at start of row, can only write width of one row due to column sharding
result = _calculate_max_contiguous_elements([1, 0], [3, 2], [4, 8])
self.assertEqual(
result, 2
) # Only 2 elements (width of sub-tensor) can be written contiguously
# Test another column-wise sharded case - middle of tensor
result = _calculate_max_contiguous_elements([0, 0], [2, 3], [6, 10])
self.assertEqual(
result, 3
) # Only 3 elements (width of sub-tensor) can be written contiguously
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(2)
def test_consolidate_with_two_ranks(self):
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
import safetensors
checkpoint_dir = self.temp_dir
output_dir = os.path.join(checkpoint_dir, "consolidated")
os.makedirs(output_dir, exist_ok=True)
self._create_d_tensors()
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
fqn_to_index_mapping = {"dtensor": 1, "dtensor_col": 2}
consolidate_safetensors_files_on_every_rank(
checkpoint_dir, output_dir, fqn_to_index_mapping=fqn_to_index_mapping
)
file1_path = os.path.join(output_dir, "model-00001-of-00002.safetensors")
file2_path = os.path.join(output_dir, "model-00002-of-00002.safetensors")
loaded_dict = safetensors.torch.load_file(file1_path)
self.assertEqual(loaded_dict.keys(), {"dtensor"})
self.assertTrue(torch.equal(loaded_dict["dtensor"], global_tensor))
loaded_dict_col = safetensors.torch.load_file(file2_path)
self.assertEqual(loaded_dict_col.keys(), {"dtensor_col"})
self.assertTrue(torch.equal(loaded_dict_col["dtensor_col"], global_tensor))
dist.barrier()
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(2)
def test_consolidate_one_file_with_two_ranks(self):
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
import safetensors
# this is testing the case where one rank has no data to write
# and the other rank has two tensors to write.
# the rank with no work should wait properly for the other rank to finish
checkpoint_dir = self.temp_dir
output_dir = os.path.join(checkpoint_dir, "consolidated")
os.makedirs(output_dir, exist_ok=True)
self._create_d_tensors()
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
fqn_to_index_mapping = {"dtensor": 1, "dtensor_col": 1}
consolidate_safetensors_files_on_every_rank(
checkpoint_dir, output_dir, fqn_to_index_mapping=fqn_to_index_mapping
)
file1_path = os.path.join(output_dir, "model-00001-of-00001.safetensors")
loaded_dict = safetensors.torch.load_file(file1_path)
self.assertEqual(loaded_dict.keys(), {"dtensor", "dtensor_col"})
self.assertTrue(torch.equal(loaded_dict["dtensor"], global_tensor))
self.assertTrue(torch.equal(loaded_dict["dtensor_col"], global_tensor))
def test_write_sub_tensor_to_file_optimized(self) -> None:
"""Test the _write_sub_tensor_to_file_optimized function with various scenarios."""
# Test case 1: Simple 2D tensor, row-wise sharding
full_tensor_shape = [4, 6]
sub_tensor_shape = [2, 6]
sub_tensor_offsets = [1, 0]
element_size = 4 # float32
# Create test data
sub_tensor_data = torch.arange(12, dtype=torch.float32)
sub_tensor_bytes = sub_tensor_data.numpy().tobytes()
# Create full tensor buffer
full_tensor_buffer = bytearray(4 * 6 * element_size)
full_tensor_mv = memoryview(full_tensor_buffer)
# Call the function
_write_sub_tensor_to_file_optimized(
full_tensor_mv,
sub_tensor_bytes,
element_size,
full_tensor_shape,
sub_tensor_offsets,
sub_tensor_shape,
)
# Verify the result
result_tensor = torch.frombuffer(full_tensor_buffer, dtype=torch.float32).view(
4, 6
)
expected_tensor = torch.zeros(4, 6, dtype=torch.float32)
expected_tensor[1:3, :] = sub_tensor_data.view(2, 6)
self.assertTrue(torch.equal(result_tensor, expected_tensor))
# Test case 2: Column-wise sharding
full_tensor_shape = [3, 8]
sub_tensor_shape = [3, 2]
sub_tensor_offsets = [0, 3]
sub_tensor_data = torch.arange(6, dtype=torch.float32)
sub_tensor_bytes = sub_tensor_data.numpy().tobytes()
full_tensor_buffer = bytearray(3 * 8 * element_size)
full_tensor_mv = memoryview(full_tensor_buffer)
_write_sub_tensor_to_file_optimized(
full_tensor_mv,
sub_tensor_bytes,
element_size,
full_tensor_shape,
sub_tensor_offsets,
sub_tensor_shape,
)
result_tensor = torch.frombuffer(full_tensor_buffer, dtype=torch.float32).view(
3, 8
)
expected_tensor = torch.zeros(3, 8, dtype=torch.float32)
expected_tensor[:, 3:5] = sub_tensor_data.view(3, 2)
self.assertTrue(torch.equal(result_tensor, expected_tensor))
if __name__ == "__main__":
run_tests()
| TestConsolidateHFSafeTensors |
python | django__django | django/utils/archive.py | {
"start": 6781,
"end": 8301
} | class ____(BaseArchive):
def __init__(self, file):
self._archive = zipfile.ZipFile(file)
def list(self, *args, **kwargs):
self._archive.printdir(*args, **kwargs)
def extract(self, to_path):
namelist = self._archive.namelist()
leading = self.has_leading_dir(namelist)
for name in namelist:
data = self._archive.read(name)
info = self._archive.getinfo(name)
if leading:
name = self.split_leading_dir(name)[1]
if not name:
continue
filename = self.target_filename(to_path, name)
if name.endswith(("/", "\\")):
# A directory
os.makedirs(filename, exist_ok=True)
else:
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(filename, "wb") as outfile:
outfile.write(data)
# Convert ZipInfo.external_attr to mode
mode = info.external_attr >> 16
self._copy_permissions(mode, filename)
def close(self):
self._archive.close()
extension_map = dict.fromkeys(
(
".tar",
".tar.bz2",
".tbz2",
".tbz",
".tz2",
".tar.gz",
".tgz",
".taz",
".tar.lzma",
".tlz",
".tar.xz",
".txz",
),
TarArchive,
)
extension_map[".zip"] = ZipArchive
| ZipArchive |
python | django__django | tests/model_forms/tests.py | {
"start": 29508,
"end": 32019
} | class ____(SimpleTestCase):
def test_widget_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form["name"]),
'<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" '
"required></textarea>",
)
self.assertHTMLEqual(
str(form["url"]),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" '
"required>",
)
self.assertHTMLEqual(
str(form["slug"]),
'<input id="id_slug" type="text" name="slug" maxlength="20" '
'aria-describedby="id_slug_helptext" required>',
)
def test_label_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form["name"].label_tag()),
'<label for="id_name">Title:</label>',
)
self.assertHTMLEqual(
str(form["url"].label_tag()),
'<label for="id_url">The URL:</label>',
)
self.assertHTMLEqual(
str(form["slug"].label_tag()),
'<label for="id_slug">Slug:</label>',
)
self.assertHTMLEqual(
form["name"].legend_tag(),
"<legend>Title:</legend>",
)
self.assertHTMLEqual(
form["url"].legend_tag(),
"<legend>The URL:</legend>",
)
self.assertHTMLEqual(
form["slug"].legend_tag(),
"<legend>Slug:</legend>",
)
def test_help_text_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertEqual(
form["slug"].help_text,
"Watch out! Letters, numbers, underscores and hyphens only.",
)
def test_error_messages_overrides(self):
form = FieldOverridesByFormMetaForm(
data={
"name": "Category",
"url": "http://www.example.com/category/",
"slug": "!%#*@",
}
)
form.full_clean()
error = [
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!",
]
self.assertEqual(form.errors, {"slug": error})
def test_field_type_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertIs(Category._meta.get_field("url").__class__, models.CharField)
self.assertIsInstance(form.fields["url"], forms.URLField)
| TestFieldOverridesByFormMeta |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/grant_types/refresh_token.py | {
"start": 292,
"end": 1035
} | class ____(GrantTypeBase):
def __init__(self, request_validator=None, **kwargs):
self.proxy_target = OAuth2RefreshTokenGrant(
request_validator=request_validator, **kwargs)
self.register_token_modifier(self.add_id_token)
def add_id_token(self, token, token_handler, request):
"""
Construct an initial version of id_token, and let the
request_validator sign or encrypt it.
The authorization_code version of this method is used to
retrieve the nonce accordingly to the code storage.
"""
if not self.request_validator.refresh_id_token(request):
return token
return super().add_id_token(token, token_handler, request)
| RefreshTokenGrant |
python | getsentry__sentry | tests/sentry/utils/sdk_crashes/test_sdk_crash_detection_cocoa.py | {
"start": 2943,
"end": 20876
} | class ____(BaseSDKCrashDetectionMixin):
def test_unhandled_is_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(get_crash_event(), True, mock_sdk_crash_reporter)
def test_handled_is_not_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(get_crash_event(handled=True), False, mock_sdk_crash_reporter)
def test_wrong_function_not_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(get_crash_event(function="Senry"), False, mock_sdk_crash_reporter)
def test_beta_sdk_version_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
event = get_crash_event()
set_path(event, "sdk", "version", value="8.2.1-beta.1")
self.execute_test(event, True, mock_sdk_crash_reporter)
def test_too_low_min_sdk_version_not_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
event = get_crash_event()
set_path(event, "sdk", "version", value="8.1.1")
self.execute_test(event, False, mock_sdk_crash_reporter)
def test_invalid_sdk_version_not_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
event = get_crash_event()
set_path(event, "sdk", "version", value="foo")
self.execute_test(event, False, mock_sdk_crash_reporter)
def test_no_exception_not_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
self.execute_test(get_crash_event(exception=[]), False, mock_sdk_crash_reporter)
def test_sdk_crash_detected_event_is_not_reported(
self, mock_sdk_crash_reporter: MagicMock
) -> None:
event = get_crash_event()
set_path(
event,
"contexts",
"sdk_crash_detection",
value={
"original_project_id": 1234,
"original_event_id": 1234,
},
)
self.execute_test(event, False, mock_sdk_crash_reporter)
def test_cocoa_sdk_crash_detection_without_context(
self, mock_sdk_crash_reporter: MagicMock
) -> None:
event = get_crash_event(function="-[SentryHub getScope]")
event["contexts"] = {}
self.execute_test(event, True, mock_sdk_crash_reporter)
def test_metric_kit_crash_is_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
"""
The frames stem from a real world crash caused by our MetricKit integration.
All data was anonymized.
"""
frames = [
{
"function": "_dispatch_workloop_worker_thread",
"package": "/usr/lib/system/libdispatch.dylib",
"in_app": False,
},
{
"function": "_dispatch_lane_serial_drain$VARIANT$armv81",
"package": "/usr/lib/system/libdispatch.dylib",
"in_app": False,
},
{
"function": "__44-[MXMetricManager deliverDiagnosticPayload:]_block_invoke",
"package": "/System/Library/Frameworks/MetricKit.framework/MetricKit",
"in_app": False,
},
{
"function": "Sequence.forEach",
"raw_function": "specialized Sequence.forEach((A.Element))",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "<compiler-generated>",
"abs_path": "<compiler-generated>",
"in_app": True,
},
{
"function": "SentryMXManager.didReceive",
"raw_function": "closure #1 (MXDiagnosticPayload) in SentryMXManager.didReceive([MXDiagnosticPayload])",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryMXManager.swift",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Swift/MetricKit/SentryMXManager.swift",
"in_app": True,
},
{
"function": "Sequence.forEach",
"raw_function": "specialized Sequence.forEach((A.Element))",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "<compiler-generated>",
"abs_path": "<compiler-generated>",
"in_app": True,
},
{
"function": "SentryMXManager.didReceive",
"raw_function": "closure #1 (SentryMXCallStackTree) in closure #3 (MXCPUExceptionDiagnostic) in closure #1 (MXDiagnosticPayload) in SentryMXManager.didReceive([MXDiagnosticPayload])",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryMXManager.swift",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Swift/MetricKit/SentryMXManager.swift",
"in_app": True,
},
{
"function": "-[SentryMetricKitIntegration captureEventNotPerThread:params:]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryMetricKitIntegration.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentryMetricKitIntegration.m",
"in_app": False,
},
{
"function": "+[SentrySDK captureEvent:]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentrySDK.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentrySDK.m",
"in_app": False,
},
{
"function": "-[SentryFileManager readAppStateFrom:]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryFileManager.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentryFileManager.m",
"in_app": False,
},
{
"function": "+[SentrySerialization appStateWithData:]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentrySerialization.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentrySerialization.m",
"in_app": False,
},
{
"function": "-[SentryAppState initWithJSONObject:]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryAppState.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentryAppState.m",
"in_app": False,
},
{
"function": "+[NSDate(SentryExtras) sentry_fromIso8601String:]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "NSDate+SentryExtras.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/Consumer/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/NSDate+SentryExtras.m",
"in_app": True,
},
{
"function": "-[NSDateFormatter getObjectValue:forString:errorDescription:]",
"package": "/System/Library/Frameworks/Foundation.framework/Foundation",
"in_app": False,
},
{
"function": "CFDateFormatterGetAbsoluteTimeFromString",
"package": "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
"in_app": False,
},
{
"function": "__cficu_ucal_clear",
"package": "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
"in_app": False,
},
{
"function": "icu::Calendar::clear",
"raw_function": "icu::Calendar::clear()",
"package": "/usr/lib/libicucore.A.dylib",
"in_app": False,
},
]
event = get_crash_event_with_frames(frames)
self.execute_test(event, True, mock_sdk_crash_reporter)
reported_event_data = mock_sdk_crash_reporter.report.call_args.args[0]
actual_frames = get_path(
reported_event_data, "exception", "values", -1, "stacktrace", "frames"
)
assert actual_frames == [
{
"function": "_dispatch_workloop_worker_thread",
"package": "/usr/lib/system/libdispatch.dylib",
"in_app": False,
},
{
"function": "_dispatch_lane_serial_drain$VARIANT$armv81",
"package": "/usr/lib/system/libdispatch.dylib",
"in_app": False,
},
{
"function": "__44-[MXMetricManager deliverDiagnosticPayload:]_block_invoke",
"package": "/System/Library/Frameworks/MetricKit.framework/MetricKit",
"in_app": False,
},
{
"function": "SentryMXManager.didReceive",
"raw_function": "closure #1 (MXDiagnosticPayload) in SentryMXManager.didReceive([MXDiagnosticPayload])",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "SentryMXManager.didReceive",
"raw_function": "closure #1 (SentryMXCallStackTree) in closure #3 (MXCPUExceptionDiagnostic) in closure #1 (MXDiagnosticPayload) in SentryMXManager.didReceive([MXDiagnosticPayload])",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "-[SentryMetricKitIntegration captureEventNotPerThread:params:]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "+[SentrySDK captureEvent:]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "-[SentryFileManager readAppStateFrom:]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "+[SentrySerialization appStateWithData:]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "-[SentryAppState initWithJSONObject:]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "+[NSDate(SentryExtras) sentry_fromIso8601String:]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "-[NSDateFormatter getObjectValue:forString:errorDescription:]",
"package": "/System/Library/Frameworks/Foundation.framework/Foundation",
"in_app": False,
},
{
"function": "CFDateFormatterGetAbsoluteTimeFromString",
"package": "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
"in_app": False,
},
{
"function": "__cficu_ucal_clear",
"package": "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
"in_app": False,
},
{
"function": "icu::Calendar::clear",
"raw_function": "icu::Calendar::clear()",
"package": "/usr/lib/libicucore.A.dylib",
"in_app": False,
},
]
def test_thread_inspector_crash_is_detected(self, mock_sdk_crash_reporter: MagicMock) -> None:
"""
The frames stem from a real world crash caused by our MetricKit integration.
All data was anonymized.
"""
frames = [
{
"function": "_pthread_start",
"package": "/usr/lib/system/libdispatch.dylib",
"in_app": False,
},
{
"function": "__NSThread__start__",
"package": "/System/Library/Frameworks/Foundation.framework/Foundation",
"in_app": False,
},
{
"function": "-[SentryANRTracker detectANRs]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryANRTracker.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/SentryApp/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentryANRTracker.m",
"in_app": False,
},
{
"function": "-[SentryANRTracker ANRDetected]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryANRTracker.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/SentryApp/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentryANRTracker.m",
"in_app": False,
},
{
"function": "-[SentryANRTrackingIntegration anrDetected]",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryANRTrackingIntegration.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/SentryApp/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentryANRTrackingIntegration.m",
"in_app": False,
},
{
"function": "getStackEntriesFromThread",
"package": "/private/var/containers/Bundle/Application/CA061D22-C965-4C50-B383-59D8F14A6DDF/Sentry.app/Sentry",
"filename": "SentryThreadInspector.m",
"abs_path": "/Users/sentry/Library/Developer/Xcode/DerivedData/SentryApp/SourcePackages/checkouts/sentry-cocoa/Sources/Sentry/SentryThreadInspector.m",
"in_app": True,
},
]
event = get_crash_event_with_frames(frames)
self.execute_test(event, True, mock_sdk_crash_reporter)
reported_event_data = mock_sdk_crash_reporter.report.call_args.args[0]
actual_frames = get_path(
reported_event_data, "exception", "values", -1, "stacktrace", "frames"
)
assert actual_frames == [
{
"function": "_pthread_start",
"package": "/usr/lib/system/libdispatch.dylib",
"in_app": False,
},
{
"function": "__NSThread__start__",
"package": "/System/Library/Frameworks/Foundation.framework/Foundation",
"in_app": False,
},
{
"function": "-[SentryANRTracker detectANRs]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "-[SentryANRTracker ANRDetected]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "-[SentryANRTrackingIntegration anrDetected]",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
{
"function": "getStackEntriesFromThread",
"package": "Sentry.framework",
"abs_path": "Sentry.framework",
"filename": "Sentry.framework",
"in_app": True,
},
]
@patch("sentry.utils.sdk_crashes.sdk_crash_detection.sdk_crash_detection.sdk_crash_reporter")
| CococaSDKTestMixin |
python | networkx__networkx | networkx/classes/tests/test_coreviews.py | {
"start": 6192,
"end": 7833
} | class ____:
# node->nbr->data
def setup_method(self):
dd = {"color": "blue", "weight": 1.2}
self.nd = {0: dd, 1: {}, 2: {"color": 1}}
self.s = {3: self.nd, 0: {}, 1: {}, 2: {3: {"color": 1}}}
self.p = {3: {}, 0: {3: dd}, 1: {0: {}}, 2: {1: {"color": 1}}}
self.adjview = nx.classes.coreviews.UnionAdjacency(self.s, self.p)
def test_pickle(self):
view = self.adjview
pview = pickle.loads(pickle.dumps(view, -1))
assert view == pview
assert view.__slots__ == pview.__slots__
def test_len(self):
assert len(self.adjview) == len(self.s)
def test_iter(self):
assert sorted(self.adjview) == sorted(self.s)
def test_getitem(self):
assert self.adjview[1] is not self.s[1]
assert self.adjview[3][0] is self.adjview[0][3]
assert self.adjview[2][3]["color"] == 1
pytest.raises(KeyError, self.adjview.__getitem__, 4)
def test_copy(self):
avcopy = self.adjview.copy()
assert avcopy[0] == self.adjview[0]
assert avcopy[0] is not self.adjview[0]
avcopy[2][3]["ht"] = 4
assert avcopy[2] != self.adjview[2]
self.adjview[2][3]["ht"] = 4
assert avcopy[2] == self.adjview[2]
del self.adjview[2][3]["ht"]
assert not hasattr(self.adjview, "__setitem__")
def test_str(self):
out = str(dict(self.adjview))
assert str(self.adjview) == out
def test_repr(self):
clsname = self.adjview.__class__.__name__
out = f"{clsname}({self.s}, {self.p})"
assert repr(self.adjview) == out
| TestUnionAdjacency |
python | ApeWorX__ape | tests/functional/test_plugins.py | {
"start": 15248,
"end": 17379
} | class ____:
@parametrize_pip_cmd
def test_prepare_package_manager_args_with_python_location(self, pip_command, mock_python_path):
metadata = PluginMetadata(name="test", pip_command=pip_command)
args = metadata.prepare_package_manager_args("install", python_location=mock_python_path)
expected = (
[*pip_command, "install", "--python", mock_python_path]
if pip_command[0] == "uv"
else [*pip_command, "--python", mock_python_path, "install"]
)
assert args == expected
@parametrize_pip_cmd
def test_prepare_package_manager_args_with_extra_args(self, pip_command):
metadata = PluginMetadata(name="test", pip_command=pip_command)
extra = ["--quiet", "--no-deps"]
args = metadata.prepare_package_manager_args("install", extra_args=extra)
expected = pip_command + ["install"] + extra
assert args == expected
@parametrize_pip_cmd
def test_prepare_package_manager_args_with_both_options(self, pip_command, mock_python_path):
metadata = PluginMetadata(name="test", pip_command=pip_command)
extra = ["--quiet", "--no-deps"]
args = metadata.prepare_package_manager_args(
"install", python_location=mock_python_path, extra_args=extra
)
expected = (
[*pip_command, "install", "--python", mock_python_path] + extra
if pip_command[0] == "uv"
else [*pip_command, "--python", mock_python_path, "install"] + extra
)
assert args == expected
@parametrize_pip_cmd
def test_prepare_package_manager_args_with_uninstall_verb(self, pip_command, mock_python_path):
metadata = PluginMetadata(name="test", pip_command=pip_command)
args = metadata.prepare_package_manager_args("uninstall", python_location=mock_python_path)
expected = (
[*pip_command, "uninstall", "--python", mock_python_path]
if pip_command[0] == "uv"
else [*pip_command, "--python", mock_python_path, "uninstall"]
)
assert args == expected
| TestBuildPipArgs |
python | eth-brownie__brownie | brownie/utils/_color.py | {
"start": 2272,
"end": 8088
} | class ____:
__cache__: Final[Dict[Optional[str], str]] = {}
def __call__(self, color_str: Optional[str] = None) -> str:
if not CONFIG.settings["console"]["show_colors"]:
return ""
try:
return Color.__cache__[color_str]
except KeyError:
if not color_str:
return f"{BASE}m"
try:
if " " in color_str:
modifier, color_str = color_str.split()
color = f"{BASE}{MODIFIERS[modifier]}{COLORS[color_str]}m"
else:
color = f"{BASE}{COLORS[color_str]}m"
except (KeyError, ValueError):
color = f"{BASE}m"
Color.__cache__[color_str] = color
return color
def __str__(self):
return f"{BASE}m"
# format dicts for console printing
def pretty_dict(self, value: Dict, _indent: int = 0) -> str:
text = ""
if not _indent:
text = "{"
_indent += 4
for c, k in enumerate(sorted(value.keys(), key=str)):
v = value[k]
if c:
text += ","
s = "'" if isinstance(k, str) else ""
text += f"\n{' '*_indent}{s}{k}{s}: "
if isinstance(v, dict):
text += "{" + self.pretty_dict(v, _indent)
continue
if isinstance(v, (list, tuple, set)):
text += str(v)[0] + self.pretty_sequence(v, _indent) # type: ignore [arg-type]
continue
text += self._write(v)
_indent -= 4
text += f"\n{' '*_indent}}}"
return text
# format lists for console printing
def pretty_sequence(self, value: Sequence, _indent: int = 0) -> str:
text = ""
string = str(value)
start_bracket, stop_bracket = string[0], string[-1]
if not _indent:
text += f"{start_bracket}"
if value and not [i for i in value if not isinstance(i, dict)]:
# list of dicts
text += f"\n{' '*(_indent+4)}{{"
text += f",\n{' '*(_indent+4)}{{".join(self.pretty_dict(i, _indent + 4) for i in value)
text += f"\n{' '*_indent}{stop_bracket}"
elif value and not [i for i in value if not isinstance(i, str) or len(i) != 64]:
# list of bytes32 hexstrings (stack trace)
text += ", ".join(f"\n{' '*(_indent+4)}{self._write(i)}" for i in value)
text += f"\n{' '*_indent}{stop_bracket}"
else:
# all other cases
text += ", ".join(self._write(i) for i in value)
text += stop_bracket
return text
def _write(self, value: Any) -> str:
s = '"' if isinstance(value, str) else ""
return f"{s}{value}{s}"
def format_tb(
self,
exc: BaseException,
filename: Optional[str] = None,
start: Optional[int] = None,
stop: Optional[int] = None,
) -> str:
if isinstance(exc, SyntaxError) and exc.text is not None:
return self.format_syntaxerror(exc)
tb = [i.replace("./", "") for i in traceback.format_tb(exc.__traceback__)]
if filename and not CONFIG.argv["tb"]:
try:
start = tb.index(next(i for i in tb if filename in i))
stop = tb.index(next(i for i in tb[::-1] if filename in i)) + 1
except Exception:
pass
tb = tb[start:stop]
for i in range(len(tb)):
info, code = tb[i].split("\n")[:2]
info = info.replace(base_path, ".")
info_lines = [x.strip(",") for x in info.strip().split(" ")]
if "site-packages/" in info_lines[1]:
info_lines[1] = '"' + info_lines[1].split("site-packages/")[1]
tb[i] = (
f" {dark_white}File {bright_magenta}{info_lines[1]}"
f"{dark_white}, line {bright_blue}{info_lines[3]}"
f"{dark_white}, in {bright_cyan}{info_lines[5]}{BASE}m"
)
if code:
tb[i] += f"\n{code}"
msg = str(exc)
if isinstance(exc, VyperException):
# apply syntax highlight and remove traceback on vyper exceptions
msg = self.highlight(msg)
if not CONFIG.argv["tb"]:
tb.clear()
from brownie.exceptions import CompilerError
if isinstance(exc, CompilerError):
# apply syntax highlighting on solc exceptions
if exc.compiler == "solc":
msg = self.highlight(msg, SolidityLexer())
else:
msg = self.highlight(msg)
if not CONFIG.argv["tb"]:
tb.clear()
tb.append(f"{bright_red}{type(exc).__name__}{BASE}m: {msg}")
return "\n".join(tb)
def format_syntaxerror(self, exc: SyntaxError) -> str:
offset = exc.offset + len(exc.text.lstrip()) - len(exc.text) + 3 # type: ignore
exc.filename = exc.filename.replace(base_path, ".") # type: ignore [union-attr]
return (
f' {dark_white}File "{bright_magenta}{exc.filename}' # type: ignore [union-attr]
f'{dark_white}", line {bright_blue}{exc.lineno}'
f"{dark_white},\n{BASE}m {exc.text.strip()}\n" # type: ignore [union-attr]
f"{' '*offset}^\n{bright_red}SyntaxError{BASE}m: {exc.msg}"
)
def highlight(self, text, lexer=PythonLexer()):
"""
Apply syntax highlighting to a string.
"""
return pygments.highlight(text, lexer, formatter)
def notify(type_: NotifyType, msg):
"""Prepends a message with a colored tag and outputs it to the console."""
color = Color()
print(f"{color(NOTIFY_COLORS[type_])}{type_}{BASE}m: {msg}")
| Color |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.