language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | rq__rq | tests/test_queue.py | {
"start": 955,
"end": 37146
} | class ____(RQTestCase):
def test_create_queue(self):
"""Creating queues."""
q = Queue('my-queue', connection=self.connection)
self.assertEqual(q.name, 'my-queue')
self.assertEqual(str(q), '<Queue my-queue>')
def test_create_queue_with_serializer(self):
"""Creating queues with serializer."""
# Test using json serializer
q = Queue('queue-with-serializer', connection=self.connection, serializer=json)
self.assertEqual(q.name, 'queue-with-serializer')
self.assertEqual(str(q), '<Queue queue-with-serializer>')
self.assertIsNotNone(q.serializer)
def test_create_default_queue(self):
"""Instantiating the default queue."""
q = Queue(connection=self.connection)
self.assertEqual(q.name, 'default')
def test_equality(self):
"""Mathematical equality of queues."""
q1 = Queue('foo', connection=self.connection)
q2 = Queue('foo', connection=self.connection)
q3 = Queue('bar', connection=self.connection)
self.assertEqual(q1, q2)
self.assertEqual(q2, q1)
self.assertNotEqual(q1, q3)
self.assertNotEqual(q2, q3)
self.assertGreater(q1, q3)
self.assertRaises(TypeError, lambda: q1 == 'some string')
self.assertRaises(TypeError, lambda: q1 < 'some string')
def test_empty_queue(self):
"""Emptying queues."""
q = Queue('example', connection=self.connection)
self.connection.rpush('rq:queue:example', 'foo')
self.connection.rpush('rq:queue:example', 'bar')
self.assertEqual(q.is_empty(), False)
q.empty()
self.assertEqual(q.is_empty(), True)
self.assertIsNone(self.connection.lpop('rq:queue:example'))
def test_empty_removes_jobs(self):
"""Emptying a queue deletes the associated job objects"""
q = Queue('example', connection=self.connection)
job = q.enqueue(say_hello)
self.assertTrue(Job.exists(job.id, connection=self.connection))
q.empty()
self.assertFalse(Job.exists(job.id, connection=self.connection))
def test_queue_is_empty(self):
"""Detecting empty queues."""
q = Queue('example', connection=self.connection)
self.assertEqual(q.is_empty(), True)
self.connection.rpush('rq:queue:example', 'sentinel message')
self.assertEqual(q.is_empty(), False)
def test_queue_delete(self):
"""Test queue.delete properly removes queue"""
q = Queue('example', connection=self.connection)
job = q.enqueue(say_hello)
job2 = q.enqueue(say_hello)
self.assertEqual(2, len(q.get_job_ids()))
q.delete()
self.assertEqual(0, len(q.get_job_ids()))
self.assertEqual(False, self.connection.exists(job.key))
self.assertEqual(False, self.connection.exists(job2.key))
self.assertEqual(0, len(self.connection.smembers(Queue.redis_queues_keys)))
self.assertEqual(False, self.connection.exists(q.key))
def test_queue_delete_but_keep_jobs(self):
"""Test queue.delete properly removes queue but keeps the job keys in the redis store"""
q = Queue('example', connection=self.connection)
job = q.enqueue(say_hello)
job2 = q.enqueue(say_hello)
self.assertEqual(2, len(q.get_job_ids()))
q.delete(delete_jobs=False)
self.assertEqual(0, len(q.get_job_ids()))
self.assertEqual(True, self.connection.exists(job.key))
self.assertEqual(True, self.connection.exists(job2.key))
self.assertEqual(0, len(self.connection.smembers(Queue.redis_queues_keys)))
self.assertEqual(False, self.connection.exists(q.key))
def test_position(self):
"""Test queue.delete properly removes queue but keeps the job keys in the redis store"""
q = Queue('example', connection=self.connection)
job = q.enqueue(say_hello)
job2 = q.enqueue(say_hello)
job3 = q.enqueue(say_hello)
self.assertEqual(0, q.get_job_position(job.id))
self.assertEqual(1, q.get_job_position(job2.id))
self.assertEqual(2, q.get_job_position(job3))
self.assertEqual(None, q.get_job_position('no_real_job'))
def test_remove(self):
"""Ensure queue.remove properly removes Job from queue."""
q = Queue('example', serializer=JSONSerializer, connection=self.connection)
job = q.enqueue(say_hello)
self.assertIn(job.id, q.job_ids)
q.remove(job)
self.assertNotIn(job.id, q.job_ids)
job = q.enqueue(say_hello)
self.assertIn(job.id, q.job_ids)
q.remove(job.id)
self.assertNotIn(job.id, q.job_ids)
def test_jobs(self):
"""Getting jobs out of a queue."""
q = Queue('example', connection=self.connection)
self.assertEqual(q.jobs, [])
job = q.enqueue(say_hello)
self.assertEqual(q.jobs, [job])
# Deleting job removes it from queue
job.delete()
self.assertEqual(q.job_ids, [])
def test_compact(self):
"""Queue.compact() removes non-existing jobs."""
q = Queue(connection=self.connection)
q.enqueue(say_hello, 'Alice')
q.enqueue(say_hello, 'Charlie')
self.connection.lpush(q.key, '1', '2')
self.assertEqual(q.count, 4)
self.assertEqual(len(q), 4)
q.compact()
self.assertEqual(q.count, 2)
self.assertEqual(len(q), 2)
def test_enqueue(self):
"""Enqueueing job onto queues."""
q = Queue(connection=self.connection)
self.assertEqual(q.is_empty(), True)
# say_hello spec holds which queue this is sent to
job = q.enqueue(say_hello, 'Nick', foo='bar')
job_id = job.id
self.assertEqual(job.origin, q.name)
# Inspect data inside Redis
q_key = 'rq:queue:default'
self.assertEqual(self.connection.llen(q_key), 1)
self.assertEqual(self.connection.lrange(q_key, 0, -1)[0].decode('ascii'), job_id)
def test_enqueue_sets_metadata(self):
"""Enqueueing job onto queues modifies meta data."""
q = Queue(connection=self.connection)
job = Job.create(func=say_hello, args=('Nick',), kwargs=dict(foo='bar'), connection=self.connection)
# Preconditions
self.assertIsNone(job.enqueued_at)
# Action
q.enqueue_job(job)
# Postconditions
self.assertIsNotNone(job.enqueued_at)
def test_pop_job_id(self):
"""Popping job IDs from queues."""
# Set up
q = Queue(connection=self.connection)
uuid = '112188ae-4e9d-4a5b-a5b3-f26f2cb054da'
q.push_job_id(uuid)
# Pop it off the queue...
self.assertEqual(q.count, 1)
self.assertEqual(q.pop_job_id(), uuid)
# ...and assert the queue count when down
self.assertEqual(q.count, 0)
def test_dequeue_any(self):
"""Fetching work from any given queue."""
fooq = Queue('foo', connection=self.connection)
barq = Queue('bar', connection=self.connection)
self.assertRaises(ValueError, Queue.dequeue_any, [fooq, barq], timeout=0, connection=self.connection)
self.assertEqual(Queue.dequeue_any([fooq, barq], connection=self.connection, timeout=None), None)
# Enqueue a single item
barq.enqueue(say_hello)
job, queue = Queue.dequeue_any([fooq, barq], connection=self.connection, timeout=None)
self.assertEqual(job.func, say_hello)
self.assertEqual(queue, barq)
# Enqueue items on both queues
barq.enqueue(say_hello, 'for Bar')
fooq.enqueue(say_hello, 'for Foo')
job, queue = Queue.dequeue_any([fooq, barq], connection=self.connection, timeout=None)
self.assertEqual(queue, fooq)
self.assertEqual(job.func, say_hello)
self.assertEqual(job.origin, fooq.name)
self.assertEqual(job.args[0], 'for Foo', 'Foo should be dequeued first.')
job, queue = Queue.dequeue_any([fooq, barq], connection=self.connection, timeout=None)
self.assertEqual(queue, barq)
self.assertEqual(job.func, say_hello)
self.assertEqual(job.origin, barq.name)
self.assertEqual(job.args[0], 'for Bar', 'Bar should be dequeued second.')
@min_redis_version((6, 2, 0))
def test_dequeue_any_reliable(self):
"""Dequeueing job from a single queue moves job to intermediate queue."""
foo_queue = Queue('foo', connection=self.connection)
job_1 = foo_queue.enqueue(say_hello)
self.assertRaises(ValueError, Queue.dequeue_any, [foo_queue], timeout=0, connection=self.connection)
# Job ID is not in intermediate queue
self.assertIsNone(self.connection.lpos(foo_queue.intermediate_queue_key, job_1.id))
job, queue = Queue.dequeue_any([foo_queue], timeout=None, connection=self.connection)
self.assertEqual(queue, foo_queue)
self.assertEqual(job.func, say_hello)
# After job is dequeued, the job ID is in the intermediate queue
self.assertEqual(self.connection.lpos(foo_queue.intermediate_queue_key, job.id), 0)
# Test the blocking version
foo_queue.enqueue(say_hello)
job, queue = Queue.dequeue_any([foo_queue], timeout=1, connection=self.connection)
self.assertEqual(queue, foo_queue)
self.assertEqual(job.func, say_hello)
# After job is dequeued, the job ID is in the intermediate queue
self.assertEqual(self.connection.lpos(foo_queue.intermediate_queue_key, job.id), 1)
@min_redis_version((6, 2, 0))
def test_intermediate_queue(self):
"""Job should be stuck in intermediate queue if execution fails after dequeued."""
queue = Queue('foo', connection=self.connection)
job = queue.enqueue(say_hello)
# If job execution fails after it's dequeued, job should be in the intermediate queue
# # and it's status is still QUEUED
with patch.object(Worker, 'execute_job'):
# mocked.execute_job.side_effect = Exception()
worker = Worker(queue, connection=self.connection)
worker.work(burst=True)
# Job status is still QUEUED even though it's already dequeued
self.assertEqual(job.get_status(refresh=True), JobStatus.QUEUED)
self.assertNotIn(job.id, queue.get_job_ids())
self.assertIsNotNone(self.connection.lpos(queue.intermediate_queue_key, job.id))
def test_dequeue_any_ignores_nonexisting_jobs(self):
"""Dequeuing (from any queue) silently ignores non-existing jobs."""
q = Queue('low', connection=self.connection)
uuid = '49f205ab-8ea3-47dd-a1b5-bfa186870fc8'
q.push_job_id(uuid)
# Dequeue simply ignores the missing job and returns None
self.assertEqual(q.count, 1)
self.assertEqual(
Queue.dequeue_any(
[Queue(connection=self.connection), Queue('low', connection=self.connection)],
timeout=None,
connection=self.connection,
),
None,
)
self.assertEqual(q.count, 0)
def test_enqueue_with_ttl(self):
"""Negative TTL value is not allowed"""
queue = Queue(connection=self.connection)
self.assertRaises(ValueError, queue.enqueue, echo, 1, ttl=0)
self.assertRaises(ValueError, queue.enqueue, echo, 1, ttl=-1)
def test_enqueue_sets_status(self):
"""Enqueueing a job sets its status to "queued"."""
q = Queue(connection=self.connection)
job = q.enqueue(say_hello)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
def test_enqueue_meta_arg(self):
"""enQueue(connection=self.connection) can set the job.meta contents."""
q = Queue(connection=self.connection)
job = q.enqueue(say_hello, meta={'foo': 'bar', 'baz': 42})
self.assertEqual(job.meta['foo'], 'bar')
self.assertEqual(job.meta['baz'], 42)
def test_enqueue_with_failure_ttl(self):
"""enQueue(connection=self.connection) properly sets job.failure_ttl"""
q = Queue(connection=self.connection)
job = q.enqueue(say_hello, failure_ttl=10)
job.refresh()
self.assertEqual(job.failure_ttl, 10)
def test_job_timeout(self):
"""Timeout can be passed via job_timeout argument"""
queue = Queue(connection=self.connection)
job = queue.enqueue(echo, 1, job_timeout=15)
self.assertEqual(job.timeout, 15)
# Not passing job_timeout will use queue._default_timeout
job = queue.enqueue(echo, 1)
self.assertEqual(job.timeout, queue._default_timeout)
# job_timeout = 0 is not allowed
self.assertRaises(ValueError, queue.enqueue, echo, 1, job_timeout=0)
def test_default_timeout(self):
"""Timeout can be passed via job_timeout argument"""
queue = Queue(connection=self.connection)
job = queue.enqueue(echo, 1)
self.assertEqual(job.timeout, queue.DEFAULT_TIMEOUT)
job = Job.create(func=echo, connection=self.connection)
job = queue.enqueue_job(job)
self.assertEqual(job.timeout, queue.DEFAULT_TIMEOUT)
queue = Queue(connection=self.connection, default_timeout=15)
job = queue.enqueue(echo, 1)
self.assertEqual(job.timeout, 15)
job = Job.create(func=echo, connection=self.connection)
job = queue.enqueue_job(job)
self.assertEqual(job.timeout, 15)
def test_synchronous_timeout(self):
queue = Queue(is_async=False, connection=self.connection)
self.assertFalse(queue.is_async)
no_expire_job = queue.enqueue(echo, result_ttl=-1)
self.assertEqual(queue.connection.ttl(no_expire_job.key), -1)
delete_job = queue.enqueue(echo, result_ttl=0)
self.assertEqual(queue.connection.ttl(delete_job.key), -2)
keep_job = queue.enqueue(echo, result_ttl=100)
self.assertLessEqual(queue.connection.ttl(keep_job.key), 100)
def test_synchronous_ended_at(self):
queue = Queue(is_async=False, connection=self.connection)
echo_job = queue.enqueue(echo)
self.assertIsNotNone(echo_job.ended_at)
def test_enqueue_explicit_args(self):
"""enQueue(connection=self.connection) works for both implicit/explicit args."""
q = Queue(connection=self.connection)
# Implicit args/kwargs mode
job = q.enqueue(echo, 1, job_timeout=1, result_ttl=1, bar='baz')
self.assertEqual(job.timeout, 1)
self.assertEqual(job.result_ttl, 1)
self.assertEqual(job.perform(), ((1,), {'bar': 'baz'}))
# Explicit kwargs mode
kwargs = {
'timeout': 1,
'result_ttl': 1,
}
job = q.enqueue(echo, job_timeout=2, result_ttl=2, args=[1], kwargs=kwargs)
self.assertEqual(job.timeout, 2)
self.assertEqual(job.result_ttl, 2)
self.assertEqual(job.perform(), ((1,), {'timeout': 1, 'result_ttl': 1}))
# Explicit args and kwargs should also work with enqueue_at
time = datetime.now(timezone.utc) + timedelta(seconds=10)
job = q.enqueue_at(time, echo, job_timeout=2, result_ttl=2, args=[1], kwargs=kwargs)
self.assertEqual(job.timeout, 2)
self.assertEqual(job.result_ttl, 2)
self.assertEqual(job.perform(), ((1,), {'timeout': 1, 'result_ttl': 1}))
# Positional arguments is not allowed if explicit args and kwargs are used
self.assertRaises(Exception, q.enqueue, echo, 1, kwargs=kwargs)
def test_all_queues(self):
"""All queues"""
q1 = Queue('first-queue', connection=self.connection)
q2 = Queue('second-queue', connection=self.connection)
q3 = Queue('third-queue', connection=self.connection)
# Ensure a queue is added only once a job is enqueued
self.assertEqual(len(Queue.all(connection=self.connection)), 0)
q1.enqueue(say_hello)
self.assertEqual(len(Queue.all(connection=self.connection)), 1)
# Ensure this holds true for multiple queues
q2.enqueue(say_hello)
q3.enqueue(say_hello)
names = [q.name for q in Queue.all(connection=self.connection)]
self.assertEqual(len(Queue.all(connection=self.connection)), 3)
# Verify names
self.assertIn('first-queue', names)
self.assertIn('second-queue', names)
self.assertIn('third-queue', names)
# Now empty two queues
w = Worker([q2, q3], connection=self.connection)
w.work(burst=True)
# Queue.all(connection=self.connection) should still report the empty queues
self.assertEqual(len(Queue.all(connection=self.connection)), 3)
def test_all_custom_job(self):
class CustomJob(Job):
pass
q = Queue('all-queue', connection=self.connection)
q.enqueue(say_hello)
queues = Queue.all(job_class=CustomJob, connection=self.connection)
self.assertEqual(len(queues), 1)
self.assertIs(queues[0].job_class, CustomJob)
def test_all_queues_with_only_deferred_jobs(self):
"""All queues with only deferred jobs"""
queue_with_queued_jobs = Queue('queue_with_queued_jobs', connection=self.connection)
queue_with_deferred_jobs = Queue('queue_with_deferred_jobs', connection=self.connection)
parent_job = queue_with_queued_jobs.enqueue(say_hello)
queue_with_deferred_jobs.enqueue(say_hello, depends_on=parent_job)
# Ensure all queues are listed
self.assertEqual(len(Queue.all(connection=self.connection)), 2)
names = [q.name for q in Queue.all(connection=self.connection)]
# Verify names
self.assertIn('queue_with_queued_jobs', names)
self.assertIn('queue_with_deferred_jobs', names)
def test_from_queue_key(self):
"""Ensure being able to get a Queue instance manually from Redis"""
q = Queue(connection=self.connection)
key = Queue.redis_queue_namespace_prefix + 'default'
reverse_q = Queue.from_queue_key(key, connection=self.connection)
self.assertEqual(q, reverse_q)
def test_from_queue_key_error(self):
"""Ensure that an exception is raised if the queue prefix is wrong"""
key = 'some:weird:prefix:' + 'default'
self.assertRaises(ValueError, Queue.from_queue_key, key, connection=self.connection)
def test_enqueue_dependents(self):
"""Enqueueing dependent jobs pushes all jobs in the depends set to the queue
and removes them from DeferredJobQueue."""
q = Queue(connection=self.connection)
parent_job = Job.create(func=say_hello, connection=self.connection)
parent_job.save()
job_1 = q.enqueue(say_hello, depends_on=parent_job)
job_2 = q.enqueue(say_hello, depends_on=parent_job)
registry = DeferredJobRegistry(q.name, connection=self.connection)
parent_job.set_status(JobStatus.FINISHED)
self.assertEqual(set(registry.get_job_ids()), set([job_1.id, job_2.id]))
# After dependents is enqueued, job_1 and job_2 should be in queue
self.assertEqual(q.job_ids, [])
q.enqueue_dependents(parent_job)
self.assertEqual(set(q.job_ids), set([job_2.id, job_1.id]))
self.assertFalse(self.connection.exists(parent_job.dependents_key))
# DeferredJobRegistry should also be empty
self.assertEqual(registry.get_job_ids(), [])
def test_enqueue_dependents_on_multiple_queues(self):
"""Enqueueing dependent jobs on multiple queues pushes jobs in the queues
and removes them from DeferredJobRegistry for each different queue."""
q_1 = Queue('queue_1', connection=self.connection)
q_2 = Queue('queue_2', connection=self.connection)
parent_job = Job.create(func=say_hello, connection=self.connection)
parent_job.save()
job_1 = q_1.enqueue(say_hello, depends_on=parent_job)
job_2 = q_2.enqueue(say_hello, depends_on=parent_job)
# Each queue has its own DeferredJobRegistry
registry_1 = DeferredJobRegistry(q_1.name, connection=self.connection)
self.assertEqual(set(registry_1.get_job_ids()), set([job_1.id]))
registry_2 = DeferredJobRegistry(q_2.name, connection=self.connection)
parent_job.set_status(JobStatus.FINISHED)
self.assertEqual(set(registry_2.get_job_ids()), set([job_2.id]))
# After dependents is enqueued, job_1 on queue_1 and
# job_2 should be in queue_2
self.assertEqual(q_1.job_ids, [])
self.assertEqual(q_2.job_ids, [])
q_1.enqueue_dependents(parent_job)
q_2.enqueue_dependents(parent_job)
self.assertEqual(set(q_1.job_ids), set([job_1.id]))
self.assertEqual(set(q_2.job_ids), set([job_2.id]))
self.assertFalse(self.connection.exists(parent_job.dependents_key))
# DeferredJobRegistry should also be empty
self.assertEqual(registry_1.get_job_ids(), [])
self.assertEqual(registry_2.get_job_ids(), [])
def test_enqueue_job_with_dependency(self):
"""Jobs are enqueued only when their dependencies are finished."""
# Job with unfinished dependency is not immediately enqueued
parent_job = Job.create(func=say_hello, connection=self.connection)
parent_job.save()
q = Queue(connection=self.connection)
job = q.enqueue_call(say_hello, depends_on=parent_job)
self.assertEqual(q.job_ids, [])
self.assertEqual(job.get_status(), JobStatus.DEFERRED)
# Jobs dependent on finished jobs are immediately enqueued
parent_job.set_status(JobStatus.FINISHED)
parent_job.save()
job = q.enqueue_call(say_hello, depends_on=parent_job)
self.assertEqual(q.job_ids, [job.id])
self.assertEqual(job.timeout, Queue.DEFAULT_TIMEOUT)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
def test_enqueue_job_with_dependency_and_pipeline(self):
"""Jobs are enqueued only when their dependencies are finished, and by the caller when passing a pipeline."""
# Job with unfinished dependency is not immediately enqueued
parent_job = Job.create(func=say_hello, connection=self.connection)
parent_job.save()
q = Queue(connection=self.connection)
with q.connection.pipeline() as pipe:
job = q.enqueue_call(say_hello, depends_on=parent_job, pipeline=pipe)
self.assertEqual(q.job_ids, [])
self.assertEqual(job.get_status(refresh=False), JobStatus.DEFERRED)
# Not in registry before execute, since passed in pipeline
self.assertEqual(len(q.deferred_job_registry), 0)
pipe.execute()
# Only in registry after execute, since passed in pipeline
self.assertEqual(len(q.deferred_job_registry), 1)
# Jobs dependent on finished jobs are immediately enqueued
parent_job.set_status(JobStatus.FINISHED)
parent_job.save()
with q.connection.pipeline() as pipe:
job = q.enqueue_call(say_hello, depends_on=parent_job, pipeline=pipe)
# Pre execute conditions
self.assertEqual(q.job_ids, [])
self.assertEqual(job.timeout, Queue.DEFAULT_TIMEOUT)
self.assertEqual(job.get_status(refresh=False), JobStatus.QUEUED)
pipe.execute()
# Post execute conditions
self.assertEqual(q.job_ids, [job.id])
self.assertEqual(job.timeout, Queue.DEFAULT_TIMEOUT)
self.assertEqual(job.get_status(refresh=False), JobStatus.QUEUED)
def test_enqueue_job_with_no_dependency_prior_watch_and_pipeline(self):
"""Jobs are enqueued only when their dependencies are finished, and by the caller when passing a pipeline."""
q = Queue(connection=self.connection)
with q.connection.pipeline() as pipe:
pipe.watch(b'fake_key') # Test watch then enqueue
job = q.enqueue_call(say_hello, pipeline=pipe)
self.assertEqual(q.job_ids, [])
self.assertEqual(job.get_status(refresh=False), JobStatus.QUEUED)
# Not in queue before execute, since passed in pipeline
self.assertEqual(len(q), 0)
# Make sure modifying key doesn't cause issues, if in multi mode won't fail
pipe.set(b'fake_key', b'fake_value')
pipe.execute()
# Only in registry after execute, since passed in pipeline
self.assertEqual(len(q), 1)
def test_enqueue_many_internal_pipeline(self):
"""Jobs should be enqueued in bulk with an internal pipeline, enqueued in order provided
(but at_front still applies)"""
# Job with unfinished dependency is not immediately enqueued
q = Queue(connection=self.connection)
job_1_data = Queue.prepare_data(say_hello, job_id='fake_job_id_1', at_front=False)
job_2_data = Queue.prepare_data(say_hello, job_id='fake_job_id_2', at_front=False)
job_3_data = Queue.prepare_data(say_hello, job_id='fake_job_id_3', at_front=True)
jobs = q.enqueue_many(
[job_1_data, job_2_data, job_3_data],
)
for job in jobs:
self.assertEqual(job.get_status(refresh=False), JobStatus.QUEUED)
# Only in registry after execute, since passed in pipeline
self.assertEqual(len(q), 3)
self.assertEqual(q.job_ids, ['fake_job_id_3', 'fake_job_id_1', 'fake_job_id_2'])
self.assertEqual(len(Queue.all(connection=self.connection)), 1)
def test_enqueue_many_with_passed_pipeline(self):
"""Jobs should be enqueued in bulk with a passed pipeline, enqueued in order provided
(but at_front still applies)"""
# Job with unfinished dependency is not immediately enqueued
q = Queue(connection=self.connection)
with q.connection.pipeline() as pipe:
job_1_data = Queue.prepare_data(say_hello, job_id='fake_job_id_1', at_front=False)
job_2_data = Queue.prepare_data(say_hello, job_id='fake_job_id_2', at_front=False)
job_3_data = Queue.prepare_data(say_hello, job_id='fake_job_id_3', at_front=True)
jobs = q.enqueue_many([job_1_data, job_2_data, job_3_data], pipeline=pipe)
self.assertEqual(q.job_ids, [])
for job in jobs:
self.assertEqual(job.get_status(refresh=False), JobStatus.QUEUED)
pipe.execute()
# Only in registry after execute, since passed in pipeline
self.assertEqual(len(q), 3)
self.assertEqual(q.job_ids, ['fake_job_id_3', 'fake_job_id_1', 'fake_job_id_2'])
def test_enqueue_different_queues_with_passed_pipeline(self):
"""Jobs should be enqueued into different queues in a provided pipeline"""
q1 = Queue(name='q1', connection=self.connection)
q2 = Queue(name='q2', connection=self.connection)
q3 = Queue(name='q3', connection=self.connection)
queues = [q1, q2, q3]
jobs = []
with self.connection.pipeline() as pipe:
for idx, q in enumerate(queues):
jobs.append(q.enqueue_call(say_hello, job_id=f'fake_job_id_{idx}', pipeline=pipe))
for job in jobs:
self.assertEqual(job.get_status(refresh=False), JobStatus.QUEUED)
pipe.execute()
self.assertEqual(len(jobs), 3)
for idx, (job, q) in enumerate(zip(jobs, queues)):
# Check job is in the correct queue
self.assertEqual(job.id, f'fake_job_id_{idx}')
self.assertEqual(job.origin, q.name)
# Check queue contains the job
self.assertIn(job.id, q.job_ids)
def test_enqueue_job_with_dependency_by_id(self):
"""Can specify job dependency with job object or job id."""
parent_job = Job.create(func=say_hello, connection=self.connection)
parent_job.save()
q = Queue(connection=self.connection)
q.enqueue_call(say_hello, depends_on=parent_job.id)
self.assertEqual(q.job_ids, [])
# Jobs dependent on finished jobs are immediately enqueued
parent_job.set_status(JobStatus.FINISHED)
parent_job.save()
job = q.enqueue_call(say_hello, depends_on=parent_job.id)
self.assertEqual(q.job_ids, [job.id])
self.assertEqual(job.timeout, Queue.DEFAULT_TIMEOUT)
def test_enqueue_job_with_dependency_and_timeout(self):
"""Jobs remember their timeout when enqueued as a dependency."""
# Job with unfinished dependency is not immediately enqueued
parent_job = Job.create(func=say_hello, connection=self.connection)
parent_job.save()
q = Queue(connection=self.connection)
job = q.enqueue_call(say_hello, depends_on=parent_job, timeout=123)
self.assertEqual(q.job_ids, [])
self.assertEqual(job.timeout, 123)
# Jobs dependent on finished jobs are immediately enqueued
parent_job.set_status(JobStatus.FINISHED)
parent_job.save()
job = q.enqueue_call(say_hello, depends_on=parent_job, timeout=123)
self.assertEqual(q.job_ids, [job.id])
self.assertEqual(job.timeout, 123)
def test_enqueue_job_with_multiple_queued_dependencies(self):
parent_jobs = [Job.create(func=say_hello, connection=self.connection) for _ in range(2)]
for job in parent_jobs:
job._status = JobStatus.QUEUED
job.save()
q = Queue(connection=self.connection)
with patch('rq.queue.Job.create', new=MultipleDependencyJob.create):
job = q.enqueue(say_hello, depends_on=parent_jobs[0], _dependency_ids=[job.id for job in parent_jobs])
self.assertEqual(job.get_status(), JobStatus.DEFERRED)
self.assertEqual(q.job_ids, [])
self.assertEqual(job.fetch_dependencies(), parent_jobs)
def test_enqueue_job_with_multiple_finished_dependencies(self):
parent_jobs = [Job.create(func=say_hello, connection=self.connection) for _ in range(2)]
for job in parent_jobs:
job._status = JobStatus.FINISHED
job.save()
q = Queue(connection=self.connection)
with patch('rq.queue.Job.create', new=MultipleDependencyJob.create):
job = q.enqueue(say_hello, depends_on=parent_jobs[0], _dependency_ids=[job.id for job in parent_jobs])
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertEqual(q.job_ids, [job.id])
self.assertEqual(job.fetch_dependencies(), parent_jobs)
def test_enqueues_dependent_if_other_dependencies_finished(self):
parent_jobs = [Job.create(func=say_hello, connection=self.connection) for _ in range(3)]
parent_jobs[0]._status = JobStatus.STARTED
parent_jobs[0].save()
parent_jobs[1]._status = JobStatus.FINISHED
parent_jobs[1].save()
parent_jobs[2]._status = JobStatus.FINISHED
parent_jobs[2].save()
q = Queue(connection=self.connection)
with patch('rq.queue.Job.create', new=MultipleDependencyJob.create):
# dependent job deferred, b/c parent_job 0 is still 'started'
dependent_job = q.enqueue(
say_hello, depends_on=parent_jobs[0], _dependency_ids=[job.id for job in parent_jobs]
)
self.assertEqual(dependent_job.get_status(), JobStatus.DEFERRED)
# now set parent job 0 to 'finished'
parent_jobs[0].set_status(JobStatus.FINISHED)
q.enqueue_dependents(parent_jobs[0])
self.assertEqual(dependent_job.get_status(), JobStatus.QUEUED)
self.assertEqual(q.job_ids, [dependent_job.id])
def test_does_not_enqueue_dependent_if_other_dependencies_not_finished(self):
started_dependency = Job.create(func=say_hello, status=JobStatus.STARTED, connection=self.connection)
started_dependency.save()
queued_dependency = Job.create(func=say_hello, status=JobStatus.QUEUED, connection=self.connection)
queued_dependency.save()
q = Queue(connection=self.connection)
with patch('rq.queue.Job.create', new=MultipleDependencyJob.create):
dependent_job = q.enqueue(
say_hello,
depends_on=[started_dependency],
_dependency_ids=[started_dependency.id, queued_dependency.id],
)
self.assertEqual(dependent_job.get_status(), JobStatus.DEFERRED)
q.enqueue_dependents(started_dependency)
self.assertEqual(dependent_job.get_status(), JobStatus.DEFERRED)
self.assertEqual(q.job_ids, [])
def test_fetch_job_successful(self):
"""Fetch a job from a queue."""
q = Queue('example', connection=self.connection)
job_orig = q.enqueue(say_hello)
job_fetch: Job = q.fetch_job(job_orig.id) # type: ignore
self.assertIsNotNone(job_fetch)
self.assertEqual(job_orig.id, job_fetch.id)
self.assertEqual(job_orig.description, job_fetch.description)
def test_fetch_job_missing(self):
"""Fetch a job from a queue which doesn't exist."""
q = Queue('example', connection=self.connection)
job = q.fetch_job('123')
self.assertIsNone(job)
def test_fetch_job_different_queue(self):
"""Fetch a job from a queue which is in a different queue."""
q1 = Queue('example1', connection=self.connection)
q2 = Queue('example2', connection=self.connection)
job_orig = q1.enqueue(say_hello)
job_fetch = q2.fetch_job(job_orig.id)
self.assertIsNone(job_fetch)
job_fetch = q1.fetch_job(job_orig.id)
self.assertIsNotNone(job_fetch)
def test_getting_registries(self):
"""Getting job registries from queue object"""
queue = Queue('example', connection=self.connection)
self.assertEqual(queue.scheduled_job_registry, ScheduledJobRegistry(queue=queue))
self.assertEqual(queue.started_job_registry, StartedJobRegistry(queue=queue))
self.assertEqual(queue.failed_job_registry, FailedJobRegistry(queue=queue))
self.assertEqual(queue.deferred_job_registry, DeferredJobRegistry(queue=queue))
self.assertEqual(queue.finished_job_registry, FinishedJobRegistry(queue=queue))
self.assertEqual(queue.canceled_job_registry, CanceledJobRegistry(queue=queue))
def test_getting_registries_with_serializer(self):
"""Getting job registries from queue object (with custom serializer)"""
queue = Queue('example', connection=self.connection, serializer=JSONSerializer)
self.assertEqual(queue.scheduled_job_registry, ScheduledJobRegistry(queue=queue))
self.assertEqual(queue.started_job_registry, StartedJobRegistry(queue=queue))
self.assertEqual(queue.failed_job_registry, FailedJobRegistry(queue=queue))
self.assertEqual(queue.deferred_job_registry, DeferredJobRegistry(queue=queue))
self.assertEqual(queue.finished_job_registry, FinishedJobRegistry(queue=queue))
self.assertEqual(queue.canceled_job_registry, CanceledJobRegistry(queue=queue))
# Make sure we don't use default when queue has custom
self.assertEqual(queue.scheduled_job_registry.serializer, JSONSerializer)
self.assertEqual(queue.started_job_registry.serializer, JSONSerializer)
self.assertEqual(queue.failed_job_registry.serializer, JSONSerializer)
self.assertEqual(queue.deferred_job_registry.serializer, JSONSerializer)
self.assertEqual(queue.finished_job_registry.serializer, JSONSerializer)
self.assertEqual(queue.canceled_job_registry.serializer, JSONSerializer)
def test_enqueue_with_retry(self):
"""Enqueueing with retry_strategy works"""
queue = Queue('example', connection=self.connection)
job = queue.enqueue(say_hello, retry=Retry(max=3, interval=5))
job = Job.fetch(job.id, connection=self.connection)
self.assertEqual(job.retries_left, 3)
self.assertEqual(job.retry_intervals, [5])
| TestQueue |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_sensor_definition.py | {
"start": 1903,
"end": 10356
} | class ____(SensorDefinition, IHasInternalInit):
"""Define an asset sensor that initiates a set of runs based on the materialization of a given
asset.
If the asset has been materialized multiple times between since the last sensor tick, the
evaluation function will only be invoked once, with the latest materialization.
Args:
name (str): The name of the sensor to create.
asset_key (AssetKey): The asset_key this sensor monitors.
asset_materialization_fn (Callable[[SensorEvaluationContext, EventLogEntry], Union[Iterator[Union[RunRequest, SkipReason]], RunRequest, SkipReason]]): The core
evaluation function for the sensor, which is run at an interval to determine whether a
run should be launched or not. Takes a :py:class:`~dagster.SensorEvaluationContext` and
an EventLogEntry corresponding to an AssetMaterialization event.
This function must return a generator, which must yield either a single SkipReason
or one or more RunRequest objects.
minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse
between sensor evaluations.
description (Optional[str]): A human-readable description of the sensor.
job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job
object to target with this sensor.
jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):
A list of jobs to be executed when the sensor fires.
tags (Optional[Mapping[str, str]]): A set of key-value tags that annotate the sensor and can
be used for searching and filtering in the UI.
metadata (Optional[Mapping[str, object]]): A set of metadata entries that annotate the
sensor. Values will be normalized to typed `MetadataValue` objects.
default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default
status can be overridden from the Dagster UI or via the GraphQL API.
"""
def __init__(
self,
name: str,
asset_key: AssetKey,
job_name: Optional[str],
asset_materialization_fn: Callable[
...,
SensorReturnTypesUnion,
],
minimum_interval_seconds: Optional[int] = None,
description: Optional[str] = None,
job: Optional[ExecutableDefinition] = None,
jobs: Optional[Sequence[ExecutableDefinition]] = None,
default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,
required_resource_keys: Optional[set[str]] = None,
tags: Optional[Mapping[str, str]] = None,
metadata: Optional[RawMetadataMapping] = None,
):
self._asset_key = check.inst_param(asset_key, "asset_key", AssetKey)
self._asset_materialization_fn = asset_materialization_fn
self._job_name = job_name
from dagster._core.event_api import AssetRecordsFilter
resource_arg_names: set[str] = {
arg.name for arg in get_resource_args(asset_materialization_fn)
}
combined_required_resource_keys = (
check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)
| resource_arg_names
)
self._raw_required_resource_keys = combined_required_resource_keys
def _wrap_asset_fn(materialization_fn) -> Any:
def _fn(context) -> Any:
after_cursor = None
if context.cursor:
try:
after_cursor = int(context.cursor)
except ValueError:
after_cursor = None
event_records = context.instance.fetch_materializations(
AssetRecordsFilter(
asset_key=self._asset_key,
after_storage_id=after_cursor,
),
ascending=False,
limit=1,
).records
if not event_records:
yield SkipReason(
f"No new materialization events found for asset key {self._asset_key}"
)
return
event_record = event_records[0]
(
context_param_name,
event_log_entry_param_name,
) = get_asset_sensor_param_names(materialization_fn)
resource_args_populated = validate_and_get_resource_dict(
context.resources, name, resource_arg_names
)
# Build asset sensor function args, which can include any subset of
# context arg, event log entry arg, and any resource args
args = resource_args_populated
if context_param_name:
args[context_param_name] = context
if event_log_entry_param_name:
args[event_log_entry_param_name] = event_record.event_log_entry
result = materialization_fn(**args)
if inspect.isgenerator(result) or isinstance(result, list):
yield from result
elif isinstance(result, (SkipReason, RunRequest)):
yield result
context.update_cursor(str(event_record.storage_id))
return _fn
super().__init__(
name=check_valid_name(name),
job_name=job_name,
evaluation_fn=_wrap_asset_fn(
check.callable_param(asset_materialization_fn, "asset_materialization_fn"),
),
minimum_interval_seconds=minimum_interval_seconds,
description=description,
job=job,
jobs=jobs,
default_status=default_status,
required_resource_keys=combined_required_resource_keys,
tags=tags,
metadata=metadata,
)
@public
@property
def asset_key(self) -> AssetKey:
"""AssetKey: The key of the asset targeted by this sensor."""
return self._asset_key
@property
def sensor_type(self) -> SensorType:
return SensorType.ASSET
@staticmethod
def dagster_internal_init( # type: ignore
*,
name: str,
asset_key: AssetKey,
job_name: Optional[str],
asset_materialization_fn: Callable[..., SensorReturnTypesUnion],
minimum_interval_seconds: Optional[int],
description: Optional[str],
job: Optional[ExecutableDefinition],
jobs: Optional[Sequence[ExecutableDefinition]],
default_status: DefaultSensorStatus,
required_resource_keys: Optional[set[str]],
tags: Optional[Mapping[str, str]],
metadata: Optional[RawMetadataMapping],
) -> "AssetSensorDefinition":
return AssetSensorDefinition(
name=name,
asset_key=asset_key,
job_name=job_name,
asset_materialization_fn=asset_materialization_fn,
minimum_interval_seconds=minimum_interval_seconds,
description=description,
job=job,
jobs=jobs,
default_status=default_status,
required_resource_keys=required_resource_keys,
tags=tags,
metadata=metadata,
)
def with_attributes(
self,
*,
jobs: Optional[Sequence[ExecutableDefinition]] = None,
metadata: Optional[RawMetadataMapping] = None,
) -> "AssetSensorDefinition":
"""Returns a copy of this sensor with the attributes replaced."""
job_name, new_job, new_jobs = resolve_jobs_from_targets_for_with_attributes(self, jobs)
return AssetSensorDefinition.dagster_internal_init(
name=self.name,
asset_key=self._asset_key,
job_name=job_name,
asset_materialization_fn=self._asset_materialization_fn,
minimum_interval_seconds=self.minimum_interval_seconds,
description=self.description,
job=new_job,
jobs=new_jobs,
default_status=self.default_status,
required_resource_keys=self._raw_required_resource_keys,
tags=self._tags,
metadata=metadata if metadata is not None else self._metadata,
)
| AssetSensorDefinition |
python | sqlalchemy__sqlalchemy | test/engine/test_reflection.py | {
"start": 48597,
"end": 53158
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
run_create_tables = None
@classmethod
def teardown_test_class(cls):
# TablesTest is used here without
# run_create_tables, so add an explicit drop of whatever is in
# metadata
cls._tables_metadata.drop_all(testing.db)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id",
sa.Integer,
normalize_sequence(
config, sa.Sequence("user_id_seq", optional=True)
),
primary_key=True,
),
Column("user_name", sa.String(40)),
)
Table(
"email_addresses",
metadata,
Column(
"address_id",
sa.Integer,
normalize_sequence(
config, sa.Sequence("address_id_seq", optional=True)
),
primary_key=True,
),
Column("user_id", sa.Integer, sa.ForeignKey("users.user_id")),
Column("email_address", sa.String(40)),
)
Table(
"orders",
metadata,
Column(
"order_id",
sa.Integer,
normalize_sequence(
config, sa.Sequence("order_id_seq", optional=True)
),
primary_key=True,
),
Column("user_id", sa.Integer, sa.ForeignKey("users.user_id")),
Column("description", sa.String(50)),
Column("isopen", sa.Integer),
)
Table(
"items",
metadata,
Column(
"item_id",
sa.INT,
normalize_sequence(
config, sa.Sequence("items_id_seq", optional=True)
),
primary_key=True,
),
Column("order_id", sa.INT, sa.ForeignKey("orders")),
Column("item_name", sa.VARCHAR(50)),
)
def test_sorter(self):
tables = self.tables_test_metadata.sorted_tables
table_names = [t.name for t in tables]
ua = [n for n in table_names if n in ("users", "email_addresses")]
oi = [n for n in table_names if n in ("orders", "items")]
eq_(ua, ["users", "email_addresses"])
eq_(oi, ["orders", "items"])
def test_checkfirst(self, connection: Connection) -> None:
insp = inspect(connection)
users = self.tables.users
is_false(insp.has_table("users"))
users.create(connection)
insp.clear_cache()
is_true(insp.has_table("users"))
users.create(connection, checkfirst=True)
users.drop(connection)
users.drop(connection, checkfirst=True)
insp.clear_cache()
is_false(insp.has_table("users"))
users.create(connection, checkfirst=True)
users.drop(connection)
def test_createdrop(self, connection: Connection) -> None:
insp = inspect(connection)
metadata = self.tables_test_metadata
assert metadata is not None
metadata.create_all(connection)
is_true(insp.has_table("items"))
is_true(insp.has_table("email_addresses"))
metadata.create_all(connection)
insp.clear_cache()
is_true(insp.has_table("items"))
metadata.drop_all(connection)
insp.clear_cache()
is_false(insp.has_table("items"))
is_false(insp.has_table("email_addresses"))
metadata.drop_all(connection)
insp.clear_cache()
is_false(insp.has_table("items"))
def test_has_table_and_table_names(self, connection):
"""establish that has_table and get_table_names are consistent w/
each other with regard to caching
"""
metadata = self.tables_test_metadata
metadata.create_all(bind=connection)
insp = inspect(connection)
# ensure all tables we created are in the list.
is_true(set(insp.get_table_names()).issuperset(metadata.tables))
assert insp.has_table("items")
assert "items" in insp.get_table_names()
self.tables.items.drop(connection)
# cached
assert insp.has_table("items")
assert "items" in insp.get_table_names()
insp = inspect(connection)
assert not insp.has_table("items")
assert "items" not in insp.get_table_names()
| CreateDropTest |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 18677,
"end": 19531
} | class ____(
APIv3Settings,
NestedViewSetMixin,
ProjectQuerySetMixin,
FlexFieldsMixin,
ModelViewSet,
):
model = Redirect
lookup_field = "pk"
lookup_url_kwarg = "redirect_pk"
permission_classes = (IsAuthenticated & IsProjectAdmin,)
def get_queryset(self):
queryset = super().get_queryset()
return queryset.select_related("project")
def get_serializer_class(self):
if self.action in ("create", "update", "partial_update"):
return RedirectCreateSerializer
return RedirectDetailSerializer
def perform_create(self, serializer):
# Inject the project from the URL into the serializer
serializer.validated_data.update(
{
"project": self._get_parent_project(),
}
)
serializer.save()
| RedirectsViewSet |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 816693,
"end": 817027
} | class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("permission",)
permission = sgqlc.types.Field(
OrgAddMemberAuditEntryPermission, graphql_name="permission"
)
| OrgAddMemberAuditEntry |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 1780,
"end": 1919
} | class ____(JoseError):
error = "missing_encryption_algorithm"
description = "Missing 'enc' in header"
| MissingEncryptionAlgorithmError |
python | doocs__leetcode | solution/2900-2999/2967.Minimum Cost to Make Array Equalindromic/Solution.py | {
"start": 158,
"end": 453
} | class ____:
def minimumCost(self, nums: List[int]) -> int:
def f(x: int) -> int:
return sum(abs(v - x) for v in nums)
nums.sort()
i = bisect_left(ps, nums[len(nums) // 2])
return min(f(ps[j]) for j in range(i - 1, i + 2) if 0 <= j < len(ps))
| Solution |
python | sympy__sympy | sympy/assumptions/predicates/calculus.py | {
"start": 1058,
"end": 1646
} | class ____(Predicate):
"""
Infinite number predicate.
Explanation
===========
``Q.infinite(x)`` is true iff the absolute value of ``x`` is
infinity.
Examples
========
>>> from sympy import Q, ask, oo, zoo, I
>>> ask(Q.infinite(oo))
True
>>> ask(Q.infinite(-oo))
True
>>> ask(Q.infinite(zoo))
True
>>> ask(Q.infinite(1))
False
>>> ask(Q.infinite(I*oo))
True
"""
name = 'infinite'
handler = Dispatcher(
"InfiniteHandler",
doc="""Handler for Q.infinite key."""
)
| InfinitePredicate |
python | django__django | tests/model_fields/models.py | {
"start": 3983,
"end": 4069
} | class ____(models.Model):
value = models.PositiveIntegerField()
| PositiveIntegerModel |
python | huggingface__transformers | src/transformers/models/siglip/tokenization_siglip.py | {
"start": 1246,
"end": 14320
} | class ____(SentencePieceBackend):
"""
Construct a Siglip tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"</s>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
model_max_length (`int`, *optional*, defaults to 64):
The maximum length (in number of tokens) for model inputs.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
eos_token="</s>",
unk_token="<unk>",
pad_token="</s>",
additional_special_tokens=None,
sp_model_kwargs: Optional[dict[str, Any]] = None,
model_max_length=64,
do_lower_case=True,
**kwargs,
) -> None:
requires_backends(self, "protobuf")
pad_token = (
AddedToken(pad_token, rstrip=True, lstrip=True, normalized=False, special=True)
if isinstance(pad_token, str)
else pad_token
)
unk_token = (
AddedToken(unk_token, rstrip=True, lstrip=True, normalized=False, special=True)
if isinstance(unk_token, str)
else unk_token
)
eos_token = (
AddedToken(eos_token, rstrip=True, lstrip=True, normalized=False, special=True)
if isinstance(eos_token, str)
else eos_token
)
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.do_lower_case = do_lower_case
super().__init__(
vocab_file=vocab_file,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
additional_special_tokens=additional_special_tokens,
sp_model_kwargs=self.sp_model_kwargs,
model_max_length=model_max_length,
do_lower_case=do_lower_case,
**kwargs,
)
@property
def vocab_size(self):
return self.sp_model.get_piece_size()
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# normal case: some special tokens
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]:
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added."
)
return token_ids
else:
return token_ids + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def remove_punctuation(self, text: str) -> str:
return text.translate(str.maketrans("", "", string.punctuation))
# source: https://github.com/google-research/big_vision/blob/3b8e5ab6ad4f96e32b32826f9e1b8fd277914f9c/big_vision/evaluators/proj/image_text/prompt_engineering.py#L94
def canonicalize_text(self, text, *, keep_punctuation_exact_string=None):
"""Returns canonicalized `text` (puncuation removed).
Args:
text (`str`):
String to be canonicalized.
keep_punctuation_exact_string (`str`, *optional*):
If provided, then this exact string is kept. For example providing '{}' will keep any occurrences of '{}'
(but will still remove '{' and '}' that appear separately).
"""
if self.do_lower_case:
text = text.lower()
if keep_punctuation_exact_string:
text = keep_punctuation_exact_string.join(
self.remove_punctuation(part) for part in text.split(keep_punctuation_exact_string)
)
else:
text = self.remove_punctuation(text)
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text
def tokenize(self, text: "TextInput", add_special_tokens=False, **kwargs) -> list[str]:
"""
Converts a string to a list of tokens.
"""
tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " "), **kwargs)
if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
tokens = tokens[1:]
return tokens
@property
def unk_token_length(self):
return len(self.sp_model.encode(str(self.unk_token)))
def _tokenize(self, text, **kwargs):
"""
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE.
For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`.
Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
"""
text = self.canonicalize_text(text, keep_punctuation_exact_string=None)
tokens = self.sp_model.encode(text, out_type=str)
# 1. Encode string + prefix ex: "<unk> Hey"
tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
# 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
__all__ = ["SiglipTokenizer"]
| SiglipTokenizer |
python | python-pillow__Pillow | src/PIL/PcfFontFile.py | {
"start": 1338,
"end": 7223
} | class ____(FontFile.FontFile):
"""Font file plugin for the X11 PCF format."""
name = "name"
def __init__(self, fp: BinaryIO, charset_encoding: str = "iso8859-1"):
self.charset_encoding = charset_encoding
magic = l32(fp.read(4))
if magic != PCF_MAGIC:
msg = "not a PCF file"
raise SyntaxError(msg)
super().__init__()
count = l32(fp.read(4))
self.toc = {}
for i in range(count):
type = l32(fp.read(4))
self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))
self.fp = fp
self.info = self._load_properties()
metrics = self._load_metrics()
bitmaps = self._load_bitmaps(metrics)
encoding = self._load_encoding()
#
# create glyph structure
for ch, ix in enumerate(encoding):
if ix is not None:
(
xsize,
ysize,
left,
right,
width,
ascent,
descent,
attributes,
) = metrics[ix]
self.glyph[ch] = (
(width, 0),
(left, descent - ysize, xsize + left, descent),
(0, 0, xsize, ysize),
bitmaps[ix],
)
def _getformat(
self, tag: int
) -> tuple[BinaryIO, int, Callable[[bytes], int], Callable[[bytes], int]]:
format, size, offset = self.toc[tag]
fp = self.fp
fp.seek(offset)
format = l32(fp.read(4))
if format & 4:
i16, i32 = b16, b32
else:
i16, i32 = l16, l32
return fp, format, i16, i32
def _load_properties(self) -> dict[bytes, bytes | int]:
#
# font properties
properties = {}
fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)
nprops = i32(fp.read(4))
# read property description
p = [(i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))) for _ in range(nprops)]
if nprops & 3:
fp.seek(4 - (nprops & 3), io.SEEK_CUR) # pad
data = fp.read(i32(fp.read(4)))
for k, s, v in p:
property_value: bytes | int = sz(data, v) if s else v
properties[sz(data, k)] = property_value
return properties
def _load_metrics(self) -> list[tuple[int, int, int, int, int, int, int, int]]:
#
# font metrics
metrics: list[tuple[int, int, int, int, int, int, int, int]] = []
fp, format, i16, i32 = self._getformat(PCF_METRICS)
append = metrics.append
if (format & 0xFF00) == 0x100:
# "compressed" metrics
for i in range(i16(fp.read(2))):
left = i8(fp.read(1)) - 128
right = i8(fp.read(1)) - 128
width = i8(fp.read(1)) - 128
ascent = i8(fp.read(1)) - 128
descent = i8(fp.read(1)) - 128
xsize = right - left
ysize = ascent + descent
append((xsize, ysize, left, right, width, ascent, descent, 0))
else:
# "jumbo" metrics
for i in range(i32(fp.read(4))):
left = i16(fp.read(2))
right = i16(fp.read(2))
width = i16(fp.read(2))
ascent = i16(fp.read(2))
descent = i16(fp.read(2))
attributes = i16(fp.read(2))
xsize = right - left
ysize = ascent + descent
append((xsize, ysize, left, right, width, ascent, descent, attributes))
return metrics
def _load_bitmaps(
self, metrics: list[tuple[int, int, int, int, int, int, int, int]]
) -> list[Image.Image]:
#
# bitmap data
fp, format, i16, i32 = self._getformat(PCF_BITMAPS)
nbitmaps = i32(fp.read(4))
if nbitmaps != len(metrics):
msg = "Wrong number of bitmaps"
raise OSError(msg)
offsets = [i32(fp.read(4)) for _ in range(nbitmaps)]
bitmap_sizes = [i32(fp.read(4)) for _ in range(4)]
# byteorder = format & 4 # non-zero => MSB
bitorder = format & 8 # non-zero => MSB
padindex = format & 3
bitmapsize = bitmap_sizes[padindex]
offsets.append(bitmapsize)
data = fp.read(bitmapsize)
pad = BYTES_PER_ROW[padindex]
mode = "1;R"
if bitorder:
mode = "1"
bitmaps = []
for i in range(nbitmaps):
xsize, ysize = metrics[i][:2]
b, e = offsets[i : i + 2]
bitmaps.append(
Image.frombytes("1", (xsize, ysize), data[b:e], "raw", mode, pad(xsize))
)
return bitmaps
def _load_encoding(self) -> list[int | None]:
fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)
first_col, last_col = i16(fp.read(2)), i16(fp.read(2))
first_row, last_row = i16(fp.read(2)), i16(fp.read(2))
i16(fp.read(2)) # default
nencoding = (last_col - first_col + 1) * (last_row - first_row + 1)
# map character code to bitmap index
encoding: list[int | None] = [None] * min(256, nencoding)
encoding_offsets = [i16(fp.read(2)) for _ in range(nencoding)]
for i in range(first_col, len(encoding)):
try:
encoding_offset = encoding_offsets[
ord(bytearray([i]).decode(self.charset_encoding))
]
if encoding_offset != 0xFFFF:
encoding[i] = encoding_offset
except UnicodeDecodeError:
# character is not supported in selected encoding
pass
return encoding
| PcfFontFile |
python | ipython__ipython | IPython/extensions/tests/test_deduperreload.py | {
"start": 37882,
"end": 56557
} | class ____(ShellFixture):
def test_autoreload_class_basic(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 9
class C:
def foo():
return 1
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
x = 9
class C:
def foo():
return 1
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.foo() == 1
def test_remove_overridden_method(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class A:
def foo(self):
return 1
class B(A):
def foo(self):
return 42
"""
)
self.shell.run_code(f"from {mod_name} import B; b = B()")
self.shell.run_code("assert b.foo() == 42")
self.write_file(
mod_fn,
"""
class A:
def foo(self):
return 1
class B(A):
pass
""",
)
self.shell.run_code("assert b.foo() == 1")
def test_autoreload_class_use_outside_func(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 9
class C:
def foo():
return 1
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
x = 9
class C:
def foo():
return 1+x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.foo() == 10
def test_autoreload_class_use_class_member(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 9
class C:
def foo():
return 1
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def foo():
return 1+C.x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.foo() == 10
def test_autoreload_class_pass(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 9
class C:
pass
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def foo():
return 1+C.x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.foo() == 10
def test_autoreload_class_ellipsis(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 9
class C:
...
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def foo():
return 1+C.x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.foo() == 10
def test_autoreload_class_default_autoreload(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
x = 9
def foo():
return 1+C.x
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 20
def foo():
return 1+C.x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.foo() == 21
def test_autoreload_class_nested(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
x = 9
class D:
def foo():
pass
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
class D:
def foo():
return 10
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.D.foo() == 10
def test_autoreload_class_nested2(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
x = 9
def c():
return 1
class D:
def foo():
pass
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def c():
return 1
class D:
def foo():
return 10
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.D.foo() == 10
assert mod.C.c() == 1
def test_autoreload_class_nested3(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
x = 9
def c():
return 1
class D:
def foo():
pass
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def c():
return 13
class D:
def foo():
return 10
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.D.foo() == 10
assert mod.C.c() == 13
def test_autoreload_new_class_added(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
x = 9
def c():
return 1
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def c():
return 13
class D:
def c():
return 1
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.c() == 13
assert mod.D.c() == 1
def test_autoreload_class_nested_default(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
pass
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def c():
return 13
class D:
def foo():
return 10
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.D.foo() == 10
assert mod.C.c() == 13
def test_autoreload_class_nested_using_members(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
x = 9
def c():
return 13
class D:
def foo():
return 10
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def c():
return 13
class D:
def foo():
return 10 + C.x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.D.foo() == 19
assert mod.C.c() == 13
def test_autoreload_class_nested_using_members_ellipsis(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class C:
x = 9
def c():
return 13
class D:
def foo():
...
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
class C:
x = 9
def c():
return 13
class D:
def foo():
return 10 + C.x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.C.D.foo() == 19
assert mod.C.c() == 13
def test_method_decorators_no_changes(self):
self.shell.magic_autoreload("2")
mod_name, mod_file = self.new_module(
"""
class Foo:
@classmethod
def bar(cls):
return 0
@classmethod
def foo(cls):
return 42 + cls.bar()
foo = Foo.foo
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.foo() == 42
self.shell.run_code(f"assert {mod_name}.foo() == 42")
self.write_file(
mod_file,
"""
class Foo:
@classmethod
def bar(cls):
return 0
@classmethod
def foo(cls):
return 42 + cls.bar()
foo = Foo.foo
""",
)
self.shell.run_code(f"assert {mod_name}.foo() == 42")
def test_method_decorators_no_changes1(self):
self.shell.magic_autoreload("2")
mod_name, mod_file = self.new_module(
"""
class Foo:
@classmethod
def bar(cls):
return 0
@classmethod
def foo(cls):
return 42 + cls.bar()
foo = Foo.foo
"""
)
self.shell.run_code(f"from {mod_name} import foo")
self.shell.run_code("assert foo() == 42")
self.write_file(
mod_file,
"""
class Foo:
@classmethod
def bar(cls):
return 0
@classmethod
def foo(cls):
return 42 + cls.bar()
foo = Foo.foo
""",
)
self.shell.run_code("assert foo() == 42")
def test_method_classmethod_one_change(self):
self.shell.magic_autoreload("2")
mod_name, mod_file = self.new_module(
"""
class Foo:
@classmethod
def bar(cls):
return 0
@classmethod
def func(cls):
return 42 + cls.bar()
func = Foo.func
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code(f"assert {mod_name}.func() == 42")
self.write_file(
mod_file,
"""
class Foo:
@classmethod
def bar(cls):
return 1
@classmethod
def func(cls):
return 42 + cls.bar()
func = Foo.func
""",
)
mod = sys.modules[mod_name]
self.shell.run_code(f"assert {mod_name}.func() == 43")
def test_method_staticmethod_one_change(self):
self.shell.magic_autoreload("2")
mod_name, mod_file = self.new_module(
"""
class Foo:
@staticmethod
def bar():
return 0
@staticmethod
def func():
return 42 + Foo.bar()
func = Foo.func
"""
)
self.shell.run_code(f"from {mod_name} import func")
self.shell.run_code("assert func() == 42")
self.write_file(
mod_file,
"""
class Foo:
@staticmethod
def bar():
return 1
@staticmethod
def func():
return 42 + Foo.bar()
func = Foo.func
""",
)
self.shell.run_code("assert func() == 43")
def test_autoreload_class_default_args(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 42
class Foo:
def foo(self, y): return y
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
x = 42
class Foo:
def foo(self, y=x): return y
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
obj = mod.Foo()
assert obj.foo(2) == 2
assert obj.foo() == 42
def test_autoreload_class_change_default_args(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 42
class Foo:
def foo(y): return y
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
self.write_file(
mod_fn,
"""
x = 44
class Foo:
def foo(y=x): return y
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
mod = sys.modules[mod_name]
assert mod.Foo.foo() == 44
def test_autoreload_class_new_class(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 42
class Foo:
def foo(y=x): return y
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
prev_foo = self.shell.user_ns[mod_name].Foo.foo
self.write_file(
mod_fn,
"""
x = 42
class Foo:
def foo(y=x): return y
class C:
def foo(): return 200
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
self.assertIs(prev_foo, self.shell.user_ns[mod_name].Foo.foo)
mod = sys.modules[mod_name]
assert mod.Foo.foo() == 42
assert mod.C.foo() == 200
def test_autoreload_overloaded_vars(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 42
class Foo:
pass
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
mod = sys.modules[mod_name]
self.write_file(
mod_fn,
"""
x = 44
class Foo:
def foo():
x = 2
return x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
assert mod.Foo.foo() == 2
def test_autoreload_overloaded_vars2(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
x = 42
def foo():
return x
"""
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
mod = sys.modules[mod_name]
self.write_file(
mod_fn,
"""
x = 44
def foo():
x = 2
return x
""",
)
self.shell.run_code("pass")
self.assertIn(mod_name, self.shell.user_ns)
assert mod.foo() == 2
| AutoreloadReliabilitySuite |
python | sanic-org__sanic | sanic/touchup/meta.py | {
"start": 114,
"end": 702
} | class ____(SanicMeta):
def __new__(cls, name, bases, attrs, **kwargs):
gen_class = super().__new__(cls, name, bases, attrs, **kwargs)
methods = attrs.get("__touchup__")
attrs["__touched__"] = False
if methods:
for method in methods:
if method not in attrs:
raise SanicException(
"Cannot perform touchup on non-existent method: "
f"{name}.{method}"
)
TouchUp.register(gen_class, method)
return gen_class
| TouchUpMeta |
python | spyder-ide__spyder | spyder/widgets/elementstable.py | {
"start": 6084,
"end": 8204
} | class ____(CustomSortFilterProxy):
FUZZY = False
# ---- Public API
# -------------------------------------------------------------------------
def filter_row(self, row_num, text=None):
# Use the pattern set by set_filter if no text is passed. Otherwise
# use `text` as pattern
if text is None:
pattern = self.pattern
else:
pattern = re.compile(f".*{text}.*", re.IGNORECASE)
element = self.sourceModel().elements[row_num]
# A title is always available
r_title = re.search(pattern, element["title"])
# Description and additional info are optional
if element.get("description"):
r_description = re.search(pattern, element["description"])
else:
r_description = None
if element.get("additional_info"):
r_additional_info = re.search(
pattern, element["additional_info"]
)
else:
r_additional_info = None
if (
r_title is None
and r_description is None
and r_additional_info is None
):
return False
else:
return True
# ---- Qt methods
# -------------------------------------------------------------------------
def sourceModel(self) -> ElementsModel:
# To get better code completions
return super().sourceModel()
def filterAcceptsRow(self, row_num: int, parent: QModelIndex) -> bool:
if self.parent()._with_widgets:
# We don't filter rows using this method when the table has widgets
# because they are deleted by Qt.
return True
else:
return self.filter_row(row_num)
def lessThan(self, left: QModelIndex, right: QModelIndex) -> bool:
# left and right are indexes from the source model. So this simply
# preserves its ordering
row_left = left.row()
row_right = right.row()
if row_left > row_right:
return True
else:
return False
| SortElementsFilterProxy |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 26601,
"end": 27086
} | class ____(
_MutableNoHashFixture, _MutableDictTestBase, fixtures.MappedTest
):
@classmethod
def define_tables(cls, metadata):
MutableDict = cls._type_fixture()
mutable_pickle = MutableDict.as_mutable(PickleType)
Table(
"foo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", mutable_pickle),
)
| MutableDictNoHashTest |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/choice.py | {
"start": 1116,
"end": 1253
} | class ____(TypedDict):
min_value: float
max_value: float
allow_nan: bool
smallest_nonzero_magnitude: float
| FloatConstraints |
python | jazzband__django-simple-history | simple_history/registry_tests/migration_test_app/models.py | {
"start": 767,
"end": 968
} | class ____(models.Model):
what_i_mean = CustomAttrNameForeignKey(
WhatIMean, models.CASCADE, attr_name="custom_attr_name"
)
history = HistoricalRecords()
| ModelWithCustomAttrForeignKey |
python | django-import-export__django-import-export | tests/core/tests/test_import_export_tags.py | {
"start": 91,
"end": 369
} | class ____(TestCase):
def test_compare_values(self):
target = (
'<del style="background:#ffe6e6;">a</del>'
'<ins style="background:#e6ffe6;">b</ins>'
)
self.assertEqual(target, import_export_tags.compare_values("a", "b"))
| TagsTest |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 65040,
"end": 65411
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sampleValType', _nvmlValueType_t),
('vgpuInstanceCount', c_uint),
('lastSeenTimeStamp', c_ulonglong),
('vgpuUtilArray', POINTER(c_nvmlVgpuInstanceUtilizationInfo_v1_t)),
]
VgpuInstancesUtilizationInfo_v1 = 0x01000020
| c_nvmlVgpuInstancesUtilizationInfo_v1_t |
python | astropy__astropy | astropy/modeling/tests/test_bounding_box.py | {
"start": 7890,
"end": 27006
} | class ____:
def setup_method(self):
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "C"
bounding_box = self.BoundingDomain(model, order="F")
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == "F"
bounding_box = self.BoundingDomain(Gaussian2D(), ["x"])
assert bounding_box._ignored == [0]
assert bounding_box._order == "C"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order="C")
assert bounding_box._order == "C"
assert bounding_box.order == "C"
bounding_box = self.BoundingDomain(mk.MagicMock(), order="F")
assert bounding_box._order == "F"
assert bounding_box.order == "F"
bounding_box._order = "test"
assert bounding_box.order == "test"
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == "C"
assert bounding_box._get_order() == "C"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Success (default 'F')
bounding_box._order = "F"
assert bounding_box._order == "F"
assert bounding_box._get_order() == "F"
assert bounding_box._get_order("C") == "C"
assert bounding_box._get_order("F") == "F"
# Error
MESSAGE = r"order must be either 'C' .* or 'F' .*, got: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_order(mk.MagicMock())
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index("x") == 0
assert bounding_box._get_index("y") == 1
# Pass invalid input name
MESSAGE = r"'z' is not one of the inputs: .*"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index("z")
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = r"Integer key: .* must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(2)
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int32(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(np.int64(2))
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._get_index(-1)
# Pass invalid key
MESSAGE = r"Key value: .* must be string or integer"
with pytest.raises(ValueError, match=MESSAGE):
bounding_box._get_index(mk.MagicMock())
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ["x"]
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(["x", "y"]) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
bounding_box._validate_ignored(["z"])
MESSAGE = r"Integer key: 3 must be non-negative and < 2"
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError, match=MESSAGE):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
MESSAGE = (
r"This bounding box is fixed by the model and does not have adjustable"
r" parameters"
)
with pytest.raises(RuntimeError, match=MESSAGE):
bounding_box(*args, **kwargs)
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(
NotImplementedError, match=r"This should be implemented by a child class"
):
bounding_box.fix_inputs(model, fixed_inputs)
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(
NotImplementedError,
match=r"This has not been implemented for BoundingDomain",
):
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
def test__base_ouput(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.asanyarray(0),
) as mkBase:
assert (
np.array([1, 2, 3])
== bounding_box._modify_output(
[1, 2, 3], valid_index, input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
with mk.patch.object(
_BoundingDomain,
"_base_output",
autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6]),
) as mkBase:
assert (
np.array([7, 2, 8, 4, 9, 6])
== bounding_box._modify_output(
[7, 8, 9], np.array([[0, 2, 4]]), input_shape, fill_value
)
).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(
_BoundingDomain, "_modify_output", autospec=True, side_effect=effects
) as mkModify:
assert effects == bounding_box._prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkModify.call_args_list == [
mk.call(
bounding_box,
valid_outputs[idx],
valid_index,
input_shape,
fill_value,
)
for idx in range(3)
]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_prepare_outputs", autospec=True
) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, [valid_outputs], valid_index, input_shape, fill_value
)
]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == bounding_box.prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box, valid_outputs, valid_index, input_shape, fill_value
)
]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(
_BoundingDomain, "_get_valid_outputs_unit", autospec=True
) as mkGet:
with mk.patch.object(
_BoundingDomain, "prepare_outputs", autospec=True
) as mkPrepare:
assert bounding_box._evaluate_model(
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
) == (mkPrepare.return_value, mkGet.return_value)
assert mkPrepare.call_args_list == [
mk.call(
bounding_box,
evaluate.return_value,
valid_index,
input_shape,
fill_value,
)
]
assert mkGet.call_args_list == [
mk.call(evaluate.return_value, with_units)
]
assert evaluate.call_args_list == [mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [
(valid_inputs, valid_index, True),
(valid_inputs, valid_index, False),
]
with mk.patch.object(
self.BoundingDomain, "prepare_inputs", autospec=True, side_effect=effects
) as mkPrepare:
with mk.patch.object(
_BoundingDomain, "_all_out_output", autospec=True
) as mkAll:
with mk.patch.object(
_BoundingDomain, "_evaluate_model", autospec=True
) as mkEvaluate:
# all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkAll.return_value
)
assert mkAll.call_args_list == [
mk.call(bounding_box, input_shape, fill_value)
]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert (
bounding_box._evaluate(
evaluate, inputs, input_shape, fill_value, with_units
)
== mkEvaluate.return_value
)
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units,
)
]
assert mkPrepare.call_args_list == [
mk.call(bounding_box, input_shape, inputs)
]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert bounding_box._set_outputs_unit(27, None) == 27
# set unit
assert bounding_box._set_outputs_unit(27, u.m) == 27 * u.m
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(
_BoundingDomain, "_evaluate", autospec=True, return_value=value
) as mkEvaluate:
with mk.patch.object(
_BoundingDomain, "_set_outputs_unit", autospec=True
) as mkSet:
with mk.patch.object(Model, "input_shape", autospec=True) as mkShape:
with mk.patch.object(
Model, "bbox_with_units", new_callable=mk.PropertyMock
) as mkUnits:
assert tuple(mkSet.return_value) == bounding_box.evaluate(
evaluate, inputs, fill_value
)
assert mkSet.call_args_list == [
mk.call(outputs, valid_outputs_unit)
]
assert mkEvaluate.call_args_list == [
mk.call(
bounding_box,
evaluate,
inputs,
mkShape.return_value,
fill_value,
mkUnits.return_value,
)
]
assert mkShape.call_args_list == [
mk.call(bounding_box._model, inputs)
]
assert mkUnits.call_args_list == [mk.call()]
| Test_BoundingDomain |
python | getsentry__sentry | src/sentry/workflow_engine/processors/delayed_workflow.py | {
"start": 8829,
"end": 17247
} | class ____:
"""
Represents all the data that uniquely identifies a condition and its
single respective Snuba query that must be made. Multiple instances of the
same condition can share a single query.
"""
handler: type[BaseEventFrequencyQueryHandler]
interval: str
environment_id: int | None
comparison_interval: str | None = None
# Hashable representation of the filters
frozen_filters: Sequence[frozenset[tuple[str, Any]]] | None = None
@staticmethod
def freeze_filters(
filters: Sequence[Mapping[str, Any]] | None,
) -> Sequence[frozenset[tuple[str, Any]]] | None:
"""
Convert the sorted representation of filters into a frozen one that can
be safely hashed.
"""
if filters is None:
return None
return tuple(frozenset(sorted(filter.items())) for filter in filters)
@property
def filters(self) -> list[QueryFilter] | None:
if self.frozen_filters is None:
return None
return [dict(filter) for filter in self.frozen_filters]
def __repr__(self) -> str:
return f"UniqueConditionQuery(handler={self.handler.__name__}, interval={self.interval}, environment_id={self.environment_id}, comparison_interval={self.comparison_interval}, filters={self.filters})"
def fetch_project(project_id: int) -> Project | None:
try:
return Project.objects.get_from_cache(id=project_id)
except Project.DoesNotExist:
logger.info(
"delayed_processing.project_does_not_exist",
extra={"project_id": project_id},
)
return None
def fetch_workflows_envs(
workflow_ids: list[WorkflowId],
) -> Mapping[WorkflowId, int | None]:
return {
workflow_id: env_id
for workflow_id, env_id in Workflow.objects.filter(id__in=workflow_ids).values_list(
"id", "environment_id"
)
}
def fetch_data_condition_groups(
dcg_ids: list[DataConditionGroupId],
) -> list[DataConditionGroup]:
"""
Fetch DataConditionGroups with enabled detectors/workflows
"""
return list(DataConditionGroup.objects.filter(id__in=dcg_ids))
def generate_unique_queries(
condition: DataCondition, environment_id: int | None
) -> list[UniqueConditionQuery]:
"""
Returns a list of all unique condition queries that must be made for the
given condition instance.
Count comparison conditions will only have one unique query, while percent
comparison conditions will have two unique queries.
"""
try:
condition_type = Condition(condition.type)
except ValueError:
logger.exception(
"Invalid condition type",
extra={"type": condition.type, "id": condition.id},
)
return []
if condition_type not in SLOW_CONDITIONS:
return []
try:
handler = slow_condition_query_handler_registry.get(condition_type)
except NoRegistrationExistsError:
logger.exception(
"No registration exists for condition",
extra={"type": condition.type, "id": condition.id},
)
return []
unique_queries = [
UniqueConditionQuery(
handler=handler,
interval=condition.comparison["interval"],
environment_id=environment_id,
frozen_filters=UniqueConditionQuery.freeze_filters(condition.comparison.get("filters")),
)
]
if condition_type in PERCENT_CONDITIONS:
unique_queries.append(
UniqueConditionQuery(
handler=handler,
interval=condition.comparison["interval"],
environment_id=environment_id,
comparison_interval=condition.comparison.get("comparison_interval"),
frozen_filters=UniqueConditionQuery.freeze_filters(
condition.comparison.get("filters")
),
)
)
return unique_queries
@sentry_sdk.trace
def get_condition_query_groups(
data_condition_groups: list[DataConditionGroup],
event_data: EventRedisData,
workflows_to_envs: Mapping[WorkflowId, int | None],
dcg_to_slow_conditions: dict[DataConditionGroupId, list[DataCondition]],
) -> dict[UniqueConditionQuery, GroupQueryParams]:
"""
Map unique condition queries to the group IDs that need to checked for that query.
"""
condition_groups: dict[UniqueConditionQuery, GroupQueryParams] = defaultdict(GroupQueryParams)
now = timezone.now()
for dcg in data_condition_groups:
slow_conditions = dcg_to_slow_conditions[dcg.id]
workflow_id = event_data.dcg_to_workflow.get(dcg.id)
workflow_env = workflows_to_envs[workflow_id] if workflow_id else None
timestamp = event_data.dcg_to_timestamp[dcg.id]
if timestamp is not None:
delay = now - timestamp
# If it's been more than 1.5 minutes, we're taking too long to process the event and
# want to know how bad it is. It's a biased sample, but let's us see if we've somewhat
# over or very over.
if delay.total_seconds() > 90:
metrics.timing(
"workflow_engine.overdue_event_lag",
delay.total_seconds(),
sample_rate=1.0,
)
for condition in slow_conditions:
for condition_query in generate_unique_queries(condition, workflow_env):
condition_groups[condition_query].update(
group_ids=event_data.dcg_to_groups[dcg.id], timestamp=timestamp
)
return condition_groups
@metrics.wraps(
"workflow_engine.delayed_workflow.get_condition_group_results",
# We want this to be accurate enough for alerting, so sample 100%
sample_rate=1.0,
)
@sentry_sdk.trace
def get_condition_group_results(
queries_to_groups: dict[UniqueConditionQuery, GroupQueryParams],
) -> dict[UniqueConditionQuery, QueryResult]:
condition_group_results = {}
current_time = timezone.now()
all_group_ids: set[GroupId] = set()
# bulk gather groups and fetch them
for time_and_groups in queries_to_groups.values():
all_group_ids.update(time_and_groups.group_ids)
all_groups: list[GroupValues] = list(
Group.objects.filter(id__in=all_group_ids).values(
"id", "type", "project_id", "project__organization_id"
)
)
last_try = False
if task := current_task():
last_try = not task.retries_remaining
for unique_condition, time_and_groups in queries_to_groups.items():
handler = unique_condition.handler()
group_ids = time_and_groups.group_ids
groups_to_query = [group for group in all_groups if group["id"] in group_ids]
time = time_and_groups.timestamp or current_time
_, duration = handler.intervals[unique_condition.interval]
comparison_interval: timedelta | None = None
if unique_condition.comparison_interval is not None:
comparison_interval = COMPARISON_INTERVALS_VALUES.get(
unique_condition.comparison_interval
)
try:
result = handler.get_rate_bulk(
duration=duration,
groups=groups_to_query,
environment_id=unique_condition.environment_id,
current_time=time,
comparison_interval=comparison_interval,
filters=unique_condition.filters,
)
absent_group_ids = group_ids - set(result.keys())
if absent_group_ids:
logger.warning(
"workflow_engine.delayed_workflow.absent_group_ids",
extra={"group_ids": absent_group_ids, "unique_condition": unique_condition},
)
condition_group_results[unique_condition] = result
except RateLimitExceeded as e:
# If we're on our final attempt and encounter a rate limit error, we log it and continue.
# The condition will evaluate as false, which may be wrong, but this is better for users
# than allowing the whole task to fail.
if last_try:
logger.info("delayed_workflow.snuba_rate_limit_exceeded", extra={"error": e})
else:
raise
return condition_group_results
| UniqueConditionQuery |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_counter.py | {
"start": 383,
"end": 3633
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'value': 'str'
}
attribute_map = {
'value': 'value'
}
def __init__(self, value=None, local_vars_configuration=None): # noqa: E501
"""V1beta2Counter - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._value = None
self.discriminator = None
self.value = value
@property
def value(self):
"""Gets the value of this V1beta2Counter. # noqa: E501
Value defines how much of a certain device counter is available. # noqa: E501
:return: The value of this V1beta2Counter. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1beta2Counter.
Value defines how much of a certain device counter is available. # noqa: E501
:param value: The value of this V1beta2Counter. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2Counter):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2Counter):
return True
return self.to_dict() != other.to_dict()
| V1beta2Counter |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/dist_autograd_test.py | {
"start": 6155,
"end": 6534
} | class ____(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass") # noqa: TRY002
else:
return input
| SimulateBackwardError |
python | huggingface__transformers | src/transformers/models/bertweet/tokenization_bertweet.py | {
"start": 21199,
"end": 24418
} | class ____:
r"""
Examples:
```python
>>> # Tokenizer for tweets.
>>> from nltk.tokenize import TweetTokenizer
>>> tknzr = TweetTokenizer()
>>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
>>> tknzr.tokenize(s0)
['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
>>> # Examples using *strip_handles* and *reduce_len parameters*:
>>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
>>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!"
>>> tknzr.tokenize(s1)
[':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
```"""
def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
self.preserve_case = preserve_case
self.reduce_len = reduce_len
self.strip_handles = strip_handles
def tokenize(self, text):
"""
Args:
text: str
Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
`preserve_case=False`
"""
# Fix HTML character entities:
text = _replace_html_entities(text)
# Remove username handles
if self.strip_handles:
text = remove_handles(text)
# Normalize word lengthening
if self.reduce_len:
text = reduce_lengthening(text)
# Shorten problematic sequences of characters
safe_text = HANG_RE.sub(r"\1\1\1", text)
# Tokenize:
words = WORD_RE.findall(safe_text)
# Possibly alter the case, but avoid changing emoticons like :D into :d:
if not self.preserve_case:
words = [x if EMOTICON_RE.search(x) else x.lower() for x in words]
return words
######################################################################
# Normalization Functions
######################################################################
def reduce_lengthening(text):
"""
Replace repeated character sequences of length 3 or greater with sequences of length 3.
"""
pattern = regex.compile(r"(.)\1{2,}")
return pattern.sub(r"\1\1\1", text)
def remove_handles(text):
"""
Remove Twitter username handles from text.
"""
pattern = regex.compile(
r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
)
# Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
return pattern.sub(" ", text)
######################################################################
# Tokenization Function
######################################################################
def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
"""
Convenience function for wrapping the tokenizer.
"""
return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize(
text
)
###############################################################################
__all__ = ["BertweetTokenizer"]
| TweetTokenizer |
python | PyCQA__pylint | tests/functional/u/unused/unused_typing_imports.py | {
"start": 749,
"end": 1748
} | class ____:
def __enter__(self):
return {1}
def __exit__(self, *_args):
pass
with ContextManager() as SOME_DICT: # type: Set[int]
print(SOME_DICT)
def func_test_type_comment(param):
# type: (NamedTuple) -> Tuple[NamedTuple, Pattern]
return param, re.compile('good')
def typing_fully_qualified():
variable = None # type: typing.Optional[str]
other_variable: 'typing.Optional[str]' = None
return variable, other_variable
def function(arg1, # type: Iterable
arg2 # type: List
):
# type: (...) -> Sequence
"""docstring"""
print(arg1, arg2)
def magic(alpha, beta, gamma):
# type: (str, Optional[str], Optional[datetime]) -> Any
"""going strong"""
return alpha, beta, gamma
def unused_assignment_import():
foo_or_bar = 42 # type: defaultdict
return foo_or_bar
def unused_reassigned_import(counter):
# type: (CollectionCounter) -> int
print(counter)
return 42
| ContextManager |
python | weaviate__weaviate-python-client | weaviate/collections/classes/cluster.py | {
"start": 990,
"end": 2976
} | class ____:
@staticmethod
def nodes_verbose(nodes: List[NodeREST]) -> List[NodeVerbose]:
return [
Node(
git_hash=node.get("gitHash", "None"),
name=node["name"],
shards=(
[
Shard(
collection=shard["class"],
name=shard["name"],
node=node["name"],
object_count=shard["objectCount"],
vector_indexing_status=shard["vectorIndexingStatus"],
vector_queue_length=shard["vectorQueueLength"],
compressed=shard["compressed"],
loaded=shard.get("loaded"),
)
for shard in cast(List[ShardREST], node["shards"])
]
if "shards" in node and node["shards"] is not None
else []
),
stats=(
Stats(
object_count=node["stats"]["objectCount"],
shard_count=node["stats"]["shardCount"],
)
if "stats" in node
else Stats(
object_count=0,
shard_count=0,
)
),
status=node["status"],
version=node.get("version", ""),
)
for node in nodes
]
@staticmethod
def nodes_minimal(nodes: List[NodeREST]) -> List[NodeMinimal]:
return [
Node(
git_hash=node.get("gitHash", "None"),
name=node["name"],
shards=None,
stats=None,
status=node["status"],
version=node.get("version", ""),
)
for node in nodes
]
| _ConvertFromREST |
python | tiangolo__fastapi | tests/test_no_schema_split.py | {
"start": 612,
"end": 701
} | class ____(BaseModel):
body: str = ""
events: List[MessageEvent] = []
| MessageOutput |
python | numba__numba | numba/cuda/simulator/kernelapi.py | {
"start": 319,
"end": 756
} | class ____(object):
'''
Used to implement thread/block indices/dimensions
'''
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __str__(self):
return '(%s, %s, %s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Dim3(%s, %s, %s)' % (self.x, self.y, self.z)
def __iter__(self):
yield self.x
yield self.y
yield self.z
| Dim3 |
python | PrefectHQ__prefect | tests/cli/test_work_pool.py | {
"start": 18855,
"end": 19561
} | class ____:
async def test_resume(self, prefect_client, work_pool):
assert work_pool.is_paused is False
# set paused
await prefect_client.update_work_pool(
work_pool_name=work_pool.name,
work_pool=WorkPoolUpdate(is_paused=True),
)
work_pool = await prefect_client.read_work_pool(work_pool.name)
assert work_pool.is_paused is True
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool resume {work_pool.name}",
)
assert res.exit_code == 0
client_res = await prefect_client.read_work_pool(work_pool.name)
assert client_res.is_paused is False
| TestResume |
python | tensorflow__tensorflow | tensorflow/python/eager/forwardprop_test.py | {
"start": 36600,
"end": 37719
} | class ____(test.TestCase):
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testOfFunctionWhile(self):
y = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(y, 1.) as acc:
self.assertAllClose(10., acc.jvp(_has_loop(constant_op.constant(5), y)))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testOfFunctionCond(self):
y = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(y, 1.) as acc:
self.assertAllClose(3., acc.jvp(_has_cond(constant_op.constant(5), y)))
self.assertAllClose(0., acc.jvp(_has_cond(constant_op.constant(0), y)))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testInFunctionWhile(self):
self.assertAllClose(
10., _fprop_while(constant_op.constant(5), constant_op.constant(1.)))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testInFunctionCond(self):
self.assertAllClose(
3., _fprop_cond(constant_op.constant(5), constant_op.constant(1.)))
self.assertAllClose(
0., _fprop_cond(constant_op.constant(0), constant_op.constant(1.)))
| ControlFlowTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/executor_definition.py | {
"start": 2687,
"end": 9985
} | class ____(NamedConfigurableDefinition):
"""An executor is responsible for executing the steps of a job.
Args:
name (str): The name of the executor.
config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data
available in `init_context.executor_config`. If not set, Dagster will accept any config
provided.
requirements (Optional[List[ExecutorRequirement]]): Any requirements that must
be met in order for the executor to be usable for a particular job execution.
executor_creation_fn(Optional[Callable]): Should accept an :py:class:`InitExecutorContext`
and return an instance of :py:class:`Executor`
required_resource_keys (Optional[Set[str]]): Keys for the resources required by the
executor.
description (Optional[str]): A description of the executor.
"""
def __init__(
self,
name: str,
config_schema: Optional[UserConfigSchema] = None,
requirements: Union[
ExecutorRequirementsFunction, Optional[Sequence[ExecutorRequirement]]
] = None,
executor_creation_fn: Optional[ExecutorCreationFunction] = None,
description: Optional[str] = None,
):
self._name = check.str_param(name, "name")
self._requirements_fn: ExecutorRequirementsFunction
if callable(requirements):
self._requirements_fn = requirements
else:
requirements_lst = check.opt_list_param(
requirements, "requirements", of_type=ExecutorRequirement
)
self._requirements_fn = lambda _: requirements_lst
self._config_schema = convert_user_facing_definition_config_schema(config_schema)
self._executor_creation_fn = check.opt_callable_param(
executor_creation_fn, "executor_creation_fn"
)
self._description = check.opt_str_param(description, "description")
@public
@property
def name(self) -> str:
"""Name of the executor."""
return self._name
@public
@property
def description(self) -> Optional[str]:
"""Description of executor, if provided."""
return self._description
@property
def config_schema(self) -> IDefinitionConfigSchema:
return self._config_schema
def get_requirements(
self, executor_config: Mapping[str, object]
) -> Sequence[ExecutorRequirement]:
return self._requirements_fn(executor_config)
@public
@property
def executor_creation_fn(self) -> Optional[ExecutorCreationFunction]:
"""Callable that takes an :py:class:`InitExecutorContext` and returns an instance of
:py:class:`Executor`.
"""
return self._executor_creation_fn
def copy_for_configured(self, name, description, config_schema) -> Self:
return ExecutorDefinition(
name=name,
config_schema=config_schema, # type: ignore
executor_creation_fn=self.executor_creation_fn,
description=description or self.description,
requirements=self._requirements_fn,
)
@staticmethod
def hardcoded_executor(executor: "Executor"):
return ExecutorDefinition(
# Executor name was only relevant in the pipeline/solid/mode world, so we
# can put a dummy value
name="__executor__",
executor_creation_fn=lambda _init_context: executor,
)
# Backcompat: Overrides configured method to provide name as a keyword argument.
# If no name is provided, the name is pulled off of this ExecutorDefinition.
@public
def configured(
self,
config_or_config_fn: Any,
name: Optional[str] = None,
config_schema: Optional[UserConfigSchema] = None,
description: Optional[str] = None,
) -> Self:
"""Wraps this object in an object of the same type that provides configuration to the inner
object.
Using ``configured`` may result in config values being displayed in
the Dagster UI, so it is not recommended to use this API with sensitive values,
such as secrets.
Args:
config_or_config_fn (Union[Any, Callable[[Any], Any]]): Either (1) Run configuration
that fully satisfies this object's config schema or (2) A function that accepts run
configuration and returns run configuration that fully satisfies this object's
config schema. In the latter case, config_schema must be specified. When
passing a function, it's easiest to use :py:func:`configured`.
name (Optional[str]): Name of the new definition. If not provided, the emitted
definition will inherit the name of the `ExecutorDefinition` upon which this
function is called.
config_schema (Optional[ConfigSchema]): If config_or_config_fn is a function, the config
schema that its input must satisfy. If not set, Dagster will accept any config
provided.
description (Optional[str]): Description of the new definition. If not specified,
inherits the description of the definition being configured.
Returns (ConfigurableDefinition): A configured version of this object.
"""
name = check.opt_str_param(name, "name")
new_config_schema = ConfiguredDefinitionConfigSchema(
self, convert_user_facing_definition_config_schema(config_schema), config_or_config_fn
)
return self.copy_for_configured(name or self.name, description, new_config_schema)
@overload
def executor(name: ExecutorCreationFunction) -> ExecutorDefinition: ...
@overload
def executor(
name: Optional[str] = ...,
config_schema: Optional[UserConfigSchema] = ...,
requirements: Optional[
Union[ExecutorRequirementsFunction, Sequence[ExecutorRequirement]]
] = ...,
) -> "_ExecutorDecoratorCallable": ...
@public
def executor(
name: Union[ExecutorCreationFunction, Optional[str]] = None,
config_schema: Optional[UserConfigSchema] = None,
requirements: Optional[
Union[ExecutorRequirementsFunction, Sequence[ExecutorRequirement]]
] = None,
) -> Union[ExecutorDefinition, "_ExecutorDecoratorCallable"]:
"""Define an executor.
The decorated function should accept an :py:class:`InitExecutorContext` and return an instance
of :py:class:`Executor`.
Args:
name (Optional[str]): The name of the executor.
config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in
`init_context.executor_config`. If not set, Dagster will accept any config provided for.
requirements (Optional[List[ExecutorRequirement]]): Any requirements that must
be met in order for the executor to be usable for a particular job execution.
"""
if callable(name):
check.invariant(config_schema is None)
check.invariant(requirements is None)
return _ExecutorDecoratorCallable()(name)
return _ExecutorDecoratorCallable(
name=name, config_schema=config_schema, requirements=requirements
)
| ExecutorDefinition |
python | encode__django-rest-framework | rest_framework/authtoken/migrations/0004_alter_tokenproxy_options.py | {
"start": 84,
"end": 379
} | class ____(migrations.Migration):
dependencies = [
('authtoken', '0003_tokenproxy'),
]
operations = [
migrations.AlterModelOptions(
name='tokenproxy',
options={'verbose_name': 'Token', 'verbose_name_plural': 'Tokens'},
),
]
| Migration |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chartsheet08.py | {
"start": 315,
"end": 1934
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chartsheet08.xlsx")
self.ignore_files = ["xl/drawings/drawing1.xml"]
def test_create_file(self):
"""Test the worksheet properties of an XlsxWriter chartsheet file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [61297792, 61299328]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.set_margins(
left="0.51181102362204722",
right="0.51181102362204722",
top="0.55118110236220474",
bottom="0.94488188976377963",
)
chartsheet.set_header("&CPage &P", "0.11811023622047245")
chartsheet.set_footer("&C&A", "0.11811023622047245")
chartsheet.set_paper(9)
chartsheet.set_portrait()
chartsheet.horizontal_dpi = 200
chartsheet.vertical_dpi = 200
chartsheet.set_chart(chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | run-llama__llama_index | llama-index-core/llama_index/core/schema.py | {
"start": 6351,
"end": 6860
} | class ____(str, Enum):
"""
Node relationships used in `BaseNode` class.
Attributes:
SOURCE: The node is the source document.
PREVIOUS: The node is the previous node in the document.
NEXT: The node is the next node in the document.
PARENT: The node is the parent node in the document.
CHILD: The node is a child node in the document.
"""
SOURCE = auto()
PREVIOUS = auto()
NEXT = auto()
PARENT = auto()
CHILD = auto()
| NodeRelationship |
python | google__jax | tests/api_util_test.py | {
"start": 824,
"end": 3101
} | class ____(jtu.JaxTestCase):
def test_donation_vector(self):
params = {"a": jnp.ones([]), "b": jnp.ones([])}
state = {"c": jnp.ones([]), "d": jnp.ones([])}
x = jnp.ones([])
args = params, state, x
for size in range(4):
for donate_argnums in it.permutations((0, 1, 2), size):
for kwargs in ({}, {"a": x}):
expected = ()
expected += (True, True) if 0 in donate_argnums else (False, False)
expected += (True, True) if 1 in donate_argnums else (False, False)
expected += (True,) if 2 in donate_argnums else (False,)
if kwargs:
expected += (False,)
self.assertEqual(
expected,
api_util.donation_vector(donate_argnums, (),
jax.tree.structure((args, kwargs))))
@parameterized.parameters(
((0,), (0,)),
((0, 1), (1, 2)),
((0, 1, 2), (0, 1, 2)),
)
def test_rebase_donate_argnums_rejects_overlapping(self, donate, static):
with self.assertRaisesRegex(ValueError, "cannot intersect"):
api_util.rebase_donate_argnums(donate, static)
@parameterized.parameters(
((), (), ()),
((), (1, 2, 3), ()),
((0,), (2, 3), (0,)),
((0, 1), (2, 3), (0, 1)),
((2, 3), (0, 1), (0, 1)),
((3, 2), (1, 0), (0, 1)),
((3,), (0, 1), (1,)),
((3, 3, 3,), (0, 1), (1,)),
)
def test_rebase_donate_argnums(self, donate, static, expected):
self.assertEqual(expected,
api_util.rebase_donate_argnums(donate, static))
def test_resolve_kwargs(self):
def fun(x, y, z=3):
return x, y, z
assert api_util.resolve_kwargs(fun, (1,), {"y": 2}) == (1, 2, 3)
assert api_util.resolve_kwargs(fun, (1, 2), {"z": 3}) == (1, 2, 3)
assert api_util.resolve_kwargs(
fun, (), {"x": 1, "y": 2, "z": 3}) == (1, 2, 3)
def test_resolve_kwargs_with_keyword(self):
def fun(x, y, z, *, kw=True):
del kw
return x, y, z
assert api_util.resolve_kwargs(fun, (1, 2), {"z": 3}) == (1, 2, 3)
with self.assertRaisesRegex(TypeError, "keyword arguments"):
api_util.resolve_kwargs(fun, (1, 2), {"z": 3, "kw": False})
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| ApiUtilTest |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 78095,
"end": 78454
} | class ____(FieldValues):
valid_inputs = ()
invalid_inputs = [
((0, 1), ['Ensure this field has at least 3 elements.']),
((0, 1, 2, 3, 4, 5), ['Ensure this field has no more than 4 elements.']),
]
outputs = ()
field = serializers.ListField(child=serializers.IntegerField(), min_length=3, max_length=4)
| TestListFieldLengthLimit |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes5.py | {
"start": 549,
"end": 2400
} | class ____(ParentClass1):
# This should generate an error.
cv1 = ""
# This should generate an error if reportIncompatibleVariableOverride
# is enabled.
cv2: int = 3
cv3 = 3
# This should generate an error if reportIncompatibleVariableOverride
# is enabled because it's overriding a non-final with a final.
cv4: Final = 3
# This should generate an error if reportIncompatibleVariableOverride is
# enabled because the type is incompatible.
var1: str
var2: str
# This should generate an error if reportIncompatibleVariableOverride is
# enabled because the member is mutable, and is therefore invariant.
var3: int
# This should generate an error.
var4 = ""
var5 = 5
# This should generate an error if reportIncompatibleVariableOverride is
# enabled because a property cannot override a variable.
@property
def var6(self) -> int:
return 3
# This should not generate an error because the inherited (expected)
# type of var7 is List[float], so the expression "[3, 4, 5]" should
# be inferred as List[float] rather than List[int].
var7 = [3, 4, 5]
# This should generate an error because floats are not allowed
# in a List[int].
var8 = [3.3, 45.6, 5.9]
# This should generate an error if reportIncompatibleVariableOverride is
# enabled.
var9: ClassVar[int] = 3
# This should generate an error if reportIncompatibleVariableOverride
# is enabled.
_var1: str
# This should not generate an error because it's a private name.
__var1: str
def __init__(self):
# This should generate an error if reportIncompatibleVariableOverride
# is enabled.
self.var10: str = ""
# This should generate an error.
self.var11 = ""
self.var12 = ""
| Subclass1 |
python | walkccc__LeetCode | solutions/2546. Apply Bitwise Operations to Make Strings Equal/2546.py | {
"start": 0,
"end": 116
} | class ____:
def makeStringsEqual(self, s: str, target: str) -> bool:
return ('1' in s) == ('1' in target)
| Solution |
python | joke2k__faker | faker/providers/job/fr_CH/__init__.py | {
"start": 108,
"end": 41739
} | class ____(BaseProvider):
jobs = [
"Accompagnant socioprofessionnel diplômé",
"Accompagnateur de randonnée avec brevet fédéral",
"Accompagnateur social avec brevet fédéral",
"Acousticien en systèmes auditifs CFC",
"Administrateur diplomé de biens immobiliers",
"Agent commercial de l'imprimerie avec brevet fédéral (BF)",
"Agent d'entretien de bateaux",
"Agent d'exploitation CFC",
"Agent de détention avec brevet fédéral",
"Agent de maintenance avec brevet fédéral",
"Agent de processus avec brevet fédéral",
"Agent de propreté AFP",
"Agent de propreté CFC",
"Agent de transport et logistique avec brevet fédéral",
"Agent de transports publics CFC",
"Agent de voyages avec brevet fédéral",
"Agent d’entretien de bateaux CFC",
"Agent d’entretien en assainissement AFP",
"Agent d’exploitation CFC",
"Agent en automatique avec brevet fédéral",
"Agent en information documentaire CFC",
"Agent en produits textiles AFP",
"Agent fiduciaire avec brevet fédéral",
"Agent professionnel de protection de personnes et de biens avec brevet fédéral",
"Agent professionnel de sécurité et de surveillance avec brevet fédéral",
"Agent relation client CFC",
"Agent technico-commercial avec brevet fédéral",
"Agent technique d'exploitation avec brevet fédéral",
"Agent technique des matières synthétiques CFC",
"Agriculteur CFC (Champ professionnel de l'agriculture)",
"agrocommerçant diplômé ES",
"Agropraticien AFP",
"agrotechnicien diplômé ES",
"Aide en informatique AFP",
"Aide en soins et accompagnement AFP",
"Aide en technique du bâtiment AFP",
"Aide-carreleur AFP",
"Aide-constructeur métallique AFP",
"Aide-maçon AFP",
"Aide-menuisier AFP",
"Aide-monteur frigoriste AFP",
"Aide-mouleur AFP",
"Aide-peintre AFP",
"Aide-plâtrier AFP",
"Aide-polybâtisseur AFP",
"ambulancier diplômé ES",
"Analyste financier et gestionnaire de fortunes diplômé",
"Arboriculteur CFC (Champ professionnel de l'agriculture)",
"Armurier",
"Armurier CFC",
"Art-thérapeute diplômé (DF)",
"Artisan du bois CFC",
"Artisan du cuir et du textile CFC",
"Artisan en conservation des monument historiques avec brevet fédéral",
"Assistant de bureau AFP",
"Assistant de direction avec brevet fédéral",
"Assistant de gestion en pharmacie avec brevet fédéral",
"Assistant dentaire CFC",
"Assistant du commerce de détail AFP",
"Assistant en audiovision avec brevet fédéral",
"Assistant en boucherie et charcuterie AFP",
"assistant en droit diplômé ES",
"Assistant en maintenance d'automobiles AFP",
"Assistant en médecine vétérinaire CFC",
"Assistant en médias imprimés AFP",
"Assistant en pharmacie CFC",
"Assistant en podologie CFC",
"Assistant en podologie CFC",
"Assistant en promotion de l’activité physique et de la santé CFC",
"Assistant en soins et santé communautaire CFC",
"Assistant en tourisme avec brevet fédéral",
"Assistant en traitement de surface AFP",
"Assistant médical CFC",
"Assistant socio-éducatif CFC",
"Assistant spécialisé en soins de longue durée et accompagnement avec brevet fédéral",
"Assistant vernisseur AFP",
"Assistant-constructeur de fondations AFP (champ professionnel Construction de voies de communication)",
"Assistant-constructeur de fondations AFP (champ professionnel Construction de voies de communication)",
"Assistant-constructeur de routes AFP (champ professionnel Construction de voies de communication)",
"Assistant-constructeur de routes AFP (champ professionnel Construction de voies de communication)",
"Assistant-constructeur de sols industriels et de chapes AFP "
"(champ professionnel Construction de voies de communication)",
"Assistant-constructeur de sols industriels et de chapes AFP "
"(champ professionnel Construction de voies de communication)",
"Assistant-constructeur de voies ferrées AFP (champ professionnel Construction de voies de communication)",
"Audioprothésiste avec brevet fédéral",
"Automaticien CFC",
"Aviculteur CFC (Champ professionnel de l'agriculture)",
"Bijoutier CFC",
"Bijoutier, maître",
"Bottier-orthopédiste",
"Bottier-orthopédiste CFC",
"Bottier-orthopédiste diplômé, maître",
"Boucher-charcutier CFC",
"Boucher-charcutier, maître",
"Boulanger-pâtissier-confiseur AFP",
"Boulanger-pâtissier-confiseur CFC",
"Cabin Crew Member avec brevet fédéral",
"Cadre diplomé des organisations de secours",
"Calorifugeur-tôlier",
"Calorifugeur-tôlier CFC",
"Calorifugeur-tôlier, maître",
"Carreleur CFC",
"Carreleur, maître",
"Carrossier, maître",
"Carrossier-peintre CFC",
"Carrossier-tôlier CFC",
"Caviste CFC (Champ professionnel de l'agriculture)",
"Céramiste",
"Céramiste CFC",
"Céramiste industriel CFC",
"Charpentier",
"Charpentier CFC",
"Charpentier, maître",
"Charron",
"Charron, maître",
"Chef boulanger-pâtissier-confiseur avec brevet fédéral",
"Chef carreleur avec brevet fédéral",
"Chef confiseur-pâtissier-glacier avec brevet fédéral",
"Chef cuisinier avec brevet fédéral",
"Chef d'atelier en machines agricoles avec brevet fédéral",
"Chef d'atelier en machines de chantier avec brevet fédéral",
"Chef d'atelier d'appareils à moteur avec brevet fédéral",
"Chef d'atelier et de montage en construction métallique avec brevet fédéral",
"Chef d'entreprise de l'hôtellerie et de la restauration diplomé",
"Chef d'entreprise diplomée en boulangerie-pâtisserie-confiserie",
"Chef d'établissement de l'hôtellerie et de la restauration avec brevet fédéral",
"Chef d'expédition diplômé",
"Chef d'exploitation agricole avec brevet fédéral",
"Chef d'exploitation arboricole avec brevet fédéral",
"Chef d'exploitation avicole avec brevet fédéral",
"Chef d'exploitation caviste avec brevet fédéral",
"Chef d'exploitation économie carnée avec brevet fédéral",
"Chef d'exploitation maraîchère avec brevet fédéral",
"Chef d'exploitation viticole avec brevet fédéral",
"Chef de commerce international diplômé",
"Chef de cuisine avec diplôme fédéral",
"Chef de la restauration avec diplôme fédéral",
"Chef de la restauration collective avec diplôme fédéral",
"Chef de logistique diplômé",
"Chef de marketing diplômé",
"Chef de production en menuiserie",
"Chef de production technique des matières synthétiques diplômé",
"Chef de projet en menuiserie",
"Chef de projet en montage solaire avec brevet fédéral",
"Chef de projet en technique du bâtiment avec brevet fédéral",
"Chef de projet et chef d'atelier en construction de tableaux électriques avec brevet fédéral",
"Chef de projet systèmes de sécurité avec brevet fédéral",
"Chef de projet verre avec brevet fédéral",
"Chef de projets constructeur sur métal avec diplôme fédéral",
"Chef de réception avec brevet fédéral",
"Chef de service après-vente diplômé",
"Chef de vente diplômé",
"Chef du secteur hôtelier-intendance avec diplôme fédéral",
"Chef monteur-frigoriste avec brevet fédéral",
"Chef poseur de revêtements de sols avec brevet fédéral",
"Chef-monteur en échafaudage avec brevet fédéral",
"Cidrier avec diplôme de maîtrise",
"Coiffeur AFP",
"Coiffeur avec brevet fédéral",
"Coiffeur avec diplôme fédéral",
"Coiffeur CFC",
"Coiffeur CFC",
"Coloriste avec brevet fédéral",
"Concepteur artisan avec brevet fédéral",
"Concepteur en multimédia",
"Concierge avec brevet fédéral",
"Conducteur d'engins forestier avec brevet fédéral",
"Conducteur de camion",
"Conducteur de véhicules légers AFP",
"Conducteur de véhicules lourds CFC",
"Confectionneur AFP",
"Conseil en brevets",
"Conseiller dans le domaine psychosocial avec diplôme fédéral",
"Conseiller de service à la clientèle dans la branche automobile avec brevet fédéral",
"Conseiller de vente automobile avec brevet fédéral",
"Conseiller de vente en horlogerie avec brevet fédéral",
"Conseiller en affections respiratoires et tuberculose avec brevet fédéral",
"Conseiller en aménagement intérieur avec brevet fédéral",
"Conseiller en couleurs et en styles de mode avec brevet fédéral",
"Conseiller en énergie et en efficacité énergétique avec diplôme fédéral",
"Conseiller en environnement avec brevet federal",
"Conseiller en milieu rural diplômé",
"Conseiller en organisation avec diplôme fédéral",
"Conseiller en relations publiques avec diplôme fédéral",
"Conseiller en revêtements de sols avec brevet fédéral",
"Conseiller en sécurité électrique avec brevet fédéral",
"Conseiller énergétique du bâtiment avec brevet fédéral",
"Conseiller financier avec brevet fédéral",
"Constructeur de ski (LU)",
"Constructeur d'appareils industriels",
"Constructeur d'éléments en béton préfabriqués CFC",
"Constructeur d'installation de ventilation CFC",
"Constructeur de bateaux",
"Constructeur de bateaux CFC",
"Constructeur de fondations CFC (champ professionnel Construction de voies de communication)",
"Constructeur de fondations CFC (champ professionnel Construction de voies de communication)",
"Constructeur de routes CFC (champ professionnel Construction de voies de communication)",
"Constructeur de routes CFC (champ professionnel Construction de voies de communication)",
"Constructeur de sols industriels et de chapes CFC "
"(champ professionnel Construction de voies de communication)",
"Constructeur de sols industriels et de chapes CFC "
"(champ professionnel Construction de voies de communication)",
"Constructeur de voies ferrées CFC (champ professionnel Construction de voies de communication)",
"Constructeur de voies ferrées CFC (champ professionnel Construction de voies de communication)",
"Constructeur d’appareils industriels CFC",
"Constructeur métallique CFC",
"Constructeur métallique, maître",
"Constructeur naval, maître",
"Contact Center Supervisor avec brevet fédéral",
"Contremaître charpentier avec brevet fédéral",
"Contremaître de construction de fondations avec brevet fédéral "
"(champ professionnel Construction de voies de communication)",
"Contremaître de construction de routes avec brevet fédéral "
"(champ professionnel Construction de voies de communication)",
"Contremaître de construction de sols industriels et de chapes avec brevet fédéral "
"(champ professionnel Construction de voies de communication)",
"Contremaître de sciage d'édifice avec brevet fédéral",
"Contremaître de voies ferrées avec brevet fédéral "
"(champ professionnel Construction de voies de communication)",
"Contremaître en chauffage avec brevet fédéral",
"Contremaître en entretien de routes avec brevet fédéral "
"(champ professionnel Construction de voies de communication)",
"Contremaître en ferblanterie avec brevet fédéral",
"Contremaître en ventilation avec brevet fédéral",
"Contremaître forestier avec brevet fédéral",
"Contremaître jardinier avec brevet fédéral",
"Contremaître maçon avec brevet fédéral",
"Contremaître paveur avec brevet fédéral (champ professionnel Construction de voies de communication)",
"Contremaître peintre avec brevet fédéral",
"Contremaître plâtrier-constructeur à sec avec brevet fédéral",
"Contremaître Polybat avec brevet fédéral",
"Contremaître ramoneur avec brevet fédéral",
"Contremaître sanitaire avec brevet fédéral",
"Contremaître vitrier avec brevet fédéral",
"Contrôleur de combustion avec brevet fédéral",
"contrôleur de la circulation aérienne diplomé ES",
"Contrôleur de matériaux de construction avec brevet fédéral",
"Coordinateur d'atelier automobile avec brevet fédéral",
"Coordinateur en médecine ambulatoire orientation clinique avec brevet fédéral",
"Coordinateur en médecine ambulatoire orientation gestion avec brevet fédéral",
"Cordonnier CFC",
"Cordonnier, maître",
"Correcteur avec brevet fédéral (BF)",
"Courtepointier CFC",
"Courtepointier diplômé",
"Courtier en immeubles avec brevet fédéral",
"Coutelier",
"Coutelier CFC",
"Coutelier, maître",
"Couturier avec brevet fédéral",
"Couturier diplômé",
"couturier d’intérieur AFP",
"Créateur de textiles intérieurs avec brevet fédéral",
"Créateur de tissu CFC",
"Créateur de vêtements",
"Créateur de vêtements CFC",
"Croupier avec brevet fédéral",
"Cuisinier CFC",
"Cuisinier en diététique CFC",
"Danseur interprète CFC",
"Déclarant de douane avec brevet fédéral",
"Décorateur d'intérieur avec brevet fédéral",
"Décorateur d'intérieurs",
"Décorateur d'intérieurs diplômé",
"Dermapigmentologue avec brevet fédéral",
"designer diplômé ES en arts visuels",
"designer diplômé ES en communication visuelle",
"designer diplômé ES en design de produit",
"Designer graphique diplomé",
"Dessinateur CFC (champ professionnel planification du territoireet de la construction)",
"Dessinateur en construction microtechnique CFC",
"Dessinateur-constructeur en génie civil diplômé",
"Dessinateur-constructeur en microtechnique",
"Dessinateur-constructeur industriel CFC",
"Dessinateur-constructeur sur métal CFC",
"Développeur immobilier avec brevet fédéral",
"Diagnosticien d'automobiles avec brevet fédéral",
"dipl. Abteilungsleiter Gesundheit NDS HF",
"dipl. Baubetriebsmanager NDS HF",
"dipl. Bauprojekt- und Immobilienmanager NDS HF",
"dipl. Berater in Veränderungsprozessen NDS HF",
"dipl. Betriebswirtschafter NDS HF",
"dipl. Business Analyst NDS HF",
"dipl. Business Banker NDS HF",
"dipl. Business Coach NDS HF",
"dipl. Business Engineer NDS HF",
"dipl. Controller NDS HF",
"dipl. Energieberater NDS HF",
"dipl. Erlebnispädagoge NDS HF",
"dipl. Experte in Leadership und Change Management NDS HF",
"dipl. Experte in strategischem Management NDS HF",
"dipl. Finanzchef NDS HF",
"dipl. Finanzexperte NDS HF",
"dipl. Geschäftsführer NDS HF",
"dipl. Hotelmanager NDS HF",
"dipl. HR-Experte NDS HF",
"dipl. Human Resource Manager NDS HF",
"dipl. in Leadership und Management NDS HF",
"dipl. in Unternehmens- und Mitarbeiterführung NDS HF",
"dipl. IT-Service-Manager NDS HF",
"dipl. Kommunikationstrainer NDS HF",
"dipl. Leiter Finanzen und Dienste NDS HF",
"dipl. Leiter interne Kommunikation und Management Support NDS",
"dipl. Logistikleiter NDS HF",
"dipl. logotherapeutischer Berater NDS HF",
"dipl. Marketing- und Verkaufsleiter NDS HF",
"dipl. Marketing- und Verkaufsmanager NDS HF",
"dipl. Marketingmanager NDS HF",
"dipl. Network Engineer NDS HF",
"dipl. Online-Marketingmanager NDS HF",
"dipl. Personalleiter NDS HF",
"dipl. Produktmanager Textil NDS HF",
"dipl. Projekt- und Prozessmanager NDS HF",
"dipl. Projektleiter NDS HF",
"dipl. Projektmanager NDS HF",
"dipl. Qualitäts- und Prozessmanager NDS HF",
"dipl. Qualitätsmanager NDS HF",
"dipl. Umfassende Leadership NDS HF",
"dipl. Umfassendes Controlling NDS HF",
"dipl. Unternehmensführer NDS HF",
"dipl. Unternehmensleiter NDS HF",
"Diplom in Energiemanagement NDS HF",
"Diplom in Unternehmensführung NDS HF",
"Directeur d'école d'une discipline sportive avec diplôme fédéral",
"Directeur d'institution de l'enfance diplômé EPD ES",
"Directeur d'institution sociale et médico-sociale diolomé",
"Directeur de studio de gymnastique avec brevet fédéral",
"Directeur des travaux du bâtiment diplômé",
"Directeur des travaux du génie civil diplômé",
"Dirigeant d'entreprise avec diplôme fédéral",
"Dirigeant de maintenance diplômé",
"Dirigeant diplomé en facility management",
"Doreur-Encadreur CFC",
"Droguiste CFC",
"Ebéniste CFC",
"Ebéniste, maître",
"Ecobiologiste de la construction avec brevet fédéral",
"Ecobiologiste de la construction diplômé",
"économiste bancaire diplomé ES",
"économiste d'assurance diplômé ES",
"économiste d'entreprise diplômé ES",
"Economiste d'entreprise PME avec diplôme fédéral",
"éducateur de l'enfance diplômé. ES",
"éducateur social diplomé ES",
"éducateur social diplômé ES",
"Electricien chef de projet avec brevet fédéral",
"Electricien de montage CFC",
"Electricien de montage CFC",
"Electricien de réseau",
"Electricien de réseau CFC",
"Electricien de réseau, maître, diplomé",
"Electronicien CFC",
"Electronicien en multimédia",
"Electronicien en multimédia avec brevet fédéral",
"Electronicien en multimédia CFC",
"Electroplaste avec brevet fédéral",
"Electroplaste CFC",
"Electroplaste diplômé",
"Employé de commerce",
"Employé de commerce CFC",
"Employé de remontées mécaniques AFP",
"Employé d’exploitation AFP",
"Employé en cuisine AFP",
"Employé en hôtellerie AFP",
"Employé en industrie laitière AFP",
"Employé en intendance AFP",
"Employé en intendance AFP",
"Employé en restauration AFP",
"Enquêteur de douane avec diplôme fédéral",
"Entraîneur de sport de performance avec brevet fédéral",
"Entraîneur diplômé de sport d'Elite",
"Entrepreneur de pompes funèbres avec brevet fédéral",
"Entrepreneur-construction diplômé",
"Esthéticien avec brevet fédéral",
"Esthéticien CFC",
"Esthéticien diplômé",
"Expert diplômé en assurance-maladie",
"Expert diplômé en assurances de pension",
"Expert diplômé en assurances sociales",
"Expert diplômé en conduite organisationelle",
"Expert diplômé en finance et en controlling",
"Expert diplômé en finance et investissements",
"Expert diplômé en tourisme",
"Expert domaine opératoire avec diplôme fédéral",
"Expert du domaine des privations de liberté avec diplôme fédéral",
"Expert du domaine éguin avec diplôme fédéral",
"Expert en analyses biomédicales et gestion de laboratoire avec diplôme fédéral",
"Expert en cytodiagnostic avec diplôme fédéral",
"Expert en douane diplomé ES",
"Expert en estimations immobilières avec brevet fédéral",
"Expert en finance de PME avec diplôme fédéral",
"Expert en gestion hospitalière avec diplôme fédéral",
"Expert en management de l'organisation avec diplôme fédéral",
"Expert en opérations des marchés financiers avec diplôme fédéral",
"Expert en prévention des infections associées aux soins avec diplôme fédéral",
"Expert en production avec brevet fédéral",
"Expert en soins d’anesthésie diplômé EPD ES",
"Expert en soins d’urgence diplômé EPD ES",
"Expert en soins intensifs diplômé EPD ES",
"Expert fiduciaire diplômé",
"Expert fiscal diplômé",
"Expert protection incendie avec diplôme fédéral",
"Expert-comptable diplômé",
"Expert-soudeur avec brevet fédéral",
"Exploitant de station d'épuration avec brevet fédéral",
"Facteur d'instruments de musique CFC",
"Fashion spécialiste avec brevet fédéral",
"Fashiondesigner avec diplôme fédéral",
"Ferblantier CFC",
"Fleuriste AFP",
"Fleuriste avec brevet fédéral",
"Fleuriste CFC",
"Fleuriste, maître",
"Flexographe CFC",
"Fontainier avec brevet fédéral",
"forestier diplômé ES",
"Forestier-bûcheron CFC",
"Forgeron, maître",
"Formateur avec brevet fédéral",
"Fourreur, maître",
"Gainier",
"Garde-faune avec brevet fédéral",
"Garde-frontière avec brevet fédéral",
"Garde-frontière avec diplôme fédéral",
"Garde-pêche avec brevet fédéral",
"Gardien d'immeuble diplômé",
"Gardien de cheval AFP",
"Gardien de chevaux AFP",
"Gardien d’animaux CFC",
"Garnisseur de meubles CFC",
"Garnisseur de meubles industriels avec brevet fédéral",
"Garnisseur de meubles industriels, maître",
"Garnisseur de meubles spécialisé avec brevet fédéral",
"Garnisseur en carrosserie",
"Géomaticien CFC",
"Gérant d'immeubles avec brevet fédéral",
"Gérant de caisse de pension diplômé",
"Gestionnaire d'entreprise diplômé de la branche automobile",
"Gestionnaire d'entreprise diplômé secteur automobile",
"Gestionnaire du commerce de détail CFC",
"Gestionnaire en intendance CFC",
"Gestionnaire en intendance CFC",
"Gestionnaire en tourisme de santé et d'activité avec diplôme fédéral",
"gestionnaire en tourisme diplômé ES",
"Gouvernant de maison avec brevet fédéral",
"Graphiste CFC",
"Graveur",
"Graveur CFC",
"Guide de montagne avec brevet fédéral",
"Guide touristique avec brevet fédéral",
"Horloger CFC",
"Horloger dans le domaine professionnel de l'industrie",
"Horloger dans le domaine professionnel du rhabillage",
"Horloger de production CFC",
"Horloger praticien",
"Horloger, maître",
"Horticulteur",
"Horticulteur AFP",
"Horticulteur CFC",
"Hôtelier-Restaurateur diplômé ES",
"hygiéniste dentaire diplômé ES",
"ICT-Manager diplomé",
"infirmier diplômé ES",
"infirmier diplômé ES",
"Informaticien CFC",
"Informaticien CFC",
"Informaticien de gestion avec brevet fédéral",
"informaticien de gestion diplômé ES",
"Informaticien en développement d'application TIC avec brevet fédéral",
"Informaticien en technique des systèmes et réseaux TIC avec brevet fédéral",
"Installateur en chauffage CFC",
"Installateur sanitaire CFC",
"Installateur-électricien CFC",
"Installateur-électricien CFC",
"Installateur-électricien diplômé",
"Instructeur de chiens guides d'aveugles avec diplôme fédéral",
"Instructeur de fitness avec brevet fédéral",
"intendant du secteur hôtelier d'établissement diplômé ES",
"Interactive Media Designer CFC",
"Key account manager diplômé",
"Laborantin CFC",
"Laborantin en physique",
"Laborantin en physique CFC",
"Laboriste",
"Libraire avec brevet fédéral",
"Libraire CFC",
"Logisticien AFP",
"Logisticien AFP",
"Logisticien avec brevet fédéral",
"Logisticien avec brevet fédéral",
"Logisticien avec diplôme fédéral",
"Logisticien CFC",
"Logisticien CFC",
"Luthier",
"Luthier, maître",
"Maçon CFC",
"Maître agriculteur",
"Maître arboriculteur",
"Maître aviculteur",
"Maître caviste",
"Maître chauffagiste avec diplôme fédéral",
"Maître dans l'industrie",
"Maître du cuir et du textile",
"Maître ferblantier avec diplôme fédéral",
"Maître jardinier",
"Maître maraîcher",
"Maître peintre",
"Maître Polybat",
"Maître poseur de revêtements de sols",
"Maître Ramoneur",
"Maître sanitaire avec diplôme fédéral",
"Maître scultpeur sur bois",
"maître socioprofessionnel diplômé ES",
"Maître tableaux électriques et automation",
"Maître textile avec diplôme fédéral",
"Maître viticulteur",
"Maître vitrier",
"Manager de médias diplômé",
"Manager de remontées mécanique avec diplôme fédéral",
"Manager en commerce de détail avec diplôme fédéral",
"Manager en publication avec diplôme fédéral (DF)",
"Manager en systèmes de transports publics diplômé",
"Maquettiste d'architecture",
"Maquilleur professionnel avec brevet fédéral",
"Maraîcher CFC (Champ professionnel de l'agriculture)",
"Marbrier CFC (Champ professionnel travail de la pierre)",
"Marbrier du bâtiment CFC (Champ professionnel travail de la pierre)",
"Marbrier, maître",
"Maréchal-ferrant CFC",
"Maréchal-forgeron, maître",
"marketing manager diplômé ES",
"Masseur médical avec brevet fédéral",
"Matelot de la navigation intérieure CFC",
"Mécanicien d'appareils à moteur CFC",
"Mécanicien d'appareils à moteur, maître",
"Mécanicien de production CFC",
"Mécanicien deux-roues",
"Mécanicien en bicyclettes et motocyclettes, maître",
"Mécanicien en bicyclettes, maître",
"Mécanicien en cycles CFC",
"Mécanicien en machines agricoles CFC",
"Mécanicien en machines agricoles, maître",
"Mécanicien en machines de chantier CFC",
"Mécanicien en machines de chantier, maître",
"Mécanicien en maintenance d'automobiles CFC",
"Mécanicien en motocycles",
"Mécanicien en motocycles CFC",
"Mécanicien en motocycles de petite cylindrée et cycles CFC",
"Mécatronicien d'automobiles CFC",
"Mécatronicien de remontées mécaniques CFC",
"Médiamaticien",
"Médiamaticien avec brevet fédéral",
"Médiamaticien CFC",
"Mentor d'entreprise avec brevet fédéral",
"Menuisier, maître",
"Menuisier",
"Meunier",
"Meunier CFC",
"Micromécanicien",
"Micromécanicien CFC",
"Modeleur-céramiste",
"Moniteur de conduite avec brevet fédéral",
"Monteur automaticien CFC",
"Monteur de faux plafonds avec brevet fédéral",
"Monteur de réseaux eau et gaz avec brevet fédéral",
"Monteur frigoriste CFC",
"Monteur-frigoriste",
"Mouleur CFC",
"Mouleur de fonderie",
"Mouleur de fonderie CFC",
"Naturopathe avec diplôme fédéral",
"Négociant de vins diplômé",
"Nettoyeur chimique diplômé",
"Nettoyeur de bâtiments diplomé",
"Nettoyeur de textiles CFC",
"Officier de l'état civil avec brevet fédéral",
"Opérateur d'installations de centrale nucléaire avec brevet fédéral",
"Opérateur de machines automatisées CFC",
"Opérateur de médias imprimés CFC",
"Opérateur de sciage d’édifice CFC",
"Opérateur en horlogerie AFP",
"Opérateur en horlogerie AFP",
"Opticien CFC",
"Opticien en instruments",
"Opticien en instruments de précision CFC",
"Orfèvre en argenterie, maître",
"Orthopédiste CFC",
"Orthopédiste diplômé",
"orthoptiste diplômé ES",
"Packaging Manager avec diplôme fédéral",
"Papetier",
"Papetier CFC",
"Pâtissier-confiseur diplômé",
"Paveur CFC (champ professionnel Construction de voies de communication)",
"Paveur CFC (champ professionnel Construction de voies de communication)",
"Paveur, maître",
"Pêcheur professionnel avec brevet fédéral",
"Peintre CFC",
"Peintre en automobiles avec brevet fédéral",
"Peintre en bâtiments",
"Peintre en décors de théâtre",
"Peintre en décors de théâtre CFC",
"Peintre verrier CFC",
"Photodesigner diplomé",
"Photographe",
"Photographe CFC",
"Photographe de laboratoire",
"Physiothérapeut pour animaux avec diplôme fédéral",
"pilote diplômé ES",
"Planificateur éclairagiste avec brevet fédéral",
"Planificateur en communication avec brevet fédéral",
"Planificateur-électricien CFC",
"Planificateur-électricien CFC",
"Planificateur-électricien diplômé",
"Plâtrier",
"Plâtrier constructeur à sec CFC",
"Plâtrier, maître",
"Plâtrier-peintre",
"podologue diplômé ES",
"Poêlier-fumiste CFC",
"Poêlier-fumiste, maître",
"Poêlier-fumiste, maître",
"Policier avec brevet fédéral",
"Policier avec diplôme fédéral",
"Polisseur AFP",
"Polybâtisseur CFC",
"Polydesigner 3D CFC",
"Polygraphe CFC",
"Polygraphe CFC",
"Polymécanicien CFC",
"Poseur de pierres AFP (champ professionnel Construction de voies de communication)",
"Poseur de pierres AFP (champ professionnel Construction de voies de communication)",
"Poseur de revêtements de sols",
"Poseur de sol – parquet CFC",
"Praticien des matières synthétiques AFP",
"Praticien en denrées alimentaires AFP",
"Praticien en mécanique AFP",
"Praticien en pneumatiques AFP",
"Praticien forestier AFP",
"Praticien sur bois AFP",
"Préparateur Humains avec brevet fédéral",
"Professeur d'escalade avec brevet fédéral",
"Professeur de canoë-kayak avec brevet fédéral",
"Professeur de danse de couple avec brevet fédéral",
"Professeur de golf avec brevet fédéral",
"Professeur de ju-jitsu avec brevet fédéral",
"Professeur de judo avec brevet fédéral",
"Professeur de karaté avec brevet fédéral",
"Professeur de patinage avec brevet fédéral",
"Professeur de planche à voile avec brevet fédéral",
"Professeur de sport de neige avec brevet fédéral",
"Professeur de sports aquatiques avec brevet fédéral",
"Professeur de tennis avec brevet fédéral",
"Professeur de voile avec brevet fédéral",
"Professeur en navigation de bateau moteur avec brevet fédéral",
"Professionnel du cheval CFC",
"Professionnel du cheval CFC",
"Projeteur constructeur sur métal avec brevet fédéral",
"Projeteur en sanitaire avec diplôme fédéral",
"Projeteur en technique du bâtiment",
"Projeteur en technique du bâtiment chauffage CFC "
"(champ professionnel Planification en technique du bâtiment)",
"Projeteur en technique du bâtiment sanitaire CFC "
"(champ professionnel Planification en technique du bâtiment)",
"Projeteur en technique du bâtiment ventilation CFC "
"(champ professionnel Planification en technique du bâtiment)",
"Projeteur frigoriste CFC",
"Ramoneur CFC",
"Réalisateur publicaire avec diplôme fédéral",
"Réalisateur publicitaire CFC",
"Réalisateur publicitaire CFC",
"Recycleur CFC",
"Rédacteur publicitaire avec brevet fédéral",
"Rédacteur technique avec brevet fédéral",
"Réparateur de chaussures AFP",
"Repousseur-emboutisseur",
"Responsable Achats avec diplôme fédéral",
"Responsable d'équipe dans des institutions sociales et médico-sociales avec brevet fédéral",
"Responsable de formation avec diplôme fédéral",
"Responsable de la communication diplomé",
"Responsable de la restauration avec brevet fédéral",
"Responsable de ménage agricole avec brevet fédéral",
"Responsable de ménage agricole diplomé",
"Responsable diplômé en documentation technique",
"Responsable du secteur hôtelier - intendance avec brevet fédéral",
"Responsable en transport et logistique diplômé",
"Responsable diplômé en ressources humaines",
"Restaurateur de livres diplômé",
"Sapeur-pompier professionnel avec brevet fédéral",
"Scieur de l'industrie du bois CFC",
"Screen Communicator avec brevet fédéral",
"Sculpteur sur bois CFC",
"Sculpteur sur pierre CFC (Champ professionnel travail de la pierre)",
"Sculpteur sur pierre, maître",
"Secouriste routier avec brevet féderal",
"Sellier",
"Sérigraphe, maître",
"Serrurier sur véhicules avec brevet fédéral",
"Serrurier sur véhicules CFC",
"Sertisseur en joaillerie, maître",
"Sommelier avec brevet fédéral",
"Specialista in soluzioni mobile aziendali dipl. SPD SSS",
"Spécialiste assainissement d'ouvrage avec brevet federal",
"Spécialiste câble-crue avec brevet fédéral",
"Spécialiste d'achat",
"Spécialiste de commerce international avec brevet fédéral",
"Spécialiste de douane avec brevet fédéral",
"Spécialiste de la conduite d'équipe avec brevet fédéral",
"Spécialiste de la gestion et du développement des organisations syndicales avec brevet fédéral",
"Spécialiste de la migration avec brevet fédéral",
"Spécialiste de la nature et de l'environnement avec brevet fédéral",
"Spécialiste de la prévoyance en faveur du personnel avec brevet fédéral",
"Spécialiste de la sécurité dans les institutions de santé et du social avec brevet fédéral",
"Spécialiste de logistique avec brevet fédéral",
"Spécialiste de médias avec brevet fédéral",
"Spécialiste de réseau avec brevet fédéral",
"Spécialiste de traitement des matières premières avec brevet fédéral",
"Spécialiste de vente avec brevet federal",
"Spécialiste des branches de la boulangerie-pâtisserie-confiserie avec brevet fédéral",
"Spécialiste des installations de transport à câbles avec brevet fédéral",
"spécialiste des services de la navigation aérienne diplomé ES",
"Spécialiste du commerce de détail avec brevet fédéral",
"Spécialiste du commerce fruits et légumes avec brevet fédéral",
"Spécialiste du domain équin avec brevet fédéral",
"Spécialiste du service de pistes et de sauvetage avec brevet fédéral",
"spécialiste en activation diplômé ES",
"Spécialiste en administration publique avec brevet fédéral",
"Spécialiste en agriculture biodynamique avec brevet fédéral",
"Spécialiste en assurance avec brevet fédéral",
"Spécialiste en assurance-maladie avec brevet fédéral",
"Spécialiste en assurances sociales avec brevet fédéral",
"Spécialiste en bois avec brevet fédéral",
"Spécialiste en codage médical avec brevet fédéral",
"Spécialiste en commerce de textile avec brevet fédéral",
"Spécialiste en diagnostic neurophysiologique avec brevet fédéral",
"Spécialiste en finance et comptabilité avec brevet fédéral",
"Spécialiste en finition des médias imprimés avec brevet fédéral (BF)",
"Spécialiste en formation professionnelle avec brevet fédéral",
"Spécialiste en gestion de PME avec brevet fédéral",
"Spécialiste en gestion hospitalière avec brevet fédéral",
"Spécialiste en hôtellerie CFC",
"Spécialiste en impression et emballage avec brevet fédéral (BF)",
"Spécialiste en interprétariat communautaire et médiation interculturelle avec brevet fédéral",
"Spécialiste en marketing avec brevet fédéral",
"Spécialiste en matière de poursuite pour dettes et de la faillite avec brevet fédéral",
"Spécialiste en nettoyage de bâtiments avec brevet fédéral",
"Spécialiste en organisation d'entreprise avec brevet fédéral",
"Spécialiste en photographie avec brevet fédéral",
"Spécialiste en photographie CFC",
"Spécialiste en photographie diplômé",
"Spécialiste en pneumatiques avec brevet fédéral",
"Spécialiste en protection incendie avec brevet fédéral",
"Spécialiste en réadaptation de personnes malvoyantes et aveugles avec diplôme fédéral",
"Spécialiste en relations publiques avec brevet fédéral",
"Spécialiste en ressources humaines avec brevet fédéral",
"Spécialiste en restauration CFC",
"Spécialiste en restauration de système CFC",
"Spécialiste en soin aux arbres avec brevet fédéral",
"Spécialiste en systèmes thermiques avec brevet fédéral",
"Spécialiste en technologies de réadaptation avec brevet fédéral",
"Spécialiste en thermique avec brevet fédéral",
"Spécialiste en transports publics avec brevet fédéral",
"Spécialiste en vernissage industriel avec brevet fédéral",
"Spécialiste établissements de bains avec brevet fédéral",
"Spécialiste minage avec brevet fédéral",
"Spécialiste pharmaceutique avec brevet fédéral",
"Spécialiste pour installations de traitement des déchets avec brevet fédéral",
"Spécialiste pour la sécurité des citernes avec brevet fédéral",
"Styliste d'ongles avec brevet fédéral",
"Superviseur-Coach avec diplôme fédéral",
"Supply Chain Manager diplomé",
"Tailleur de pierre CFC (Champ professionnel travail de la pierre)",
"Tailleur, maître",
"Technicien ambulancier avec brevet fédéral",
"Technicien de fouilles archéologiques avec brevet fédéral",
"Technicien de laboratoire en sciences naturelles avec diplôme fédéral",
"technicien diplômé ES agroalimentaire",
"technicien diplômé ES en informatique",
"technicien diplômé ES en microtechnique",
"technicien diplômé ES en télécommunications",
"technicien diplômé ES génie électrique",
"technicien diplômé ES génie mécanique",
"technicien diplômé ES planification des travaux",
"technicien diplomé ES processus d'entreprise",
"technicien diplômé ES systèmes industriels",
"technicien diplômé ES technique des bâtiments",
"technicien diplômé ES technique du bois",
"Technicien du film avec brevet fédéral",
"Technicien du son avec brevet fédéral",
"Technicien du spectacle avec brevet fédéral",
"technicien en analyses biomédicales diplômé ES",
"Technicien en géomatique avec brevet fédéral",
"technicien en radiologie médicale diplômé ES",
"technicien en salle d’opération diplômé ES",
"Technicien sur aéronefs avec brevet fédéral",
"Technicien vitivinicole dipl. ES",
"Technicien-dentiste CFC",
"Technicien-dentiste, maître",
"Techniscéniste CFC",
"Techno-polygraphe avec brevet fédéral (BF)",
"Technologue de fonderie",
"Technologue de fonderie CFC",
"Technologue du lait CFC",
"Technologue en assainissement CFC",
"Technologue en chimie diplômé",
"Technologue en denrées alimentaires",
"Technologue en denrées alimentaires avec brevet fédéral",
"Technologue en denrées alimentaires avec diplôme fédéral",
"Technologue en denrées alimentaires CFC",
"Technologue en emballage CFC",
"Technologue en impression CFC",
"Technologue en industrie laitière",
"Technologue en industrie laitière avec brevet fédéral",
"Technologue en industrie laitière diplomé",
"Technologue en production chimique et pharmaceutique CFC",
"Technologue en production chimique et pharmaceutique CFC",
"Technologue en textile CFC",
"Télématicien CFC",
"Télématicien CFC",
"Télématicien chef de projet avec brevet fédéral",
"Télématicien diplômé",
"Termineur en habillage horloger CFC",
"Thérapeute Complémentaire avec diplôme fédéral",
"Thermiste avec brevet fédéral",
"Tôlier en carrosserie avec brevet fédéral",
"Tonnelier CFC",
"Tourneur, maître",
"Transitaire avec brevet fédéral",
"Typographiste pour la communication visuelle avec brevet fédéral",
"Vannier créateur CFC",
"Vérificateur des poids et mesures diplômé",
"Vernisseur industriel CFC",
"Vernisseur industriel CFC",
"Viticulteur CFC (Champ professionnel de l'agriculture)",
"Vitrier",
"Vitrier CFC",
"Web Project Manager diplômé",
"Zingueur avec brevet fédéral",
"Zingueur diplômé",
]
| Provider |
python | zarr-developers__zarr-python | src/zarr/testing/buffer.py | {
"start": 440,
"end": 544
} | class ____(np.ndarray):
"""An example of a ndarray-like class"""
__test__ = False
| TestNDArrayLike |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 3741,
"end": 3876
} | class ____(graphene.ObjectType):
path = graphene.String()
is_dir = graphene.Boolean()
file_size = LongString()
| MlflowFileInfo |
python | walkccc__LeetCode | solutions/3281. Maximize Score of Numbers in Ranges/3281.py | {
"start": 0,
"end": 498
} | class ____:
def maxPossibleScore(self, start: list[int], d: int) -> int:
def isPossible(m: int) -> bool:
lastPick = start[0]
for i in range(1, len(start)):
if lastPick + m > start[i] + d:
return False
lastPick = max(lastPick + m, start[i])
return True
start.sort()
maxScore = (start[-1] + d) - start[0] + 1
l = bisect.bisect_left(range(maxScore), True,
key=lambda m: not isPossible(m))
return l - 1
| Solution |
python | davidhalter__jedi | jedi/inference/value/module.py | {
"start": 1296,
"end": 2011
} | class ____:
@inference_state_method_cache()
def sub_modules_dict(self):
"""
Lists modules in the directory of this module (if this module is a
package).
"""
names = {}
if self.is_package():
mods = self.inference_state.compiled_subprocess.iter_module_names(
self.py__path__()
)
for name in mods:
# It's obviously a relative import to the current module.
names[name] = SubModuleName(self.as_context(), name)
# In the case of an import like `from x.` we don't need to
# add all the variables, this is only about submodules.
return names
| SubModuleDictMixin |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_grouped_gemm_template.py | {
"start": 6159,
"end": 20910
} | class ____(CppGemmTemplate):
def __init__(
self,
input_nodes: list[ir.IRNode],
layout: ir.Layout,
num_threads: int,
register_blocking: GemmBlocking,
beta: int = 1,
alpha: int = 1,
has_bias: bool = False,
epilogue_creator: Optional[Callable[[ir.Buffer], ir.Pointwise]] = None,
act_mapping: Optional[dict[int, ir.IRNode]] = None,
gemm_grouped_num: int = 1,
) -> None:
"""
Template for Group of GEMMs:
* Each GEMM has the same dimensions (m, n, k) and the same leading dimensions (lda, ldb, ldc)
for their A, B, and C matrices.
* Each GEMM has distinct or shared activations, has distinct weight, has unique bias or no bias, has distinct epilogues.
* In the current implementation, the outputs of all GEMMs are accumulated using pointwise epilogues.
This behavior can be extended in the future if needed.
"""
super().__init__(
input_nodes,
layout,
num_threads,
register_blocking,
beta,
alpha,
has_bias,
epilogue_creator,
)
self.act_mapping = act_mapping
self.gemm_grouped_num = gemm_grouped_num
# pyrefly: ignore [bad-override]
self.output_node: list[ir.Buffer] = [
ir.Buffer(name="buf_out" + str(idx), layout=layout)
for idx in range(gemm_grouped_num)
]
@classmethod
# pyrefly: ignore [bad-override]
def add_choices(
cls,
choices: list[ChoiceCaller],
layout: ir.Layout,
input_nodes: list[ir.IRNode],
beta: int = 1,
alpha: int = 1,
has_bias: tuple[bool, ...] = (False, False),
trans_w: bool = False,
input_indices: Optional[list[int]] = None,
epilogue_creator: Optional[Callable[[ir.Buffer], ir.Pointwise]] = None,
act_mapping: Optional[dict[int, ir.IRNode]] = None, # gemm idx to its act buf
) -> DataProcessorTemplateWrapper:
# Input nodes order: x, optional[x1], ... w0, w1, ... optional[b0], optional[b1], ...
gemm_grouped_num = len(has_bias)
assert act_mapping
act_deduplicated = get_deduplicated_act(act_mapping)
wgt_start_idx = len(act_deduplicated)
bias_start_idx = wgt_start_idx + gemm_grouped_num
input_indices = list(range(len(input_nodes)))
_T = TypeVar("_T", ir.IRNode, torch.Tensor)
_U = TypeVar("_U", ir.Layout, torch.Tensor)
def reorder_and_filter(
inputs: list[_T],
layout_or_out: _U,
) -> tuple[list[_T], _U]:
assert input_indices is not None, "input_indices must be set"
return [inputs[idx] for idx in input_indices], layout_or_out
new_inputs, new_layout = reorder_and_filter(input_nodes, layout)
def maybe_to_dense(
inputs: list[_T],
layout_or_out: _U,
) -> tuple[list[_T], _U]:
new_inputs = list(inputs)
for idx in range(wgt_start_idx, wgt_start_idx + gemm_grouped_num):
if isinstance(inputs[idx], torch.Tensor):
W = inputs[idx]
assert isinstance(W, torch.Tensor), "W must be a torch.Tensor"
# pyrefly: ignore [unsupported-operation]
new_inputs[idx] = W.to_dense() if W.is_mkldnn else W
return new_inputs, layout_or_out
def normalize_shapes(
inputs: list[_T],
layout_or_out: _U,
) -> tuple[list[_T], _U]:
new_inputs: list[_T] = list(inputs)
if not trans_w:
return new_inputs, layout_or_out
X = new_inputs[0]
for wgt_idx in range(wgt_start_idx, wgt_start_idx + gemm_grouped_num):
new_input = new_inputs[wgt_idx]
new_inputs[wgt_idx] = transpose_w(new_input, trans_w)
for bias_idx in range(bias_start_idx, len(new_inputs)):
# pyrefly: ignore [bad-argument-type]
new_bias = expand_bias(new_inputs[bias_idx], X)
assert new_bias is not None
# pyrefly: ignore [unsupported-operation]
new_inputs[bias_idx] = new_bias
return new_inputs, layout_or_out
num_threads = parallel_num_threads()
new_inputs, _ = normalize_shapes(*maybe_to_dense(new_inputs, new_layout))
m, n, k, *_ = mm_args(new_inputs[0], new_inputs[wgt_start_idx])
output_dtype, compute_dtype = get_gemm_template_output_and_compute_dtype(
new_inputs[0].get_dtype()
)
micro_gemm = create_micro_gemm(
"micro_gemm",
m,
n,
k,
input_dtype=new_inputs[0].get_dtype(),
input2_dtype=new_inputs[wgt_start_idx].get_dtype(),
output_dtype=output_dtype,
compute_dtype=compute_dtype,
alpha=alpha,
num_threads=num_threads,
)
assert micro_gemm is not None
_, block_n, _ = micro_gemm.register_blocking
new_size, padded_n = cls.get_padded_size(
n, block_n, k, should_block_weight=True
)
padding = padded_n - n
def pack_weight(
inputs: list[_T],
layout_or_out: _U,
) -> tuple[list[_T], _U]:
new_W_list = []
new_inputs = list(inputs)
W_list = new_inputs[wgt_start_idx : wgt_start_idx + gemm_grouped_num]
for W in W_list:
blocked_w = cls.block_weight(W, new_size, padding)
new_W_list.append(cls.pack_vnni_weight(blocked_w, micro_gemm, new_size))
new_inputs[wgt_start_idx : wgt_start_idx + gemm_grouped_num] = new_W_list
return new_inputs, layout_or_out
def preprocessor(
inputs: list[_T],
layout: _U,
) -> tuple[list[_T], _U]:
return pack_weight(
*normalize_shapes(*maybe_to_dense(*reorder_and_filter(inputs, layout)))
)
def postprocessor(output: _T) -> _T:
if isinstance(output, ir.TensorBox):
template_buffer = ir.InputsKernel.unwrap_storage_for_input(output)
assert isinstance(template_buffer, ir.CppTemplateBuffer)
new_input_nodes, _ = reorder_and_filter(input_nodes, layout)
W_nodes = new_input_nodes[
wgt_start_idx : wgt_start_idx + gemm_grouped_num
]
W_tensor = []
for W_node in W_nodes:
assert W_node.get_name() in V.graph.constants
# pyrefly: ignore [bad-argument-type]
W_tensor.append(V.graph.constants[W_node.get_name()])
new_input_nodes[wgt_start_idx : wgt_start_idx + gemm_grouped_num] = (
W_tensor # type: ignore[assignment]
)
new_input_nodes, _ = pack_weight(
*normalize_shapes(*maybe_to_dense(new_input_nodes, layout))
)
# Prune unused tensors
prune_tensors(input_nodes, new_input_nodes)
for idx in range(wgt_start_idx, wgt_start_idx + gemm_grouped_num):
W_packed = new_input_nodes[idx]
assert isinstance(W_packed, torch.Tensor)
W_packed_constant = V.graph.add_tensor_constant(W_packed)
template_buffer.inputs[idx] = (
ir.InputsKernel.unwrap_storage_for_input(W_packed_constant)
)
# pyrefly: ignore [bad-return]
return output
template = DataProcessorTemplateWrapper(
CppGroupedGemmTemplate,
preprocessor,
postprocessor,
input_nodes=input_nodes,
layout=layout,
num_threads=num_threads,
register_blocking=micro_gemm.register_blocking,
beta=beta,
alpha=alpha,
has_bias=has_bias,
epilogue_creator=epilogue_creator,
act_mapping=act_mapping,
gemm_grouped_num=gemm_grouped_num,
)
template.maybe_append_choice(choices)
return template
    def render(  # type: ignore[override,return,no-untyped-def]
        self,
        kernel: CppTemplateKernel,
        template_buffer_node: Optional[ir.CppTemplateBuffer] = None,
        flag_template_buffer_has_other_users: Optional[bool] = None,
        epilogue_nodes: Optional[list[ir.IRNode]] = None,
        **kwargs,
    ) -> str:
        """Render the C++ source string for this grouped-GEMM template kernel.

        Collects the activation/weight/bias nodes for every GEMM in the
        group, re-creates the micro-GEMM kernel, builds per-GEMM bias-add
        epilogues (plus any externally fused epilogue nodes), and renders
        GEMM_TEMPLATE with the assembled options.
        """
        assert self.act_mapping
        # Input node layout: [deduplicated activations][weights][biases].
        act_deduplicated = get_deduplicated_act(self.act_mapping)
        wgt_start_idx = len(act_deduplicated)
        bias_start_idx = wgt_start_idx + self.gemm_grouped_num
        X_list = list(self.act_mapping.values())
        W_list = self.input_nodes[wgt_start_idx : wgt_start_idx + self.gemm_grouped_num]
        # Biases are stored densely after the weights: only GEMMs with
        # has_bias[i] set consume a slot, so walk with a running index.
        inp_list = []
        cur_idx = bias_start_idx
        for inp_idx in range(self.gemm_grouped_num):
            inp = None
            # pyrefly: ignore [index-error]
            if self.has_bias[inp_idx]:
                inp = self.input_nodes[cur_idx]
                cur_idx += 1
            inp_list.append(inp)
        Y_list = self.output_node
        multi_output_buffers = None
        if template_buffer_node is not None:
            # Rendering against a scheduled buffer: use its (possibly
            # repacked) weight inputs and its output buffers instead.
            W_list = template_buffer_node.inputs[
                wgt_start_idx : wgt_start_idx + self.gemm_grouped_num
            ]
            assert isinstance(template_buffer_node.outputs, list)
            Y_list = template_buffer_node.outputs
            counters["inductor"]["cpp_grouped_gemm_template"] += 1
            multi_output_buffers = template_buffer_node.outputs
        template_buffer = Y_list[0]
        fake_buffers: list[ir.Buffer] = []
        Y_2d_list = Y_list
        output_dtype, compute_dtype = get_gemm_template_output_and_compute_dtype(
            X_list[0].get_dtype()
        )
        # Re-create the micro-kernel with the same parameters used when the
        # template choice was generated; blocking must match (asserted below).
        micro_gemm = create_micro_gemm(
            f"{kernel.kernel_name}_micro_gemm",
            self.m,
            self.n,
            self.k,
            input_dtype=X_list[0].get_dtype(),
            # pyrefly: ignore [missing-attribute]
            input2_dtype=W_list[0].get_dtype(),
            output_dtype=output_dtype,
            compute_dtype=compute_dtype,
            alpha=self.alpha,
            num_threads=self.num_threads,
        )
        assert micro_gemm is not None
        assert self.register_blocking == micro_gemm.register_blocking
        self.log_blockings()
        if isinstance(micro_gemm, CppMicroGemmAMX):
            counters["inductor"]["cpp_micro_gemm_amx_counter"] += 1
        L1_cache_size = torch._C._cpu._L1d_cache_size()  # per core cache size in Bytes
        assert L1_cache_size > 0, f"Expect L1_cache_size > 0 but got {L1_cache_size}"
        L2_cache_size = torch._C._cpu._L2_cache_size()  # per core cache size in Bytes
        assert L2_cache_size > 0, f"Expect L2_cache_size > 0 but got {L2_cache_size}"
        epilogues: list[ir.IRNode] = []
        reindexers: list[Optional[Callable[[list[Any]], list[Any]]]] = []
        # One intermediate "GemmOut" buffer per GEMM; bias-add epilogues
        # read from these and write into the real outputs.
        gemm_output_buffers: list[ir.Buffer] = []
        for out_buf_idx in range(self.gemm_grouped_num):
            gemm_output_name = f"{template_buffer.get_name()}_GemmOut" + str(
                out_buf_idx
            )
            gemm_output_buffers.append(
                ir.Buffer(name=gemm_output_name, layout=template_buffer.layout)
            )
        assert not self.epilogue_creator, (
            "epilogue_creator is not supported yet in Grouped GEMM Template"
        )
        # Kernel argument map: X<i> activations, W<i> weights, inp<i> biases
        # (None where a GEMM has no bias).
        kernel_args: dict[str, Optional[ir.IRNode]] = {}
        for x_idx in range(wgt_start_idx):
            kernel_args["X" + str(x_idx)] = act_deduplicated[x_idx]
        for w_idx in range(self.gemm_grouped_num):
            # pyrefly: ignore [unsupported-operation]
            kernel_args["W" + str(w_idx)] = W_list[w_idx]
        for inp_idx in range(self.gemm_grouped_num):
            kernel_args["inp" + str(inp_idx)] = inp_list[inp_idx]

        def _bias_add_epilogue(buf: ir.IRNode, inp: ir.IRNode) -> ir.Pointwise:
            # Epilogue computing buf + beta * inp for biased GEMMs.
            return create_epilogue_with_attr(
                buf, "bias_add", other=inp, beta=self.beta, dtype=self.layout.dtype
            )

        for gemm_idx, inp in enumerate(inp_list):
            if inp:
                buffer_name = Y_list[gemm_idx].get_name()
                epilogues.append(
                    ir.ComputedBuffer(
                        name=buffer_name,
                        layout=template_buffer.layout,
                        data=_bias_add_epilogue(gemm_output_buffers[gemm_idx], inp),
                    )
                )
                reindexers.append(None)
        if epilogue_nodes:
            # Append externally fused epilogues; each may need a reindexer to
            # view the template output as 2D.
            epilogues.extend(epilogue_nodes)
            for epilogue_node in epilogue_nodes:
                Y = cast(ir.Buffer, epilogue_node)
                _, reindexers = gen_2d_view_of_epilogue_buf(
                    Y,
                    template_buffer,
                    [
                        epilogue_node,
                    ],
                    reindexers,
                    default_reindexers=[
                        None,
                    ],
                )
        options = dict(
            N=self.n,
            K=self.k,
            PADDED_N=self.padded_n,
            aliases={},
            beta=self.beta,
            alpha=self.alpha,
            num_threads=self.num_threads,
            micro_gemm=micro_gemm,
            is_dynamic_M=self.is_dynamic_M,
            template=self,
            kernel=kernel,
            export_declaration=get_export_declaration(),
            acc_buf_dtype=torch.float,
            DTYPE_TO_CPP=DTYPE_TO_CPP,
            L1_cache_size=L1_cache_size,
            L2_cache_size=L2_cache_size,
            config=config,
            epilogue_nodes=epilogues,
            GemmOuts=gemm_output_buffers,
            reindexers=reindexers,
            kernel_args=kernel_args,
            X_list=X_list,
            W_list=W_list,
            gemm_grouped_num=self.gemm_grouped_num,
            Y_list={"Y" + str(idx): Y for idx, Y in enumerate(Y_list)},
            Y_2d_list=Y_2d_list,
            multi_output_buffers=multi_output_buffers,
        )
        with contextlib.ExitStack() as stack:
            # Patch V.graph.get_dtype so fake buffers resolve while the
            # template string is rendered.
            stack.enter_context(
                patch.object(V.graph, "get_dtype", self._fake_get_dtype(fake_buffers))
            )
            return self._template_from_string(GEMM_TEMPLATE).render(**options)
| CppGroupedGemmTemplate |
python | cython__cython | Cython/Compiler/ModuleNode.py | {
"start": 7618,
"end": 191002
} | class ____(Nodes.Node, Nodes.BlockNode):
# doc string or None
# body StatListNode
#
# referenced_modules [ModuleScope]
# full_module_name string
#
# scope The module scope.
# compilation_source A CompilationSource (see Main)
# directives Top-level compiler directives
child_attrs = ["body"]
directives = None
# internal - used in merging
pxd_stats = None
utility_code_stats = None
    @property
    def local_scope(self):
        """The module scope — makes the module node (and its init function)
        look like a FuncDefNode to code that expects one."""
        return self.scope
    def merge_in(self, tree, scope, stage):
        """Merge the contents of another tree (and scope) into this module.

        *stage* is "pxd" or "utility" and selects which of the two dedicated
        statement lists at the front of the module body receives the merged
        statements, fixing the relative ordering of pxd vs. utility code.
        With the current implementation this must be done right prior to
        code generation.
        """
        # Note: This way of doing it seems strange -- I believe the
        # right concept is to split ModuleNode into a ModuleNode and a
        # CodeGenerator, and tell that CodeGenerator to generate code
        # from multiple sources.
        assert isinstance(self.body, Nodes.StatListNode)
        assert stage in ('pxd', 'utility')

        if self.pxd_stats is None:
            # Lazily create the merge targets and prepend them to the module
            # body: utility-code statements first, then pxd statements.
            self.pxd_stats = Nodes.StatListNode(self.body.pos, stats=[])
            self.utility_code_stats = Nodes.StatListNode(self.body.pos, stats=[])
            self.body.stats.insert(0, self.pxd_stats)
            self.body.stats.insert(0, self.utility_code_stats)

        if scope.directives != self.scope.directives:
            # merged in nodes should keep their original compiler directives
            # (for example inline cdef functions)
            tree = Nodes.CompilerDirectivesNode(tree.pos, body=tree, directives=scope.directives)

        target_stats = self.pxd_stats if stage == "pxd" else self.utility_code_stats
        if isinstance(tree, Nodes.StatListNode):
            target_stats.stats.extend(tree.stats)
        else:
            target_stats.stats.append(tree)

        self.scope.utility_code_list.extend(scope.utility_code_list)

        for inc in scope.c_includes.values():
            self.scope.process_include(inc)

        def extend_if_not_in(L1, L2):
            # Order-preserving append of L2's items to L1, skipping duplicates.
            for x in L2:
                if x not in L1:
                    L1.append(x)

        extend_if_not_in(self.scope.included_files, scope.included_files)
    def merge_scope(self, scope, internalise_c_class_entries=True):
        """Merge *scope*'s entries into this module's scope.

        Extension types from *scope* are re-homed to this module (so no
        import code is generated for them) and, by default, marked internal.
        """
        # Ensure that we don't generate import code for these entries!
        for entry in scope.c_class_entries:
            entry.type.module_name = self.full_module_name
            entry.type.scope.directives["internal"] = internalise_c_class_entries
        self.scope.merge_in(scope)
def with_compiler_directives(self):
# When merging a utility code module into the user code we need to preserve
# the original compiler directives. This returns the body of the module node,
# wrapped in its set of directives.
body = Nodes.CompilerDirectivesNode(self.pos, directives=self.directives, body=self.body)
return body
    def analyse_declarations(self, env):
        """Analyse all declarations in the module body.

        Also sets up the module docstring (honouring the docstring-related
        Options), installs the module's directives on *env*, and pulls in a
        runtime ABI check when a cython.pymutex is shared across modules.
        """
        if has_np_pythran(env):
            Pythran.include_pythran_generic(env)
        if self.directives:
            env.old_style_globals = self.directives['old_style_globals']
        if not Options.docstrings:
            env.doc = self.doc = None
        elif Options.embed_pos_in_docstring:
            # Prefix the docstring with the source file/line position.
            env.doc = EncodedString('File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
            if self.doc is not None:
                env.doc = EncodedString(env.doc + '\n' + self.doc)
                env.doc.encoding = self.doc.encoding
        else:
            env.doc = self.doc
        env.directives = self.directives

        self.body.analyse_declarations(env)

        cy_pymutex_type = PyrexTypes.get_cy_pymutex_type()
        if env.find_shared_usages_of_type(cy_pymutex_type):
            # Be very suspicious of cython locks that are shared.
            # They have the potential to cause ABI issues.
            self.scope.use_utility_code(
                UtilityCode.load_cached(
                    "CythonPyMutexPublicCheck", "Synchronization.c"
                ))
    def prepare_utility_code(self):
        """Prepare any utility code that must be created before code generation,
        specifically CythonUtilityCode."""
        env = self.scope
        if env.has_import_star:
            self.create_import_star_conversion_utility_code(env)
        for name, entry in sorted(env.entries.items()):
            # Generate Python wrappers for (C/C++) enums that requested one.
            if (entry.create_wrapper and entry.scope is env
                    and entry.is_type and (entry.type.is_enum or entry.type.is_cpp_enum)):
                entry.type.create_type_wrapper(env)
    def process_implementation(self, options, result):
        """Top-level driver: collect referenced modules, sort extension
        classes, and generate the .c, .h and _api.h outputs into *result*."""
        env = self.scope
        env.return_type = PyrexTypes.c_void_type
        self.referenced_modules = []
        self.find_referenced_modules(env, self.referenced_modules, {})
        self.sort_cdef_classes(env)
        self.generate_c_code(env, options, result)
        self.generate_h_code(env, options, result)
        self.generate_api_code(env, options, result)
def has_imported_c_functions(self):
for module in self.referenced_modules:
for entry in module.cfunc_entries:
if entry.defined_in_pxd:
return 1
return 0
    def assure_safe_target(self, path, allow_failed=False):
        """Refuse to overwrite *path* unless it looks Cython-generated.

        Check for a common gotcha for new users: naming your .pyx file after
        the .c file you want to wrap.
        """
        if not is_cython_generated_file(path, allow_failed=allow_failed, if_not_found=True):
            # Raising a fatal CompileError instead of calling error() to prevent castrating an existing file.
            raise CompileError(
                self.pos, 'The output file already exists and does not look like it was generated by Cython: "%s"' %
                os.path.basename(path))
    def generate_h_code(self, env, options, result):
        """Generate the public .h header (and optionally a .pxi file).

        Only produced when the module has public/api types, variables,
        functions or extension types; otherwise nothing is written.
        """
        def h_entries(entries, api=0, pxd=0):
            # Select entries that belong in the public header.
            return [entry for entry in entries
                    if ((entry.visibility == 'public') or
                        (api and entry.api) or
                        (pxd and entry.defined_in_pxd))]
        h_types = h_entries(env.type_entries, api=1)
        h_vars = h_entries(env.var_entries)
        h_funcs = h_entries(env.cfunc_entries)
        h_extension_types = h_entries(env.c_class_entries)

        if h_types or h_vars or h_funcs or h_extension_types:
            result.h_file = replace_suffix_encoded(result.c_file, ".h")
            self.assure_safe_target(result.h_file)

            h_code_writer = Code.CCodeWriter()
            c_code_config = generate_c_code_config(env, options)
            globalstate = Code.GlobalState(h_code_writer, self, c_code_config)
            globalstate.initialize_main_h_code()  # in-case utility code is used in the header

            h_code_start = globalstate.parts['h_code']
            h_code_main = globalstate.parts['type_declarations']
            h_code_end = globalstate.parts['end']

            if options.generate_pxi:
                result.i_file = replace_suffix_encoded(result.c_file, ".pxi")
                i_code = Code.PyrexCodeWriter(result.i_file)
            else:
                i_code = None

            h_code_start.put_generated_by()
            h_guard = self.api_name(Naming.h_guard_prefix, env)
            h_code_start.put_h_guard(h_guard)
            h_code_start.putln("")
            h_code_start.putln('#include "Python.h"')
            self.generate_type_header_code(h_types, h_code_main)
            if options.capi_reexport_cincludes:
                self.generate_includes(env, [], h_code_main)

            # Everything below is skipped inside the module's own translation
            # unit (guarded by the api guard macro).
            h_code_main.putln("")
            api_guard = self.api_name(Naming.api_guard_prefix, env)
            h_code_main.putln("#ifndef %s" % api_guard)
            h_code_main.putln("")
            self.generate_extern_c_macro_definition(h_code_main, env.is_cpp())
            h_code_main.putln("")
            self.generate_dl_import_macro(h_code_main)
            if h_extension_types:
                h_code_main.putln("")
                for entry in h_extension_types:
                    self.generate_cclass_header_code(entry.type, h_code_main)
                    if i_code:
                        self.generate_cclass_include_code(entry.type, i_code)
                    globalstate.use_entry_utility_code(entry)
            if h_funcs:
                h_code_main.putln("")
                for entry in h_funcs:
                    self.generate_public_declaration(entry, h_code_main, i_code)
                    globalstate.use_entry_utility_code(entry)
            if h_vars:
                h_code_main.putln("")
                for entry in h_vars:
                    self.generate_public_declaration(entry, h_code_main, i_code)
                    globalstate.use_entry_utility_code(entry)
            h_code_main.putln("")
            h_code_main.putln("#endif /* !%s */" % api_guard)
            h_code_main.putln("")
            h_code_main.putln("/* WARNING: the interface of the module init function changed in CPython 3.5. */")
            h_code_main.putln("/* It now returns a PyModuleDef instance instead of a PyModule instance. */")
            h_code_main.putln("")
            py3_mod_func_name = self.mod_init_func_cname('PyInit', env)
            warning_string = EncodedString('Use PyImport_AppendInittab(%s, %s) instead of calling %s directly.' % (
                env.module_name.as_c_string_literal(), py3_mod_func_name, py3_mod_func_name))
            h_code_main.putln('/* WARNING: %s from Python 3.5 */' % warning_string.rstrip('.'))
            h_code_main.putln("PyMODINIT_FUNC %s(void);" % py3_mod_func_name)
            h_code_main.putln("")
            # Emit a compiler-specific deprecation wrapper so that *calling*
            # the module init function directly warns at C compile time.
            h_code_main.putln("#if PY_VERSION_HEX >= 0x03050000 "
                              "&& (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) "
                              "|| (defined(__cplusplus) && __cplusplus >= 201402L))")
            h_code_main.putln("#if defined(__cplusplus) && __cplusplus >= 201402L")
            h_code_main.putln("[[deprecated(%s)]] inline" % warning_string.as_c_string_literal())
            h_code_main.putln("#elif defined(__GNUC__) || defined(__clang__)")
            h_code_main.putln('__attribute__ ((__deprecated__(%s), __unused__)) __inline__' % (
                warning_string.as_c_string_literal()))
            h_code_main.putln("#elif defined(_MSC_VER)")
            h_code_main.putln('__declspec(deprecated(%s)) __inline' % (
                warning_string.as_c_string_literal()))
            h_code_main.putln('#endif')
            h_code_main.putln("static PyObject* __PYX_WARN_IF_%s_INIT_CALLED(PyObject* res) {" % py3_mod_func_name)
            h_code_main.putln("return res;")
            h_code_main.putln("}")
            # Function call is converted to warning macro; uncalled (pointer) is not
            h_code_main.putln('#define %s() __PYX_WARN_IF_%s_INIT_CALLED(%s())' % (
                py3_mod_func_name, py3_mod_func_name, py3_mod_func_name))
            h_code_main.putln('#endif')

            h_code_end.putln("")
            h_code_end.putln("#endif /* !%s */" % h_guard)

            with open_new_file(result.h_file) as f:
                h_code_writer.copyto(f)
def generate_public_declaration(self, entry, h_code, i_code):
h_code.putln("%s %s;" % (
Naming.extern_c_macro,
entry.type.declaration_code(entry.cname)))
if i_code:
i_code.putln("cdef extern %s" % (
entry.type.declaration_code(entry.cname, pyrex=1)))
def api_name(self, prefix, env):
api_name = self.punycode_module_name(prefix, env.qualified_name)
return api_name.replace(".", "__")
    def generate_api_code(self, env, options, result):
        """Generate the "_api.h" header for C-level access from other modules.

        Declares function/variable pointers and type-object pointers for all
        `api` entries, plus an import function that resolves them at runtime
        by importing this module.  Nothing is written when the module has no
        API entries.
        """
        def api_entries(entries, pxd=0):
            # Select entries exported through the C-API header.
            return [entry for entry in entries
                    if entry.api or (pxd and entry.defined_in_pxd)]
        api_vars = api_entries(env.var_entries)
        api_funcs = api_entries(env.cfunc_entries)
        api_extension_types = api_entries(env.c_class_entries)

        if not (api_vars or api_funcs or api_extension_types):
            return
        result.api_file = replace_suffix_encoded(result.c_file, "_api.h")
        self.assure_safe_target(result.api_file)

        h_code = Code.CCodeWriter()
        c_code_config = generate_c_code_config(env, options)
        globalstate = Code.GlobalState(h_code, self, c_code_config)
        globalstate.initialize_main_h_code()  # in-case utility code is used in the header
        h_code.put_generated_by()
        api_guard = self.api_name(Naming.api_guard_prefix, env)
        h_code.put_h_guard(api_guard)
        # Work around https://bugs.python.org/issue4709
        h_code.putln('#ifdef __MINGW64__')
        h_code.putln('#define MS_WIN64')
        h_code.putln('#endif')

        def put_utility_code(name, src_file, include_requires=True):
            # Inline a named utility-code section (proto + impl) verbatim.
            proto, impl = UtilityCode.load_as_string(name, src_file, include_requires=include_requires)
            if proto:
                h_code.put(proto)
            if impl:
                h_code.put(impl)

        h_code.putln('#include "Python.h"')
        if result.h_file:
            h_filename = os.path.basename(result.h_file)
            h_filename = as_encoded_filename(h_filename)
            h_code.putln('#include %s' % h_filename.as_c_string_literal())

        # Pointer declarations plus #defines mapping the public names onto them.
        if api_extension_types:
            h_code.putln("")
            for entry in api_extension_types:
                type = entry.type
                h_code.putln("static PyTypeObject *%s = 0;" % type.typeptr_cname)
                h_code.putln("#define %s (*%s)" % (
                    type.typeobj_cname, type.typeptr_cname))
                h_code.globalstate.use_entry_utility_code(entry)
        if api_funcs:
            h_code.putln("")
            for entry in api_funcs:
                type = CPtrType(entry.type)
                cname = env.mangle(Naming.func_prefix_api, entry.name)
                h_code.putln("static %s = 0;" % type.declaration_code(cname))
                h_code.putln("#define %s %s" % (entry.name, cname))
                h_code.globalstate.use_entry_utility_code(entry)
        if api_vars:
            h_code.putln("")
            for entry in api_vars:
                type = CPtrType(entry.type)
                cname = env.mangle(Naming.varptr_prefix_api, entry.name)
                h_code.putln("static %s = 0;" % type.declaration_code(cname))
                h_code.putln("#define %s (*%s)" % (entry.name, cname))
                h_code.globalstate.use_entry_utility_code(entry)
        if api_vars:
            put_utility_code("VoidPtrImport", "ImportExport.c")
        if api_funcs:
            put_utility_code("FunctionImport", "ImportExport.c")
        if api_extension_types:
            put_utility_code("TypeImport", "ImportExport.c")

        # The import function: imports this module and fills in all pointers.
        h_code.putln("")
        h_code.putln("static int %s(void) {" % self.api_name("import", env))
        h_code.putln("PyObject *module = 0;")
        h_code.putln('module = PyImport_ImportModule(%s);' % env.qualified_name.as_c_string_literal())
        h_code.putln("if (!module) goto bad;")
        for entry in api_funcs:
            cname = env.mangle(Naming.func_prefix_api, entry.name)
            sig = entry.type.signature_string()
            h_code.putln(
                'if (__Pyx_ImportFunction_%s(module, %s, (void (**)(void))&%s, "%s") < 0) goto bad;'
                % (Naming.cyversion, entry.name.as_c_string_literal(), cname, sig))
        for entry in api_vars:
            cname = env.mangle(Naming.varptr_prefix_api, entry.name)
            sig = entry.type.empty_declaration_code()
            h_code.putln(
                'if (__Pyx_ImportVoidPtr_%s(module, %s, (void **)&%s, "%s") < 0) goto bad;'
                % (Naming.cyversion, entry.name.as_c_string_literal(), cname, sig))
        with ModuleImportGenerator(h_code, imported_modules={env.qualified_name: 'module'}) as import_generator:
            for entry in api_extension_types:
                self.generate_type_import_call(entry.type, h_code, import_generator, error_code="goto bad;", is_api=True)
        h_code.putln("Py_DECREF(module); module = 0;")
        h_code.putln("return 0;")
        h_code.putln("bad:")
        h_code.putln("Py_XDECREF(module);")
        h_code.putln("return -1;")
        h_code.putln("}")
        h_code.putln("")
        h_code.putln("#endif /* !%s */" % api_guard)

        f = open_new_file(result.api_file)
        try:
            h_code.copyto(f)
        finally:
            f.close()
def generate_cclass_header_code(self, type, h_code):
h_code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
type.typeobj_cname))
def generate_cclass_include_code(self, type, i_code):
i_code.putln("cdef extern class %s.%s:" % (
type.module_name, type.name))
i_code.indent()
var_entries = type.scope.var_entries
if var_entries:
for entry in var_entries:
i_code.putln("cdef %s" % (
entry.type.declaration_code(entry.cname, pyrex=1)))
else:
i_code.putln("pass")
i_code.dedent()
    def generate_c_code(self, env, options, result):
        """Generate the main .c/.cpp file for the module into *result*.

        Sets up the (possibly annotating) code writer and global state, then
        emits each part of the file in order: preamble, variable/function
        definitions, extension types, module state, init/cleanup functions,
        declarations and utility code.  Finally writes the file and, if
        requested, the gdb line map and annotation HTML.
        """
        self.assure_safe_target(result.c_file, allow_failed=True)
        modules = self.referenced_modules

        if Options.annotate or options.annotate:
            # Annotation mode records the generated code for the HTML view.
            show_entire_c_code = Options.annotate == "fullc" or options.annotate == "fullc"
            rootwriter = Annotate.AnnotationCCodeWriter(
                show_entire_c_code=show_entire_c_code,
                source_desc=self.compilation_source.source_desc,
            )
        else:
            rootwriter = Code.CCodeWriter()

        c_code_config = generate_c_code_config(env, options)

        globalstate = Code.GlobalState(
            rootwriter, self,
            code_config=c_code_config,
            common_utility_include_dir=options.common_utility_include_dir,
        )
        globalstate.initialize_main_c_code()
        h_code = globalstate['h_code']

        globalstate.module_pos = self.pos
        globalstate.directives = self.directives

        self.generate_module_preamble(env, options, modules, result.embedded_metadata, h_code)

        globalstate.use_utility_code(refnanny_utility_code)

        code = globalstate['before_global_var']
        code.putln('#define __Pyx_MODULE_NAME %s' %
                   self.full_module_name.as_c_string_literal())
        module_is_main = self.is_main_module_flag_cname()
        code.putln("extern int %s;" % module_is_main)
        code.putln("int %s = 0;" % module_is_main)
        code.putln("")
        code.putln("/* Implementation of %s */" % env.qualified_name.as_c_string_literal())

        code = globalstate['late_includes']
        self.generate_includes(env, modules, code, early=False)

        code = globalstate['module_code']

        self.generate_cached_builtins_decls(env, code)

        # generate normal variable and function definitions
        self.generate_lambda_definitions(env, code)
        self.generate_variable_definitions(env, code)

        self.body.generate_function_definitions(env, code)

        # generate extension types and methods
        code = globalstate['module_exttypes']
        self.generate_typeobj_definitions(env, code)
        self.generate_method_table(env, code)
        if env.has_import_star:
            self.generate_import_star(env, code)

        # initialise the macro to reduce the code size of one-time functionality
        globalstate['module_state'].put_code_here(
            UtilityCode.load("SmallCodeConfig", "ModuleSetupCode.c"))

        self.generate_module_state_start(env, globalstate['module_state'])
        self.generate_module_state_clear(env, globalstate['module_state_clear'])
        self.generate_module_state_traverse(env, globalstate['module_state_traverse'])

        shared_utility_exporter = SharedUtilityExporter(
            self.pos,
            self.mod_init_subfunction(self.pos, self.scope, globalstate['init_module']),
            self.scope
        )

        # init_globals is inserted before this
        self.generate_module_init_func(
            modules[:-1], shared_utility_exporter, env, globalstate['init_module']
        )
        self.generate_module_cleanup_func(env, globalstate['cleanup_module'])
        if Options.embed:
            self.generate_main_method(env, globalstate['main_method'])
        self.generate_filename_table(globalstate['filename_table'])

        self.generate_declarations_for_modules(env, modules, globalstate)
        h_code.write('\n')

        # NOTE(review): iterates a snapshot ([:]) — presumably because
        # use_utility_code() can extend the list while it is processed.
        for utilcode in env.utility_code_list[:]:
            globalstate.use_utility_code(utilcode)
        shared_utility_exporter.generate_exporting_functions(code)
        globalstate.finalize_main_c_code()
        self.generate_module_state_end(env, modules, globalstate)

        f = open_new_file(result.c_file)
        try:
            rootwriter.copyto(f)
        finally:
            f.close()
        result.c_file_generated = 1

        if options.gdb_debug:
            self._serialize_lineno_map(env, rootwriter)
        if Options.annotate or options.annotate:
            self._generate_annotations(rootwriter, result, options)
def _generate_annotations(self, rootwriter, result, options):
self.annotate(rootwriter)
coverage_xml_filename = Options.annotate_coverage_xml or options.annotate_coverage_xml
if coverage_xml_filename and os.path.exists(coverage_xml_filename):
import xml.etree.ElementTree as ET
coverage_xml = ET.parse(coverage_xml_filename).getroot()
for el in coverage_xml.iter():
el.tail = None # save some memory
else:
coverage_xml = None
rootwriter.save_annotation(result.main_source_file, result.c_file, coverage_xml=coverage_xml)
# if we included files, additionally generate one annotation file for each
if not self.scope.included_files:
return
search_include_file = self.scope.context.search_include_directories
target_dir = os.path.abspath(os.path.dirname(result.c_file))
for included_file in self.scope.included_files:
target_file = os.path.abspath(os.path.join(target_dir, included_file))
target_file_dir = os.path.dirname(target_file)
if not target_file_dir.startswith(target_dir):
# any other directories may not be writable => avoid trying
continue
source_file = search_include_file(included_file, source_pos=self.pos, include=True)
if not source_file:
continue
if target_file_dir != target_dir and not os.path.exists(target_file_dir):
try:
os.makedirs(target_file_dir)
except OSError as e:
import errno
if e.errno != errno.EEXIST:
raise
rootwriter.save_annotation(source_file, target_file, coverage_xml=coverage_xml)
    def _serialize_lineno_map(self, env, ccodewriter):
        """Serialize the C-line -> source-line mapping for cygdb support.

        Groups the writer's line markers by (source file, source line) and
        writes them through the gdb debug output writer.
        """
        tb = env.context.gdb_debug_outputwriter
        markers = ccodewriter.buffer.allmarkers()

        d = defaultdict(list)
        for c_lineno, (src_desc, src_lineno) in enumerate(markers):
            # Markers are 0-based; emitted C line numbers are 1-based.
            if src_lineno > 0 and src_desc.filename is not None:
                d[src_desc, src_lineno].append(c_lineno + 1)

        tb.start('LineNumberMapping')
        for (src_desc, src_lineno), c_linenos in sorted(d.items()):
            assert src_desc.filename is not None
            tb.add_entry(
                'LineNumber',
                c_linenos=' '.join(map(str, c_linenos)),
                src_path=src_desc.filename,
                src_lineno=str(src_lineno),
            )
        tb.end('LineNumberMapping')
        tb.serialize()
def find_referenced_modules(self, env, module_list, modules_seen):
if env not in modules_seen:
modules_seen[env] = 1
for imported_module in env.cimported_modules:
self.find_referenced_modules(imported_module, module_list, modules_seen)
module_list.append(env)
def sort_types_by_inheritance(self, type_dict, type_order, getkey):
subclasses = defaultdict(list) # maps type key to list of subclass keys
for key in type_order:
new_entry = type_dict[key]
# collect all base classes to check for children
base = new_entry.type.base_type
while base:
base_key = getkey(base)
subclasses[base_key].append(key)
base_entry = type_dict.get(base_key)
if base_entry is None:
break
base = base_entry.type.base_type
# Simple topological sort using recursive DFS, based on
# https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
seen = set()
result = []
def dfs(u):
if u in seen:
return
seen.add(u)
for v in subclasses[getkey(u.type)]:
dfs(type_dict[v])
result.append(u)
for key in reversed(type_order):
dfs(type_dict[key])
result.reverse()
return result
    def sort_type_hierarchy(self, module_list, env):
        """Collect vtab and vtabslot entries from all modules, sorted so that
        base types precede their subclasses.

        Returns a pair (vtab_list, vtabslot_list).
        """
        # poor developer's OrderedDict
        vtab_dict, vtab_dict_order = {}, []
        vtabslot_dict, vtabslot_dict_order = {}, []

        for module in module_list:
            for entry in module.c_class_entries:
                if entry.used and not entry.in_cinclude:
                    type = entry.type
                    key = type.vtabstruct_cname
                    if not key:
                        continue
                    if key in vtab_dict:
                        # FIXME: this should *never* happen, but apparently it does
                        # for Cython generated utility code
                        from .UtilityCode import NonManglingModuleScope
                        assert isinstance(entry.scope, NonManglingModuleScope), str(entry.scope)
                        assert isinstance(vtab_dict[key].scope, NonManglingModuleScope), str(vtab_dict[key].scope)
                    else:
                        vtab_dict[key] = entry
                        vtab_dict_order.append(key)
            all_defined_here = module is env
            for entry in module.type_entries:
                if entry.used and (all_defined_here or entry.defined_in_pxd):
                    type = entry.type
                    if type.is_extension_type and not entry.in_cinclude:
                        type = entry.type
                        key = type.objstruct_cname
                        assert key not in vtabslot_dict, key
                        vtabslot_dict[key] = entry
                        vtabslot_dict_order.append(key)

        def vtabstruct_cname(entry_type):
            return entry_type.vtabstruct_cname
        vtab_list = self.sort_types_by_inheritance(
            vtab_dict, vtab_dict_order, vtabstruct_cname)

        def objstruct_cname(entry_type):
            return entry_type.objstruct_cname
        vtabslot_list = self.sort_types_by_inheritance(
            vtabslot_dict, vtabslot_dict_order, objstruct_cname)

        return (vtab_list, vtabslot_list)
def sort_cdef_classes(self, env):
key_func = operator.attrgetter('objstruct_cname')
entry_dict, entry_order = {}, []
for entry in env.c_class_entries:
key = key_func(entry.type)
assert key not in entry_dict, key
entry_dict[key] = entry
entry_order.append(key)
env.c_class_entries[:] = self.sort_types_by_inheritance(
entry_dict, entry_order, key_func)
    def generate_type_definitions(self, env, modules, vtab_list, vtabslot_list, code):
        """Emit type header code and object/vtable struct definitions for all
        referenced modules."""
        # TODO: Why are these separated out?
        for entry in vtabslot_list:
            self.generate_objstruct_predeclaration(entry.type, code)
        vtabslot_entries = set(vtabslot_list)
        ctuple_names = set()
        for module in modules:
            definition = module is env
            type_entries = []
            for entry in module.type_entries:
                if entry.type.is_ctuple and entry.used:
                    # Each used ctuple type is emitted only once across modules.
                    if entry.name not in ctuple_names:
                        ctuple_names.add(entry.name)
                        type_entries.append(entry)
                elif definition or entry.defined_in_pxd:
                    type_entries.append(entry)
            # Entries already pre-declared above are handled separately below.
            type_entries = [t for t in type_entries if t not in vtabslot_entries]
            self.generate_type_header_code(type_entries, code)
        for entry in vtabslot_list:
            self.generate_objstruct_definition(entry.type, code)
            self.generate_typeobj_predeclaration(entry, code)
        for entry in vtab_list:
            self.generate_typeobj_predeclaration(entry, code)
            self.generate_exttype_vtable_struct(entry, code)
            self.generate_exttype_vtabptr_declaration(entry, code)
            self.generate_exttype_final_methods_declaration(entry, code)
    def generate_declarations_for_modules(self, env, modules, globalstate):
        """Emit type declarations and per-module C declarations (classes,
        variables, functions) for every referenced module."""
        typecode = globalstate['type_declarations']
        typecode.putln("")
        typecode.putln("/*--- Type declarations ---*/")
        # This is to work around the fact that array.h isn't part of the C-API,
        # but we need to declare it earlier than utility code.
        if 'cpython.array' in [m.qualified_name for m in modules]:
            typecode.putln('#ifndef _ARRAYARRAY_H')
            typecode.putln('struct arrayobject;')
            typecode.putln('typedef struct arrayobject arrayobject;')
            typecode.putln('#endif')
        vtab_list, vtabslot_list = self.sort_type_hierarchy(modules, env)
        self.generate_type_definitions(
            env, modules, vtab_list, vtabslot_list, typecode)
        modulecode = globalstate['module_declarations']
        for module in modules:
            defined_here = module is env
            modulecode.putln("")
            modulecode.putln("/* Module declarations from %s */" % module.qualified_name.as_c_string_literal())
            self.generate_c_class_declarations(module, modulecode, defined_here, globalstate)
            self.generate_cvariable_declarations(module, modulecode, defined_here)
            self.generate_cfunction_declarations(module, modulecode, defined_here)
    @staticmethod
    def _put_setup_code(code, name):
        # Copy the named utility-code section from ModuleSetupCode.c into the output.
        code.put_code_here(UtilityCode.load(name, "ModuleSetupCode.c"))
    def generate_module_preamble(self, env, options, cimported_modules, metadata, code):
        """Write the C file preamble: the metadata comment, Python header
        includes, Cython version/feature macros, error-position helper macros,
        string conversion macros, and module-level static variables.
        """
        code.put_generated_by()
        if metadata:
            # Embed the compilation metadata as a JSON block inside a C comment.
            code.putln("/* BEGIN: Cython Metadata")
            code.putln(json.dumps(metadata, indent=4, sort_keys=True))
            code.putln("END: Cython Metadata */")
            code.putln("")

        code.putln("#ifndef PY_SSIZE_T_CLEAN")
        code.putln("#define PY_SSIZE_T_CLEAN")
        code.putln("#endif /* PY_SSIZE_T_CLEAN */")
        self._put_setup_code(code, "InitLimitedAPI")

        # Includes requested to appear before Python.h.
        for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
            if inc.location == inc.INITIAL:
                inc.write(code)
        code.putln("#ifndef Py_PYTHON_H")
        code.putln(" #error Python headers needed to compile C extensions, "
                   "please install development version of Python.")
        code.putln("#elif PY_VERSION_HEX < 0x03090000")
        code.putln(" #error Cython requires Python 3.9+.")
        code.putln("#else")
        # The matching #endif goes at the very end of the generated file.
        code.globalstate["end"].putln("#endif /* Py_PYTHON_H */")

        from .. import __version__
        code.putln(f'#define __PYX_ABI_VERSION "{__version__.replace(".", "_")}"')
        code.putln('#define CYTHON_HEX_VERSION %s' % build_hex_version(__version__))
        code.putln("#define CYTHON_FUTURE_DIVISION %d" % (
            Future.division in env.context.future_directives))
        code.globalstate.use_utility_code(
            UtilityCode.load("CythonABIVersion", "ModuleSetupCode.c"))
        self._put_setup_code(code, "CModulePreamble")
        if env.context.options.cplus:
            self._put_setup_code(code, "CppInitCode")
        else:
            self._put_setup_code(code, "CInitCode")
        self._put_setup_code(code, "PythonCompatibility")
        self._put_setup_code(code, "MathInitCode")

        # Error handling and position macros.
        # Using "(void)cname" to prevent "unused" warnings.
        mark_errpos_code = (
            "#define __PYX_MARK_ERR_POS(f_index, lineno) {"
            f" {Naming.filename_cname} = {Naming.filetable_cname}[f_index];"
            f" (void) {Naming.filename_cname};"
            f" {Naming.lineno_cname} = lineno;"
            f" (void) {Naming.lineno_cname};"
            "%s" # for C line info
            f" (void) {Naming.clineno_cname}; " # always suppress warnings
            "}"
        )
        cline_info = f" {Naming.clineno_cname} = {Naming.line_c_macro};"

        # Show the C code line in tracebacks or not? C macros take precedence over (deprecated) options.
        # 1) "CYTHON_CLINE_IN_TRACEBACK=0" always disables C lines in tracebacks
        # 2) "CYTHON_CLINE_IN_TRACEBACK_RUNTIME=1" enables the feature + runtime configuration
        # 2a) "options.c_line_in_traceback=True" changes the default to CYTHON_CLINE_IN_TRACEBACK_RUNTIME=1
        # 2b) "options.c_line_in_traceback=False" changes the default to disable C lines
        # 4) "CYTHON_CLINE_IN_TRACEBACK=1" enables C lines without runtime configuration
        # 5) if nothing is set, the default is to disable the feature
        default_cline_runtime = 0
        if options.c_line_in_traceback is not None:
            # explicitly set by user
            default_cline_runtime = int(options.c_line_in_traceback)
        code.putln("#ifndef CYTHON_CLINE_IN_TRACEBACK_RUNTIME")
        code.putln(f"#define CYTHON_CLINE_IN_TRACEBACK_RUNTIME {default_cline_runtime}")
        code.putln("#endif")
        code.putln("#ifndef CYTHON_CLINE_IN_TRACEBACK")
        code.putln("#define CYTHON_CLINE_IN_TRACEBACK CYTHON_CLINE_IN_TRACEBACK_RUNTIME")
        code.putln("#endif")
        code.putln("#if CYTHON_CLINE_IN_TRACEBACK")
        code.putln(mark_errpos_code % cline_info)
        code.putln("#else")
        code.putln(mark_errpos_code % "")
        code.putln("#endif")
        code.putln("#define __PYX_ERR(f_index, lineno, Ln_error) \\")
        code.putln(" { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }")
        code.putln("")

        self.generate_extern_c_macro_definition(code, env.is_cpp())
        code.putln("")

        code.putln("#define %s" % self.api_name(Naming.h_guard_prefix, env))
        code.putln("#define %s" % self.api_name(Naming.api_guard_prefix, env))
        code.putln("/* Early includes */")
        self.generate_includes(env, cimported_modules, code, late=False)
        code.putln("")
        code.putln("#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)")
        code.putln("#define CYTHON_WITHOUT_ASSERTIONS")
        code.putln("#endif")
        code.putln("")

        if env.directives['ccomplex']:
            code.putln("")
            code.putln("#if !defined(CYTHON_CCOMPLEX)")
            code.putln("#define CYTHON_CCOMPLEX 1")
            code.putln("#endif")
            code.putln("")

        # Configure the default C <-> Python string conversion helpers
        # according to the c_string_type/c_string_encoding directives.
        c_string_type = env.directives['c_string_type']
        c_string_encoding = env.directives['c_string_encoding']
        if c_string_type not in ('bytes', 'bytearray') and not c_string_encoding:
            error(self.pos, "a default encoding must be provided if c_string_type is not a byte type")
        code.putln(f"#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII {int(c_string_encoding == 'ascii')}")
        code.putln(f"#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 {int(c_string_encoding == 'utf8')}")
        if c_string_encoding not in ('ascii', 'utf8'):
            code.putln(f'#define __PYX_DEFAULT_STRING_ENCODING "{c_string_encoding}"')
        if c_string_type == 'bytearray':
            c_string_func_name = 'ByteArray'
        elif c_string_type == 'str':
            c_string_func_name = 'Unicode'
        else:
            c_string_func_name = c_string_type.title()
        code.putln(f'#define __Pyx_PyObject_FromString __Pyx_Py{c_string_func_name}_FromString')
        code.putln(f'#define __Pyx_PyObject_FromStringAndSize __Pyx_Py{c_string_func_name}_FromStringAndSize')
        code.put(UtilityCode.load_as_string("TypeConversions", "TypeConversion.c")[0])
        env.use_utility_code(UtilityCode.load_cached("FormatTypeName", "ObjectHandling.c"))

        # These utility functions are assumed to exist and used elsewhere.
        PyrexTypes.c_long_type.create_to_py_utility_code(env)
        PyrexTypes.c_long_type.create_from_py_utility_code(env)
        PyrexTypes.c_int_type.create_from_py_utility_code(env)

        code.put(Nodes.branch_prediction_macros)
        self._put_setup_code(code, "PretendToInitialize")
        code.putln('')
        code.putln('#if !CYTHON_USE_MODULE_STATE')
        code.putln('static PyObject *%s = NULL;' % env.module_cname)
        if Options.pre_import is not None:
            code.putln('static PyObject *%s;' % Naming.preimport_cname)
        code.putln('#endif')
        # Static position-tracking variables used by __PYX_MARK_ERR_POS above.
        code.putln('static int %s;' % Naming.lineno_cname)
        code.putln('static int %s = 0;' % Naming.clineno_cname)
        code.putln('static const char * const %s = %s;' % (Naming.cfilenm_cname, Naming.file_c_macro))
        code.putln('static const char *%s;' % Naming.filename_cname)

        env.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
        env.use_utility_code(UtilityCode.load("GetRuntimeVersion", "ModuleSetupCode.c"))
        env.use_utility_code(UtilityCode.load_cached("AddModuleRef", "ModuleSetupCode.c"))
        if has_np_pythran(env):
            env.use_utility_code(UtilityCode.load_cached("PythranConversion", "CppSupport.cpp"))
def generate_extern_c_macro_definition(self, code, is_cpp):
name = Naming.extern_c_macro
code.putln("#ifdef CYTHON_EXTERN_C")
# make sure that user overrides always take precedence
code.putln(' #undef %s' % name)
code.putln(' #define %s CYTHON_EXTERN_C' % name)
code.putln("#elif defined(%s)" % name)
code.putln(" #ifdef _MSC_VER")
code.putln(" #pragma message (\"Please do not define the '%s' macro externally. Use 'CYTHON_EXTERN_C' instead.\")" % name)
code.putln(" #else")
code.putln(" #warning Please do not define the '%s' macro externally. Use 'CYTHON_EXTERN_C' instead." % name)
code.putln(" #endif")
code.putln("#else")
if is_cpp:
code.putln(' #define %s extern "C++"' % name)
else:
code.putln(" #ifdef __cplusplus")
code.putln(' #define %s extern "C"' % name)
code.putln(" #else")
code.putln(" #define %s extern" % name)
code.putln(" #endif")
code.putln("#endif")
def generate_dl_import_macro(self, code):
code.putln("#ifndef DL_IMPORT")
code.putln(" #define DL_IMPORT(_T) _T")
code.putln("#endif")
def generate_includes(self, env, cimported_modules, code, early=True, late=True):
for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
if inc.location == inc.EARLY:
if early:
inc.write(code)
elif inc.location == inc.LATE:
if late:
inc.write(code)
if early:
code.putln_openmp("#include <omp.h>")
def generate_filename_table(self, code):
from os.path import isabs, basename
code.putln("")
code.putln("static const char* const %s[] = {" % Naming.filetable_cname)
if code.globalstate.filename_list:
for source_desc in code.globalstate.filename_list:
file_path = source_desc.get_filenametable_entry()
if isabs(file_path):
# never include absolute paths
file_path = source_desc.get_description()
# Always use / as separator
file_path = pathlib.Path(file_path).as_posix()
escaped_filename = as_encoded_filename(file_path)
code.putln('%s,' % escaped_filename.as_c_string_literal())
else:
# Some C compilers don't like an empty array
code.putln("0")
code.putln("};")
def generate_type_predeclarations(self, env, code):
pass
    def generate_type_header_code(self, type_entries, code):
        """Write forward declarations and then the full definitions for all
        structs/unions/enums/typedefs/object structs in *type_entries*.

        The two-pass layout lets definitions reference each other regardless
        of declaration order.
        """
        # Generate definitions of structs/unions/enums/typedefs/objstructs.
        #self.generate_gcc33_hack(env, code) # Is this still needed?
        # Forward declarations
        for entry in type_entries:
            if not entry.in_cinclude:
                #print "generate_type_header_code:", entry.name, repr(entry.type) ###
                type = entry.type
                if type.is_typedef: # Must test this first!
                    pass
                elif type.is_struct_or_union or type.is_cpp_class:
                    self.generate_struct_union_predeclaration(entry, code)
                elif type.is_ctuple and not type.is_fused and entry.used:
                    self.generate_struct_union_predeclaration(entry.type.struct_entry, code)
                elif type.is_extension_type:
                    self.generate_objstruct_predeclaration(type, code)
        # Actual declarations
        for entry in type_entries:
            if not entry.in_cinclude:
                #print "generate_type_header_code:", entry.name, repr(entry.type) ###
                type = entry.type
                if type.is_typedef: # Must test this first!
                    self.generate_typedef(entry, code)
                elif type.is_enum or type.is_cpp_enum:
                    self.generate_enum_definition(entry, code)
                elif type.is_struct_or_union:
                    self.generate_struct_union_definition(entry, code)
                elif type.is_ctuple and not type.is_fused and entry.used:
                    self.generate_struct_union_definition(entry.type.struct_entry, code)
                elif type.is_cpp_class:
                    self.generate_cpp_class_definition(entry, code)
                elif type.is_extension_type:
                    self.generate_objstruct_definition(type, code)
                if getattr(type, "scope", None):
                    # Register any utility code tied to the attribute entries.
                    for var_entry in type.scope.var_entries:
                        code.globalstate.use_entry_utility_code(var_entry)
def generate_gcc33_hack(self, env, code):
# Workaround for spurious warning generation in gcc 3.3
code.putln("")
for entry in env.c_class_entries:
type = entry.type
if not type.typedef_flag:
name = type.objstruct_cname
if name.startswith("__pyx_"):
tail = name[6:]
else:
tail = name
code.putln("typedef struct %s __pyx_gcc33_%s;" % (
name, tail))
def generate_typedef(self, entry, code):
base_type = entry.type.typedef_base_type
enclosing_scope = entry.scope
if base_type.is_numeric and not enclosing_scope.is_cpp_class_scope:
try:
writer = code.globalstate['numeric_typedefs']
except KeyError:
writer = code
else:
writer = code
writer.mark_pos(entry.pos)
writer.putln("typedef %s;" % base_type.declaration_code(entry.cname))
def sue_predeclaration(self, type, kind, name):
if type.typedef_flag:
return "%s %s;\ntypedef %s %s %s;" % (
kind, name,
kind, name, name)
else:
return "%s %s;" % (kind, name)
def generate_struct_union_predeclaration(self, entry, code):
type = entry.type
if type.is_cpp_class and type.templates:
code.putln("template <typename %s>" % ", typename ".join(
[T.empty_declaration_code() for T in type.templates]))
code.putln(self.sue_predeclaration(type, type.kind, type.cname))
def sue_header_footer(self, type, kind, name):
header = "%s %s {" % (kind, name)
footer = "};"
return header, footer
    def generate_struct_union_definition(self, entry, code):
        """Write the full C definition of a struct or union entry, wrapping
        packed structs in compiler-specific #pragma pack directives."""
        code.mark_pos(entry.pos)
        type = entry.type
        scope = type.scope
        if scope:
            kind = type.kind
            packed = type.is_struct and type.packed
            if packed:
                kind = "%s %s" % (type.kind, "__Pyx_PACKED")
                code.globalstate.use_utility_code(packed_struct_utility_code)
            header, footer = \
                self.sue_header_footer(type, kind, type.cname)
            if packed:
                # GCC handles packing via __Pyx_PACKED; others need pragmas.
                code.putln("#if defined(__SUNPRO_C)")
                code.putln(" #pragma pack(1)")
                code.putln("#elif !defined(__GNUC__)")
                code.putln(" #pragma pack(push, 1)")
                code.putln("#endif")
            code.putln(header)
            var_entries = scope.var_entries
            for attr in var_entries:
                code.putln(
                    "%s;" % attr.type.declaration_code(attr.cname))
            code.putln(footer)
            if packed:
                # Restore the previous packing after the struct body.
                code.putln("#if defined(__SUNPRO_C)")
                code.putln(" #pragma pack()")
                code.putln("#elif !defined(__GNUC__)")
                code.putln(" #pragma pack(pop)")
                code.putln("#endif")
def generate_cpp_constructor_code(self, arg_decls, arg_names, is_implementing, py_attrs, constructor, type, code):
if is_implementing:
code.putln("%s(%s) {" % (type.cname, ", ".join(arg_decls)))
needs_gil = py_attrs or (constructor and not constructor.type.nogil)
if needs_gil:
code.put_ensure_gil()
if py_attrs:
for attr in py_attrs:
code.put_init_var_to_py_none(attr, nanny=False)
if constructor:
code.putln("%s(%s);" % (constructor.cname, ", ".join(arg_names)))
if needs_gil:
code.put_release_ensured_gil()
code.putln("}")
else:
code.putln("%s(%s);" % (type.cname, ", ".join(arg_decls)))
def generate_cpp_class_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
scope = type.scope
if scope:
if type.templates:
code.putln("template <class %s>" % ", class ".join(
[T.empty_declaration_code() for T in type.templates]))
# Just let everything be public.
code.put("struct %s" % type.cname)
if type.base_classes:
base_class_decl = ", public ".join(
[base_class.empty_declaration_code() for base_class in type.base_classes])
code.put(" : public %s" % base_class_decl)
code.putln(" {")
self.generate_type_header_code(scope.type_entries, code)
py_attrs = [e for e in scope.entries.values()
if e.type.is_pyobject and not e.is_inherited]
has_virtual_methods = False
constructor = None
destructor = None
for attr in scope.var_entries:
if attr.type.is_cfunction and attr.type.is_static_method:
code.put("static ")
elif attr.name == "<init>":
constructor = scope.lookup_here("<init>")
elif attr.name == "<del>":
destructor = attr
elif attr.type.is_cfunction:
code.put("virtual ")
has_virtual_methods = True
code.putln("%s;" % attr.type.declaration_code(attr.cname))
is_implementing = 'init_module' in code.globalstate.parts
if constructor or py_attrs:
if constructor:
for constructor_alternative in constructor.all_alternatives():
arg_decls = []
arg_names = []
for arg in constructor_alternative.type.original_args[
:len(constructor_alternative.type.args)-constructor_alternative.type.optional_arg_count]:
arg_decls.append(arg.declaration_code())
arg_names.append(arg.cname)
if constructor_alternative.type.optional_arg_count:
arg_decls.append(constructor_alternative.type.op_arg_struct.declaration_code(Naming.optional_args_cname))
arg_names.append(Naming.optional_args_cname)
if not arg_decls:
default_constructor = True
arg_decls = []
self.generate_cpp_constructor_code(arg_decls, arg_names, is_implementing, py_attrs, constructor_alternative, type, code)
else:
arg_decls = []
arg_names = []
self.generate_cpp_constructor_code(arg_decls, arg_names, is_implementing, py_attrs, constructor, type, code)
if destructor or py_attrs or has_virtual_methods:
if has_virtual_methods:
code.put("virtual ")
if is_implementing:
code.putln("~%s() {" % type.cname)
if py_attrs:
code.put_ensure_gil()
if destructor:
code.putln("%s();" % destructor.cname)
if py_attrs:
for attr in py_attrs:
code.put_var_xdecref(attr, nanny=False)
code.put_release_ensured_gil()
code.putln("}")
else:
code.putln("~%s();" % type.cname)
if py_attrs:
# Also need copy constructor and assignment operators.
if is_implementing:
code.putln("%s(const %s& __Pyx_other) {" % (type.cname, type.cname))
code.put_ensure_gil()
for attr in scope.var_entries:
if not attr.type.is_cfunction:
code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
code.put_var_incref(attr, nanny=False)
code.put_release_ensured_gil()
code.putln("}")
code.putln("%s& operator=(const %s& __Pyx_other) {" % (type.cname, type.cname))
code.putln("if (this != &__Pyx_other) {")
code.put_ensure_gil()
for attr in scope.var_entries:
if not attr.type.is_cfunction:
code.put_var_xdecref(attr, nanny=False)
code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
code.put_var_incref(attr, nanny=False)
code.put_release_ensured_gil()
code.putln("}")
code.putln("return *this;")
code.putln("}")
else:
code.putln("%s(const %s& __Pyx_other);" % (type.cname, type.cname))
code.putln("%s& operator=(const %s& __Pyx_other);" % (type.cname, type.cname))
code.putln("};")
    def generate_enum_definition(self, entry, code):
        """Write the C definition of an enum (or C++ scoped enum) entry,
        followed by a typedef when the enum was declared with one."""
        code.mark_pos(entry.pos)
        type = entry.type
        name = entry.cname or entry.name or ""
        kind = "enum class" if entry.type.is_cpp_enum else "enum"
        header, footer = self.sue_header_footer(type, kind, name)
        code.putln(header)
        enum_values = entry.enum_values
        if not enum_values:
            error(entry.pos, "Empty enum definition not allowed outside a 'cdef extern from' block")
        else:
            last_entry = enum_values[-1]
            # this does not really generate code, just builds the result value
            for value_entry in enum_values:
                if value_entry.value_node is not None:
                    value_entry.value_node.generate_evaluation_code(code)

            for value_entry in enum_values:
                if value_entry.value_node is None:
                    # Implicit value: emit the bare name (strip any C++ scope).
                    value_code = value_entry.cname.split("::")[-1]
                else:
                    value_code = ("%s = %s" % (
                        value_entry.cname.split("::")[-1],
                        value_entry.value_node.result()))
                if value_entry is not last_entry:
                    value_code += ","
                code.putln(value_code)
        code.putln(footer)
        if entry.type.is_enum:
            if entry.type.typedef_flag:
                # Not pre-declared.
                code.putln("typedef enum %s %s;" % (name, name))
def generate_typeobj_predeclaration(self, entry, code):
code.putln("")
name = entry.type.typeobj_cname
if name:
if entry.visibility == 'extern' and not entry.in_cinclude:
code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
name))
elif entry.visibility == 'public':
code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_EXPORT"),
name))
# ??? Do we really need the rest of this? ???
#else:
# code.putln("static PyTypeObject %s;" % name)
def generate_exttype_vtable_struct(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate struct declaration for an extension type's vtable.
type = entry.type
scope = type.scope
self.specialize_fused_types(scope)
if type.vtabstruct_cname:
code.putln("")
code.putln("struct %s {" % type.vtabstruct_cname)
if type.base_type and type.base_type.vtabstruct_cname:
code.putln("struct %s %s;" % (
type.base_type.vtabstruct_cname,
Naming.obj_base_cname))
for method_entry in scope.cfunc_entries:
if not method_entry.is_inherited:
code.putln("%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.cname))
code.putln("};")
def generate_exttype_vtabptr_declaration(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate declaration of pointer to an extension type's vtable.
type = entry.type
if type.vtabptr_cname:
code.putln("static struct %s *%s;" % (
type.vtabstruct_cname,
type.vtabptr_cname))
def generate_exttype_final_methods_declaration(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate final methods prototypes
for method_entry in entry.type.scope.cfunc_entries:
if not method_entry.is_inherited and method_entry.final_func_cname:
declaration = method_entry.type.declaration_code(
method_entry.final_func_cname)
modifiers = code.build_function_modifiers(method_entry.func_modifiers)
code.putln("static %s%s;" % (modifiers, declaration))
def generate_objstruct_predeclaration(self, type, code):
if not type.scope:
return
code.putln(self.sue_predeclaration(type, "struct", type.objstruct_cname))
    def generate_objstruct_definition(self, type, code):
        """Write the object struct of an extension type: the base struct (or
        PyObject_HEAD), an optional vtable slot, then the attributes."""
        code.mark_pos(type.pos)
        # Generate object struct definition for an
        # extension type.
        if not type.scope:
            return # Forward declared but never defined
        header, footer = \
            self.sue_header_footer(type, "struct", type.objstruct_cname)
        code.putln(header)
        base_type = type.base_type
        if base_type:
            basestruct_cname = base_type.objstruct_cname
            if basestruct_cname == "PyTypeObject":
                # User-defined subclasses of type are heap allocated.
                basestruct_cname = "PyHeapTypeObject"
            code.putln(
                "%s%s %s;" % (
                    ("struct ", "")[base_type.typedef_flag],
                    basestruct_cname,
                    Naming.obj_base_cname))
        else:
            code.putln(
                "PyObject_HEAD")
        # Only the first class in a hierarchy carries the vtable pointer;
        # subclasses inherit it through the embedded base struct.
        if type.vtabslot_cname and not (type.base_type and type.base_type.vtabslot_cname):
            code.putln(
                "struct %s *%s;" % (
                    type.vtabstruct_cname,
                    type.vtabslot_cname))
        for attr in type.scope.var_entries:
            if attr.is_declared_generic:
                attr_type = py_object_type
            else:
                attr_type = attr.type
            if attr.is_cpp_optional:
                decl = attr_type.cpp_optional_declaration_code(attr.cname)
            else:
                decl = attr_type.declaration_code(attr.cname)
            code.globalstate.use_entry_utility_code(attr)
            code.putln("%s;" % decl)
        code.putln(footer)
        if type.objtypedef_cname is not None:
            # Only for exposing public typedef name.
            code.putln("typedef struct %s %s;" % (type.objstruct_cname, type.objtypedef_cname))
def generate_c_class_declarations(self, env, code, definition, globalstate):
module_state = globalstate['module_state']
module_state_clear = globalstate['module_state_clear']
module_state_traverse = globalstate['module_state_traverse']
module_state_typeobj = module_state.insertion_point()
for entry in env.c_class_entries:
if definition or entry.defined_in_pxd:
module_state.putln("PyTypeObject *%s;" % entry.type.typeptr_cname)
module_state_clear.putln(
"Py_CLEAR(clear_module_state->%s);" %
entry.type.typeptr_cname)
module_state_traverse.putln(
"Py_VISIT(traverse_module_state->%s);" %
entry.type.typeptr_cname)
if entry.type.typeobj_cname is not None:
module_state_typeobj.putln("PyObject *%s;" % entry.type.typeobj_cname)
module_state_clear.putln(
"Py_CLEAR(clear_module_state->%s);" % (
entry.type.typeobj_cname))
module_state_traverse.putln(
"Py_VISIT(traverse_module_state->%s);" % (
entry.type.typeobj_cname))
    def generate_cvariable_declarations(self, env, code, definition):
        """Declare the module-level C variables of *env*.

        *definition* is true when generating the module that defines these
        variables (as opposed to a module that merely cimports them).
        """
        if env.is_cython_builtin:
            return
        for entry in env.var_entries:
            # Skip variables declared in C includes, captured in closures, or
            # private ones that are neither shared via a .pxd nor used.
            if (entry.in_cinclude or entry.in_closure or
                    (entry.visibility == 'private' and not (entry.defined_in_pxd or entry.used))):
                continue

            storage_class = None
            dll_linkage = None
            init = None
            if entry.visibility == 'extern':
                storage_class = Naming.extern_c_macro
                dll_linkage = "DL_IMPORT"
            elif entry.visibility == 'public':
                storage_class = Naming.extern_c_macro
                if definition:
                    dll_linkage = "DL_EXPORT"
                else:
                    dll_linkage = "DL_IMPORT"
            elif entry.visibility == 'private':
                storage_class = "static"
                dll_linkage = None
                if entry.init is not None:
                    init = entry.type.literal_code(entry.init)
            type = entry.type
            cname = entry.cname

            if entry.defined_in_pxd and not definition:
                # Cimported variable: declare a static pointer and map the
                # original name onto its dereference via the #define below.
                storage_class = "static"
                dll_linkage = None
                type = CPtrType(type)
                cname = env.mangle(Naming.varptr_prefix, entry.name)
                init = 0

            if storage_class:
                code.put("%s " % storage_class)
            if entry.is_cpp_optional:
                code.put(type.cpp_optional_declaration_code(
                    cname, dll_linkage=dll_linkage))
            else:
                code.put(type.declaration_code(
                    cname, dll_linkage=dll_linkage))
            if init is not None:
                code.put_safe(" = %s" % init)
            code.putln(";")
            if entry.cname != cname:
                code.putln("#define %s (*%s)" % (entry.cname, cname))
            code.globalstate.use_entry_utility_code(entry)
def generate_cfunction_declarations(self, env, code, definition):
for entry in env.cfunc_entries:
from_pyx = Options.cimport_from_pyx and not entry.visibility == 'extern'
if (entry.used
or entry.visibility == 'public'
or entry.api
or from_pyx):
generate_cfunction_declaration(entry, env, code, definition)
def generate_variable_definitions(self, env, code):
for entry in env.var_entries:
if not entry.in_cinclude and entry.visibility == "public":
code.put(entry.type.declaration_code(entry.cname))
if entry.init is not None:
init = entry.type.literal_code(entry.init)
code.put_safe(" = %s" % init)
code.putln(";")
    def generate_typeobj_definitions(self, env, code):
        """Generate slot functions, method/getset tables and the type object
        for every extension type defined (not cimported) in this module."""
        full_module_name = env.qualified_name
        for entry in env.c_class_entries:
            #print "generate_typeobj_definitions:", entry.name
            #print "...visibility =", entry.visibility
            if entry.visibility != 'extern':
                type = entry.type
                scope = type.scope
                if scope: # could be None if there was an error
                    self.generate_exttype_vtable(scope, code)
                    self.generate_new_function(scope, code, entry)
                    self.generate_del_function(scope, code)
                    self.generate_dealloc_function(scope, code)
                    if scope.needs_gc():
                        self.generate_traverse_function(scope, code, entry)
                        if scope.needs_tp_clear():
                            self.generate_clear_function(scope, code, entry)
                    # One slot wrapper per group of special methods defined.
                    if scope.defines_any_special(["__getitem__"]):
                        self.generate_getitem_int_function(scope, code)
                    if scope.defines_any_special(["__setitem__", "__delitem__"]):
                        self.generate_ass_subscript_function(scope, code)
                    if scope.defines_any_special(["__getslice__", "__setslice__", "__delslice__"]):
                        warning(self.pos,
                                "__getslice__, __setslice__, and __delslice__ are not supported by Python 3, "
                                "use __getitem__, __setitem__, and __delitem__ instead", 1)
                        code.putln("#error __getslice__, __setslice__, and __delslice__ not supported in Python 3.")
                    if scope.defines_any_special(["__setslice__", "__delslice__"]):
                        self.generate_ass_slice_function(scope, code)
                    if scope.defines_any_special(["__getattr__", "__getattribute__"]):
                        self.generate_getattro_function(scope, code)
                    if scope.defines_any_special(["__setattr__", "__delattr__"]):
                        self.generate_setattro_function(scope, code)
                    if scope.defines_any_special(["__get__"]):
                        self.generate_descr_get_function(scope, code)
                    if scope.defines_any_special(["__set__", "__delete__"]):
                        self.generate_descr_set_function(scope, code)
                    if not (scope.is_closure_class_scope or scope.is_defaults_class_scope) and scope.defines_any(["__dict__"]):
                        self.generate_dict_getter_function(scope, code)

                    if scope.defines_any_special(TypeSlots.richcmp_special_methods):
                        self.generate_richcmp_function(scope, code)
                    elif 'total_ordering' in scope.directives:
                        # Warn if this is used when it can't have any effect.
                        warning(scope.parent_type.pos,
                                "total_ordering directive used, but no comparison and equality methods defined")

                    for slot in TypeSlots.get_slot_table(code.globalstate.directives).PyNumberMethods:
                        if slot.is_binop and scope.defines_any_special(slot.user_methods):
                            self.generate_binop_function(scope, slot, code, entry.pos)

                    self.generate_property_accessors(scope, code)
                    self.generate_method_table(scope, code)
                    self.generate_getset_table(scope, code)
                    code.putln("#if CYTHON_USE_TYPE_SPECS")
                    self.generate_typeobj_spec(entry, code)
                    code.putln("#else")
                    self.generate_typeobj_definition(full_module_name, entry, code)
                    code.putln("#endif")
def generate_exttype_vtable(self, scope, code):
# Generate the definition of an extension type's vtable.
type = scope.parent_type
if type.vtable_cname:
code.putln("static struct %s %s;" % (
type.vtabstruct_cname,
type.vtable_cname))
def generate_self_cast(self, scope, code):
type = scope.parent_type
code.putln(
"%s = (%s)o;" % (
type.declaration_code("p"),
type.empty_declaration_code()))
@staticmethod
def generate_freelist_condition(code, size_check, type_cname, type):
code.globalstate.use_utility_code(
UtilityCode.load_cached("CheckTypeForFreelists", "ExtensionTypes.c"))
if type.is_final_type:
freelist_check = '__PYX_CHECK_FINAL_TYPE_FOR_FREELISTS'
else:
freelist_check = '__PYX_CHECK_TYPE_FOR_FREELISTS'
obj_struct = type.declaration_code("", deref=True)
typeptr_cname = code.name_in_slot_module_state(type.typeptr_cname)
code.putln(
f"if (likely((int)({size_check}) & {freelist_check}({type_cname}, {typeptr_cname}, sizeof({obj_struct}))))")
    def generate_new_function(self, scope, code, cclass_entry):
        """Generate the tp_new slot function for an extension type: allocate
        the object (from the freelist when enabled and no base type exists),
        initialize vtable pointer and attributes, then call __cinit__.
        """
        tp_slot = TypeSlots.ConstructorSlot("tp_new", "__cinit__")
        slot_func = scope.mangle_internal("tp_new")
        if tp_slot.slot_code(scope) != slot_func:
            return # never used

        type = scope.parent_type
        base_type = type.base_type

        have_entries, (py_attrs, py_buffers, memoryview_slices) = \
            scope.get_refcounted_entries()
        is_final_type = scope.parent_type.is_final_type
        if scope.is_internal:
            # internal classes (should) never need None inits, normal zeroing will do
            py_attrs = []

        explicitly_constructable_attrs = [
            entry for entry in scope.var_entries
            if entry.type.needs_explicit_construction(scope)
        ]

        cinit_func_entry = scope.lookup_here("__cinit__")
        if cinit_func_entry and not cinit_func_entry.is_special:
            cinit_func_entry = None

        # Mark 'a'/'k' unused unless a base tp_new or a non-trivial __cinit__
        # will consume them.
        if base_type or (cinit_func_entry and not cinit_func_entry.trivial_signature):
            unused_marker = ''
        else:
            unused_marker = 'CYTHON_UNUSED '

        if base_type:
            freelist_size = 0 # not currently supported
        else:
            freelist_size = scope.directives.get('freelist', 0)
        freelist_name = scope.mangle_internal(Naming.freelist_name)
        freecount_name = scope.mangle_internal(Naming.freecount_name)

        if freelist_size:
            # The freelist array and its counter live in the module state.
            module_state = code.globalstate['module_state_contents']
            module_state.putln("")
            module_state.putln("#if CYTHON_USE_FREELISTS")
            module_state.putln("%s[%d];" % (
                scope.parent_type.declaration_code(freelist_name),
                freelist_size))
            module_state.putln("int %s;" % freecount_name)
            module_state.putln("#endif")

        code.start_slotfunc(
            scope, PyrexTypes.py_objptr_type, "tp_new",
            f"PyTypeObject *t, {unused_marker}PyObject *a, {unused_marker}PyObject *k", needs_prototype=True)

        need_self_cast = (type.vtabslot_cname or
                          (py_buffers or memoryview_slices or py_attrs) or
                          explicitly_constructable_attrs)
        if need_self_cast:
            code.putln("%s;" % scope.parent_type.declaration_code("p"))
        if base_type:
            # Delegate allocation to the base type's tp_new.
            tp_new = TypeSlots.get_base_slot_function(scope, tp_slot)
            base_type_typeptr_cname = base_type.typeptr_cname
            if not base_type.is_builtin_type:
                base_type_typeptr_cname = code.name_in_slot_module_state(base_type_typeptr_cname)
            if tp_new is None:
                tp_new = f"__Pyx_PyType_GetSlot({base_type_typeptr_cname}, tp_new, newfunc)"
            code.putln("PyObject *o = %s(t, a, k);" % tp_new)
        else:
            code.putln("PyObject *o;")
            if freelist_size:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
                code.putln("#if CYTHON_USE_FREELISTS")
                freecount_name = code.name_in_slot_module_state(freecount_name)
                freelist_name = code.name_in_slot_module_state(freelist_name)
                self.generate_freelist_condition(code, f"{freecount_name} > 0", "t", type)
                code.putln("{")
                code.putln("o = (PyObject*)%s[--%s];" % (
                    freelist_name,
                    freecount_name))
                obj_struct = type.declaration_code("", deref=True)
                code.putln("#if CYTHON_USE_TYPE_SPECS")
                # We still hold a reference to the type object held by the previous
                # user of the freelist object - release it.
                code.putln("Py_DECREF(Py_TYPE(o));")
                code.putln("#endif")
                code.putln("memset(o, 0, sizeof(%s));" % obj_struct)
                code.putln("#if CYTHON_COMPILING_IN_LIMITED_API")
                # Although PyObject_INIT should be part of the Limited API, it causes
                # link errors on some combinations of Python versions and OSs.
                code.putln("(void) PyObject_Init(o, t);")
                code.putln("#else")
                code.putln("(void) PyObject_INIT(o, t);")
                code.putln("#endif")
                if scope.needs_gc():
                    code.putln("PyObject_GC_Track(o);")
                code.putln("} else")
                code.putln("#endif")
                code.putln("{")
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("AllocateExtensionType", "ExtensionTypes.c")
            )
            code.putln(f"o = __Pyx_AllocateExtensionType(t, {is_final_type:d});")
            code.putln("if (unlikely(!o)) return 0;")
        if freelist_size and not base_type:
            # Close the brace opened for the non-freelist allocation path.
            code.putln('}')
        if need_self_cast:
            code.putln("p = %s;" % type.cast_code("o"))
        #if need_self_cast:
        #    self.generate_self_cast(scope, code)

        # from this point on, ensure DECREF(o) on failure
        needs_error_cleanup = False

        if type.vtabslot_cname:
            # Cast through the topmost vtable struct when the slot was
            # declared in a base class.
            vtab_base_type = type
            while vtab_base_type.base_type and vtab_base_type.base_type.vtabstruct_cname:
                vtab_base_type = vtab_base_type.base_type
            if vtab_base_type is not type:
                struct_type_cast = "(struct %s*)" % vtab_base_type.vtabstruct_cname
            else:
                struct_type_cast = ""
            code.putln("p->%s = %s%s;" % (
                type.vtabslot_cname,
                struct_type_cast, type.vtabptr_cname))

        for entry in explicitly_constructable_attrs:
            entry.type.generate_explicit_construction(
                code, entry, extra_access_code="p->")

        for entry in py_attrs:
            if entry.name == "__dict__":
                needs_error_cleanup = True
                code.put("p->%s = PyDict_New(); if (unlikely(!p->%s)) goto bad;" % (
                    entry.cname, entry.cname))
            else:
                code.put_init_var_to_py_none(entry, "p->%s", nanny=False)

        for entry in memoryview_slices:
            code.putln("p->%s.data = NULL;" % entry.cname)
            code.putln("p->%s.memview = NULL;" % entry.cname)

        for entry in py_buffers:
            code.putln("p->%s.obj = NULL;" % entry.cname)

        if cclass_entry.cname == '__pyx_memoryviewslice':
            code.putln("p->from_slice.memview = NULL;")

        if cinit_func_entry:
            if cinit_func_entry.trivial_signature:
                # NOTE(review): this branch also emits "goto bad" below but
                # does not set needs_error_cleanup — verify that a "bad" label
                # is always present when a trivial-signature __cinit__ fails.
                cinit_args = f"o, {Naming.modulestateglobal_cname}->{Naming.empty_tuple}, NULL"
            else:
                cinit_args = "o, a, k"
                needs_error_cleanup = True
            code.putln("if (unlikely(%s(%s) < 0)) goto bad;" % (
                cinit_func_entry.func_cname, cinit_args))
        code.putln(
            "return o;")
        if needs_error_cleanup:
            code.putln("bad:")
            code.put_decref_clear("o", py_object_type, nanny=False)
            code.putln("return NULL;")
        code.putln(
            "}")
        code.exit_cfunc_scope()
def generate_del_function(self, scope, code):
tp_slot = TypeSlots.get_slot_by_name("tp_finalize", scope.directives)
slot_func_cname = scope.mangle_internal("tp_finalize")
if tp_slot.slot_code(scope) != slot_func_cname:
return # never used
entry = scope.lookup_here("__del__")
if entry is None or not entry.is_special:
return # nothing to wrap
code.putln("")
if tp_slot.used_ifdef:
code.putln("#if %s" % tp_slot.used_ifdef)
code.start_slotfunc(scope, PyrexTypes.c_void_type, "tp_finalize", "PyObject *o", needs_funcstate=False)
code.putln("PyObject *etype, *eval, *etb;")
code.putln("PyErr_Fetch(&etype, &eval, &etb);")
code.putln("%s(o);" % entry.func_cname)
code.putln("PyErr_Restore(etype, eval, etb);")
code.putln("}")
code.exit_cfunc_scope()
if tp_slot.used_ifdef:
code.putln("#endif")
    def generate_dealloc_function(self, scope, code):
        """Generate the tp_dealloc slot function for an extension type.

        Tear-down order: optional early tp_finalize call, GC untracking,
        trashcan guard, weakref clearing, the user's __dealloc__, attribute
        cleanup, then either delegation to the base type's tp_dealloc or
        freeing the object (optionally through a freelist).
        """
        tp_slot = TypeSlots.ConstructorSlot("tp_dealloc", '__dealloc__')
        slot_func = scope.mangle_internal("tp_dealloc")
        base_type = scope.parent_type.base_type
        if tp_slot.slot_code(scope) != slot_func:
            return  # never used
        slot_func_cname = scope.mangle_internal("tp_dealloc")
        code.start_slotfunc(scope, PyrexTypes.c_void_type, "tp_dealloc", "PyObject *o")
        is_final_type = scope.parent_type.is_final_type
        needs_gc = scope.needs_gc()
        needs_trashcan = scope.needs_trashcan()
        # __weakref__/__dict__ are only handled here when declared directly on
        # this class (closure/defaults classes never declare them; inherited
        # slots are filtered out by the var_entries membership check below).
        weakref_slot = scope.lookup_here("__weakref__") if not (scope.is_closure_class_scope or scope.is_defaults_class_scope) else None
        if weakref_slot not in scope.var_entries:
            weakref_slot = None
        dict_slot = scope.lookup_here("__dict__") if not (scope.is_closure_class_scope or scope.is_defaults_class_scope) else None
        if dict_slot not in scope.var_entries:
            dict_slot = None
        _, (py_attrs, _, memoryview_slices) = scope.get_refcounted_entries()
        explicitly_destructable_attrs = [
            entry for entry in scope.var_entries
            if entry.type.needs_explicit_destruction(scope)
        ]
        # Only emit the "p" self cast if some cleanup below dereferences it.
        if py_attrs or explicitly_destructable_attrs or memoryview_slices or weakref_slot or dict_slot:
            self.generate_self_cast(scope, code)
        if not is_final_type or scope.may_have_finalize():
            # in Py3.4+, call tp_finalize() as early as possible
            code.putln("#if CYTHON_USE_TP_FINALIZE")
            if needs_gc:
                finalised_check = '!__Pyx_PyObject_GC_IsFinalized(o)'
            else:
                finalised_check = (
                    '(!PyType_IS_GC(Py_TYPE(o)) || !__Pyx_PyObject_GC_IsFinalized(o))')
            code.putln(
                "if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && %s) {" % finalised_check)
            code.putln("if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == %s) {" % slot_func_cname)
            # if instance was resurrected by finaliser, return
            code.putln("if (PyObject_CallFinalizerFromDealloc(o)) return;")
            code.putln("}")
            code.putln("}")
            code.putln("#endif")
        if needs_gc:
            # We must mark this object as (gc) untracked while tearing
            # it down, lest the garbage collection is invoked while
            # running this destructor.
            code.putln("PyObject_GC_UnTrack(o);")
        if needs_trashcan:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyTrashcan", "ExtensionTypes.c"))
            code.putln("__Pyx_TRASHCAN_BEGIN(o, %s)" % slot_func_cname)
        if weakref_slot:
            # We must clean the weakreferences before calling the user's __dealloc__
            # because if the __dealloc__ releases the GIL, a weakref can be
            # dereferenced accessing the object in an inconsistent state or
            # resurrecting it.
            code.putln("if (p->__weakref__) PyObject_ClearWeakRefs(o);")
        # call the user's __dealloc__
        self.generate_usr_dealloc_call(scope, code)
        if dict_slot:
            code.putln("if (p->__dict__) PyDict_Clear(p->__dict__);")
        for entry in explicitly_destructable_attrs:
            entry.type.generate_explicit_destruction(code, entry, extra_access_code="p->")
        for entry in (py_attrs + memoryview_slices):
            code.put_xdecref_clear("p->%s" % entry.cname, entry.type, nanny=False,
                                   clear_before_decref=True, have_gil=True)
        if base_type:
            base_cname = base_type.typeptr_cname
            if not base_type.is_builtin_type:
                base_cname = code.name_in_slot_module_state(base_cname)
            tp_dealloc = TypeSlots.get_base_slot_function(scope, tp_slot)
            if tp_dealloc is not None:
                if needs_gc and base_type.scope and base_type.scope.needs_gc():
                    # We know that the base class uses GC, so probably expects it to be tracked.
                    # Undo the untracking above.
                    code.putln("PyObject_GC_Track(o);")
                code.putln("%s(o);" % tp_dealloc)
            elif base_type.is_builtin_type:
                if needs_gc and base_type.scope and base_type.scope.needs_gc():
                    # We know that the base class uses GC, so probably expects it to be tracked.
                    # Undo the untracking above.
                    code.putln("PyObject_GC_Track(o);")
                code.putln("__Pyx_PyType_GetSlot(%s, tp_dealloc, destructor)(o);" % base_cname)
            else:
                if needs_gc:
                    # We don't know if the base class uses GC or not, so must find out at runtime
                    # whether we should undo the untracking above or not.
                    code.putln("if (PyType_IS_GC(%s)) PyObject_GC_Track(o);" % base_cname)
                # This is an externally defined type. Calling through the
                # cimported base type pointer directly interacts badly with
                # the module cleanup, which may already have cleared it.
                # In that case, fall back to traversing the type hierarchy.
                # If we're using the module state then always go through the
                # type hierarchy, because our access to the module state may
                # have been lost (at least for the limited API version of
                # using module state).
                code.putln("#if !CYTHON_USE_MODULE_STATE")
                code.putln("if (likely(%s)) __Pyx_PyType_GetSlot(%s, tp_dealloc, destructor)(o); else" % (
                    base_cname, base_cname))
                code.putln("#endif")
                code.putln("__Pyx_call_next_tp_dealloc(o, %s);" % slot_func_cname)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("CallNextTpDealloc", "ExtensionTypes.c"))
        else:
            # No base type: free the object ourselves, preferring the freelist
            # (when enabled) over tp_free.
            freelist_size = scope.directives.get('freelist', 0)
            if freelist_size:
                freelist_name = code.name_in_slot_module_state(
                    scope.mangle_internal(Naming.freelist_name))
                freecount_name = code.name_in_slot_module_state(
                    scope.mangle_internal(Naming.freecount_name))
                type = scope.parent_type
                code.putln("#if CYTHON_USE_FREELISTS")
                self.generate_freelist_condition(
                    code, f"{freecount_name} < {freelist_size}",
                    "Py_TYPE(o)", type)
                code.putln("{")
                code.putln("%s[%s++] = %s;" % (
                    freelist_name,
                    freecount_name,
                    type.cast_code("o")))
                # Deliberately don't DECREF the type object for objects returned to the freelist:
                # we hold a reference to the type to allow them to be cleaned up properly.
                code.putln("} else")
                code.putln("#endif")
            code.putln("{")
            code.putln("PyTypeObject *tp = Py_TYPE(o);")
            code.putln("#if CYTHON_USE_TYPE_SLOTS")
            # Asking for PyType_GetSlot(..., Py_tp_free) seems to cause an error in pypy
            code.putln("(*tp->tp_free)(o);")
            code.putln("#else")
            code.putln("{")
            code.putln("freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);")
            code.putln("if (tp_free) tp_free(o);")
            code.putln("}")
            code.putln("#endif")
            code.putln("#if CYTHON_USE_TYPE_SPECS")
            # Undo the INCREF of the type object in tp_new
            code.putln("Py_DECREF(tp);")
            code.putln("#endif")
            if freelist_size:
                code.putln("}")
        if needs_trashcan:
            code.putln("__Pyx_TRASHCAN_END")
        code.putln(
            "}")
        code.exit_cfunc_scope()
def generate_usr_dealloc_call(self, scope, code):
entry = scope.lookup_here("__dealloc__")
if not entry or not entry.is_special:
return
code.putln("{")
code.putln("PyObject *etype, *eval, *etb;")
code.putln("PyErr_Fetch(&etype, &eval, &etb);")
# increase the refcount while we are calling into user code
# to prevent recursive deallocation
code.putln("Py_SET_REFCNT(o, Py_REFCNT(o) + 1);")
code.putln("%s(o);" % entry.func_cname)
code.putln("Py_SET_REFCNT(o, Py_REFCNT(o) - 1);")
code.putln("PyErr_Restore(etype, eval, etb);")
code.putln("}")
    def generate_traverse_function(self, scope, code, cclass_entry):
        """Generate the tp_traverse slot function for GC support.

        Visits the base type's referents (statically when possible, via the
        builtin base's slot, or via the type hierarchy at runtime), possibly
        the type object itself, and then every refcounted attribute and
        buffer-owner object.
        """
        tp_slot = TypeSlots.GCDependentSlot("tp_traverse")
        slot_func = scope.mangle_internal("tp_traverse")
        base_type = scope.parent_type.base_type
        if tp_slot.slot_code(scope) != slot_func:
            return  # never used
        code.start_slotfunc(scope, PyrexTypes.c_returncode_type, "tp_traverse", "PyObject *o, visitproc v, void *a")
        have_entries, (py_attrs, py_buffers, memoryview_slices) = (
            scope.get_refcounted_entries(include_gc_simple=False))
        # Without a base type we are definitely responsible for traversing
        # the type object ourselves.
        needs_type_traverse = not base_type
        # we don't know statically if we need to traverse the type
        maybe_needs_type_traverse = False
        code.putln("int e;")
        if py_attrs or py_buffers:
            self.generate_self_cast(scope, code)
        if base_type:
            # want to call it explicitly if possible so inlining can be performed
            static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
            if static_call:
                code.putln("e = %s(o, v, a); if (e) return e;" % static_call)
                # No need to call type traverse - base class will do it
            elif base_type.is_builtin_type:
                base_cname = base_type.typeptr_cname
                code.putln("{")
                code.putln(
                    f"traverseproc traverse = __Pyx_PyType_GetSlot({base_cname}, tp_traverse, traverseproc);")
                code.putln("if (!traverse); else { e = traverse(o,v,a); if (e) return e; }")
                code.putln("}")
                maybe_needs_type_traverse = True
            else:
                # This is an externally defined type. Calling through the
                # cimported base type pointer directly interacts badly with
                # the module cleanup, which may already have cleared it.
                # In that case, fall back to traversing the type hierarchy.
                # If we're using the module state then always go through the
                # type hierarchy, because our access to the module state may
                # have been lost (at least for the limited API version of
                # using module state).
                base_cname = code.name_in_slot_module_state(base_type.typeptr_cname)
                code.putln("#if !CYTHON_USE_MODULE_STATE")
                code.putln("e = 0;")
                code.putln("if (likely(%s)) {" % base_cname)
                code.putln(
                    f"traverseproc traverse = __Pyx_PyType_GetSlot({base_cname}, tp_traverse, traverseproc);")
                code.putln("if (traverse) { e = traverse(o, v, a); }")
                code.putln("} else")
                code.putln("#endif")
                code.putln("{ e = __Pyx_call_next_tp_traverse(o, v, a, %s); }" % slot_func)
                code.putln("if (e) return e;")
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("CallNextTpTraverse", "ExtensionTypes.c"))
                maybe_needs_type_traverse = True
        if needs_type_traverse or maybe_needs_type_traverse:
            code.putln("{")
            code.putln(f"e = __Pyx_call_type_traverse(o, {int(not maybe_needs_type_traverse)}, v, a);")
            code.putln("if (e) return e;")
            code.putln("}")
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CallTypeTraverse", "ExtensionTypes.c"))
        for entry in py_attrs:
            var_code = "p->%s" % entry.cname
            var_as_pyobject = PyrexTypes.typecast(py_object_type, entry.type, var_code)
            code.putln("if (%s) {" % var_code)
            code.putln("e = (*v)(%s, a); if (e) return e;" % var_as_pyobject)
            code.putln("}")
        # Traverse buffer exporting objects.
        # Note: not traversing memoryview attributes of memoryview slices!
        # When triggered by the GC, it would cause multiple visits (gc_refs
        # subtractions which is not matched by its reference count!)
        for entry in py_buffers:
            cname = entry.cname + ".obj"
            code.putln("if (p->%s) {" % cname)
            code.putln("e = (*v)(p->%s, a); if (e) return e;" % cname)
            code.putln("}")
        code.putln("return 0;")
        code.putln("}")
        code.exit_cfunc_scope()
    def generate_clear_function(self, scope, code, cclass_entry):
        """Generate the tp_clear slot function for GC support.

        Clears base type state first, then every refcounted attribute
        (either resetting it to None or via Py_CLEAR, depending on
        ``Options.clear_to_none``) and the buffer owner references.
        """
        tp_slot = TypeSlots.get_slot_by_name("tp_clear", scope.directives)
        slot_func = scope.mangle_internal("tp_clear")
        base_type = scope.parent_type.base_type
        if tp_slot.slot_code(scope) != slot_func:
            return  # never used
        have_entries, (py_attrs, py_buffers, memoryview_slices) = (
            scope.get_refcounted_entries(include_gc_simple=False))
        # Mark the argument unused when the generated body won't touch it.
        if py_attrs or py_buffers or base_type:
            unused = ''
        else:
            unused = 'CYTHON_UNUSED '
        code.start_slotfunc(scope, PyrexTypes.c_returncode_type, "tp_clear", f"{unused}PyObject *o")
        if py_attrs and Options.clear_to_none:
            code.putln("PyObject* tmp;")
        if py_attrs or py_buffers:
            self.generate_self_cast(scope, code)
        if base_type:
            # want to call it explicitly if possible so inlining can be performed
            static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
            if static_call:
                code.putln("%s(o);" % static_call)
            elif base_type.is_builtin_type:
                base_cname = base_type.typeptr_cname
                code.putln("{")
                code.putln(f"inquiry clear = __Pyx_PyType_GetSlot({base_cname}, tp_clear, inquiry);")
                code.putln("if (clear) clear(o);")
                code.putln("}")
            else:
                # This is an externally defined type. Calling through the
                # cimported base type pointer directly interacts badly with
                # the module cleanup, which may already have cleared it.
                # In that case, fall back to traversing the type hierarchy.
                # If we're using the module state then always go through the
                # type hierarchy, because our access to the module state may
                # have been lost (at least for the limited API version of
                # using module state).
                base_cname = code.name_in_slot_module_state(base_type.typeptr_cname)
                code.putln("#if !CYTHON_USE_MODULE_STATE")
                code.putln("if (likely(%s)) {" % base_cname)
                code.putln(f"inquiry clear = __Pyx_PyType_GetSlot({base_cname}, tp_clear, inquiry);")
                code.putln("if (clear) clear(o);")
                code.putln("} else")
                code.putln("#endif")
                code.putln("{ __Pyx_call_next_tp_clear(o, %s); }" % slot_func)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("CallNextTpClear", "ExtensionTypes.c"))
        if Options.clear_to_none:
            # Swap each attribute to None before decrefing the old value, so
            # re-entrant code never observes a dangling pointer.
            for entry in py_attrs:
                name = "p->%s" % entry.cname
                code.putln("tmp = ((PyObject*)%s);" % name)
                if entry.is_declared_generic:
                    code.put_init_to_py_none(name, py_object_type, nanny=False)
                else:
                    code.put_init_to_py_none(name, entry.type, nanny=False)
                code.putln("Py_XDECREF(tmp);")
        else:
            for entry in py_attrs:
                code.putln("Py_CLEAR(p->%s);" % entry.cname)
        for entry in py_buffers:
            # Note: shouldn't this call PyBuffer_Release ??
            code.putln("Py_CLEAR(p->%s.obj);" % entry.cname)
        if cclass_entry.cname == '__pyx_memoryviewslice':
            code.putln("__PYX_XCLEAR_MEMVIEW(&p->from_slice, 1);")
        code.putln("return 0;")
        code.putln("}")
        code.exit_cfunc_scope()
def generate_getitem_int_function(self, scope, code):
# This function is put into the sq_item slot when
# a __getitem__ method is present. It converts its
# argument to a Python integer and calls mp_subscript.
code.start_slotfunc(scope, PyrexTypes.py_objptr_type, "sq_item", "PyObject *o, Py_ssize_t i", needs_funcstate=False)
code.putln(
"PyObject *r;")
code.putln(
"PyObject *x = PyLong_FromSsize_t(i); if(!x) return 0;")
# Note that PyType_GetSlot only works on heap-types before 3.10, so not using type slots
# and defining cdef classes as non-heap types is probably impossible
code.putln("#if CYTHON_USE_TYPE_SLOTS || (!CYTHON_USE_TYPE_SPECS && __PYX_LIMITED_VERSION_HEX < 0x030A0000)")
code.putln(
"r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);")
code.putln("#else")
code.putln("r = ((binaryfunc)PyType_GetSlot(Py_TYPE(o), Py_mp_subscript))(o, x);")
code.putln("#endif")
code.putln(
"Py_DECREF(x);")
code.putln(
"return r;")
code.putln(
"}")
code.exit_cfunc_scope()
def generate_ass_subscript_function(self, scope, code):
# Setting and deleting an item are both done through
# the ass_subscript method, so we dispatch to user's __setitem__
# or __delitem__, or raise an exception.
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setitem__")
del_entry = scope.lookup_here("__delitem__")
code.start_slotfunc(scope, PyrexTypes.c_returncode_type, "mp_ass_subscript", "PyObject *o, PyObject *i, PyObject *v")
code.putln(
"if (v) {")
if set_entry:
code.putln("return %s(o, i, v);" % set_entry.func_cname)
else:
code.putln(
"__Pyx_TypeName o_type_name;")
self.generate_guarded_basetype_call(
base_type, "tp_as_mapping", "mp_ass_subscript", "objobjargproc", "o, i, v", code)
code.putln(
"o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));")
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "Subscript assignment not supported by " __Pyx_FMT_TYPENAME, o_type_name);')
code.putln(
"__Pyx_DECREF_TypeName(o_type_name);")
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, i);" % (
del_entry.func_cname))
else:
code.putln(
"__Pyx_TypeName o_type_name;")
self.generate_guarded_basetype_call(
base_type, "tp_as_mapping", "mp_ass_subscript", "objobjargproc", "o, i, v", code)
code.putln(
"o_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(o));")
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name);')
code.putln(
"__Pyx_DECREF_TypeName(o_type_name);")
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
code.exit_cfunc_scope()
def generate_guarded_basetype_call(
self, base_type, substructure, slot, functype, args, code):
if base_type:
base_tpname = code.typeptr_cname_in_module_state(base_type)
# Note that the limited API versions will only work for non-heaptypes on Python3.10+.
# I think that's unavoidable and the best we can do.
if substructure:
code.putln(
f"{functype} f = __Pyx_PyType_TryGetSubSlot({base_tpname}, {substructure}, {slot}, {functype});")
else:
code.putln(
f"{functype} f = __Pyx_PyType_TryGetSlot({base_tpname}, {slot}, {functype});")
code.putln("if (f)")
code.putln(f"return f({args});")
    def generate_richcmp_function(self, scope, code):
        """Generate tp_richcompare from the Python comparison special methods.

        Collects __eq__/__ne__/__lt__/... from this class and its non-extern
        base classes, optionally synthesises missing operators under the
        ``total_ordering`` directive, derives NE from EQ when only EQ is
        defined, and otherwise falls back to an extern parent's slot or
        Py_NotImplemented.
        """
        if scope.lookup_here("__richcmp__"):
            # user implemented, nothing to do
            return
        # otherwise, we have to generate it from the Python special methods
        code.start_slotfunc(scope, PyrexTypes.py_objptr_type, "tp_richcompare", "PyObject *o1, PyObject *o2, int op")
        code.putln("switch (op) {")
        # Collect this class plus all non-extern base-class scopes.
        class_scopes = []
        cls = scope.parent_type
        while cls is not None and not cls.entry.visibility == 'extern':
            class_scopes.append(cls.scope)
            cls = cls.scope.parent_type.base_type
        assert scope in class_scopes
        extern_parent = None
        if cls and cls.entry.visibility == 'extern':
            # need to call up into base classes as we may not know all implemented comparison methods
            extern_parent = cls if cls.typeptr_cname else scope.parent_type.base_type
        total_ordering = 'total_ordering' in scope.directives
        # Map each special method name to its nearest defining class entry.
        comp_entry = {}
        for cmp_method in TypeSlots.richcmp_special_methods:
            for class_scope in class_scopes:
                entry = class_scope.lookup_here(cmp_method)
                if entry is not None:
                    comp_entry[cmp_method] = entry
                    break
        if total_ordering:
            # Check this is valid - we must have at least 1 operation defined.
            comp_names = [from_name for from_name, to_name in TOTAL_ORDERING if from_name in comp_entry]
            if not comp_names:
                if '__eq__' not in comp_entry and '__ne__' not in comp_entry:
                    warning(scope.parent_type.pos,
                            "total_ordering directive used, but no comparison and equality methods defined")
                else:
                    warning(scope.parent_type.pos,
                            "total_ordering directive used, but no comparison methods defined")
                total_ordering = False
            else:
                if '__eq__' not in comp_entry and '__ne__' not in comp_entry:
                    warning(scope.parent_type.pos, "total_ordering directive used, but no equality method defined")
                    total_ordering = False
                # Same priority as functools, prefers
                # __lt__ to __le__ to __gt__ to __ge__
                ordering_source = max(comp_names)
        for cmp_method in TypeSlots.richcmp_special_methods:
            cmp_type = cmp_method.strip('_').upper()  # e.g. "__eq__" -> EQ
            entry = comp_entry.get(cmp_method)
            if entry is None and (not total_ordering or cmp_type in ('NE', 'EQ')):
                # No definition, fall back to superclasses.
                # eq/ne methods shouldn't use the total_ordering code.
                continue
            code.putln("case Py_%s: {" % cmp_type)
            if entry is None:
                assert total_ordering
                # We need to generate this from the other methods.
                invert_comp, comp_op, invert_equals = TOTAL_ORDERING[ordering_source, cmp_method]
                # First we always do the comparison.
                code.putln("PyObject *ret;")
                code.putln("ret = %s(o1, o2);" % comp_entry[ordering_source].func_cname)
                code.putln("if (likely(ret && ret != Py_NotImplemented)) {")
                code.putln("int order_res = __Pyx_PyObject_IsTrue(ret);")
                code.putln("Py_DECREF(ret);")
                code.putln("if (unlikely(order_res < 0)) return NULL;")
                # We may need to check equality too. For some combos it's never required.
                if invert_equals is not None:
                    # Implement the and/or check with an if.
                    if comp_op == '&&':
                        code.putln("if (%s order_res) {" % ('!!' if invert_comp else '!'))
                        code.putln("ret = __Pyx_NewRef(Py_False);")
                        code.putln("} else {")
                    elif comp_op == '||':
                        code.putln("if (%s order_res) {" % ('!' if invert_comp else ''))
                        code.putln("ret = __Pyx_NewRef(Py_True);")
                        code.putln("} else {")
                    else:
                        raise AssertionError('Unknown op %s' % (comp_op, ))
                    if '__eq__' in comp_entry:
                        eq_func = '__eq__'
                    else:
                        # Fall back to NE, which is defined here.
                        eq_func = '__ne__'
                        invert_equals = not invert_equals
                    code.putln("ret = %s(o1, o2);" % comp_entry[eq_func].func_cname)
                    code.putln("if (likely(ret && ret != Py_NotImplemented)) {")
                    code.putln("int eq_res = __Pyx_PyObject_IsTrue(ret);")
                    code.putln("Py_DECREF(ret);")
                    code.putln("if (unlikely(eq_res < 0)) return NULL;")
                    if invert_equals:
                        code.putln("ret = eq_res ? Py_False : Py_True;")
                    else:
                        code.putln("ret = eq_res ? Py_True : Py_False;")
                    code.putln("Py_INCREF(ret);")
                    code.putln("}")  # equals success
                    code.putln("}")  # Needs to try equals
                else:
                    # Convert direct to a boolean.
                    if invert_comp:
                        code.putln("ret = order_res ? Py_False : Py_True;")
                    else:
                        code.putln("ret = order_res ? Py_True : Py_False;")
                    code.putln("Py_INCREF(ret);")
                code.putln("}")  # comp_op
                code.putln("return ret;")
            else:
                code.putln("return %s(o1, o2);" % entry.func_cname)
            code.putln("}")  # Case
        if '__eq__' in comp_entry and '__ne__' not in comp_entry and not extern_parent:
            # Derive NE by negating the user's EQ result.
            code.putln("case Py_NE: {")
            code.putln("PyObject *ret;")
            # Python itself does not do this optimisation, it seems...
            #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);")
            code.putln("ret = %s(o1, o2);" % comp_entry['__eq__'].func_cname)
            code.putln("if (likely(ret && ret != Py_NotImplemented)) {")
            code.putln("int b = __Pyx_PyObject_IsTrue(ret);")
            code.putln("Py_DECREF(ret);")
            code.putln("if (unlikely(b < 0)) return NULL;")
            code.putln("ret = (b) ? Py_False : Py_True;")
            code.putln("Py_INCREF(ret);")
            code.putln("}")
            code.putln("return ret;")
            code.putln("}")
        code.putln("default: {")
        if extern_parent and extern_parent.typeptr_cname:
            code.putln("if (likely(%s->tp_richcompare)) return %s->tp_richcompare(o1, o2, op);" % (
                extern_parent.typeptr_cname, extern_parent.typeptr_cname))
        code.putln("return __Pyx_NewRef(Py_NotImplemented);")
        code.putln("}")
        code.putln("}")  # switch
        code.putln("}")
        code.exit_cfunc_scope()
    def generate_binop_function(self, scope, slot, code, pos):
        """Generate a binary operator slot that dispatches on operand types.

        Unless the ``c_api_binop_methods`` directive maps the slot directly
        to the left method, this emits (via the BinopSlot utility template)
        a function that tries the __op__/__rop__ pair following Python
        semantics.
        """
        func_name = scope.mangle_internal(slot.slot_name)
        if scope.directives['c_api_binop_methods']:
            # C-API semantics: the left slot method handles both directions.
            code.putln('#define %s %s' % (func_name, slot.left_slot.slot_code(scope)))
            return
        if slot.left_slot.signature in (TypeSlots.binaryfunc, TypeSlots.ibinaryfunc):
            slot_type = 'binaryfunc'
            extra_arg = extra_arg_decl = ''
        elif slot.left_slot.signature in (TypeSlots.powternaryfunc, TypeSlots.ipowternaryfunc):
            # pow() variants carry a third argument through.
            slot_type = 'ternaryfunc'
            extra_arg = ', extra_arg'
            extra_arg_decl = ', PyObject* extra_arg'
        else:
            error(pos, "Unexpected type slot signature: %s" % slot)
            return

        def get_slot_method_cname(method_name):
            # C function name of the special method, if defined and special.
            entry = scope.lookup(method_name)
            return entry.func_cname if entry and entry.is_special else None

        def call_slot_method(method_name, reverse):
            # Build the C call expression: direct call when the method
            # exists, otherwise dispatch through the base type's slot.
            func_cname = get_slot_method_cname(method_name)
            if func_cname:
                return "%s(%s%s)" % (
                    func_cname,
                    "right, left" if reverse else "left, right",
                    extra_arg)
            else:
                return '%s_maybe_call_slot(__Pyx_PyType_GetSlot(%s, tp_base, PyTypeObject*), left, right %s)' % (
                    func_name,
                    code.name_in_module_state(scope.parent_type.typeptr_cname),
                    extra_arg)

        if get_slot_method_cname(slot.left_slot.method_name) and not get_slot_method_cname(slot.right_slot.method_name):
            warning(pos, "Extension type implements %s() but not %s(). "
                         "The behaviour has changed from previous Cython versions to match Python semantics. "
                         "You can implement both special methods in a backwards compatible way." % (
                slot.left_slot.method_name,
                slot.right_slot.method_name,
            ))

        code.putln()
        preprocessor_guard = slot.preprocessor_guard_code()
        if preprocessor_guard:
            code.putln(preprocessor_guard)
        code.enter_cfunc_scope(scope)  # C class scope, not function scope
        overloads_left = int(bool(get_slot_method_cname(slot.left_slot.method_name)))
        overloads_right = int(bool(get_slot_method_cname(slot.right_slot.method_name)))
        parent_type_cname = scope.parent_type.typeptr_cname
        if scope.parent_type.is_extension_type:
            parent_type_cname = code.name_in_module_state(parent_type_cname)
        code.putln(
            TempitaUtilityCode.load_as_string(
                "BinopSlot", "ExtensionTypes.c",
                context={
                    "func_name": func_name,
                    "slot_name": slot.slot_name,
                    "overloads_left": overloads_left,
                    "overloads_right": overloads_right,
                    "call_left": call_slot_method(slot.left_slot.method_name, reverse=False),
                    "call_right": call_slot_method(slot.right_slot.method_name, reverse=True),
                    "type_cname": parent_type_cname,
                    "slot_type": slot_type,
                    "extra_arg": extra_arg,
                    "extra_arg_decl": extra_arg_decl,
                })[1])
        code.exit_cfunc_scope()
        if preprocessor_guard:
            code.putln("#endif")
def generate_getattro_function(self, scope, code):
# First try to get the attribute using __getattribute__, if defined, or
# PyObject_GenericGetAttr.
#
# If that raises an AttributeError, call the __getattr__ if defined.
#
# In both cases, defined can be in this class, or any base class.
def lookup_here_or_base(n, tp=None, extern_return=None):
# Recursive lookup
if tp is None:
tp = scope.parent_type
r = tp.scope.lookup_here(n)
if r is None:
if tp.is_external and extern_return is not None:
return extern_return
if tp.base_type is not None:
return lookup_here_or_base(n, tp.base_type)
return r
getattr_entry = lookup_here_or_base("__getattr__")
getattribute_entry = lookup_here_or_base("__getattribute__")
code.start_slotfunc(scope, PyrexTypes.py_objptr_type, "tp_getattro", "PyObject *o, PyObject *n", needs_funcstate=False)
if getattribute_entry is not None:
code.putln(
"PyObject *v = %s(o, n);" % (
getattribute_entry.func_cname))
else:
code.putln(
"PyObject *v = PyObject_GenericGetAttr(o, n);")
if getattr_entry is not None:
code.putln(
"if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
code.putln(
"PyErr_Clear();")
code.putln(
"v = %s(o, n);" % (
getattr_entry.func_cname))
code.putln(
"}")
code.putln(
"return v;")
code.putln(
"}")
code.exit_cfunc_scope()
def generate_setattro_function(self, scope, code):
# Setting and deleting an attribute are both done through
# the setattro method, so we dispatch to user's __setattr__
# or __delattr__ or fall back on PyObject_GenericSetAttr.
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setattr__")
del_entry = scope.lookup_here("__delattr__")
code.start_slotfunc(scope, PyrexTypes.c_returncode_type, "tp_setattro", "PyObject *o, PyObject *n, PyObject *v")
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, n, v);" % (
set_entry.func_cname))
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_setattro", "setattrofunc", "o, n, v", code)
code.putln(
"return PyObject_GenericSetAttr(o, n, v);")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, n);" % (
del_entry.func_cname))
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_setattro", "setattrofunc", "o, n, v", code)
code.putln(
"return PyObject_GenericSetAttr(o, n, 0);")
code.putln(
"}")
code.putln(
"}")
code.exit_cfunc_scope()
def generate_descr_get_function(self, scope, code):
# The __get__ function of a descriptor object can be
# called with NULL for the second or third arguments
# under some circumstances, so we replace them with
# None in that case.
user_get_entry = scope.lookup_here("__get__")
code.start_slotfunc(scope, PyrexTypes.py_objptr_type, "tp_descr_get", "PyObject *o, PyObject *i, PyObject *c", needs_funcstate=False)
code.putln(
"PyObject *r = 0;")
code.putln(
"if (!i) i = Py_None;")
code.putln(
"if (!c) c = Py_None;")
#code.put_incref("i", py_object_type)
#code.put_incref("c", py_object_type)
code.putln(
"r = %s(o, i, c);" % (
user_get_entry.func_cname))
#code.put_decref("i", py_object_type)
#code.put_decref("c", py_object_type)
code.putln(
"return r;")
code.putln(
"}")
code.exit_cfunc_scope()
def generate_descr_set_function(self, scope, code):
# Setting and deleting are both done through the __set__
# method of a descriptor, so we dispatch to user's __set__
# or __delete__ or raise an exception.
base_type = scope.parent_type.base_type
user_set_entry = scope.lookup_here("__set__")
user_del_entry = scope.lookup_here("__delete__")
code.start_slotfunc(scope, PyrexTypes.c_returncode_type, "tp_descr_set", "PyObject *o, PyObject *i, PyObject *v")
code.putln(
"if (v) {")
if user_set_entry:
code.putln(
"return %s(o, i, v);" % (
user_set_entry.func_cname))
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_descr_set", "descrsetfunc", "o, i, v", code)
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if user_del_entry:
code.putln(
"return %s(o, i);" % (
user_del_entry.func_cname))
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_descr_set", "descrsetfunc", "o, i, v", code)
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__delete__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
code.exit_cfunc_scope()
def generate_property_accessors(self, cclass_scope, code):
for entry in cclass_scope.property_entries:
property_scope = entry.scope
if property_scope.defines_any(["__get__"]):
self.generate_property_get_function(entry, code)
if property_scope.defines_any(["__set__", "__del__"]):
self.generate_property_set_function(entry, code)
def generate_property_get_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.getter_cname = property_scope.parent_scope.mangle(
Naming.prop_get_prefix, property_entry.name)
get_entry = property_scope.lookup_here("__get__")
code.putln("")
code.putln(
"static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % (
property_entry.getter_cname))
code.putln(
"return %s(o);" % (
get_entry.func_cname))
code.putln(
"}")
def generate_property_set_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.setter_cname = property_scope.parent_scope.mangle(
Naming.prop_set_prefix, property_entry.name)
set_entry = property_scope.lookup_here("__set__")
del_entry = property_scope.lookup_here("__del__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {" % (
property_entry.setter_cname))
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, v);" % (
set_entry.func_cname))
else:
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o);" % (
del_entry.func_cname))
else:
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__del__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
    def generate_typeobj_spec(self, entry, code):
        """Generate the PyType_Spec, slot array and (when needed) member
        table used to create this extension type from a type spec.
        """
        ext_type = entry.type
        scope = ext_type.scope
        members_slot = TypeSlots.get_slot_by_name("tp_members", code.globalstate.directives)
        members_slot.generate_substructure_spec(scope, code)
        buffer_slot = TypeSlots.get_slot_by_name("tp_as_buffer", code.globalstate.directives)
        if not buffer_slot.is_empty(scope):
            code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
            buffer_slot.generate_substructure(scope, code)
            code.putln("#endif")
        if ext_type.typedef_flag:
            objstruct = ext_type.objstruct_cname
        else:
            objstruct = "struct %s" % ext_type.objstruct_cname
        weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
        if weakref_entry and weakref_entry.is_inherited:
            weakref_entry = None  # only generate it for the defining class
        generate_members = bool(weakref_entry)
        if generate_members:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("IncludeStructmemberH", "ModuleSetupCode.c"))
            code.putln("static PyMemberDef %s_members[] = {" % ext_type.typeobj_cname)
            code.putln("#if !CYTHON_USE_TYPE_SLOTS")
            if weakref_entry:
                # Note that unlike the assignment of tp_weaklistoffset in the type-ready code
                # used in the non-limited API case, this doesn't preserve the weaklistoffset
                # from base classes.
                # Practically that doesn't matter, but it isn't exactly the identical.
                code.putln('{"__weaklistoffset__", T_PYSSIZET, offsetof(%s, %s), READONLY, 0},'
                           % (objstruct, weakref_entry.cname))
            code.putln("#endif")
            code.putln("{0, 0, 0, 0, 0}")
            code.putln("};")
        code.putln("static PyType_Slot %s_slots[] = {" % ext_type.typeobj_cname)
        for slot in TypeSlots.get_slot_table(code.globalstate.directives):
            slot.generate_spec(scope, code)
        if generate_members:
            code.putln("{Py_tp_members, (void*)%s_members}," % ext_type.typeobj_cname)
        code.putln("{0, 0},")
        code.putln("};")
        classname = scope.class_name.as_c_string_literal()
        code.putln("static PyType_Spec %s_spec = {" % ext_type.typeobj_cname)
        # The C string literal's quotes are stripped: the spec name must be
        # a plain "module.Class" string.
        code.putln('"%s.%s",' % (self.full_module_name, classname.replace('"', '')))
        code.putln("sizeof(%s)," % objstruct)
        code.putln("0,")  # itemsize
        code.putln("%s," % TypeSlots.get_slot_by_name("tp_flags", scope.directives).slot_code(scope))
        code.putln("%s_slots," % ext_type.typeobj_cname)
        code.putln("};")
    def generate_typeobj_definition(self, modname, entry, code):
        """Emit the static PyTypeObject definition for an extension type.

        First generates the slot substructures (PyNumberMethods etc.) that the
        type object points to, then the PyTypeObject initialiser itself, with
        each slot line produced by the slot table for the active directives.
        """
        type = entry.type
        scope = type.scope
        # Substructures must be defined before the type object that references them.
        for suite in TypeSlots.get_slot_table(code.globalstate.directives).substructures:
            suite.generate_substructure(scope, code)
        code.putln("")
        # Public types are exported from the shared library.
        if entry.visibility == 'public':
            header = "DL_EXPORT(PyTypeObject) %s = {"
        else:
            header = "static PyTypeObject %s = {"
        #code.putln(header % scope.parent_type.typeobj_cname)
        code.putln(header % type.typeobj_cname)
        code.putln(
            "PyVarObject_HEAD_INIT(0, 0)")
        classname = scope.class_name.as_c_string_literal()
        # tp_name is the fully qualified "<module>.<class>" name.
        code.putln(
            '"%s."%s, /*tp_name*/' % (
                self.full_module_name,
                classname))
        if type.typedef_flag:
            objstruct = type.objstruct_cname
        else:
            objstruct = "struct %s" % type.objstruct_cname
        code.putln(
            "sizeof(%s), /*tp_basicsize*/" % objstruct)
        code.putln(
            "0, /*tp_itemsize*/")
        # Each slot descriptor emits its own initialiser line (or a default).
        for slot in TypeSlots.get_slot_table(code.globalstate.directives):
            slot.generate(scope, code)
        code.putln(
            "};")
def generate_method_table(self, env, code):
if env.is_c_class_scope and not env.pyfunc_entries:
return
binding = env.directives['binding']
code.putln("")
wrapper_code_writer = code.insertion_point()
code.putln(
"static PyMethodDef %s[] = {" % (
env.method_table_cname))
for entry in env.pyfunc_entries:
if not entry.fused_cfunction and not (binding and entry.is_overridable):
code.put_pymethoddef(entry, ",", wrapper_code_writer=wrapper_code_writer)
code.putln(
"{0, 0, 0, 0}")
code.putln(
"};")
if wrapper_code_writer.getvalue():
wrapper_code_writer.putln("")
def generate_dict_getter_function(self, scope, code):
dict_attr = scope.lookup_here("__dict__")
if not dict_attr or not dict_attr.is_variable:
return
func_name = scope.mangle_internal("__dict__getter")
dict_name = dict_attr.cname
code.putln("")
code.putln("#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000")
code.putln("static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % func_name)
self.generate_self_cast(scope, code)
code.putln("if (unlikely(!p->%s)){" % dict_name)
code.putln("p->%s = PyDict_New();" % dict_name)
code.putln("}")
code.putln("Py_XINCREF(p->%s);" % dict_name)
code.putln("return p->%s;" % dict_name)
code.putln("}")
code.putln("#else")
# PyObject_GenericGetDict has the advantage that it's freethreading thread-safe,
# handles both managed and unmanaged dicts (in case we switch to managed in future),
# and can potentially do optimizations with per-class shared keys.
code.putln(f"#define {func_name} PyObject_GenericGetDict")
code.putln("#endif")
def generate_getset_table(self, env, code):
if env.property_entries:
code.putln("")
code.putln(
"static struct PyGetSetDef %s[] = {" %
env.getset_table_cname)
for entry in env.property_entries:
doc = entry.doc
if doc:
if doc.is_unicode:
doc = doc.as_utf8_string()
doc_code = "PyDoc_STR(%s)" % doc.as_c_string_literal()
else:
doc_code = "0"
code.putln(
'{%s, %s, %s, %s, 0},' % (
entry.name.as_c_string_literal(),
entry.getter_cname or "0",
entry.setter_cname or "0",
doc_code))
code.putln(
"{0, 0, 0, 0, 0}")
code.putln(
"};")
def create_import_star_conversion_utility_code(self, env):
# Create all conversion helpers that are needed for "import *" assignments.
# Must be done before code generation to support CythonUtilityCode.
for name, entry in sorted(env.entries.items()):
if entry.is_cglobal and entry.used:
if not entry.type.is_pyobject:
entry.type.create_from_py_utility_code(env)
    def generate_import_star(self, env, code):
        """Generate the C helper that implements "from <this module> import *" targets.

        The emitted function assigns one imported (name, object) pair into this
        module: C globals are converted from Python with a type check, C-level
        type names are protected from being overwritten, and unknown names fall
        back to a plain module attribute assignment.
        """
        env.use_utility_code(UtilityCode.load_cached("CStringEquals", "StringTools.c"))
        code.start_initcfunc(
            f"int {Naming.import_star_set}("
            f"{Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname},"
            "PyObject *o, PyObject* py_name, const char *name)")
        # C-level type names must never be shadowed by "import *" assignments.
        code.putln("static const char* internal_type_names[] = {")
        for name, entry in sorted(env.entries.items()):
            if entry.is_type:
                code.putln('"%s",' % name)
        code.putln("0")
        code.putln("};")
        code.putln("const char** type_name = internal_type_names;")
        code.putln("while (*type_name) {")
        code.putln("if (__Pyx_StrEq(name, *type_name)) {")
        code.putln('PyErr_Format(PyExc_TypeError, "Cannot overwrite C type %s", name);')
        code.putln('goto bad;')
        code.putln("}")
        code.putln("type_name++;")
        code.putln("}")
        old_error_label = code.new_error_label()
        code.putln("if (0);") # so the first one can be "else if"
        msvc_count = 0
        for name, entry in sorted(env.entries.items()):
            if entry.is_cglobal and entry.used and not entry.type.is_const:
                msvc_count += 1
                # MSVC limits the depth of if/else-if chains (error C1061),
                # so restart the chain every 100 entries.
                if msvc_count % 100 == 0:
                    code.putln("#ifdef _MSC_VER")
                    code.putln("if (0); /* Workaround for MSVC C1061. */")
                    code.putln("#endif")
                code.putln('else if (__Pyx_StrEq(name, "%s")) {' % name)
                if entry.type.is_pyobject:
                    # Python-object globals: type-check extension/builtin types,
                    # then swap in the new reference.
                    if entry.type.is_extension_type or entry.type.is_builtin_type:
                        type_test = entry.type.type_test_code(
                            env, "o")
                        code.putln("if (!(%s)) %s;" % (
                            type_test,
                            code.error_goto(entry.pos)))
                    code.putln("Py_INCREF(o);")
                    code.put_decref(entry.cname, entry.type, nanny=False)
                    code.putln("%s = %s;" % (
                        entry.cname,
                        PyrexTypes.typecast(entry.type, py_object_type, "o")))
                elif entry.type.create_from_py_utility_code(env):
                    # if available, utility code was already created in self.prepare_utility_code()
                    code.putln(entry.type.from_py_call_code(
                        'o', entry.cname, entry.pos, code))
                else:
                    # No conversion available for this C type.
                    code.putln('PyErr_Format(PyExc_TypeError, "Cannot convert Python object %s to %s");' % (
                        name, entry.type))
                    code.putln(code.error_goto(entry.pos))
                code.putln("}")
        # Names that are not C globals simply become module attributes.
        code.putln("else {")
        code.putln("if (PyObject_SetAttr(%s, py_name, o) < 0) goto bad;" % Naming.module_cname)
        code.putln("}")
        code.putln("return 0;")
        if code.label_used(code.error_label):
            code.put_label(code.error_label)
            # This helps locate the offending name.
            code.put_add_traceback(EncodedString(self.full_module_name))
        code.error_label = old_error_label
        code.putln("bad:")
        code.putln("return -1;")
        code.putln("}")
        code.putln("")
        code.put_code_here(UtilityCode.load("ImportStar", "ImportExport.c"))
        code.exit_cfunc_scope() # done with labels
def generate_module_state_start(self, env, code):
# TODO: Refactor to move module state struct decl closer to the static decl
code.putln("#ifdef __cplusplus")
code.putln("namespace {")
code.putln("#endif")
code.putln('typedef struct {')
code.putln('PyObject *%s;' % env.module_dict_cname)
code.putln('PyObject *%s;' % Naming.builtins_cname)
code.putln('PyObject *%s;' % Naming.cython_runtime_cname)
code.putln('PyObject *%s;' % Naming.empty_tuple)
code.putln('PyObject *%s;' % Naming.empty_bytes)
code.putln('PyObject *%s;' % Naming.empty_unicode)
if Options.pre_import is not None:
code.putln('PyObject *%s;' % Naming.preimport_cname)
    def generate_module_state_end(self, env, modules, globalstate):
        """Close the module-state struct and define how the state is accessed.

        Completes the struct opened by generate_module_state_start() and closes
        the clear/traverse functions.  With CYTHON_USE_MODULE_STATE the state is
        looked up via PyState_FindModule(); otherwise a plain static instance is
        used.
        """
        module_state = globalstate['module_state_end']
        module_state_clear = globalstate['module_state_clear_end']
        module_state_traverse = globalstate['module_state_traverse_end']
        module_state.putln('} %s;' % Naming.modulestatetype_cname)
        module_state.putln("#ifdef __cplusplus")
        module_state.putln("} /* anonymous namespace */")
        module_state.putln("#endif")
        module_state.putln('')
        globalstate.use_utility_code(
            UtilityCode.load("MultiPhaseInitModuleState", "ModuleSetupCode.c")
        )
        module_state.putln("#if CYTHON_USE_MODULE_STATE")
        module_state.putln('#ifdef __cplusplus')
        module_state.putln('namespace {')
        module_state.putln('extern struct PyModuleDef %s;' % Naming.pymoduledef_cname)
        module_state.putln('} /* anonymous namespace */')
        module_state.putln('#else')
        module_state.putln('static struct PyModuleDef %s;' % Naming.pymoduledef_cname)
        module_state.putln('#endif')
        module_state.putln('')
        # Dynamic lookup of the module state through the interpreter state.
        module_state.putln('#define %s (__Pyx_PyModule_GetState(__Pyx_State_FindModule(&%s)))' % (
            Naming.modulestateglobal_cname,
            Naming.pymoduledef_cname))
        module_state.putln('')
        module_state.putln('#define %s (__Pyx_State_FindModule(&%s))' % (
            env.module_cname,
            Naming.pymoduledef_cname))
        module_state.putln("#else")
        # Without module-state support, fall back to a single static instance.
        module_state.putln('static %s %s_static =' % (
            Naming.modulestatetype_cname,
            Naming.modulestateglobal_cname
        ))
        module_state.putln('#ifdef __cplusplus')
        # C++ likes to be initialized with {} to avoid "missing initializer" warnings
        # but it isn't valid C
        module_state.putln(' {};')
        module_state.putln('#else')
        module_state.putln(' {0};')
        module_state.putln('#endif')
        module_state.putln('static %s * const %s = &%s_static;' % (
            Naming.modulestatetype_cname,
            Naming.modulestateglobal_cname,
            Naming.modulestateglobal_cname
        ))
        module_state.putln("#endif")
        # Close the function bodies opened by generate_module_state_clear()
        # and generate_module_state_traverse().
        module_state_clear.putln("return 0;")
        module_state_clear.putln("}")
        module_state_clear.putln("#endif")
        module_state_traverse.putln("return 0;")
        module_state_traverse.putln("}")
        module_state_traverse.putln("#endif")
def generate_module_state_clear(self, env, code):
code.putln("#if CYTHON_USE_MODULE_STATE")
code.putln("static CYTHON_SMALL_CODE int %s_clear(PyObject *m) {" % Naming.module_cname)
code.putln(f"{Naming.modulestatetype_cname} *clear_module_state = __Pyx_PyModule_GetState(m);")
code.putln("if (!clear_module_state) return 0;")
code.putln('Py_CLEAR(clear_module_state->%s);' %
env.module_dict_cname)
code.putln('Py_CLEAR(clear_module_state->%s);' %
Naming.builtins_cname)
code.putln('Py_CLEAR(clear_module_state->%s);' %
Naming.cython_runtime_cname)
code.putln('Py_CLEAR(clear_module_state->%s);' %
Naming.empty_tuple)
code.putln('Py_CLEAR(clear_module_state->%s);' %
Naming.empty_bytes)
code.putln('Py_CLEAR(clear_module_state->%s);' %
Naming.empty_unicode)
code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
# In this case we have to remove the module from our lookup table ourself
# because Python isn't going to do it.
code.putln("__Pyx_State_RemoveModule(NULL);")
code.putln("#endif")
def generate_module_state_traverse(self, env, code):
code.putln("#if CYTHON_USE_MODULE_STATE")
code.putln("static CYTHON_SMALL_CODE int %s_traverse(PyObject *m, visitproc visit, void *arg) {" % Naming.module_cname)
code.putln(f"{Naming.modulestatetype_cname} *traverse_module_state = __Pyx_PyModule_GetState(m);")
code.putln("if (!traverse_module_state) return 0;")
code.putln(f'Py_VISIT(traverse_module_state->{env.module_dict_cname});')
code.putln(f'Py_VISIT(traverse_module_state->{Naming.builtins_cname});')
code.putln(f'Py_VISIT(traverse_module_state->{Naming.cython_runtime_cname});')
code.putln(f'__Pyx_VISIT_CONST(traverse_module_state->{Naming.empty_tuple});')
code.putln(f'__Pyx_VISIT_CONST(traverse_module_state->{Naming.empty_bytes});')
code.putln(f'__Pyx_VISIT_CONST(traverse_module_state->{Naming.empty_unicode});')
    def generate_module_init_func(self, imported_modules, shared_utility_exporter, env, code):
        """Generate the module init entry point and the module exec code.

        With PEP 489 multi-phase init, the PyInit function only returns the
        PyModuleDef and the real initialisation code lives in the Py_mod_exec
        function; without it, a single function creates and initialises the
        module.  The module body's execution code is generated into this
        function as well.
        """
        subfunction = self.mod_init_subfunction(self.pos, self.scope, code)
        self.generate_pymoduledef_struct(env, code)
        code.enter_cfunc_scope(self.scope)
        code.putln("")
        code.put_code_here(UtilityCode.load("PyModInitFuncType", "ModuleSetupCode.c"))
        modinit_func_name = EncodedString(f"PyInit_{env.module_name}")
        header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env)
        # Optimise for small code size as the module init function is only executed once.
        code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3)
        if self.scope.is_package:
            # Packages additionally export PyInit___init__ on Windows.
            code.putln("#if !defined(CYTHON_NO_PYINIT_EXPORT) && (defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS))")
            code.putln("__Pyx_PyMODINIT_FUNC PyInit___init__(void) { return %s(); }" % (
                self.mod_init_func_cname('PyInit', env)))
            code.putln("#endif")
        # Hack for a distutils bug - https://bugs.python.org/issue39432
        # distutils attempts to make visible a slightly wrong PyInitU module name. Just create a dummy
        # function to keep it quiet
        wrong_punycode_module_name = self.wrong_punycode_module_name(env.module_name)
        if wrong_punycode_module_name:
            code.putln("#if !defined(CYTHON_NO_PYINIT_EXPORT) && (defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS))")
            code.putln("void %s(void) {} /* workaround for https://bugs.python.org/issue39432 */" % wrong_punycode_module_name)
            code.putln("#endif")
        code.putln(header3)
        # CPython 3.5+ supports multi-phase module initialisation (gives access to __spec__, __file__, etc.)
        code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
        code.putln("{")
        code.putln("return PyModuleDef_Init(&%s);" % Naming.pymoduledef_cname)
        code.putln("}")
        mod_create_func = UtilityCode.load("ModuleCreationPEP489", "ModuleSetupCode.c")
        code.put_code_here(mod_create_func)
        code.putln("")
        # main module init code lives in Py_mod_exec function, not in PyInit function
        code.putln("static CYTHON_SMALL_CODE int %s(PyObject *%s)" % (
            self.module_init_func_cname(),
            Naming.pymodinit_module_arg))
        code.putln("#endif") # PEP489
        # start of module init/exec function (pre/post PEP 489)
        code.putln("{")
        code.putln('int stringtab_initialized = 0;')
        code.putln("#if CYTHON_USE_MODULE_STATE")
        code.putln('int pystate_addmodule_run = 0;')
        code.putln("#endif")
        code.putln(f"{Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname} = NULL;")
        # Temp declarations are filled in at the end, once all temps are known.
        tempdecl_code = code.insertion_point()
        profile = code.globalstate.directives['profile']
        linetrace = code.globalstate.directives['linetrace']
        if profile or linetrace:
            if linetrace:
                code.use_fast_gil_utility_code()
            code.globalstate.use_utility_code(UtilityCode.load_cached("Profile", "Profile.c"))
        code.put_declare_refcount_context()
        code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
        # Most extension modules simply can't deal with it, and Cython isn't ready either.
        # See issues listed here: https://docs.python.org/3/c-api/init.html#sub-interpreter-support
        code.putln("if (%s) {" % Naming.module_cname)
        # Hack: enforce single initialisation.
        code.putln("if (%s == %s) return 0;" % (
            Naming.module_cname,
            Naming.pymodinit_module_arg,
        ))
        code.putln('PyErr_SetString(PyExc_RuntimeError,'
                   ' "Module \'%s\' has already been imported. Re-initialisation is not supported.");' %
                   env.module_name.as_c_string_literal()[1:-1])
        code.putln("return -1;")
        code.putln("}")
        code.putln("#else")
        # Hack: enforce single initialisation also on reimports under different names (with PEP 3121/489).
        code.putln("if (%s) return __Pyx_NewRef(%s);" % (
            Naming.module_cname,
            Naming.module_cname,
        ))
        code.putln("#endif")
        code.putln("/*--- Module creation code ---*/")
        self.generate_module_creation_code(env, code)
        if profile or linetrace:
            tempdecl_code.put_trace_declarations()
            code.put_trace_frame_init()
        refnanny_import_code = UtilityCode.load("ImportRefnannyAPI", "ModuleSetupCode.c")
        code.put_code_here(refnanny_import_code)
        code.put_setup_refcount_context(modinit_func_name)
        code.putln("__Pyx_init_runtime_version();")
        env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c"))
        code.put_error_if_neg(self.pos, "__Pyx_check_binary_version("
            "__PYX_LIMITED_VERSION_HEX, "
            "__Pyx_get_runtime_version(), "
            "CYTHON_COMPILING_IN_LIMITED_API)"
        )
        # Create the shared empty-constant singletons first; later init code uses them.
        empty_tuple = code.name_in_main_c_code_module_state(Naming.empty_tuple)
        code.putln("%s = PyTuple_New(0); %s" % (
            empty_tuple, code.error_goto_if_null(empty_tuple, self.pos)))
        empty_bytes = code.name_in_main_c_code_module_state(Naming.empty_bytes)
        code.putln("%s = PyBytes_FromStringAndSize(\"\", 0); %s" % (
            empty_bytes, code.error_goto_if_null(empty_bytes, self.pos)))
        empty_unicode = code.name_in_main_c_code_module_state(Naming.empty_unicode)
        code.putln("%s = PyUnicode_FromStringAndSize(\"\", 0); %s" % (
            empty_unicode, code.error_goto_if_null(empty_unicode, self.pos)))
        code.putln("/*--- Library function declarations ---*/")
        if env.directives['np_pythran']:
            code.put_error_if_neg(self.pos, "_import_array()")
        code.putln("/*--- Initialize various global constants etc. ---*/")
        code.put_error_if_neg(self.pos, f"__Pyx_InitConstants({Naming.modulestatevalue_cname})")
        # From here on, the stringtab exists, so adding tracebacks is safe (see error path below).
        code.putln("stringtab_initialized = 1;")
        code.put_error_if_neg(self.pos, "__Pyx_InitGlobals()") # calls any utility code
        code.putln("if (%s) {" % self.is_main_module_flag_cname())
        code.put_error_if_neg(self.pos, 'PyObject_SetAttr(%s, %s, %s)' % (
            env.module_cname,
            code.intern_identifier(EncodedString("__name__")),
            code.intern_identifier(EncodedString("__main__"))))
        code.putln("}")
        # set up __file__ and __path__, then add the module to sys.modules
        self.generate_module_import_setup(env, code)
        if Options.cache_builtins:
            code.putln("/*--- Builtin init code ---*/")
            code.put_error_if_neg(
                self.pos,
                f"__Pyx_InitCachedBuiltins({Naming.modulestatevalue_cname})")
        code.putln("/*--- Constants init code ---*/")
        code.put_error_if_neg(
            self.pos,
            f"__Pyx_InitCachedConstants({Naming.modulestatevalue_cname})")
        # code objects come after the other globals (since they use strings and tuples)
        code.put_error_if_neg(
            self.pos,
            f"__Pyx_CreateCodeObjects({Naming.modulestatevalue_cname})")
        # The following sections are generated into separate C subfunctions
        # (see mod_init_subfunction) to keep the init function small.
        code.putln("/*--- Global type/function init code ---*/")
        with subfunction("Global init code") as inner_code:
            self.generate_global_init_code(env, inner_code)
        with subfunction("Variable export code") as inner_code:
            self.generate_c_variable_export_code(env, inner_code)
        with subfunction("Function export code") as inner_code:
            self.generate_c_function_export_code(env, inner_code)
        shared_utility_exporter.call_export_code(code)
        with subfunction("Type init code") as inner_code:
            self.generate_type_init_code(env, inner_code)
        with subfunction("Type import code") as inner_code:
            for module in imported_modules:
                self.generate_type_import_code_for_module(module, env, inner_code)
        with subfunction("Variable import code") as inner_code:
            for module in imported_modules:
                self.generate_c_variable_import_code_for_module(module, env, inner_code)
        with subfunction("Function import code") as inner_code:
            for module in imported_modules:
                self.specialize_fused_types(module)
                self.generate_c_function_import_code_for_module(module, env, inner_code)
        shared_utility_exporter.call_import_code(code)
        # The module body (the Cython code outside of functions) runs here.
        code.putln("/*--- Execution code ---*/")
        code.mark_pos(None)
        if profile or linetrace:
            assert code.funcstate.gil_owned
            code.put_trace_start(modinit_func_name, self.pos)
            code.funcstate.can_trace = True
        code.mark_pos(None)
        self.body.generate_execution_code(code)
        code.mark_pos(None)
        if profile or linetrace:
            code.funcstate.can_trace = False
            assert code.funcstate.gil_owned
            code.put_trace_return("Py_None", pos=self.pos)
            code.put_trace_exit()
        code.putln()
        code.putln("/*--- Wrapped vars code ---*/")
        self.generate_wrapped_entries_code(env, code)
        code.putln()
        if Options.generate_cleanup_code:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RegisterModuleCleanup", "ModuleSetupCode.c"))
            code.putln("if (__Pyx_RegisterCleanup()) %s" % code.error_goto(self.pos))
        code.put_goto(code.return_label)
        # --- Error path: decref temps, report, and tear the module back down. ---
        code.put_label(code.error_label)
        for cname, type in code.funcstate.all_managed_temps():
            code.put_xdecref(cname, type)
        if profile or linetrace:
            code.put_trace_exception_propagating()
            code.put_trace_unwind(self.pos)
        code.putln('if (%s) {' % env.module_cname)
        code.putln(
            f'if ({code.name_in_main_c_code_module_state(env.module_dict_cname)} && stringtab_initialized) {{')
        # We can run into errors before the module or stringtab are initialized.
        # In this case it is not safe to add a traceback (because it uses the stringtab)
        code.put_add_traceback(EncodedString("init %s" % env.qualified_name))
        code.globalstate.use_utility_code(Nodes.traceback_utility_code)
        # Module reference and module dict are in global variables which might still be needed
        # for cleanup, atexit code, etc., so leaking is better than crashing.
        # At least clearing the module dict here might be a good idea, but could still break
        # user code in atexit or other global registries.
        ##code.put_decref_clear(env.module_dict_cname, py_object_type, nanny=False)
        code.putln('}')
        code.putln("#if !CYTHON_USE_MODULE_STATE")
        code.put_decref_clear(env.module_cname, py_object_type, nanny=False, clear_before_decref=True)
        code.putln("#else")
        # This section is mainly for the limited API. env.module_cname still owns a reference so
        # decrement that
        code.put_decref(env.module_cname, py_object_type, nanny=False)
        # Also remove the failed module from the module state lookup
        # fetch/restore the error indicator because PyState_RemoveModule might fail itself
        code.putln("if (pystate_addmodule_run) {")
        code.putln("PyObject *tp, *value, *tb;")
        code.putln("PyErr_Fetch(&tp, &value, &tb);")
        code.putln("PyState_RemoveModule(&%s);" % Naming.pymoduledef_cname)
        code.putln("PyErr_Restore(tp, value, tb);")
        code.putln("}")
        code.putln("#endif")
        code.putln('} else if (!PyErr_Occurred()) {')
        code.putln('PyErr_SetString(PyExc_ImportError, "init %s");' %
                   env.qualified_name.as_c_string_literal()[1:-1])
        code.putln('}')
        code.put_label(code.return_label)
        code.put_finish_refcount_context()
        code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
        code.putln("return (%s != NULL) ? 0 : -1;" % env.module_cname)
        code.putln("#else")
        code.putln("return %s;" % env.module_cname)
        code.putln("#endif")
        code.putln('}')
        # Now that all temps are known, fill in the declaration point reserved above.
        tempdecl_code.put_temp_declarations(code.funcstate)
        code.exit_cfunc_scope()
    def mod_init_subfunction(self, pos, scope, orig_code):
        """
        Return a context manager that allows deviating the module init code generation
        into a separate function and instead inserts a call to it.

        Can be reused sequentially to create multiple functions.
        The functions get inserted at the point where the context manager was created.
        The call gets inserted where the context manager is used (on entry).
        """
        # All subfunctions are emitted at this fixed point, captured by the closure.
        function_code = orig_code.insertion_point()

        class ModInitSubfunction:
            def __init__(self, code_type):
                # Derive a valid C identifier from the human-readable description.
                cname = '_'.join(code_type.lower().split())
                assert re.match("^[a-z0-9_]+$", cname)
                self.cfunc_name = "__Pyx_modinit_%s" % cname
                self.description = code_type
                self.tempdecl_code = None
                self.call_code = None

            def set_call_code(self, code):
                # Optionally redirect where the call to the subfunction is inserted.
                self.call_code = code.insertion_point()

            def __enter__(self):
                if self.call_code is None:
                    self.call_code = orig_code.insertion_point()
                code = function_code
                code.start_initcfunc(
                    f"int {self.cfunc_name}({Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname})",
                    scope, refnanny=True)
                code.putln(f"CYTHON_UNUSED_VAR({Naming.modulestatevalue_cname});")
                # Reserve a point for temp declarations; filled in on __exit__.
                self.tempdecl_code = code.insertion_point()
                code.put_setup_refcount_context(EncodedString(self.cfunc_name))
                # Leave a grepable marker that makes it easy to find the generator source.
                code.putln("/*--- %s ---*/" % self.description)
                return code

            def __exit__(self, exc_type, exc_value, exc_tb):
                if exc_type is not None:
                    # Don't generate any code or do any validations on errors.
                    self.tempdecl_code = self.call_code = None
                    return
                code = function_code
                code.put_finish_refcount_context()
                code.putln("return 0;")
                self.tempdecl_code.put_temp_declarations(code.funcstate)
                self.tempdecl_code = None
                needs_error_handling = code.label_used(code.error_label)
                if needs_error_handling:
                    code.put_label(code.error_label)
                    for cname, type in code.funcstate.all_managed_temps():
                        code.put_xdecref(cname, type)
                    code.put_finish_refcount_context()
                    code.putln("return -1;")
                code.putln("}")
                code.exit_cfunc_scope()
                # Only check the return value if the subfunction can actually fail.
                if needs_error_handling:
                    self.call_code.putln(
                        self.call_code.error_goto_if_neg("%s(%s)" % (
                            self.cfunc_name, Naming.modulestatevalue_cname), pos))
                else:
                    self.call_code.putln(
                        f"(void){self.cfunc_name}({Naming.modulestatevalue_cname});")
                self.call_code = None

        return ModInitSubfunction
    def generate_module_import_setup(self, env, code):
        """Set up '__file__'/'__path__' and register the module in sys.modules.

        The attribute setup only applies to single-phase init; with PEP 489
        multi-phase init the import machinery provides these itself (all the
        emitted code is guarded by !CYTHON_PEP489_MULTI_PHASE_INIT).
        """
        module_path = env.directives['set_initial_path']
        if module_path == 'SOURCEFILE':
            module_path = self.pos[0].filename

        if module_path:
            code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
            code.putln('if (PyObject_SetAttrString(%s, "__file__", %s) < 0) %s;' % (
                env.module_cname,
                code.get_py_string_const(
                    EncodedString(decode_filename(module_path))),
                code.error_goto(self.pos)))
            code.putln("}")

            if env.is_package:
                # set __path__ to mark the module as package
                code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
                temp = code.funcstate.allocate_temp(py_object_type, True)
                code.putln('%s = Py_BuildValue("[O]", %s); %s' % (
                    temp,
                    code.get_py_string_const(
                        EncodedString(decode_filename(
                            os.path.dirname(module_path)))),
                    code.error_goto_if_null(temp, self.pos)))
                code.put_gotref(temp, py_object_type)
                code.putln(
                    'if (PyObject_SetAttrString(%s, "__path__", %s) < 0) %s;' % (
                        env.module_cname, temp, code.error_goto(self.pos)))
                code.put_decref_clear(temp, py_object_type)
                code.funcstate.release_temp(temp)
                code.putln("}")

        elif env.is_package:
            # packages require __path__, so all we can do is try to figure
            # out the module path at runtime by rerunning the import lookup
            code.putln("if (!CYTHON_PEP489_MULTI_PHASE_INIT) {")
            code.globalstate.use_utility_code(UtilityCode.load(
                "SetPackagePathFromImportLib", "ImportExport.c"))
            code.putln(code.error_goto_if_neg(
                '__Pyx_SetPackagePathFromImportLib(%s)' % (
                    code.get_py_string_const(
                        EncodedString(self.full_module_name))),
                self.pos))
            code.putln("}")

        # CPython may not have put us into sys.modules yet, but relative imports and reimports require it
        fq_module_name = self.full_module_name
        if fq_module_name.endswith('.__init__'):
            # A package's module object is registered under the package name.
            fq_module_name = EncodedString(fq_module_name[:-len('.__init__')])
        fq_module_name_cstring = fq_module_name.as_c_string_literal()
        code.putln("{")
        code.putln("PyObject *modules = PyImport_GetModuleDict(); %s" %
                   code.error_goto_if_null("modules", self.pos))
        code.putln('if (!PyDict_GetItemString(modules, %s)) {' % fq_module_name_cstring)
        code.putln(code.error_goto_if_neg('PyDict_SetItemString(modules, %s, %s)' % (
            fq_module_name_cstring, env.module_cname), self.pos))
        code.putln("}")
        code.putln("}")
    def generate_module_cleanup_func(self, env, code):
        """Generate the module cleanup function (only with Options.generate_cleanup_code).

        Cleanup level >=2 also clears module-global variables; level >=3
        additionally clears imported types.  Cached builtins, interned
        constants and per-class freelists are always released.
        """
        if not Options.generate_cleanup_code:
            return
        code.putln('static void %s(CYTHON_UNUSED PyObject *self) {' %
                   Naming.cleanup_cname)
        code.enter_cfunc_scope(env)
        code.putln(f"{Naming.modulestatetype_cname} *{Naming.modulestatevalue_cname};")
        # TODO - this should go away when module-state has been refactored more and
        # we are able to access the module state through "self". Currently the
        # `#define` for each entry forces us to access it through PyState_FindModule
        # which is sometimes unreliable during destruction
        # (e.g. during interpreter shutdown).
        # In that case the safest thing is to give up.
        code.putln("#if CYTHON_USE_MODULE_STATE")
        code.putln(f"if (!__Pyx_State_FindModule(&{Naming.pymoduledef_cname})) return;")
        code.putln("#endif")
        code.putln(f"{Naming.modulestatevalue_cname} = __Pyx_PyModule_GetState(self);")
        if Options.generate_cleanup_code >= 2:
            code.putln("/*--- Global cleanup code ---*/")
            # Release globals in reverse declaration order.
            rev_entries = list(env.var_entries)
            rev_entries.reverse()
            for entry in rev_entries:
                if entry.visibility != 'extern':
                    if entry.type.is_pyobject and entry.used:
                        if entry.is_cglobal:
                            # TODO - eventually these should probably be in the module state too
                            entry_cname = entry.cname
                        else:
                            entry_cname = code.name_in_module_state(entry.cname)
                        code.put_xdecref_clear(
                            entry_cname, entry.type,
                            clear_before_decref=True,
                            nanny=False)
                    if entry.type.needs_explicit_destruction(env):
                        entry.type.generate_explicit_destruction(code, entry)
        code.putln(f"__Pyx_CleanupGlobals({Naming.modulestatevalue_cname});")
        if Options.generate_cleanup_code >= 3:
            code.putln("/*--- Type import cleanup code ---*/")
            for ext_type in sorted(env.types_imported, key=operator.attrgetter('typeptr_cname')):
                typeptr_cname = code.name_in_main_c_code_module_state(ext_type.typeptr_cname)
                code.put_xdecref_clear(
                    typeptr_cname, ext_type,
                    clear_before_decref=True,
                    nanny=False)
        if Options.cache_builtins:
            code.putln("/*--- Builtin cleanup code ---*/")
            for entry in env.cached_builtins:
                code.put_xdecref_clear(
                    entry.cname, PyrexTypes.py_object_type,
                    clear_before_decref=True,
                    nanny=False)
        code.putln("/*--- Intern cleanup code ---*/")
        # NOTE(review): only empty_tuple is released here; empty_bytes and
        # empty_unicode are not — confirm whether that is intentional.
        code.put_decref_clear(f"{code.name_in_main_c_code_module_state(Naming.empty_tuple)}",
                              PyrexTypes.py_object_type,
                              clear_before_decref=True,
                              nanny=False)
        # Drain each extension type's freelist, freeing the cached instances.
        for entry in env.c_class_entries:
            cclass_type = entry.type
            if cclass_type.is_external or cclass_type.base_type:
                continue
            if cclass_type.scope.directives.get('freelist', 0):
                scope = cclass_type.scope
                freelist_name = code.name_in_main_c_code_module_state(
                    scope.mangle_internal(Naming.freelist_name))
                freecount_name = code.name_in_main_c_code_module_state(
                    scope.mangle_internal(Naming.freecount_name))
                code.putln('#if CYTHON_USE_FREELISTS')
                code.putln("while (%s > 0) {" % freecount_name)
                code.putln("PyObject* o = (PyObject*)%s[--%s];" % (
                    freelist_name, freecount_name))
                code.putln("PyTypeObject *tp = Py_TYPE(o);")
                code.putln("#if CYTHON_USE_TYPE_SLOTS")
                code.putln("(*tp->tp_free)(o);")
                code.putln("#else")
                # Asking for PyType_GetSlot(..., Py_tp_free) seems to cause an error in pypy
                code.putln("freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free);")
                code.putln("if (tp_free) tp_free(o);")
                code.putln("#endif")
                code.putln("#if CYTHON_USE_TYPE_SPECS")
                # Release the reference that "o" owned for its type.
                code.putln("Py_DECREF(tp);")
                code.putln("#endif")
                code.putln("}")
                code.putln('#endif') # CYTHON_USE_FREELISTS
        # for entry in env.pynum_entries:
        #     code.put_decref_clear(entry.cname,
        #                           PyrexTypes.py_object_type,
        #                           nanny=False)
        # for entry in env.all_pystring_entries:
        #     if entry.is_interned:
        #         code.put_decref_clear(entry.pystring_cname,
        #                               PyrexTypes.py_object_type,
        #                               nanny=False)
        # for entry in env.default_entries:
        #     if entry.type.is_pyobject and entry.used:
        #         code.putln("Py_DECREF(%s); %s = 0;" % (
        #             code.entry_as_pyobject(entry), entry.cname))
        if Options.pre_import is not None:
            code.put_decref_clear(Naming.preimport_cname, py_object_type,
                                  nanny=False, clear_before_decref=True)
        for cname in [Naming.cython_runtime_cname, Naming.builtins_cname]:
            cname = code.name_in_main_c_code_module_state(cname)
            code.put_decref_clear(cname, py_object_type, nanny=False, clear_before_decref=True)
        # The module dict is released last, after everything that may need it.
        code.put_decref_clear(
            code.name_in_main_c_code_module_state(env.module_dict_cname),
            py_object_type, nanny=False, clear_before_decref=True)
def generate_main_method(self, env, code):
module_is_main = self.is_main_module_flag_cname()
if Options.embed == "main":
wmain = "wmain"
else:
wmain = Options.embed
main_method = TempitaUtilityCode.load_cached(
"MainFunction", "Embed.c",
context={
'module_name': env.module_name,
'module_is_main': module_is_main,
'main_method': Options.embed,
'wmain_method': wmain,
'embed_modules': tuple(Options.embed_modules)})
code.globalstate.use_utility_code(main_method)
def punycode_module_name(self, prefix, name):
# adapted from PEP483
if name.isascii():
name = '_' + name
else:
name = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii')
return "%s%s" % (prefix, name)
def wrong_punycode_module_name(self, name):
# to work around a distutils bug by also generating an incorrect symbol...
if name.isascii():
return None # workaround is not needed
return "PyInitU" + ("_"+name).encode('punycode').replace(b'-', b'_').decode('ascii')
    def mod_init_func_cname(self, prefix, env):
        """Return '<prefix>_<module name>', punycode-mangled for non-ASCII names."""
        # from PEP483
        return self.punycode_module_name(prefix, env.module_name)
# Returns the name of the C-function that corresponds to the module initialisation.
# (module initialisation == the cython code outside of functions)
# Note that this should never be the name of a wrapper and always the name of the
# function containing the actual code. Otherwise, cygdb will experience problems.
    def module_init_func_cname(self):
        """Return the C name of the function holding the actual module init code.

        This is always the function that contains the generated code, never a
        wrapper, so that cygdb can locate it reliably.
        """
        env = self.scope
        return self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env)
def generate_pymoduledef_struct(self, env, code):
if env.doc:
doc = "%s" % code.get_string_const(env.doc)
else:
doc = "0"
if Options.generate_cleanup_code:
cleanup_func = "(freefunc)%s" % Naming.cleanup_cname
else:
cleanup_func = 'NULL'
code.putln("")
code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
exec_func_cname = self.module_init_func_cname()
code.putln("static PyObject* %s(PyObject *spec, PyModuleDef *def); /*proto*/" %
Naming.pymodule_create_func_cname)
code.putln("static int %s(PyObject* module); /*proto*/" % exec_func_cname)
code.putln("static PyModuleDef_Slot %s[] = {" % Naming.pymoduledef_slots_cname)
code.putln("{Py_mod_create, (void*)%s}," % Naming.pymodule_create_func_cname)
code.putln("{Py_mod_exec, (void*)%s}," % exec_func_cname)
code.putln("#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING")
gil_option = ("Py_MOD_GIL_NOT_USED"
if env.directives["freethreading_compatible"]
else "Py_MOD_GIL_USED")
code.putln("{Py_mod_gil, %s}," % gil_option)
code.putln("#endif")
code.putln("#if PY_VERSION_HEX >= 0x030C0000 && CYTHON_USE_MODULE_STATE")
subinterp_option = {
'no': 'Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED',
'shared_gil': 'Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED',
'own_gil': 'Py_MOD_PER_INTERPRETER_GIL_SUPPORTED'
}.get(env.directives["subinterpreters_compatible"])
code.putln("{Py_mod_multiple_interpreters, %s}," % subinterp_option)
code.putln("#endif")
code.putln("{0, NULL}")
code.putln("};")
if not env.module_name.isascii():
code.putln("#else /* CYTHON_PEP489_MULTI_PHASE_INIT */")
code.putln('#error "Unicode module names are only supported with multi-phase init'
' as per PEP489"')
code.putln("#endif")
code.putln("")
code.putln('#ifdef __cplusplus')
code.putln('namespace {')
code.putln("struct PyModuleDef %s =" % Naming.pymoduledef_cname)
code.putln('#else')
code.putln("static struct PyModuleDef %s =" % Naming.pymoduledef_cname)
code.putln('#endif')
code.putln('{')
code.putln(" PyModuleDef_HEAD_INIT,")
code.putln(' %s,' % env.module_name.as_c_string_literal())
code.putln(" %s, /* m_doc */" % doc)
code.putln("#if CYTHON_USE_MODULE_STATE")
code.putln(f" sizeof({Naming.modulestatetype_cname}), /* m_size */")
code.putln("#else")
code.putln(" (CYTHON_PEP489_MULTI_PHASE_INIT) ? 0 : -1, /* m_size */")
code.putln("#endif")
code.putln(" %s /* m_methods */," % env.method_table_cname)
code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
code.putln(" %s, /* m_slots */" % Naming.pymoduledef_slots_cname)
code.putln("#else")
code.putln(" NULL, /* m_reload */")
code.putln("#endif")
code.putln("#if CYTHON_USE_MODULE_STATE")
code.putln(" %s_traverse, /* m_traverse */" % Naming.module_cname)
code.putln(" %s_clear, /* m_clear */" % Naming.module_cname)
code.putln(" %s /* m_free */" % cleanup_func)
code.putln("#else")
code.putln(" NULL, /* m_traverse */")
code.putln(" NULL, /* m_clear */")
code.putln(" %s /* m_free */" % cleanup_func)
code.putln("#endif")
code.putln("};")
code.putln('#ifdef __cplusplus')
code.putln('} /* anonymous namespace */')
code.putln('#endif')
def generate_module_creation_code(self, env, code):
# Generate code to create the module object and
# install the builtins.
if env.doc:
doc = "%s" % code.get_string_const(env.doc)
else:
doc = "0"
# manage_ref is False (and refnanny calls are omitted) because refnanny isn't yet initialized.
module_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
code.putln("%s = %s;" % (
module_temp,
Naming.pymodinit_module_arg))
code.put_incref(module_temp, py_object_type, nanny=False)
code.putln("#else")
code.putln(
"%s = PyModule_Create(&%s); %s" % (
module_temp,
Naming.pymoduledef_cname,
code.error_goto_if_null(module_temp, self.pos)))
code.putln("#endif")
code.putln("#if CYTHON_USE_MODULE_STATE")
code.putln("{")
# So that PyState_FindModule works in the init function:
code.putln("int add_module_result = __Pyx_State_AddModule(%s, &%s);" % (
module_temp, Naming.pymoduledef_cname))
code.putln("%s = 0; /* transfer ownership from %s to %s pseudovariable */" % (
module_temp, module_temp, env.module_name.as_c_string_literal()
))
# At this stage the module likely has a refcount of 2 - one owned by the list
# inside PyState_AddModule and one owned by "__pyx_m" (and returned from this
# function as a new reference).
code.putln(code.error_goto_if_neg("add_module_result", self.pos))
code.putln("pystate_addmodule_run = 1;")
code.putln("}")
code.putln('#else') # !CYTHON_USE_MODULE_STATE
code.putln(f"{env.module_cname} = {module_temp};")
code.putln("#endif")
code.funcstate.release_temp(module_temp)
code.putln("#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING")
gil_option = ("Py_MOD_GIL_NOT_USED"
if env.directives["freethreading_compatible"]
else "Py_MOD_GIL_USED")
code.putln(f"PyUnstable_Module_SetGIL({env.module_cname}, {gil_option});")
code.putln("#endif")
code.putln(f"{Naming.modulestatevalue_cname} = {Naming.modulestateglobal_cname};")
code.putln("CYTHON_UNUSED_VAR(%s);" % module_temp) # only used in limited API
dict_cname = code.name_in_main_c_code_module_state(env.module_dict_cname)
code.putln(
"%s = PyModule_GetDict(%s); %s" % (
dict_cname, env.module_cname,
code.error_goto_if_null(dict_cname, self.pos)))
code.put_incref(dict_cname, py_object_type, nanny=False)
builtins_cname = code.name_in_main_c_code_module_state(Naming.builtins_cname)
code.putln(
'%s = __Pyx_PyImport_AddModuleRef(__Pyx_BUILTIN_MODULE_NAME); %s' % (
builtins_cname,
code.error_goto_if_null(builtins_cname, self.pos)))
runtime_cname = code.name_in_main_c_code_module_state(Naming.cython_runtime_cname)
code.putln(
'%s = __Pyx_PyImport_AddModuleRef("cython_runtime"); %s' % (
runtime_cname,
code.error_goto_if_null(runtime_cname, self.pos)))
code.putln(
'if (PyObject_SetAttrString(%s, "__builtins__", %s) < 0) %s' % (
env.module_cname,
builtins_cname,
code.error_goto(self.pos)))
if Options.pre_import is not None:
code.putln(
'%s = __Pyx_PyImport_AddModuleRef("%s"); %s' % (
Naming.preimport_cname,
Options.pre_import,
code.error_goto_if_null(Naming.preimport_cname, self.pos)))
def generate_global_init_code(self, env, code):
# Generate code to initialise global PyObject *
# variables to None.
for entry in env.var_entries:
if entry.visibility != 'extern':
if entry.used:
entry.type.global_init_code(entry, code)
if entry.type.needs_explicit_construction(env):
# TODO - this is slightly redundant with global_init_code
entry.type.generate_explicit_construction(code, entry)
def generate_wrapped_entries_code(self, env, code):
for name, entry in sorted(env.entries.items()):
if (entry.create_wrapper
and not entry.is_type
and entry.scope is env):
if not entry.type.create_to_py_utility_code(env):
error(entry.pos, "Cannot convert '%s' to Python object" % entry.type)
code.putln("{")
code.putln("PyObject* wrapped = %s(%s);" % (
entry.type.to_py_function,
entry.cname))
code.putln(code.error_goto_if_null("wrapped", entry.pos))
code.putln(
'if (PyObject_SetAttrString(%s, "%s", wrapped) < 0) %s;' % (
env.module_cname,
name,
code.error_goto(entry.pos)))
code.putln("}")
def _select_exported_entries(self, all_entries):
return [
entry for entry in all_entries
if entry.api or entry.defined_in_pxd or (Options.cimport_from_pyx and entry.visibility != 'extern')
]
def generate_c_variable_export_code(self, env, code):
"""Generate code to create PyCFunction wrappers for exported C functions.
"""
entries = self._select_exported_entries(env.var_entries)
if not entries:
return
exports = [
# (signature, name, cname)
(entry.type.empty_declaration_code(), entry.name, entry.cname)
for entry in entries
]
code.globalstate.use_utility_code(
UtilityCode.load_cached("VoidPtrExport", "ImportExport.c"))
_generate_export_code(code, self.pos, exports, "__Pyx_ExportVoidPtr", "void *{name}")
def generate_c_function_export_code(self, env, code):
"""Generate code to create PyCFunction wrappers for exported C functions.
"""
entries = self._select_exported_entries(env.cfunc_entries)
if not entries:
return
exports = [
# (signature, name, cname)
(entry.type.signature_string(), entry.name, entry.cname)
for entry in entries
]
code.globalstate.use_utility_code(
UtilityCode.load_cached("FunctionExport", "ImportExport.c"))
_generate_export_code(code, self.pos, exports, "__Pyx_ExportFunction", "void (*{name})(void)")
def generate_type_import_code_for_module(self, module, env, code):
# Generate type import code for all exported extension types in
# an imported module.
#if module.c_class_entries:
with ModuleImportGenerator(code) as import_generator:
for entry in module.c_class_entries:
if entry.defined_in_pxd:
self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator)
def specialize_fused_types(self, pxd_env):
"""
If fused c(p)def functions are defined in an imported pxd, but not
used in this implementation file, we still have fused entries and
not specialized ones. This method replaces any fused entries with their
specialized ones.
"""
for entry in pxd_env.cfunc_entries[:]:
if entry.type.is_fused:
# This call modifies the cfunc_entries in-place
entry.type.get_all_specialized_function_types()
def _select_imported_entries(self, all_entries, used_only=False):
return [
entry for entry in all_entries
if entry.defined_in_pxd and (not used_only or entry.used)
]
def generate_c_variable_import_code_for_module(self, module, env, code):
"""Generate import code for all exported C functions in a cimported module.
"""
entries = self._select_imported_entries(module.var_entries, used_only=False)
if not entries:
return
is_module_scope = env is module
imports = [
# (signature, name, cname)
(entry.type.empty_declaration_code(),
entry.name,
entry.cname if is_module_scope else module.mangle(Naming.varptr_prefix, entry.name))
for entry in entries
]
code.globalstate.use_utility_code(
UtilityCode.load_cached("VoidPtrImport", "ImportExport.c"))
_generate_import_code(
code, self.pos, imports, module.qualified_name, f"__Pyx_ImportVoidPtr_{Naming.cyversion}", "void **{name}")
def generate_c_function_import_code_for_module(self, module, env, code):
"""Generate import code for all exported C functions in a cimported module.
"""
entries = self._select_imported_entries(module.cfunc_entries, used_only=True)
if not entries:
return
imports = [
# (signature, name, cname)
(entry.type.signature_string(), entry.name, entry.cname)
for entry in entries
]
code.globalstate.use_utility_code(
UtilityCode.load_cached("FunctionImport", "ImportExport.c"))
_generate_import_code(
code, self.pos, imports, module.qualified_name, f"__Pyx_ImportFunction_{Naming.cyversion}", "void (**{name})(void)")
def generate_type_init_code(self, env, code):
# Generate type import code for extern extension types
# and type ready code for non-extern ones.
with ModuleImportGenerator(code) as import_generator:
for entry in env.c_class_entries:
if entry.visibility == 'extern' and not entry.utility_code_definition:
self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator)
else:
self.generate_base_type_import_code(env, entry, code, import_generator)
self.generate_exttype_vtable_init_code(entry, code)
if entry.type.early_init:
self.generate_type_ready_code(entry, code)
def generate_base_type_import_code(self, env, entry, code, import_generator):
base_type = entry.type.base_type
if (base_type and base_type.module_name != env.qualified_name and not
(base_type.is_builtin_type or base_type.is_cython_builtin_type)
and not entry.utility_code_definition):
self.generate_type_import_code(env, base_type, self.pos, code, import_generator)
def generate_type_import_code(self, env, type, pos, code, import_generator):
# If not already done, generate code to import the typeobject of an
# extension type defined in another module, and extract its C method
# table pointer if any.
if type in env.types_imported:
return
if type.name not in Code.ctypedef_builtins_map:
# see corresponding condition in generate_type_import_call() below!
code.globalstate.use_utility_code(
UtilityCode.load_cached("TypeImport", "ImportExport.c"))
self.generate_type_import_call(type, code, import_generator, error_pos=pos)
if type.vtabptr_cname:
code.globalstate.use_utility_code(
UtilityCode.load_cached('GetVTable', 'ImportExport.c'))
code.putln("%s = (struct %s*)__Pyx_GetVtable(%s); %s" % (
type.vtabptr_cname,
type.vtabstruct_cname,
code.name_in_main_c_code_module_state(type.typeptr_cname),
code.error_goto_if_null(type.vtabptr_cname, pos)))
env.types_imported.add(type)
def generate_type_import_call(self, type, code, import_generator, error_code=None, error_pos=None, is_api=False):
sizeof_objstruct = objstruct = type.objstruct_cname if type.typedef_flag else f"struct {type.objstruct_cname}"
module_name = type.module_name
type_name = type.name
is_builtin = module_name in ('__builtin__', 'builtins')
if not is_builtin:
module_name = f'"{module_name}"'
elif type_name in Code.ctypedef_builtins_map:
# Fast path for special builtins, don't actually import
code.putln(
f'{code.name_in_module_state(type.typeptr_cname)} = {Code.ctypedef_builtins_map[type_name]};')
return
else:
module_name = '__Pyx_BUILTIN_MODULE_NAME'
if type_name in Code.renamed_py2_builtins_map:
type_name = Code.renamed_py2_builtins_map[type_name]
if objstruct in Code.basicsize_builtins_map:
# Some builtin types have a tp_basicsize which differs from sizeof(...):
sizeof_objstruct = Code.basicsize_builtins_map[objstruct]
if not error_code:
assert error_pos is not None
error_code = code.error_goto(error_pos)
module = import_generator.imported_module(module_name, error_code)
typeptr_cname = type.typeptr_cname
if not is_api:
typeptr_cname = code.name_in_main_c_code_module_state(typeptr_cname)
code.putln(
f"{typeptr_cname} = __Pyx_ImportType_{Naming.cyversion}("
f"{module}, {module_name}, {type.name.as_c_string_literal()},"
)
alignment_func = f"__PYX_GET_STRUCT_ALIGNMENT_{Naming.cyversion}"
code.putln("#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000")
code.putln(f'sizeof({objstruct}), {alignment_func}({objstruct}),')
code.putln("#elif CYTHON_COMPILING_IN_LIMITED_API")
if is_builtin:
# Builtin types are opaque in when the limited API is enabled
# and subsequents attempt to access their fields will trigger
# compile errors. Skip the struct size check here so things keep
# working when a builtin type is imported but not actually used.
code.putln('0, 0,')
else:
code.putln(f'sizeof({objstruct}), {alignment_func}({objstruct}),')
code.putln('#else')
code.putln(f'sizeof({sizeof_objstruct}), {alignment_func}({sizeof_objstruct}),')
code.putln("#endif")
# check_size
if type.check_size and type.check_size in ('error', 'warn', 'ignore'):
check_size = type.check_size
elif not type.is_external or type.is_subclassed:
check_size = 'error'
else:
raise RuntimeError(
f"invalid value for check_size '{type.check_size}' when compiling {module_name}.{type.name}")
code.put(f'__Pyx_ImportType_CheckSize_{check_size.title()}_{Naming.cyversion});')
code.putln(f' if (!{typeptr_cname}) {error_code}')
def generate_type_ready_code(self, entry, code):
Nodes.CClassDefNode.generate_type_ready_code(entry, code)
def is_main_module_flag_cname(self):
full_module_name = self.full_module_name.replace('.', '__')
return self.punycode_module_name(Naming.module_is_main, full_module_name)
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
# extension type.
type = entry.type
if type.vtable_cname:
code.putln(
"%s = &%s;" % (
type.vtabptr_cname,
type.vtable_cname))
if type.base_type and type.base_type.vtabptr_cname:
code.putln(
"%s.%s = *%s;" % (
type.vtable_cname,
Naming.obj_base_cname,
type.base_type.vtabptr_cname))
c_method_entries = [
entry for entry in type.scope.cfunc_entries
if entry.func_cname]
if c_method_entries:
for meth_entry in c_method_entries:
vtable_type = meth_entry.vtable_type or meth_entry.type
cast = vtable_type.signature_cast_string()
code.putln(
"%s.%s = %s%s;" % (
type.vtable_cname,
meth_entry.cname,
cast,
meth_entry.func_cname))
# cimport/export code for functions and pointers.
def _deduplicate_inout_signatures(item_tuples):
# We can save runtime space for identical signatures by reusing the same C strings.
# To deduplicate the signatures, we sort by them and store duplicates as empty C strings.
signatures, names, items = zip(*sorted(item_tuples))
signatures = list(signatures) # tuple -> list, to allow reassignments again
last_sig = None
for i, signature in enumerate(signatures):
if signature == last_sig:
signatures[i] = ''
else:
last_sig = signature
return signatures, names, items
def _generate_import_export_code(code: Code.CCodeWriter, pos, inout_item_tuples, per_item_func, target, pointer_decl, use_pybytes, is_import):
signatures, names, inout_items = _deduplicate_inout_signatures(inout_item_tuples)
pyx = f"{Naming.pyrex_prefix}{'import' if is_import else 'export'}_"
pointer_cast = pointer_decl.format(name='')
sig_bytes = '\0'.join(signatures).encode('utf-8')
names_bytes = '\0'.join(names).encode('utf-8')
pointers = [f"({pointer_cast})&{item_cname}" for item_cname in inout_items]
sigs_and_names_bytes = bytes_literal(sig_bytes + b'\0' + names_bytes, 'utf-8')
if use_pybytes:
code.putln(f"const char * {pyx}signature = __Pyx_PyBytes_AsString({code.get_py_string_const(sigs_and_names_bytes)});")
code.putln("#if !CYTHON_ASSUME_SAFE_MACROS")
code.putln(code.error_goto_if_null(f'{pyx}signature', pos))
code.putln("#endif")
else:
code.putln(f"const char * {pyx}signature = {sigs_and_names_bytes.as_c_string_literal()};")
code.putln(f"const char * {pyx}name = {pyx}signature + {len(sig_bytes) + 1};")
code.putln(f"{pointer_decl.format(name=f'const {pyx}pointers[]')} = {{{', '.join(pointers)}, ({pointer_cast}) NULL}};")
code.globalstate.use_utility_code(
UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln(f"{pointer_decl.format(name=f'const *{pyx}pointer')} = {pyx}pointers;")
code.putln(f"const char *{pyx}current_signature = {pyx}signature;")
code.putln(f"while (*{pyx}pointer) {{")
code.put_error_if_neg(
pos,
f"{per_item_func}({target}, {pyx}name, *{pyx}pointer, {pyx}current_signature)"
)
code.putln(f"++{pyx}pointer;")
code.putln(f"{pyx}name = strchr({pyx}name, '\\0') + 1;")
code.putln(f"{pyx}signature = strchr({pyx}signature, '\\0') + 1;")
# Keep reusing the current signature until we find a new non-empty one.
code.putln(f"if (*{pyx}signature != '\\0') {pyx}current_signature = {pyx}signature;")
code.putln("}") # while
def _generate_export_code(code: Code.CCodeWriter, pos, exports, export_func, pointer_decl):
"""Generate function/pointer export code.
'exports' is a list of (signature, name, exported_cname) tuples.
"""
code.putln("{")
api_dict = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetApiDict", "ImportExport.c"))
code.putln(
f"{api_dict} = __Pyx_ApiExport_GetApiDict(); "
f"{code.error_goto_if_null(api_dict, pos)}"
)
code.put_gotref(api_dict, py_object_type)
_generate_import_export_code(code, pos, exports, export_func, api_dict, pointer_decl, use_pybytes=True, is_import=False)
code.put_decref_clear(api_dict, py_object_type)
code.funcstate.release_temp(api_dict)
code.putln("}")
def _generate_import_code(code, pos, imports, qualified_module_name, import_func, pointer_decl):
"""Generate function/pointer import code.
'imports' is a list of (signature, name, imported_cname) tuples.
"""
code.putln("{")
module_ref = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
f'{module_ref} = PyImport_ImportModule({qualified_module_name.as_c_string_literal()}); '
f'{code.error_goto_if_null(module_ref, pos)}'
)
code.put_gotref(module_ref, py_object_type)
_generate_import_export_code(code, pos, imports, import_func, module_ref, pointer_decl, use_pybytes=True, is_import=True)
code.put_decref_clear(module_ref, py_object_type)
code.funcstate.release_temp(module_ref)
code.putln("}")
# Module import helper
| ModuleNode |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_returned.py | {
"start": 216,
"end": 351
} | class ____:
"""__getnewargs__ returns <type 'tuple'>"""
def __getnewargs__(self):
return (1, "2", 3)
| FirstGoodGetNewArgs |
python | facebook__pyre-check | tools/upgrade/commands/tests/fix_configuration_test.py | {
"start": 783,
"end": 6784
} | class ____(unittest.TestCase):
@patch("subprocess.check_output")
@patch.object(Configuration, "get_errors")
@patch.object(Repository, "commit_changes")
@patch.object(Repository, "remove_paths")
def test_run_fix_configuration(
self, remove_paths, commit_changes, get_errors, check_output
) -> None:
arguments = MagicMock()
arguments.lint = False
arguments.no_commit = False
with tempfile.TemporaryDirectory() as root:
arguments.path = Path(root)
configuration_path = os.path.join(root, ".pyre_configuration.local")
# Healthy configuration.
with open(configuration_path, "w+") as configuration_file:
json.dump({"targets": ["//my:target"]}, configuration_file)
configuration_file.seek(0)
FixConfiguration.from_arguments(arguments, repository).run()
get_errors.assert_called_once()
commit_changes.assert_called_once()
# Remove bad targets.
get_errors.reset_mock()
commit_changes.reset_mock()
check_output.side_effect = _raise_on_bad_target
with open(configuration_path, "w+") as configuration_file:
json.dump(
{
"targets": [
"//good:target",
"//timeout:target",
"//bad:target1",
"//bad:target2",
]
},
configuration_file,
)
configuration_file.seek(0)
FixConfiguration.from_arguments(arguments, repository).run()
configuration_file.seek(0)
self.assertEqual(
json.load(configuration_file),
{"targets": ["//good:target", "//timeout:target"]},
)
get_errors.assert_called_once()
commit_changes.assert_called_once()
# Remove configuration with only bad targets.
get_errors.reset_mock()
commit_changes.reset_mock()
with open(configuration_path, "w+") as configuration_file:
json.dump(
{"targets": ["//bad:target1", "//bad:target2"]}, configuration_file
)
configuration_file.seek(0)
FixConfiguration.from_arguments(arguments, repository).run()
remove_paths.assert_called_once_with([Path(configuration_path)])
get_errors.assert_not_called()
commit_changes.assert_called_once()
# Consolidate nested configurations.
remove_paths.reset_mock()
get_errors.reset_mock()
commit_changes.reset_mock()
with open(configuration_path, "w+") as configuration_file:
json.dump({"targets": ["//parent:target"]}, configuration_file)
configuration_file.seek(0)
subdirectory = os.path.join(root, "subdirectory")
os.mkdir(subdirectory)
nested_configuration_path = os.path.join(
subdirectory, ".pyre_configuration.local"
)
arguments.path = Path(subdirectory)
with open(nested_configuration_path, "w+") as nested_configuration_file:
json.dump(
{"targets": ["//nested:target"]}, nested_configuration_file
)
nested_configuration_file.seek(0)
FixConfiguration.from_arguments(arguments, repository).run()
configuration_file.seek(0)
parent_contents = json.load(configuration_file)
self.assertEqual(
parent_contents,
{"targets": ["//parent:target", "//nested:target"]},
)
remove_paths.assert_called_once_with(
[Path(nested_configuration_path)]
)
get_errors.assert_called_once()
commit_changes.assert_called_once()
# Skip consolidation on properly ignored nested configurations.
remove_paths.reset_mock()
get_errors.reset_mock()
commit_changes.reset_mock()
with open(configuration_path, "w+") as configuration_file:
json.dump(
{
"targets": ["//parent:target"],
"ignore_all_errors": ["subdirectory"],
},
configuration_file,
)
configuration_file.seek(0)
subdirectory = os.path.join(root, "subdirectory")
nested_configuration_path = os.path.join(
subdirectory, ".pyre_configuration.local"
)
arguments.path = Path(subdirectory)
with open(nested_configuration_path, "w+") as nested_configuration_file:
json.dump(
{"targets": ["//nested:target"]}, nested_configuration_file
)
nested_configuration_file.seek(0)
FixConfiguration.from_arguments(arguments, repository).run()
configuration_file.seek(0)
parent_contents = json.load(configuration_file)
self.assertEqual(
parent_contents,
{
"targets": ["//parent:target"],
"ignore_all_errors": ["subdirectory"],
},
)
remove_paths.assert_not_called()
get_errors.assert_called_once()
commit_changes.assert_called_once()
| FixmeConfigurationTest |
python | scipy__scipy | scipy/optimize/_trustregion_constr/report.py | {
"start": 1410,
"end": 1782
} | class ____(ReportBase):
COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
"opt", "c viol", "penalty", "barrier param", "CG stop"]
COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7]
ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
"^10.2e", "^10.2e", "^13.2e", "^7"]
| IPReport |
python | pypa__pip | src/pip/_vendor/platformdirs/android.py | {
"start": 190,
"end": 9013
} | class ____(PlatformDirsABC):
"""
Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_.
Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`, `version
<platformdirs.api.PlatformDirsABC.version>`, `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
return self._append_app_name_and_version(cast("str", _android_folder()), "files")
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. \
``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
"""
return self._append_app_name_and_version(cast("str", _android_folder()), "shared_prefs")
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `user_config_dir`"""
return self.user_config_dir
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g.,``/data/user/<userid>/<packagename>/cache/<AppName>``"""
return self._append_app_name_and_version(cast("str", _android_folder()), "cache")
@property
def site_cache_dir(self) -> str:
""":return: cache directory shared by users, same as `user_cache_dir`"""
return self.user_cache_dir
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "log") # noqa: PTH118
return path
@property
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``"""
return _android_documents_folder()
@property
def user_downloads_dir(self) -> str:
""":return: downloads directory tied to the user e.g. ``/storage/emulated/0/Downloads``"""
return _android_downloads_folder()
@property
def user_pictures_dir(self) -> str:
""":return: pictures directory tied to the user e.g. ``/storage/emulated/0/Pictures``"""
return _android_pictures_folder()
@property
def user_videos_dir(self) -> str:
""":return: videos directory tied to the user e.g. ``/storage/emulated/0/DCIM/Camera``"""
return _android_videos_folder()
@property
def user_music_dir(self) -> str:
""":return: music directory tied to the user e.g. ``/storage/emulated/0/Music``"""
return _android_music_folder()
@property
def user_desktop_dir(self) -> str:
""":return: desktop directory tied to the user e.g. ``/storage/emulated/0/Desktop``"""
return "/storage/emulated/0/Desktop"
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "tmp") # noqa: PTH118
return path
@property
def site_runtime_dir(self) -> str:
""":return: runtime directory shared by users, same as `user_runtime_dir`"""
return self.user_runtime_dir
@lru_cache(maxsize=1)
def _android_folder() -> str | None: # noqa: C901
""":return: base folder for the Android OS or None if it cannot be found"""
result: str | None = None
# type checker isn't happy with our "import android", just don't do this when type checking see
# https://stackoverflow.com/a/61394121
if not TYPE_CHECKING:
try:
# First try to get a path to android app using python4android (if available)...
from android import mActivity # noqa: PLC0415
context = cast("android.content.Context", mActivity.getApplicationContext()) # noqa: F821
result = context.getFilesDir().getParentFile().getAbsolutePath()
except Exception: # noqa: BLE001
result = None
if result is None:
try:
# ...and fall back to using plain pyjnius, if python4android isn't available or doesn't deliver any useful
# result...
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
result = context.getFilesDir().getParentFile().getAbsolutePath()
except Exception: # noqa: BLE001
result = None
if result is None:
# and if that fails, too, find an android folder looking at path on the sys.path
# warning: only works for apps installed under /data, not adopted storage etc.
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
if result is None:
# one last try: find an android folder looking at path on the sys.path taking adopted storage paths into
# account
pattern = re.compile(r"/mnt/expand/[a-fA-F0-9-]{36}/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
""":return: documents folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
documents_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
except Exception: # noqa: BLE001
documents_dir = "/storage/emulated/0/Documents"
return documents_dir
@lru_cache(maxsize=1)
def _android_downloads_folder() -> str:
""":return: downloads folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
downloads_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOWNLOADS).getAbsolutePath()
except Exception: # noqa: BLE001
downloads_dir = "/storage/emulated/0/Downloads"
return downloads_dir
@lru_cache(maxsize=1)
def _android_pictures_folder() -> str:
""":return: pictures folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
pictures_dir: str = context.getExternalFilesDir(environment.DIRECTORY_PICTURES).getAbsolutePath()
except Exception: # noqa: BLE001
pictures_dir = "/storage/emulated/0/Pictures"
return pictures_dir
@lru_cache(maxsize=1)
def _android_videos_folder() -> str:
""":return: videos folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
videos_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DCIM).getAbsolutePath()
except Exception: # noqa: BLE001
videos_dir = "/storage/emulated/0/DCIM/Camera"
return videos_dir
@lru_cache(maxsize=1)
def _android_music_folder() -> str:
    """:return: music folder for the Android OS"""
    # Ask Android for the app-specific music directory via pyjnius; fall back
    # to the conventional shared-storage path when that is unavailable.
    try:
        from jnius import autoclass  # noqa: PLC0415

        ctx = autoclass("android.content.Context")
        env = autoclass("android.os.Environment")
        return ctx.getExternalFilesDir(env.DIRECTORY_MUSIC).getAbsolutePath()
    except Exception:  # noqa: BLE001
        return "/storage/emulated/0/Music"
# Explicit public surface of this module.
__all__ = [
    "Android",
]
| Android |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 5329,
"end": 6528
} | class ____:
"""Test de_AT currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.de_AT import Provider as DeAtCurrencyProvider
cls.provider = DeAtCurrencyProvider
cls.currencies = cls.provider.currencies
cls.currency_names = [currency_name for currency_code, currency_name in cls.currencies]
cls.currency_codes = [currency_code for currency_code, currency_name in cls.currencies]
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
def test_currency(self, faker, num_samples):
for _ in range(num_samples):
cur = faker.currency()
assert cur in self.provider.currencies
def test_currency_name(self, faker, num_samples):
for _ in range(num_samples):
name = faker.currency_name()
assert name in self.currency_names
def test_currency_code(self, faker, num_samples):
for _ in range(num_samples):
code = faker.currency_code()
assert code in self.currency_codes
| TestDeAt |
python | keras-team__keras | keras/src/legacy/saving/legacy_h5_format_test.py | {
"start": 11340,
"end": 20239
} | class ____(testing.TestCase):
    def _check_reloading_model(self, ref_input, model, tf_keras_model):
        """Round-trip `tf_keras_model` through an H5 file and compare outputs.

        NOTE(review): the `model` argument is unused here; callers pass the
        Keras 3 re-implementation for symmetry -- confirm that is intended.
        """
        # Whole model file
        ref_output = tf_keras_model(ref_input)
        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        tf_keras_model.save(temp_filepath)
        loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
        output = loaded(ref_input)
        self.assertAllClose(ref_output, output, atol=1e-5)
    def test_sequential_model(self):
        """A tf.keras Sequential model reloads into Keras 3 with same outputs."""
        model = get_sequential_model(keras)
        tf_keras_model = get_sequential_model(tf_keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_model(ref_input, model, tf_keras_model)
    def test_functional_model(self):
        """A tf.keras functional model reloads into Keras 3 with same outputs."""
        tf_keras_model = get_functional_model(tf_keras)
        model = get_functional_model(keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_model(ref_input, model, tf_keras_model)
    def test_compiled_model_with_various_layers(self):
        """Compiled model with RepeatVector/TimeDistributed layers round-trips."""
        model = models.Sequential()
        model.add(layers.Dense(2, input_shape=(3,)))
        model.add(layers.RepeatVector(3))
        model.add(layers.TimeDistributed(layers.Dense(3)))
        model.compile(optimizer="rmsprop", loss="mse")
        tf_keras_model = tf_keras.Sequential()
        tf_keras_model.add(tf_keras.layers.Dense(2, input_shape=(3,)))
        tf_keras_model.add(tf_keras.layers.RepeatVector(3))
        tf_keras_model.add(
            tf_keras.layers.TimeDistributed(tf_keras.layers.Dense(3))
        )
        tf_keras_model.compile(optimizer="rmsprop", loss="mean_squared_error")
        ref_input = np.random.random((1, 3))
        self._check_reloading_model(ref_input, model, tf_keras_model)
    def test_saving_lambda(self):
        """Lambda layers load only with safe_mode=False (arbitrary code risk)."""
        mean = np.random.random((4, 2, 3))
        std = np.abs(np.random.random((4, 2, 3))) + 1e-5
        inputs = tf_keras.layers.Input(shape=(4, 2, 3))
        output = tf_keras.layers.Lambda(
            lambda image, mu, std: (image - mu) / std,
            arguments={"mu": mean, "std": std},
            output_shape=inputs.shape,
        )(inputs)
        tf_keras_model = tf_keras.Model(inputs, output)
        tf_keras_model.compile(
            loss="mean_squared_error", optimizer="sgd", metrics=["acc"]
        )
        temp_filepath = os.path.join(self.get_temp_dir(), "lambda_model.h5")
        tf_keras_model.save(temp_filepath)
        # The default safe mode must refuse to deserialize the lambda.
        with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
            legacy_h5_format.load_model_from_hdf5(temp_filepath)
        loaded = legacy_h5_format.load_model_from_hdf5(
            temp_filepath, safe_mode=False
        )
        self.assertAllClose(mean, loaded.layers[1].arguments["mu"])
        self.assertAllClose(std, loaded.layers[1].arguments["std"])
    def test_saving_include_optimizer_false(self):
        """A model saved without its optimizer loads with no optimizer attr."""
        tf_keras_model = tf_keras.Sequential()
        tf_keras_model.add(tf_keras.layers.Dense(1))
        tf_keras_model.compile("adam", loss="mse")
        x, y = np.ones((10, 10)), np.ones((10, 1))
        tf_keras_model.fit(x, y)
        ref_output = tf_keras_model(x)
        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        tf_keras_model.save(temp_filepath, include_optimizer=False)
        loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
        output = loaded(x)
        # Assert that optimizer does not exist in loaded model
        with self.assertRaises(AttributeError):
            _ = loaded.optimizer
        # Compare output
        self.assertAllClose(ref_output, output, atol=1e-5)
    def test_custom_sequential_registered_no_scope(self):
        """A registered custom layer in a Sequential model reloads by registry."""
        @tf_keras.saving.register_keras_serializable(package="my_package")
        class MyDense(tf_keras.layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)
        inputs = tf_keras.layers.Input(shape=[1])
        custom_layer = MyDense(1)
        tf_keras_model = tf_keras.Sequential(layers=[inputs, custom_layer])
        # Re-implement and re-register in Keras 3
        @object_registration.register_keras_serializable(package="my_package")
        class MyDense(layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)
        inputs = layers.Input(shape=[1])
        custom_layer = MyDense(1)
        model = models.Sequential(layers=[inputs, custom_layer])
        ref_input = np.array([5])
        self._check_reloading_model(ref_input, model, tf_keras_model)
    def test_custom_functional_registered_no_scope(self):
        """A registered custom layer in a functional model reloads by registry."""
        @tf_keras.saving.register_keras_serializable(package="my_package")
        class MyDense(tf_keras.layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)
        inputs = tf_keras.layers.Input(shape=[1])
        outputs = MyDense(1)(inputs)
        tf_keras_model = tf_keras.Model(inputs, outputs)
        # Re-implement and re-register in Keras 3
        @object_registration.register_keras_serializable(package="my_package")
        class MyDense(layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)
        inputs = layers.Input(shape=[1])
        outputs = MyDense(1)(inputs)
        model = models.Model(inputs, outputs)
        ref_input = np.array([5])
        self._check_reloading_model(ref_input, model, tf_keras_model)
    def test_nested_layers(self):
        """A custom container layer with nested sublayers round-trips via H5."""
        class MyLayer(tf_keras.layers.Layer):
            def __init__(self, sublayers, **kwargs):
                super().__init__(**kwargs)
                self.sublayers = sublayers
            def call(self, x):
                prev_input = x
                for layer in self.sublayers:
                    prev_input = layer(prev_input)
                return prev_input
            def get_config(self):
                config = super().get_config()
                config["sublayers"] = tf_keras.saving.serialize_keras_object(
                    self.sublayers
                )
                return config
            @classmethod
            def from_config(cls, config):
                config["sublayers"] = tf_keras.saving.deserialize_keras_object(
                    config["sublayers"]
                )
                return cls(**config)
        @tf_keras.saving.register_keras_serializable(package="Foo")
        class RegisteredSubLayer(layers.Layer):
            def call(self, x):
                return x
        layer = MyLayer(
            [
                tf_keras.layers.Dense(2, name="MyDense"),
                RegisteredSubLayer(name="MySubLayer"),
            ]
        )
        tf_keras_model = tf_keras.Sequential([layer])
        x = np.random.random((4, 2))
        ref_output = tf_keras_model(x)
        # Save TF Keras model to H5 file
        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        tf_keras_model.save(temp_filepath)
        # Re-implement in Keras 3
        class MyLayer(layers.Layer):
            def __init__(self, sublayers, **kwargs):
                super().__init__(**kwargs)
                self.sublayers = sublayers
            def call(self, x):
                prev_input = x
                for layer in self.sublayers:
                    prev_input = layer(prev_input)
                return prev_input
            def get_config(self):
                config = super().get_config()
                config["sublayers"] = serialization_lib.serialize_keras_object(
                    self.sublayers
                )
                return config
            @classmethod
            def from_config(cls, config):
                config["sublayers"] = (
                    serialization_lib.deserialize_keras_object(
                        config["sublayers"]
                    )
                )
                return cls(**config)
        # Re-implement and re-register in Keras 3
        @object_registration.register_keras_serializable(package="Foo")
        class RegisteredSubLayer(layers.Layer):
            def call(self, x):
                return x
        # Load in Keras 3
        loaded_model = legacy_h5_format.load_model_from_hdf5(
            temp_filepath, custom_objects={"MyLayer": MyLayer}
        )
        loaded_layer = loaded_model.layers[0]
        output = loaded_model(x)
        # Ensure nested layer structure
        self.assertIsInstance(loaded_layer.sublayers[0], layers.Dense)
        self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
        self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
        self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")
        # Compare output
        self.assertAllClose(ref_output, output, atol=1e-5)
@pytest.mark.requires_trainable_backend
| LegacyH5BackwardsCompatTest |
python | huggingface__transformers | tests/models/idefics2/test_image_processing_idefics2.py | {
"start": 1117,
"end": 6478
} | class ____:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_images=1,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_convert_rgb=True,
        do_pad=True,
        do_image_splitting=True,
    ):
        # Fallback resize configuration when the caller does not supply one.
        size = size if size is not None else {"shortest_edge": 378, "longest_edge": 980}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_images = num_images
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_convert_rgb = do_convert_rgb
        self.do_pad = do_pad
        self.do_image_splitting = do_image_splitting
    def prepare_image_processor_dict(self):
        """Kwargs dict used to construct the image processor under test."""
        return {
            "do_convert_rgb": self.do_convert_rgb,
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
            "do_image_splitting": self.do_image_splitting,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after the aspect-preserving resize.

        Caps the longer side at `longest_edge` while keeping aspect ratio,
        then floors both sides at `shortest_edge`.  For batched input,
        returns the max height and max width over all images.
        """
        if not batched:
            shortest_edge = self.size["shortest_edge"]
            longest_edge = self.size["longest_edge"]
            image = image_inputs[0]
            # Accept PIL images (w, h), numpy arrays (h, w, c) and tensors (c, h, w).
            if isinstance(image, Image.Image):
                w, h = image.size
            elif isinstance(image, np.ndarray):
                h, w = image.shape[0], image.shape[1]
            else:
                h, w = image.shape[1], image.shape[2]
            aspect_ratio = w / h
            if w > h and w >= longest_edge:
                w = longest_edge
                h = int(w / aspect_ratio)
            elif h > w and h >= longest_edge:
                h = longest_edge
                w = int(h * aspect_ratio)
            w = max(w, shortest_edge)
            h = max(h, shortest_edge)
            expected_height = h
            expected_width = w
        else:
            expected_values = []
            for images in image_inputs:
                for image in images:
                    expected_height, expected_width = self.get_expected_values([image])
                    expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def expected_output_image_shape(self, images):
        """Expected (patches, channels, height, width) of the processor output."""
        height, width = self.get_expected_values(images, batched=True)
        # Splitting yields 5 patches per image.  NOTE(review): due to
        # conditional-expression precedence the non-splitting branch is a
        # constant 1 and ignores self.num_images -- confirm this is intended.
        effective_nb_images = self.num_images * 5 if self.do_image_splitting else 1
        return effective_nb_images, self.num_channels, height, width
    def prepare_image_inputs(
        self,
        batch_size=None,
        min_resolution=None,
        max_resolution=None,
        num_channels=None,
        num_images=None,
        size_divisor=None,
        equal_resolution=False,
        numpify=False,
        torchify=False,
    ):
        """Random batches of images as nested lists of PIL, numpy or torch inputs."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        # Any None argument falls back to the tester's configured default.
        batch_size = batch_size if batch_size is not None else self.batch_size
        min_resolution = min_resolution if min_resolution is not None else self.min_resolution
        max_resolution = max_resolution if max_resolution is not None else self.max_resolution
        num_channels = num_channels if num_channels is not None else self.num_channels
        num_images = num_images if num_images is not None else self.num_images
        images_list = []
        for i in range(batch_size):
            images = []
            for j in range(num_images):
                if equal_resolution:
                    width = height = max_resolution
                else:
                    if size_divisor is not None:
                        min_resolution = max(size_divisor, min_resolution)
                    width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
                images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
            images_list.append(images)
        if not numpify and not torchify:
            images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
        if torchify:
            images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
        if numpify:
            images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]
        return images_list
@require_torch
@require_vision
| Idefics2ImageProcessingTester |
python | spyder-ide__spyder | spyder/plugins/preferences/api.py | {
"start": 188,
"end": 266
} | class ____:
Show = 'show_action'
Reset = 'reset_action'
| PreferencesActions |
python | kamyu104__LeetCode-Solutions | Python/fruits-into-baskets-ii.py | {
"start": 63,
"end": 1910
} | class ____(object):
def numOfUnplacedFruits(self, fruits, baskets):
"""
:type fruits: List[int]
:type baskets: List[int]
:rtype: int
"""
class SegmentTree(object):
def __init__(self, N,
build_fn=lambda _: 0,
query_fn=lambda x, y: y if x is None else x if y is None else max(x, y),
update_fn=lambda x: x):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
def binary_search(self, x):
if self.tree[1] < x:
return -1
i = 1
while not i >= self.base:
if self.tree[2*i] >= x:
i = 2*i
else:
i = 2*i+1
return i-self.base
def build(i):
return baskets[i]
st = SegmentTree(len(baskets), build_fn=build)
result = 0
for x in fruits:
i = st.binary_search(x)
if i == -1:
result += 1
else:
st.update(i, 0)
return result
# Time: O(n^2)
# Space: O(1)
# brute force
| Solution |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_unique.py | {
"start": 665,
"end": 3486
} | class ____(ColumnMapMetricProvider):
    condition_metric_name = "column_values.unique"
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # keep=False flags *every* member of a duplicate group, so the
        # inverted mask is True exactly for values occurring once.
        return ~column.duplicated(keep=False)
    # NOTE: 20201119 - JPC - We cannot split per-dialect into window and non-window functions
    # @column_condition_partial(
    #     engine=SqlAlchemyExecutionEngine,
    # )
    # def _sqlalchemy(cls, column, _table, **kwargs):
    #     dup_query = (
    #         sa.select(column)
    #         .select_from(_table)
    #         .group_by(column)
    #         .having(sa.func.count(column) > 1)
    #     )
    #
    #     return column.notin_(dup_query)
    @column_condition_partial(
        engine=SqlAlchemyExecutionEngine,
        partial_fn_type=MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
    )
    def _sqlalchemy_window(cls, column, _table, **kwargs):
        """Condition: column value is NOT IN the set of duplicated values."""
        # Will - 20210126
        # This is a special case that needs to be handled for mysql, where you cannot refer to a temp_table # noqa: E501 # FIXME CoP
        # more than once in the same query. So instead of passing dup_query as-is, a second temp_table is created with # noqa: E501 # FIXME CoP
        # the column we will be performing the expectation on, and the query is performed against it. # noqa: E501 # FIXME CoP
        dialect = kwargs.get("_dialect")
        sql_engine = kwargs.get("_sqlalchemy_engine")
        execution_engine = kwargs.get("_execution_engine")
        # Dialect objects differ in shape across SQLAlchemy versions; fall
        # back to "" when no name attribute can be found.
        try:
            dialect_name = dialect.dialect.name
        except AttributeError:
            try:
                dialect_name = dialect.name
            except AttributeError:
                dialect_name = ""
        if sql_engine and dialect and dialect_name == "mysql":
            # Temp-table name is generated internally (not user input), so the
            # f-string DDL below is not an injection vector.
            temp_table_name = generate_temporary_table_name()
            temp_table_stmt = f"CREATE TEMPORARY TABLE {temp_table_name} AS SELECT tmp.{column.name} FROM {_table} tmp"  # noqa: E501 # FIXME CoP
            execution_engine.execute_query_in_transaction(sa.text(temp_table_stmt))
            dup_query = (
                sa.select(column)
                .select_from(sa.text(temp_table_name))
                .group_by(column)
                .having(sa.func.count(column) > 1)
            )
        else:
            dup_query = (
                sa.select(column)
                .select_from(_table)
                .group_by(column)
                .having(sa.func.count(column) > 1)
            )
        return column.notin_(dup_query)
    @column_condition_partial(
        engine=SparkDFExecutionEngine,
        partial_fn_type=MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
    )
    def _spark(cls, column, **kwargs):
        # Count rows per value with a window partition; unique values have
        # a partition count of exactly 1.
        return F.count(F.lit(1)).over(pyspark.Window.partitionBy(column)) <= 1
| ColumnValuesUnique |
python | tensorflow__tensorflow | tensorflow/python/autograph/core/ag_ctx.py | {
"start": 2310,
"end": 3093
} | class ____(object):
"""Helper substitute for contextlib.nullcontext."""
def __enter__(self):
pass
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
def _default_control_status_ctx():
  """Returns a ControlStatusCtx carrying the UNSPECIFIED autograph status."""
  return ControlStatusCtx(status=Status.UNSPECIFIED)
# Probe once at import time whether function source code is retrievable in
# this environment; AutoGraph needs source access, which is missing e.g. in
# the interactive Python shell.
INSPECT_SOURCE_SUPPORTED = True
try:
  inspect.getsource(ag_logging.log)
except OSError:
  INSPECT_SOURCE_SUPPORTED = False
  ag_logging.warning(
      'AutoGraph is not available in this environment: functions lack code'
      ' information. This is typical of some environments like the interactive'
      ' Python shell. See'
      ' https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md#access-to-source-code'
      ' for more information.')
| NullCtx |
python | getsentry__sentry | src/sentry/notifications/utils/__init__.py | {
"start": 17089,
"end": 18507
} | class ____(PerformanceProblemContext):
    def to_dict(self) -> dict[str, str | float | list[str]]:
        """Serialize the fields consumed by the notification template."""
        return {
            "transaction_name": self.transaction,
            "repeating_spans": self.path_prefix,
            "parameters": self.parameters,
            "num_repeating_spans": (
                str(len(self.problem.offender_span_ids)) if self.problem.offender_span_ids else ""
            ),
        }
    @property
    def path_prefix(self) -> str:
        """URL path of the repeating span ("" when no span/path is available)."""
        if not self.repeating_spans or len(self.repeating_spans) == 0:
            return ""
        url = get_url_from_span(self.repeating_spans)
        parsed_url = urlparse(url)
        # Only the path component; query strings are surfaced via `parameters`.
        return parsed_url.path or ""
    @property
    def parameters(self) -> list[str]:
        """Query parameters of the offending spans, merged per key.

        Rendered as "{key: v1,v2}" strings for display.
        """
        if not self.spans or len(self.spans) == 0:
            return []
        # Only spans flagged as offenders for this problem contribute URLs.
        urls = [
            get_url_from_span(span)
            for span in self.spans
            if span.get("span_id") in self.problem.offender_span_ids
        ]
        all_parameters: Mapping[str, list[str]] = defaultdict(list)
        for url in urls:
            parsed_url = urlparse(url)
            parameters = parse_qs(parsed_url.query)
            for key, value in parameters.items():
                all_parameters[key] += value
        return [
            "{{{}: {}}}".format(key, ",".join(values)) for key, values in all_parameters.items()
        ]
| NPlusOneAPICallProblemContext |
python | facebook__pyre-check | tools/generate_taint_models/tests/get_exit_nodes_test.py | {
"start": 362,
"end": 2070
} | class ____(unittest.TestCase):
    def test_compute_models(self) -> None:
        """Every function gets a ReturnedToUser sink model; whitelisted views are skipped."""
        self.maxDiff = None
        sink = "TaintSink[ReturnedToUser]"
        self.assertEqual(
            [
                *map(
                    str,
                    ExitNodeGenerator(django_urls=MagicMock()).compute_models(
                        all_functions
                    ),
                )
            ],
            [
                f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
                f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
                f"def {qualifier}.testA() -> {sink}: ...",
                f"def {qualifier}.testB(x) -> {sink}: ...",
                f"def {qualifier}.testC(x) -> {sink}: ...",
                f"def {qualifier}.testD(x, *args) -> {sink}: ...",
                f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
            ],
        )
        # A whitelisted view must be omitted from the generated models.
        self.assertEqual(
            [
                *map(
                    str,
                    ExitNodeGenerator(
                        django_urls=MagicMock(),
                        whitelisted_views=[f"{qualifier}.TestClass.methodA"],
                    ).compute_models(all_functions),
                )
            ],
            [
                f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
                f"def {qualifier}.testA() -> {sink}: ...",
                f"def {qualifier}.testB(x) -> {sink}: ...",
                f"def {qualifier}.testC(x) -> {sink}: ...",
                f"def {qualifier}.testD(x, *args) -> {sink}: ...",
                f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
            ],
        )
| GetExitNodesTest |
python | python-openxml__python-docx | src/docx/image/image.py | {
"start": 6381,
"end": 8005
} | class ____:
"""Base class for image header subclasses like |Jpeg| and |Tiff|."""
def __init__(self, px_width: int, px_height: int, horz_dpi: int, vert_dpi: int):
self._px_width = px_width
self._px_height = px_height
self._horz_dpi = horz_dpi
self._vert_dpi = vert_dpi
@property
def content_type(self) -> str:
"""Abstract property definition, must be implemented by all subclasses."""
msg = "content_type property must be implemented by all subclasses of BaseImageHeader"
raise NotImplementedError(msg)
@property
def default_ext(self) -> str:
"""Default filename extension for images of this type.
An abstract property definition, must be implemented by all subclasses.
"""
raise NotImplementedError(
"default_ext property must be implemented by all subclasses of BaseImageHeader"
)
@property
def px_width(self):
"""The horizontal pixel dimension of the image."""
return self._px_width
@property
def px_height(self):
"""The vertical pixel dimension of the image."""
return self._px_height
@property
def horz_dpi(self):
"""Integer dots per inch for the width of this image.
Defaults to 72 when not present in the file, as is often the case.
"""
return self._horz_dpi
@property
def vert_dpi(self):
"""Integer dots per inch for the height of this image.
Defaults to 72 when not present in the file, as is often the case.
"""
return self._vert_dpi
| BaseImageHeader |
python | getsentry__sentry | src/sentry/sentry_metrics/indexer/base.py | {
"start": 1523,
"end": 3064
} | class ____:
"""
A KeyCollection is a way of keeping track of a group of keys
used to fetch ids, whose results are stored in KeyResults.
A key is a org_id, string pair, either represented as a
tuple e.g (1, "a"), or a string "1:a".
Initial mapping is org_id's to sets of strings:
{ 1: {"a", "b", "c"}, 2: {"e", "f"} }
"""
def __init__(self, mapping: Mapping[OrgId, set[str]]):
self.mapping = mapping
self.size = self._size()
def __eq__(self, value: Any) -> bool:
return (
isinstance(value, self.__class__)
and self.size == value.size
and self.mapping == value.mapping
)
def _size(self) -> int:
total_size = 0
for org_id in self.mapping.keys():
total_size += len(self.mapping[org_id])
return total_size
def as_tuples(self) -> Sequence[tuple[int, str]]:
"""
Returns all the keys, each key represented as tuple -> (1, "a")
"""
key_pairs: MutableSequence[tuple[int, str]] = []
for org_id in self.mapping:
key_pairs.extend([(org_id, string) for string in self.mapping[org_id]])
return key_pairs
def as_strings(self) -> Sequence[str]:
"""
Returns all the keys, each key represented as string -> "1:a"
"""
keys: MutableSequence[str] = []
for org_id in self.mapping:
keys.extend([f"{org_id}:{string}" for string in self.mapping[org_id]])
return keys
| KeyCollection |
python | astropy__astropy | astropy/utils/iers/tests/test_leap_second.py | {
"start": 14883,
"end": 15436
} | class ____:
"""Base class for tests that change the ERFA leap-second tables.
It ensures the original state is restored.
"""
def setup_method(self):
# Keep current leap-second table and expiration.
self.erfa_ls = self._erfa_ls = erfa.leap_seconds.get()
self.erfa_expires = self._expires = erfa.leap_seconds._expires
def teardown_method(self):
# Restore leap-second table and expiration.
erfa.leap_seconds.set(self.erfa_ls)
erfa.leap_seconds._expires = self._expires
| ERFALeapSecondsSafe |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_betweenness_centrality.py | {
"start": 16088,
"end": 24867
} | class ____:
    def test_K5(self):
        """Weighted betweenness centrality: K5"""
        # Complete graph: no node lies on any shortest path between others.
        G = nx.complete_graph(5)
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
    def test_P3_normalized(self):
        """Weighted betweenness centrality: P3 normalized"""
        G = nx.path_graph(3)
        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
    def test_P3(self):
        """Weighted betweenness centrality: P3"""
        G = nx.path_graph(3)
        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
    def test_krackhardt_kite_graph(self):
        """Weighted betweenness centrality: Krackhardt kite graph"""
        G = nx.krackhardt_kite_graph()
        b_answer = {
            0: 1.667,
            1: 1.667,
            2: 0.000,
            3: 7.333,
            4: 0.000,
            5: 16.667,
            6: 16.667,
            7: 28.000,
            8: 16.000,
            9: 0.000,
        }
        # NOTE(review): reference values appear doubled relative to
        # networkx's unnormalized convention; halved here to match.
        for b in b_answer:
            b_answer[b] /= 2
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
    def test_krackhardt_kite_graph_normalized(self):
        """Weighted betweenness centrality:
        Krackhardt kite graph normalized
        """
        G = nx.krackhardt_kite_graph()
        b_answer = {
            0: 0.023,
            1: 0.023,
            2: 0.000,
            3: 0.102,
            4: 0.000,
            5: 0.231,
            6: 0.231,
            7: 0.389,
            8: 0.222,
            9: 0.000,
        }
        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
    def test_florentine_families_graph(self):
        """Weighted betweenness centrality:
        Florentine families graph"""
        G = nx.florentine_families_graph()
        b_answer = {
            "Acciaiuoli": 0.000,
            "Albizzi": 0.212,
            "Barbadori": 0.093,
            "Bischeri": 0.104,
            "Castellani": 0.055,
            "Ginori": 0.000,
            "Guadagni": 0.255,
            "Lamberteschi": 0.000,
            "Medici": 0.522,
            "Pazzi": 0.000,
            "Peruzzi": 0.022,
            "Ridolfi": 0.114,
            "Salviati": 0.143,
            "Strozzi": 0.103,
            "Tornabuoni": 0.092,
        }
        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
    def test_les_miserables_graph(self):
        """Weighted betweenness centrality: Les Miserables graph"""
        G = nx.les_miserables_graph()
        b_answer = {
            "Napoleon": 0.000,
            "Myriel": 0.177,
            "MlleBaptistine": 0.000,
            "MmeMagloire": 0.000,
            "CountessDeLo": 0.000,
            "Geborand": 0.000,
            "Champtercier": 0.000,
            "Cravatte": 0.000,
            "Count": 0.000,
            "OldMan": 0.000,
            "Valjean": 0.454,
            "Labarre": 0.000,
            "Marguerite": 0.009,
            "MmeDeR": 0.000,
            "Isabeau": 0.000,
            "Gervais": 0.000,
            "Listolier": 0.000,
            "Tholomyes": 0.066,
            "Fameuil": 0.000,
            "Blacheville": 0.000,
            "Favourite": 0.000,
            "Dahlia": 0.000,
            "Zephine": 0.000,
            "Fantine": 0.114,
            "MmeThenardier": 0.046,
            "Thenardier": 0.129,
            "Cosette": 0.075,
            "Javert": 0.193,
            "Fauchelevent": 0.026,
            "Bamatabois": 0.080,
            "Perpetue": 0.000,
            "Simplice": 0.001,
            "Scaufflaire": 0.000,
            "Woman1": 0.000,
            "Judge": 0.000,
            "Champmathieu": 0.000,
            "Brevet": 0.000,
            "Chenildieu": 0.000,
            "Cochepaille": 0.000,
            "Pontmercy": 0.023,
            "Boulatruelle": 0.000,
            "Eponine": 0.023,
            "Anzelma": 0.000,
            "Woman2": 0.000,
            "MotherInnocent": 0.000,
            "Gribier": 0.000,
            "MmeBurgon": 0.026,
            "Jondrette": 0.000,
            "Gavroche": 0.285,
            "Gillenormand": 0.024,
            "Magnon": 0.005,
            "MlleGillenormand": 0.036,
            "MmePontmercy": 0.005,
            "MlleVaubois": 0.000,
            "LtGillenormand": 0.015,
            "Marius": 0.072,
            "BaronessT": 0.004,
            "Mabeuf": 0.089,
            "Enjolras": 0.003,
            "Combeferre": 0.000,
            "Prouvaire": 0.000,
            "Feuilly": 0.004,
            "Courfeyrac": 0.001,
            "Bahorel": 0.007,
            "Bossuet": 0.028,
            "Joly": 0.000,
            "Grantaire": 0.036,
            "MotherPlutarch": 0.000,
            "Gueulemer": 0.025,
            "Babet": 0.015,
            "Claquesous": 0.042,
            "Montparnasse": 0.050,
            "Toussaint": 0.011,
            "Child1": 0.000,
            "Child2": 0.000,
            "Brujon": 0.002,
            "MmeHucheloup": 0.034,
        }
        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
    def test_ladder_graph(self):
        """Weighted betweenness centrality: Ladder graph"""
        G = nx.Graph()  # ladder_graph(3)
        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
        b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}
        # NOTE(review): reference values appear doubled relative to
        # networkx's unnormalized convention; halved here to match.
        for b in b_answer:
            b_answer[b] /= 2
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
    def test_G(self):
        """Weighted betweenness centrality: G"""
        G = weighted_G()
        b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
    def test_G2(self):
        """Weighted betweenness centrality: G2"""
        G = nx.DiGraph()
        G.add_weighted_edges_from(
            [
                ("s", "u", 10),
                ("s", "x", 5),
                ("u", "v", 1),
                ("u", "x", 2),
                ("v", "y", 1),
                ("x", "u", 3),
                ("x", "v", 5),
                ("x", "y", 2),
                ("y", "s", 7),
                ("y", "v", 6),
            ]
        )
        b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
    def test_G3(self):
        """Weighted betweenness centrality: G3"""
        # Multigraph with duplicated edges must match the simple-graph answer.
        G = nx.MultiGraph(weighted_G())
        es = list(G.edges(data=True))[::2]  # duplicate every other edge
        G.add_edges_from(es)
        b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
    def test_G4(self):
        """Weighted betweenness centrality: G4"""
        # Directed multigraph with parallel edges must match the G2 answer.
        G = nx.MultiDiGraph()
        G.add_weighted_edges_from(
            [
                ("s", "u", 10),
                ("s", "x", 5),
                ("s", "x", 6),
                ("u", "v", 1),
                ("u", "x", 2),
                ("v", "y", 1),
                ("v", "y", 1),
                ("x", "u", 3),
                ("x", "v", 5),
                ("x", "y", 2),
                ("x", "y", 3),
                ("y", "s", 7),
                ("y", "v", 6),
                ("y", "v", 6),
            ]
        )
        b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}
        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
        for n in sorted(G):
            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
| TestWeightedBetweennessCentrality |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 119678,
"end": 120704
} | class ____(TestCase):
    def test_normal(self):
        """Default operator (subtraction) yields consecutive deltas."""
        iterable = [10, 20, 30, 40, 50]
        actual = list(mi.difference(iterable))
        expected = [10, 10, 10, 10, 10]
        self.assertEqual(actual, expected)
    def test_custom(self):
        """A custom binary function (here: add) replaces subtraction."""
        iterable = [10, 20, 30, 40, 50]
        actual = list(mi.difference(iterable, add))
        expected = [10, 30, 50, 70, 90]
        self.assertEqual(actual, expected)
    def test_roundtrip(self):
        """difference() inverts itertools.accumulate()."""
        original = list(range(100))
        accumulated = accumulate(original)
        actual = list(mi.difference(accumulated))
        self.assertEqual(actual, original)
    def test_one(self):
        # A single-element input is returned unchanged.
        self.assertEqual(list(mi.difference([0])), [0])
    def test_empty(self):
        # An empty input produces an empty result.
        self.assertEqual(list(mi.difference([])), [])
    def test_initial(self):
        """The `initial` keyword mirrors accumulate(initial=...) on round-trip."""
        original = list(range(100))
        accumulated = accumulate(original, initial=100)
        actual = list(mi.difference(accumulated, initial=100))
        self.assertEqual(actual, original)
| DifferenceTest |
python | tensorflow__tensorflow | tensorflow/python/util/lazy_loader.py | {
"start": 3736,
"end": 7820
} | class ____(LazyLoader):
"""LazyLoader that handles routing to different Keras version."""
def __init__( # pylint: disable=super-init-not-called
self, parent_module_globals, mode=None, submodule=None, name="keras"):
self._tfll_parent_module_globals = parent_module_globals
self._tfll_mode = mode
self._tfll_submodule = submodule
self._tfll_name = name
self._tfll_initialized = False
def _initialize(self):
"""Resolve the Keras version to use and initialize the loader."""
self._tfll_initialized = True
package_name = None
keras_version = None
if os.environ.get("TF_USE_LEGACY_KERAS", None) in ("true", "True", "1"):
try:
import tf_keras # pylint: disable=g-import-not-at-top,unused-import
keras_version = "tf_keras"
if self._tfll_mode == "v1":
package_name = "tf_keras.api._v1.keras"
else:
package_name = "tf_keras.api._v2.keras"
except ImportError:
logging.warning(
"Your environment has TF_USE_LEGACY_KERAS set to True, but you "
"do not have the tf_keras package installed. You must install it "
"in order to use the legacy tf.keras. Install it via: "
"`pip install tf_keras`"
)
else:
try:
import keras # pylint: disable=g-import-not-at-top
if keras.__version__.startswith("3."):
# This is the Keras 3.x case.
keras_version = "keras_3"
package_name = "keras._tf_keras.keras"
else:
# This is the Keras 2.x case.
keras_version = "keras_2"
if self._tfll_mode == "v1":
package_name = "keras.api._v1.keras"
else:
package_name = "keras.api._v2.keras"
except ImportError:
raise ImportError( # pylint: disable=raise-missing-from
"Keras cannot be imported. Check that it is installed."
)
self._tfll_keras_version = keras_version
if keras_version is not None:
if self._tfll_submodule is not None:
package_name += "." + self._tfll_submodule
super().__init__(
self._tfll_name, self._tfll_parent_module_globals, package_name
)
else:
raise ImportError( # pylint: disable=raise-missing-from
"Keras cannot be imported. Check that it is installed."
)
def __getattr__(self, item):
if item in ("_tfll_mode", "_tfll_initialized", "_tfll_name"):
return super(types.ModuleType, self).__getattribute__(item)
if not self._tfll_initialized:
self._initialize()
if self._tfll_keras_version == "keras_3":
if (
self._tfll_mode == "v1"
and not self._tfll_submodule
and item.startswith("compat.v1.")
):
raise AttributeError(
"`tf.compat.v1.keras` is not available with Keras 3. Keras 3 has "
"no support for TF 1 APIs. You can install the `tf_keras` package "
"as an alternative, and set the environment variable "
"`TF_USE_LEGACY_KERAS=True` to configure TensorFlow to route "
"`tf.compat.v1.keras` to `tf_keras`."
)
elif (
self._tfll_mode == "v2"
and not self._tfll_submodule
and item.startswith("compat.v2.")
):
raise AttributeError(
"`tf.compat.v2.keras` is not available with Keras 3. Just use "
"`import keras` instead."
)
elif self._tfll_submodule and self._tfll_submodule.startswith(
"__internal__.legacy."
):
raise AttributeError(
f"`{item}` is not available with Keras 3."
)
module = self._load()
return getattr(module, item)
def __repr__(self):
if self._tfll_initialized:
return (
f"<KerasLazyLoader ({self._tfll_keras_version}) "
f"{self.__name__} as {self._tfll_local_name} mode={self._tfll_mode}>"
)
return "<KerasLazyLoader>"
def __dir__(self):
if not self._tfll_initialized:
self._initialize()
return super().__dir__()
| KerasLazyLoader |
python | pytorch__pytorch | torch/distributed/_pycute/layout.py | {
"start": 2713,
"end": 17979
} | class ____(LayoutBase):
def __init__(self, _shape: IntTuple, _stride: Optional[IntTuple] = None) -> None:
self.shape = _shape
if _stride is None:
self.stride = suffix_product(self.shape)
else:
self.stride = _stride
# operator ==
def __eq__(self, other: object) -> bool:
if not isinstance(other, Layout):
return False
return self.shape == other.shape and self.stride == other.stride
# operator len(L) (len [rank] like tuples)
def __len__(self) -> int:
if is_tuple(self.shape):
return len(self.shape)
else:
return 1
# operator () (map coord to idx)
def __call__(self, *args: CoordinateType) -> Union["Layout", int]:
"""
Map a logical coordinate to a linear index (Coord has no Underscore slice operators)
OR
Slice the layout and return the sublayout (Coord has an Underscore slice op)
Follow the same behavior of `Layout::operator(Coord const&)` in cute C++
"""
if has_none(args):
if len(args) == 1:
return Layout(slice_(args[0], self.shape), slice_(args[0], self.stride))
else:
return Layout(slice_(args, self.shape), slice_(args, self.stride))
else:
if len(args) == 1:
return crd2idx(args[0], self.shape, self.stride) # type: ignore[arg-type]
else:
return crd2idx(args, self.shape, self.stride) # type: ignore[arg-type]
# operator [] (get-i like tuples)
def __getitem__(self, i: int) -> "Layout":
if is_tuple(self.shape):
return Layout(self.shape[i], self.stride[i]) # type: ignore[index]
else:
assert i == 0
return Layout(self.shape, self.stride)
# size(layout) Size of the domain
def size(self) -> int:
return product(self.shape)
# cosize(layout) Size of the codomain
def cosize(self) -> int:
return self(self.size() - 1) + 1 # type: ignore[operator]
# print and str
def __str__(self) -> str:
return f"{self.shape}:{self.stride}"
# error msgs and representation
def __repr__(self) -> str:
return f"Layout({self.shape},{self.stride})"
# Make Layout from a list of layouts (each layout it's own mode in the result)
def make_layout(*layouts: Union[Layout, tuple[Layout, ...]]) -> Layout:
if len(layouts) == 1 and not is_layout(layouts[0]):
layouts = layouts[0]
shape, stride = zip(*((a.shape, a.stride) for a in layouts)) # type: ignore[union-attr]
return Layout(shape, stride)
# Size of the domain
def size(layout: LayoutOrIntTuple) -> int:
if is_layout(layout):
return layout.size()
return product(layout)
# Size of the codomain
def cosize(layout: Layout) -> int:
return layout.cosize()
# Layout coalesce -- flatten and combine as many modes as possible while preserving the int-to-int function
def coalesce(layout: Layout, profile: LayoutProfile = None) -> Layout:
if is_tuple(profile):
assert len(layout) >= len(profile)
return make_layout(
chain(
(coalesce(layout[i], profile[i]) for i in range(len(profile))), # type: ignore[arg-type]
(layout[i] for i in range(len(profile), len(layout))),
)
)
result_shape = [1]
result_stride = [0]
# Since we now follow lexicographic order, we need to process from right to left.
# And to make implementation more efficient, we append to the end of list and reverse it in the end.
for shape, stride in zip(
reversed(flatten(layout.shape)), reversed(flatten(layout.stride))
):
# skip their shape-1s
if shape == 1:
continue
# replace our shape-1 with anything
elif result_shape[-1] == 1:
result_shape[-1] = shape
result_stride[-1] = stride
# merge modes if the shape*stride match
elif result_shape[-1] * result_stride[-1] == stride:
result_shape[-1] = result_shape[-1] * shape
# append a new mode
else:
result_shape.append(shape)
result_stride.append(stride)
if len(result_shape) == 1:
return Layout(result_shape[0], result_stride[0])
else:
result_shape.reverse()
result_stride.reverse()
return Layout(tuple(result_shape), tuple(result_stride))
# Layout filter -- replace all stride-0 modes with size-1 and then coalesce to remove them
def filter(layout: Layout, profile: LayoutProfile = None) -> Layout:
if is_tuple(profile):
assert len(layout) >= len(profile)
return make_layout(
chain(
(filter(layout[i], profile[i]) for i in range(len(profile))), # type: ignore[arg-type]
(layout[i] for i in range(len(profile), len(layout))),
)
)
result_shape = []
result_stride = []
for shape, stride in zip(flatten(layout.shape), flatten(layout.stride)):
# skip their shape-1s and stride-0s
if not (shape == 1 or stride == 0):
result_shape.append(shape)
result_stride.append(stride)
if len(result_shape) == 0:
return Layout(1, 0)
else:
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout composition
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def composition(layoutA: Layout, layoutB: LayoutInput) -> Layout:
if layoutB is None:
return layoutA
elif is_int(layoutB):
return composition(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(
chain(
(composition(layoutA[i], layoutB[i]) for i in range(len(layoutB))), # type: ignore[arg-type]
(layoutA[i] for i in range(len(layoutB), len(layoutA))),
)
)
elif is_tuple(layoutB.shape):
return make_layout(composition(layoutA, layoutB_i) for layoutB_i in layoutB) # type: ignore[arg-type, attr-defined]
if layoutB.stride == 0:
return Layout(layoutB.shape, 0)
else:
result_shape = []
result_stride = []
rest_shape = layoutB.shape
rest_stride = layoutB.stride
flat_A = coalesce(layoutA)
# when left layout is multi-dimensional sublayout, aka, self = (a,b,...,c):(x,y,...,z), layout = s:d,
# for integral s and d means that we want:
# (1) “remove” the first d elements from left, starting from rightmost. (This will increase the stride.)
# (2) “keep” the first s of those strided elements. (This does not affect the stride.)
# For example, if self = (6,2):(2,1), layout = (3:2)
# Step 1: remove the first 2 elements from self with stride increase, i.e., (6,2):(2,1) -> (6,1):(2,2)
# Step 2: keep the first 3 of those strided elements, i.e., (6,1):(2,2) -> (3,1):(2,2)
# Because we are going lexicographically, we go through left layout from right to left.
for curr_shape, curr_stride in zip(
reversed(flatten(flat_A.shape)[1:]), reversed(flatten(flat_A.stride)[1:])
):
assert curr_shape % rest_stride == 0 or rest_stride % curr_shape == 0 # type: ignore[operator]
new_shape = min(max(1, curr_shape // rest_stride), rest_shape) # type: ignore[operator]
if new_shape != 1:
result_shape.append(new_shape) # Append to end, will reverse later
result_stride.append(rest_stride * curr_stride)
rest_shape = rest_shape // new_shape # type: ignore[operator]
rest_stride = -(
-rest_stride // curr_shape # type: ignore[operator]
) # Python exclusive impl: "//" is always floor div so == ceil_div(abs(rest_stride), curr_shape) * signum(rest_stride)
# When left has single-size sublayout or reach the last sublayout, aka, left = a:b, layout = s:d,
# the result is rather trivial: left o layout = a:b o s:d = s:(b*d).
# For example, if self = (6:2), layout = (3:2), the result is (3:(2*2)) = (3:4).
if rest_shape != 1 or len(result_shape) == 0:
result_shape.append(rest_shape) # Append to end, will reverse later
result_stride.append(rest_stride * flatten(flat_A.stride)[0])
# Reverse the lists because we build lists in reverse order (append to end), this way it is more efficient.
result_shape.reverse()
result_stride.reverse()
if len(result_shape) == 1:
return Layout(result_shape[0], result_stride[0]) # type: ignore[arg-type]
else:
return Layout(tuple(result_shape), tuple(result_stride)) # type: ignore[arg-type]
# Layout complement
def complement(layout: LayoutOrIntTuple, max_idx: int = 1) -> Layout:
if is_int(layout):
return complement(Layout(layout))
result_shape = []
result_stride = []
current_idx = 1
sorted_DS = sorted(zip(flatten(layout.stride), flatten(layout.shape))) # type: ignore[union-attr]
for stride, shape in sorted_DS:
if stride == 0 or shape == 1:
continue
in_bound = current_idx <= shape * stride
# To support symbolic value which can't be evaluated now
assert (type(in_bound) is not bool) or in_bound
result_shape.append(stride // current_idx)
result_stride.append(current_idx)
current_idx = shape * stride
result_shape.append((max_idx + current_idx - 1) // current_idx) # ceil_div
result_stride.append(current_idx)
# This is different from original pycute implementation, because we want to follow the lexicographic order here
# where the right-most dimension is the innermost dimension (smallest stride).
result_shape.reverse()
result_stride.reverse()
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout right inverse
def right_inverse(layout: Optional[LayoutOrIntTuple]) -> Optional[Layout]:
if layout is None:
return None
elif is_int(layout):
return Layout(layout)
result_shape = []
result_stride = []
current_idx = 1
flat_shape = flatten(layout.shape) # type: ignore[union-attr]
flat_stride = flatten(layout.stride) # type: ignore[union-attr]
sorted_DSA = sorted(zip(flat_stride, flat_shape, suffix_product(flat_shape))) # type: ignore[arg-type]
for stride, shape, rstride in sorted_DSA:
if shape == 1:
continue
if current_idx != stride:
break
result_shape.append(shape)
result_stride.append(rstride)
current_idx = shape * stride
result_shape.reverse()
result_stride.reverse()
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout left inverse
def left_inverse(layout: Optional[LayoutOrIntTuple]) -> Optional[Layout]:
if layout is None:
return None
elif is_int(layout):
return Layout(layout)
return right_inverse(make_layout(complement(layout), layout)) # type: ignore[arg-type]
# Split a layout by the composition of B and the "rest"
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def logical_divide(layoutA: Layout, layoutB: LayoutInput) -> Layout:
if layoutB is None:
return layoutA
elif is_int(layoutB):
return logical_divide(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(
chain(
(
logical_divide(layoutA[i], layoutB[i]) # type: ignore[arg-type]
for i in range(len(layoutB))
),
(layoutA[i] for i in range(len(layoutB), len(layoutA))),
)
)
return composition(
layoutA,
make_layout(layoutB, complement(layoutB, size(layoutA))),
)
# Reproduce a layoutA over a layoutB
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def logical_product(layoutA: Layout, layoutB: LayoutInput) -> Layout:
if layoutB is None:
return layoutA
elif is_int(layoutB):
return logical_divide(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(
chain(
(
logical_product(layoutA[i], layoutB[i]) # type: ignore[arg-type]
for i in range(len(layoutB))
),
(layoutA[i] for i in range(len(layoutB), len(layoutA))),
)
)
return make_layout(
layoutA,
composition(complement(layoutA, size(layoutA) * cosize(layoutB)), layoutB),
)
# Gather the modes from a hierarchical logical_divide or logical_product
def hier_unzip(
splitter: object,
layoutA: Layout,
layoutB: LayoutInput,
) -> Layout:
if layoutB is None:
return make_layout(Layout(1, 0), layoutA)
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
# A layout with shape ((A,a),(B,b),(C,c))
split = make_layout(
hier_unzip(splitter, layoutA[i], layoutB[i]) # type: ignore[arg-type]
for i in range(len(layoutB))
)
# Gather to shape ((A,B,C,...),(a,b,c,...,y,z))
return make_layout(
make_layout(split[i][0] for i in range(len(layoutB))), # type: ignore[arg-type]
make_layout(
chain( # type: ignore[arg-type]
(split[i][1] for i in range(len(layoutB))),
(layoutA[i] for i in range(len(layoutB), len(layoutA))),
)
),
)
# splitter must return a rank-2 layout
return splitter(layoutA, layoutB) # type: ignore[operator]
# Apply logical divide hierarchically and gather the split modes into two modes
def zipped_divide(layoutA: Layout, layoutB: LayoutInput) -> Layout:
return hier_unzip(logical_divide, layoutA, layoutB)
# Perform logical divide hierarchically and gather tiles (B-layouts) into a new mode
def tiled_divide(layoutA: Layout, layoutB: LayoutInput) -> Layout:
result = zipped_divide(layoutA, layoutB)
return make_layout([result[0]] + [result[1][i] for i in range(len(result[1]))]) # type: ignore[arg-type]
# Apply logical product hierarchically and gather the split modes into two modes
def zipped_product(layoutA: Layout, layoutB: LayoutInput) -> Layout:
return hier_unzip(logical_product, layoutA, layoutB)
# Perform logical product hierarchically and gather tiles (B-layouts) into a new mode
def tiled_product(layoutA: Layout, layoutB: LayoutInput) -> Layout:
result = zipped_product(layoutA, layoutB)
return make_layout([result[0]] + [result[1][i] for i in range(len(result[1]))]) # type: ignore[arg-type]
def slice_and_offset(crd: tuple[object, ...], layout: Layout) -> tuple[Layout, int]:
return (
Layout(slice_(crd, layout.shape), slice_(crd, layout.stride)),
crd2idx(crd, layout.shape, layout.stride), # type: ignore[arg-type]
)
| Layout |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 3102,
"end": 3168
} | class ____(NamedTuple):
x: str = ""
@dataclass(frozen=False)
| NT9 |
python | google__jax | jax/_src/stages.py | {
"start": 14409,
"end": 17607
} | class ____(Stage):
"""Traced form of a function specialized to argument types and values.
A traced computation is ready for lowering. This class carries the
traced representation with the remaining information needed to later
lower, compile, and execute it.
"""
__slots__ = ['_meta_tys_flat', '_params', '_in_tree', 'out_tree', '_consts']
def __init__(self, meta_tys_flat, params, in_tree, out_tree, consts):
self._meta_tys_flat = meta_tys_flat
self._params = params
self._in_tree = in_tree
self.out_tree = out_tree
self._consts = consts
jaxpr = property(lambda self: self._params['jaxpr'])
fun_name = property(lambda self: self._params['name'])
args_info = property(_traced_args_info)
out_info = property(_traced_out_info)
_num_consts = property(lambda self: len(self._consts))
@property
def out_avals(self):
return tree_unflatten(self.out_tree, self.jaxpr.out_avals)
def fall(self):
if not self.jaxpr.is_high:
return Fallen(self._meta_tys_flat, self._params, self._in_tree,
self.out_tree, (self._in_tree, self.jaxpr.in_avals),
(self.out_tree, self.jaxpr.out_avals),
self._consts)
# TODO(mattjj): when pmap is deleted, merge with pjit.py BUILD rule
from jax._src.interpreters import partial_eval as pe # type:ignore
hi_jaxpr = self.jaxpr
_, closed_over_himutables = pe.convert_const_himutables(hi_jaxpr)
if closed_over_himutables: raise NotImplementedError # TODO(mattjj)
lo_jaxpr = pe.lower_jaxpr(hi_jaxpr)
in_tree = lojax_pytree(hi_jaxpr.in_aval_qdds, self._in_tree)
out_tree = lojax_pytree(hi_jaxpr.out_avals, self.out_tree)
params = dict(lojax_expand_params(hi_jaxpr, self._params), jaxpr=lo_jaxpr)
lo_meta_tys = [mty.replace(aval=lo_ty)
for mty, aq in zip(self._meta_tys_flat, hi_jaxpr.in_aval_qdds)
for lo_ty in (mty.aval.lo_ty_qdd(aq.qdd)
if mty.aval.has_qdd else mty.aval.lo_ty())]
return Fallen(lo_meta_tys, params, in_tree, out_tree,
(self._in_tree, hi_jaxpr.final_aval_qdds),
(self.out_tree, hi_jaxpr.out_avals),
self._consts)
def lower(self, *, lowering_platforms: tuple[str, ...] | None = None,
_private_parameters: mlir.LoweringParameters | None = None):
"""Lower to compiler input, returning a ``Lowered`` instance."""
return self.fall().lower(lowering_platforms=lowering_platforms,
_private_parameters=_private_parameters)
def lojax_expand_params(jaxpr, params):
from jax._src.pjit import _lojax_expand_params # type: ignore
lo_nums_in = [len(aval.lo_ty()) for aval in jaxpr.in_aval_qdds]
lo_nums_out = [len(t.lo_ty()) for t in jaxpr.out_avals]
lo_muts_out = sum(len(aval.lo_ty()) for aval in jaxpr.final_aval_qdds
if aval.has_qdd)
return _lojax_expand_params(lo_nums_in, lo_nums_out, lo_muts_out,
**dict(params, jaxpr=jaxpr))
def lojax_pytree(hi_avals, tree):
lo_avals = [t.lo_ty() for t in hi_avals]
return tree_structure(tree_unflatten(tree, lo_avals))
| Traced |
python | psf__black | src/blib2to3/pgen2/tokenize.py | {
"start": 2706,
"end": 7378
} | class ____(Exception): ...
def transform_whitespace(
token: pytokens.Token, source: str, prev_token: pytokens.Token | None
) -> pytokens.Token:
r"""
Black treats `\\\n` at the end of a line as a 'NL' token, while it
is ignored as whitespace in the regular Python parser.
But, only the first one. If there's a `\\\n` following it
(as in, a \ just by itself on a line), that is not made into NL.
"""
if (
token.type == TokenType.whitespace
and prev_token is not None
and prev_token.type not in (TokenType.nl, TokenType.newline)
):
token_str = source[token.start_index : token.end_index]
if token_str.startswith("\\\r\n"):
return pytokens.Token(
TokenType.nl,
token.start_index,
token.start_index + 3,
token.start_line,
token.start_col,
token.start_line,
token.start_col + 3,
)
elif token_str.startswith("\\\n") or token_str.startswith("\\\r"):
return pytokens.Token(
TokenType.nl,
token.start_index,
token.start_index + 2,
token.start_line,
token.start_col,
token.start_line,
token.start_col + 2,
)
return token
def tokenize(source: str, grammar: Grammar | None = None) -> Iterator[TokenInfo]:
lines = source.split("\n")
lines += [""] # For newline tokens in files that don't end in a newline
line, column = 1, 0
prev_token: pytokens.Token | None = None
try:
for token in pytokens.tokenize(source):
token = transform_whitespace(token, source, prev_token)
line, column = token.start_line, token.start_col
if token.type == TokenType.whitespace:
continue
token_str = source[token.start_index : token.end_index]
if token.type == TokenType.newline and token_str == "":
# Black doesn't yield empty newline tokens at the end of a file
# if there's no newline at the end of a file.
prev_token = token
continue
source_line = lines[token.start_line - 1]
if token.type == TokenType.identifier and token_str in ("async", "await"):
# Black uses `async` and `await` token types just for those two keywords
yield (
ASYNC if token_str == "async" else AWAIT,
token_str,
(token.start_line, token.start_col),
(token.end_line, token.end_col),
source_line,
)
elif token.type == TokenType.op and token_str == "...":
# Black doesn't have an ellipsis token yet, yield 3 DOTs instead
assert token.start_line == token.end_line
assert token.end_col == token.start_col + 3
token_str = "."
for start_col in range(token.start_col, token.start_col + 3):
end_col = start_col + 1
yield (
TOKEN_TYPE_MAP[token.type],
token_str,
(token.start_line, start_col),
(token.end_line, end_col),
source_line,
)
else:
token_type = TOKEN_TYPE_MAP.get(token.type)
if token_type is None:
raise ValueError(f"Unknown token type: {token.type!r}")
yield (
TOKEN_TYPE_MAP[token.type],
token_str,
(token.start_line, token.start_col),
(token.end_line, token.end_col),
source_line,
)
prev_token = token
except pytokens.UnexpectedEOF:
raise TokenError("Unexpected EOF in multi-line statement", (line, column))
except pytokens.TokenizeError as exc:
raise TokenError(f"Failed to parse: {type(exc).__name__}", (line, column))
def printtoken(
type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
) -> None: # for testing
srow, scol = srow_col
erow, ecol = erow_col
print(f"{srow},{scol}-{erow},{ecol}:\t{tok_name[type]}\t{token!r}")
if __name__ == "__main__": # testing
if len(sys.argv) > 1:
token_iterator = tokenize(open(sys.argv[1]).read())
else:
token_iterator = tokenize(sys.stdin.read())
for tok in token_iterator:
printtoken(*tok)
| TokenError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 95629,
"end": 96123
} | class ____(sgqlc.types.Enum):
"""The level of enforcement for a rule or ruleset.
Enumeration Choices:
* `ACTIVE`: Rules will be enforced
* `DISABLED`: Do not evaluate or enforce rules
* `EVALUATE`: Allow admins to test rules before enforcing them.
Admins can view insights on the Rule Insights page (`evaluate`
is only available with GitHub Enterprise).
"""
__schema__ = github_schema
__choices__ = ("ACTIVE", "DISABLED", "EVALUATE")
| RuleEnforcement |
python | run-llama__llama_index | llama-index-core/llama_index/core/response_synthesizers/type.py | {
"start": 24,
"end": 2102
} | class ____(str, Enum):
"""Response modes of the response builder (and synthesizer)."""
REFINE = "refine"
"""
Refine is an iterative way of generating a response.
We first use the context in the first node, along with the query, to generate an \
initial answer.
We then pass this answer, the query, and the context of the second node as input \
into a “refine prompt” to generate a refined answer. We refine through N-1 nodes, \
where N is the total number of nodes.
"""
COMPACT = "compact"
"""
Compact and refine mode first combine text chunks into larger consolidated chunks \
that more fully utilize the available context window, then refine answers \
across them.
This mode is faster than refine since we make fewer calls to the LLM.
"""
SIMPLE_SUMMARIZE = "simple_summarize"
"""
Merge all text chunks into one, and make a LLM call.
This will fail if the merged text chunk exceeds the context window size.
"""
TREE_SUMMARIZE = "tree_summarize"
"""
Build a tree index over the set of candidate nodes, with a summary prompt seeded \
with the query.
The tree is built in a bottoms-up fashion, and in the end the root node is \
returned as the response
"""
GENERATION = "generation"
"""Ignore context, just use LLM to generate a response."""
NO_TEXT = "no_text"
"""Return the retrieved context nodes, without synthesizing a final response."""
CONTEXT_ONLY = "context_only"
"""Returns a concatenated string of all text chunks."""
ACCUMULATE = "accumulate"
"""Synthesize a response for each text chunk, and then return the concatenation."""
COMPACT_ACCUMULATE = "compact_accumulate"
"""
Compact and accumulate mode first combine text chunks into larger consolidated \
chunks that more fully utilize the available context window, then accumulate \
answers for each of them and finally return the concatenation.
This mode is faster than accumulate since we make fewer calls to the LLM.
"""
| ResponseMode |
python | networkx__networkx | networkx/algorithms/tests/test_matching.py | {
"start": 13369,
"end": 14833
} | class ____:
"""Unit tests for the
:func:`~networkx.algorithms.matching.is_matching` function.
"""
def test_dict(self):
G = nx.path_graph(4)
assert nx.is_matching(G, {0: 1, 1: 0, 2: 3, 3: 2})
def test_empty_matching(self):
G = nx.path_graph(4)
assert nx.is_matching(G, set())
def test_single_edge(self):
G = nx.path_graph(4)
assert nx.is_matching(G, {(1, 2)})
def test_edge_order(self):
G = nx.path_graph(4)
assert nx.is_matching(G, {(0, 1), (2, 3)})
assert nx.is_matching(G, {(1, 0), (2, 3)})
assert nx.is_matching(G, {(0, 1), (3, 2)})
assert nx.is_matching(G, {(1, 0), (3, 2)})
def test_valid_matching(self):
G = nx.path_graph(4)
assert nx.is_matching(G, {(0, 1), (2, 3)})
def test_selfloops(self):
G = nx.path_graph(4)
# selfloop edge not in G
assert not nx.is_matching(G, {(0, 0), (1, 2), (2, 3)})
# selfloop edge in G
G.add_edge(0, 0)
assert not nx.is_matching(G, {(0, 0), (1, 2)})
def test_invalid_matching(self):
G = nx.path_graph(4)
assert not nx.is_matching(G, {(0, 1), (1, 2), (2, 3)})
def test_invalid_edge(self):
G = nx.path_graph(4)
assert not nx.is_matching(G, {(0, 3), (1, 2)})
G = nx.DiGraph(G.edges)
assert nx.is_matching(G, {(0, 1)})
assert not nx.is_matching(G, {(1, 0)})
| TestIsMatching |
python | Netflix__metaflow | test/core/tests/nested_unbounded_foreach.py | {
"start": 72,
"end": 2549
} | class ____(MetaflowTest):
PRIORITY = 1
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["foreach-nested-split"], required=True)
def split_z(self):
from metaflow.plugins import InternalTestUnboundedForeachInput
self.z = InternalTestUnboundedForeachInput(self.z)
@tag("unbounded_test_foreach_internal")
@steps(0, ["foreach-nested-inner"], required=True)
def inner(self):
[x, y, z] = self.foreach_stack()
# assert that lengths are correct
assert_equals(len(self.x), x[1])
assert_equals(len(self.y), y[1])
# Note: We can't assert the actual num_splits for unbounded-foreach.
assert_equals(None, z[1]) # expected=len(self.z) for bounded.
# assert that variables are correct given their indices
assert_equals(x[2], self.x[x[0]])
assert_equals(y[2], self.y[y[0]])
assert_equals(z[2], self.z[z[0]])
assert_equals(self.input, z[2])
self.combo = x[2] + y[2] + z[2]
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
from itertools import product
run = checker.get_run()
if type(checker).__name__ == "CliCheck":
# CliCheck doesn't support enlisting of tasks nor can disambiguate
# control vs ubf tasks while dumping artifacts.
assert run is None
else:
assert run is not None
foreach_inner_tasks = {t.pathspec for t in run["foreach_inner"].tasks()}
assert_equals(42, len(foreach_inner_tasks))
assert_equals(6, len(list(run["foreach_inner"].control_tasks())))
artifacts = checker.artifact_dict_if_exists("foreach_inner", "combo")
# Explicitly only consider UBF tasks since the CLIChecker isn't aware of them.
step_prefix = run["foreach_inner"].pathspec
import os
got = sorted(
val["combo"]
for task, val in artifacts.items()
if os.path.join(step_prefix, task) in foreach_inner_tasks
)
expected = sorted("".join(p) for p in product("abc", "de", "fghijk"))
assert_equals(expected, got)
| NestedUnboundedForeachTest |
python | google__pytype | pytype/tests/test_attr1.py | {
"start": 159,
"end": 19885
} | class ____(test_base.BaseTest):
"""Tests for attr.ib."""
def test_basic(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib()
y = attr.ib(type=int)
z = attr.ib(type=str)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Any
@attr.s
class Foo:
x: Any
y: int
z: str
def __init__(self, x, y: int, z: str) -> None: ...
""",
)
def test_interpreter_class(self):
ty = self.Infer("""
import attr
class A: pass
@attr.s
class Foo:
x = attr.ib(type=A)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
class A: ...
@attr.s
class Foo:
x: A
def __init__(self, x: A) -> None: ...
""",
)
def test_typing(self):
ty = self.Infer("""
from typing import List
import attr
@attr.s
class Foo:
x = attr.ib(type=List[int])
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import List
@attr.s
class Foo:
x: List[int]
def __init__(self, x: List[int]) -> None: ...
""",
)
def test_union_types(self):
ty = self.Infer("""
from typing import Union
import attr
@attr.s
class Foo:
x = attr.ib(type=Union[str, int])
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: Union[str, int]
def __init__(self, x: Union[str, int]) -> None: ...
""",
)
def test_comment_annotations(self):
ty = self.Infer("""
from typing import Union
import attr
@attr.s
class Foo:
x = attr.ib() # type: Union[str, int]
y = attr.ib(type=str)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: Union[str, int]
y: str
def __init__(self, x: Union[str, int], y: str) -> None: ...
""",
)
def test_late_annotations(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib() # type: 'Foo'
y = attr.ib() # type: str
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: Foo
y: str
def __init__(self, x: Foo, y: str) -> None: ...
""",
)
def test_late_annotation_in_type(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib(type='Foo')
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
@attr.s
class Foo:
x: Foo
def __init__(self, x: Foo) -> None: ...
""",
)
def test_classvar(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib() # type: int
y = attr.ib(type=str)
z = 1 # class var, should not be in __init__
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: int
y: str
z: int
def __init__(self, x: int, y: str) -> None: ...
""",
)
def test_type_clash(self):
self.CheckWithErrors("""
import attr
@attr.s # invalid-annotation>=3.11
class Foo: # invalid-annotation<3.11
x = attr.ib(type=str) # type: int
y = attr.ib(type=str, default="") # type: int
Foo(x="") # should not report an error
""")
def test_bad_type(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(type=10) # invalid-annotation
""")
def test_name_mangling(self):
# NOTE: Python itself mangles names starting with two underscores.
ty = self.Infer("""
import attr
@attr.s
class Foo:
_x = attr.ib(type=int)
__y = attr.ib(type=int)
___z = attr.ib(type=int)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
@attr.s
class Foo:
_x: int
_Foo__y: int
_Foo___z: int
def __init__(self, x: int, Foo__y: int, Foo___z: int) -> None: ...
""",
)
def test_defaults(self):
ty, err = self.InferWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(default=42)
y = attr.ib(type=int, default=6)
z = attr.ib(type=str, default=28) # annotation-type-mismatch[e]
a = attr.ib(type=str, default=None)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: int = ...
y: int = ...
z: str = ...
a: str = ...
def __init__(self, x: int = ..., y: int = ..., z: str = ...,
a: str = ...) -> None: ...
""",
)
self.assertErrorRegexes(err, {"e": "annotation for z"})
def test_defaults_with_typecomment(self):
# Typecomments should override the type of default
ty, err = self.InferWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(default=42) # type: int
y = attr.ib(default=42) # type: str # annotation-type-mismatch[e]
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: int = ...
y: str = ...
def __init__(self, x: int = ..., y: str = ...) -> None: ...
""",
)
self.assertErrorRegexes(err, {"e": "annotation for y"})
def test_factory_class(self):
ty = self.Infer("""
import attr
class CustomClass:
pass
@attr.s
class Foo:
x = attr.ib(factory=list)
y = attr.ib(factory=CustomClass)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
class CustomClass: ...
@attr.s
class Foo:
x: list = ...
y: CustomClass = ...
def __init__(self, x: list = ..., y: CustomClass = ...) -> None: ...
""",
)
def test_factory_function(self):
ty = self.Infer("""
import attr
class CustomClass:
pass
def unannotated_func():
return CustomClass()
@attr.s
class Foo:
x = attr.ib(factory=locals)
y = attr.ib(factory=unannotated_func)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Any, Dict, Union
class CustomClass: ...
def unannotated_func() -> CustomClass: ...
@attr.s
class Foo:
x: Dict[str, Any] = ...
y: Any = ... # b/64832148: the return type isn't inferred early enough
def __init__(self, x: Dict[str, object] = ..., y = ...) -> None: ...
""",
)
def test_verbose_factory(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib(default=attr.Factory(list))
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: list = ...
def __init__(self, x: list = ...) -> None: ...
""",
)
  def test_bad_factory(self):
    # Non-callable arguments to attr.Factory / factory= should each be
    # reported as wrong-arg-types (Callable expected, int given).
    errors = self.CheckWithErrors("""
      import attr
      @attr.s
      class Foo:
        x = attr.ib(default=attr.Factory(42)) # wrong-arg-types[e1]
        y = attr.ib(factory=42) # wrong-arg-types[e2]
    """)
    self.assertErrorRegexes(
        errors, {"e1": r"Callable.*int", "e2": r"Callable.*int"}
    )
def test_default_factory_clash(self):
errors = self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(default=None, factory=list) # duplicate-keyword-argument[e]
""")
self.assertErrorRegexes(errors, {"e": r"default"})
def test_takes_self(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib(default=attr.Factory(len, takes_self=True))
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
@attr.s
class Foo:
x: int = ...
def __init__(self, x: int = ...) -> None: ...
""",
)
def test_default_none(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib(default=None)
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Any
@attr.s
class Foo:
x: Any = ...
def __init__(self, x: Any = ...) -> None: ...
""",
)
def test_annotation_type(self):
ty = self.Infer("""
from typing import List
import attr
@attr.s
class Foo:
x = attr.ib(type=List)
x = Foo([]).x
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
@attr.s
class Foo:
x: list
def __init__(self, x: list) -> None: ...
x: list
""",
)
def test_instantiation(self):
self.Check("""
import attr
class A:
def __init__(self):
self.w = None
@attr.s
class Foo:
x = attr.ib(type=A)
y = attr.ib() # type: A
z = attr.ib(factory=A)
foo = Foo(A(), A())
foo.x.w
foo.y.w
foo.z.w
""")
def test_init(self):
self.Check("""
import attr
@attr.s
class Foo:
x = attr.ib(init=False, default='') # type: str
y = attr.ib() # type: int
foo = Foo(42)
foo.x
foo.y
""")
def test_init_type(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib(init=False, default='') # type: str
y = attr.ib() # type: int
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class Foo:
x: str = ...
y: int
def __init__(self, y: int) -> None: ...
""",
)
def test_init_bad_constant(self):
err = self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(init=0) # wrong-arg-types[e]
""")
self.assertErrorRegexes(err, {"e": r"bool.*int"})
def test_init_bad_kwarg(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(init=__random__) # type: str # not-supported-yet
""")
def test_class(self):
self.assertNoCrash(
self.Check,
"""
import attr
class X(attr.make_class('X', {'y': attr.ib(default=None)})):
pass
""",
)
def test_base_class_attrs(self):
self.Check("""
import attr
@attr.s
class A:
a = attr.ib() # type: int
@attr.s
class B:
b = attr.ib() # type: str
@attr.s
class C(A, B):
c = attr.ib() # type: int
x = C(10, 'foo', 42)
x.a
x.b
x.c
""")
def test_base_class_attrs_type(self):
ty = self.Infer("""
import attr
@attr.s
class A:
a = attr.ib() # type: int
@attr.s
class B:
b = attr.ib() # type: str
@attr.s
class C(A, B):
c = attr.ib() # type: int
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class A:
a: int
def __init__(self, a: int) -> None: ...
@attr.s
class B:
b: str
def __init__(self, b: str) -> None: ...
@attr.s
class C(A, B):
c: int
def __init__(self, a: int, b: str, c: int) -> None: ...
""",
)
def test_base_class_attrs_override_type(self):
ty = self.Infer("""
import attr
@attr.s
class A:
a = attr.ib() # type: int
@attr.s
class B:
b = attr.ib() # type: str
@attr.s
class C(A, B):
a = attr.ib() # type: str
c = attr.ib() # type: int
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class A:
a: int
def __init__(self, a: int) -> None: ...
@attr.s
class B:
b: str
def __init__(self, b: str) -> None: ...
@attr.s
class C(A, B):
a: str
c: int
def __init__(self, b: str, a: str, c: int) -> None: ...
""",
)
def test_base_class_attrs_init(self):
ty = self.Infer("""
import attr
@attr.s
class A:
a = attr.ib(init=False) # type: int
@attr.s
class B:
b = attr.ib() # type: str
@attr.s
class C(A, B):
c = attr.ib() # type: int
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
@attr.s
class A:
a: int
def __init__(self) -> None: ...
@attr.s
class B:
b: str
def __init__(self, b: str) -> None: ...
@attr.s
class C(A, B):
c: int
def __init__(self, b: str, c: int) -> None: ...
""",
)
def test_base_class_attrs_abstract_type(self):
ty = self.Infer("""
import attr
@attr.s
class Foo(__any_object__):
a = attr.ib() # type: int
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Any
@attr.s
class Foo(Any):
a: int
def __init__(self, a: int) -> None: ...
""",
)
def test_method_decorators(self):
# Test for:
# - validator decorator does not throw an error
# - default decorator sets type if it isn't set
# - default decorator does not override type
ty, err = self.InferWithErrors("""
import attr
@attr.s
class Foo:
a = attr.ib()
b = attr.ib()
c = attr.ib(type=str) # annotation-type-mismatch[e]
@a.validator
def validate(self, attribute, value):
pass
@a.default
def default_a(self):
# type: (...) -> int
return 10
@b.default
def default_b(self):
return 10
@c.default
def default_c(self):
# type: (...) -> int
return 10
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Any, Union
@attr.s
class Foo:
a: int = ...
b: int = ...
c: str = ...
def __init__(self, a: int = ..., b: int = ..., c: str = ...) -> None: ...
def default_a(self) -> int: ...
def default_b(self) -> int: ...
def default_c(self) -> int: ...
def validate(self, attribute, value) -> None: ...
""",
)
self.assertErrorRegexes(err, {"e": "annotation for c"})
def test_default_decorator_using_self(self):
# default_b refers to self.a; the method itself will be annotated with the
# correct type, but since this happens after the attribute defaults have
# been processed, b will have an inferred default types of `Any` rather than
# `int`.
#
# default_c refers to self.b, which has been inferred as `Any`, so default_c
# gets a type of `-> Any`, but since the type annotation for c is more
# specific it overrides that.
ty = self.Infer("""
import attr
@attr.s
class Foo:
a = attr.ib(default=42)
b = attr.ib()
c = attr.ib(type=str)
@b.default
def default_b(self):
return self.a
@c.default
def default_c(self):
return self.b
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Any
@attr.s
class Foo:
a: int = ...
b: Any = ...
c: str = ...
def __init__(self, a: int = ..., b = ..., c: str = ...) -> None: ...
def default_b(self) -> int: ...
def default_c(self) -> Any: ...
""",
)
def test_repeated_default(self):
# Regression test for a bug where `params` and `calls` shared an underlying
# list object, so modifying one affected the type of the other.
self.Check("""
import attr
class Call:
pass
@attr.s
class Function:
params = attr.ib(factory=list)
calls = attr.ib(factory=list)
class FunctionMap:
def __init__(self, index):
self.fmap = {"": Function()}
def print_params(self):
for param in self.fmap[""].params:
print(param.name)
def add_call(self, call):
self.fmap[""].calls.append(Call())
""")
def test_empty_factory(self):
ty = self.Infer("""
import attr
FACTORIES = []
@attr.s
class Foo:
x = attr.ib(factory=FACTORIES[0])
Foo(x=0) # should not be an error
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Any, List
FACTORIES: List[nothing]
@attr.s
class Foo:
x: Any = ...
def __init__(self, x = ...) -> None: ...
""",
)
def test_empty_tuple_default(self):
ty = self.Infer("""
import attr
@attr.s
class Foo:
x = attr.ib(default=())
""")
self.assertTypesMatchPytd(
ty,
"""
import attr
@attr.s
class Foo:
x: tuple = ...
def __init__(self, x: tuple = ...) -> None: ...
""",
)
def test_long_alias(self):
# Tests an [annotation-type-mismatch] bug that appears when the
# "serious-business alias" for attr.ib is used.
self.Check("""
import attr
@attr.s
class Foo:
x= attr.attrib(default=0) # type: int
""")
def test_typevar_in_type_arg(self):
self.Check("""
import attr
from typing import Callable, TypeVar
T = TypeVar('T')
@attr.s
class Foo:
f = attr.ib(type=Callable[[T], T])
assert_type(Foo(__any_object__).f(0), int)
""")
def test_bad_typevar_in_type_arg(self):
self.CheckWithErrors("""
import attr
from typing import TypeVar
T = TypeVar('T')
@attr.s
class Foo:
x = attr.ib(type=T) # invalid-annotation
""")
def test_bad_constructor(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(default=10, init=0) # wrong-arg-types
a = Foo().x
assert_type(a, int)
""")
def test_bad_factory_constructor(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(default=10)
y = attr.ib(factory=10, type=int) # wrong-arg-types
""")
def test_multiple_bad_constructor_args(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(init=0, validator=10, type=int) # wrong-arg-types # wrong-arg-types
a = Foo(10).x
assert_type(a, int)
""")
def test_extra_constructor_args(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(bar=10, type=int) # wrong-keyword-args
a = Foo(10).x
assert_type(a, int)
""")
@test_base.skip("b/203591182")
def test_duplicate_constructor_args(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(10, default='a') # duplicate-keyword-argument
a = Foo().x
assert_type(a, int)
""")
| TestAttrib |
python | streamlit__streamlit | lib/tests/streamlit/elements/pyplot_test.py | {
"start": 1050,
"end": 8189
} | class ____(DeltaGeneratorTestCase):
    def setUp(self):
        super().setUp()
        # Tests must run headless and deterministically; force the
        # non-interactive Agg backend if anything else is active.
        if mpl.get_backend().lower() != "agg":
            plt.switch_backend("agg")
    def tearDown(self):
        # Clear the global pyplot figure between tests so figure state
        # from one test cannot leak into the next.
        plt.clf()
        super().tearDown()
    def test_st_pyplot(self):
        """Passing a figure to st.pyplot emits an image element.

        Checks that the element defaults to stretch width, has an empty
        caption, and serves the rendered image from the media endpoint.
        """
        # Make this deterministic
        np.random.seed(19680801)
        data = np.random.randn(2, 20)
        # Generate a 2 inch x 2 inch figure
        fig, ax = plt.subplots(figsize=(2, 2))
        # Add 20 random points to scatter plot.
        ax.scatter(data[0], data[1])
        st.pyplot(fig)
        el = self.get_delta_from_queue().new_element
        # Default width for st.pyplot is "stretch".
        assert el.width_config.use_stretch
        assert el.imgs.imgs[0].caption == ""
        # The image is served via the media endpoint, not inlined.
        assert el.imgs.imgs[0].url.startswith(MEDIA_ENDPOINT)
@parameterized.expand([("true", True), ("false", False), ("none", None)])
def test_st_pyplot_clear_global_figure(self, _, clear_figure: bool | None):
"""st.pyplot should clear the global figure if `clear_figure` is
True *or* None.
"""
plt.hist(np.random.normal(1, 1, size=100), bins=20)
with patch.object(plt, "clf", wraps=plt.clf, autospec=True) as plt_clf:
st.pyplot(clear_figure=clear_figure)
if clear_figure in (True, None):
plt_clf.assert_called_once()
else:
plt_clf.assert_not_called()
@patch("streamlit.elements.pyplot.show_deprecation_warning")
def test_global_object_deprecation_warning(self, show_warning_mock: Mock):
"""We show deprecation warnings when st.pyplot is called without a figure object."""
plt.hist(np.random.normal(1, 1, size=100), bins=20)
st.pyplot()
show_warning_mock.assert_called_once()
@parameterized.expand([("true", True), ("false", False), ("none", None)])
def test_st_pyplot_clear_figure(self, _, clear_figure: bool | None):
"""st.pyplot should clear the passed-in figure if `clear_figure` is True."""
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.hist(np.random.normal(1, 1, size=100), bins=20)
with patch.object(fig, "clf", wraps=fig.clf, autospec=True) as fig_clf:
st.pyplot(fig, clear_figure=clear_figure)
if clear_figure is True:
fig_clf.assert_called_once()
else:
fig_clf.assert_not_called()
@parameterized.expand([(True, "use_stretch"), (False, "use_content")])
def test_st_pyplot_use_container_width(
self, use_container_width: bool, expected_attribute: str
):
"""st.pyplot should set image width."""
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.hist(np.random.normal(1, 1, size=100), bins=20)
st.pyplot(fig, use_container_width=use_container_width)
el = self.get_delta_from_queue().new_element
assert getattr(el.width_config, expected_attribute)
def test_st_pyplot_width_stretch(self):
"""Test st.pyplot with width='stretch'."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 2, 3])
st.pyplot(fig, width="stretch")
el = self.get_delta_from_queue().new_element
assert el.width_config.use_stretch
def test_st_pyplot_width_content(self):
"""Test st.pyplot with width='content'."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 2, 3])
st.pyplot(fig, width="content")
el = self.get_delta_from_queue().new_element
assert el.width_config.use_content
def test_st_pyplot_width_pixel(self):
"""Test st.pyplot with integer pixel width."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 2, 3])
st.pyplot(fig, width=400)
el = self.get_delta_from_queue().new_element
assert el.width_config.pixel_width == 400
def test_st_pyplot_width_default(self):
"""Test st.pyplot default width behavior."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 2, 3])
st.pyplot(fig)
el = self.get_delta_from_queue().new_element
# Default for pyplot is "stretch"
assert el.width_config.use_stretch
@parameterized.expand(
[
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
"",
"Invalid width value: ''. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-1,
"Invalid width value: -1. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
None,
"Invalid width value: None. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
)
def test_st_pyplot_invalid_width(self, invalid_width, expected_error_message):
"""Test st.pyplot with invalid width values."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 2, 3])
with pytest.raises(StreamlitAPIException) as exc_info:
st.pyplot(fig, width=invalid_width)
assert str(exc_info.value) == expected_error_message
@parameterized.expand(
[
(
True,
"content",
"use_stretch",
), # use_container_width=True overrides width="content"
(
False,
"stretch",
"use_content",
), # use_container_width=False overrides width="stretch"
(True, 400, "use_stretch"), # use_container_width=True overrides width=400
(
False,
400,
"use_content",
), # use_container_width=False overrides width=400
]
)
@patch("streamlit.elements.pyplot.show_deprecation_warning")
def test_st_pyplot_use_container_width_overrides_width(
self,
use_container_width: bool,
original_width,
expected_attribute: str,
show_warning_mock: Mock,
):
"""Test that use_container_width parameter overrides the width parameter."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 2, 3])
st.pyplot(fig, width=original_width, use_container_width=use_container_width)
# Should show deprecation warning
show_warning_mock.assert_called_once()
el = self.get_delta_from_queue().new_element
assert getattr(el.width_config, expected_attribute)
| PyplotTest |
python | mlflow__mlflow | mlflow/gateway/providers/cohere.py | {
"start": 452,
"end": 11157
} | class ____(ProviderAdapter):
@staticmethod
def _scale_temperature(payload):
# The range of Cohere's temperature is 0-5, but ours is 0-2, so we scale it.
if temperature := payload.get("temperature"):
payload["temperature"] = 2.5 * temperature
return payload
@classmethod
def model_to_completions(cls, resp, config):
# Response example (https://docs.cohere.com/reference/generate)
# ```
# {
# "id": "string",
# "generations": [
# {
# "id": "string",
# "text": "string"
# }
# ],
# "prompt": "string"
# }
# ```
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=config.model.name,
choices=[
completions.Choice(
index=idx,
text=c["text"],
finish_reason=None,
)
for idx, c in enumerate(resp["generations"])
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
@classmethod
def model_to_completions_streaming(cls, resp, config):
# Response example (https://docs.cohere.com/reference/generate)
#
# Streaming chunks:
# ```
# {"index":0,"text":" Hi","is_finished":false,"event_type":"text-generation"}
# ```
# ```
# {"index":1,"text":" Hi","is_finished":false,"event_type":"text-generation"}
# ```
# notes: "index" is only present if "num_generations" > 1
#
# Final chunk:
# ```
# {"is_finished":true,"event_type":"stream-end","finish_reason":"COMPLETE",
# "response":{"id":"b32a70c5-8c91-4f96-958f-d942801ed22f",
# "generations":[
# {
# "id":"5d5d0851-35ac-4c25-a9a9-2fbb391bd415",
# "index":0,
# "text":" Hi there! How can I assist you today? ",
# "finish_reason":"COMPLETE"
# },
# {
# "id":"0a24787f-504e-470e-a088-0bf801a2c72d",
# "index":1,
# "text":" Hi there, how can I assist you today? ",
# "finish_reason":"COMPLETE"
# }
# ],
# "prompt":"Hello"
# }}
# ```
response = resp.get("response")
return completions.StreamResponsePayload(
id=response["id"] if response else None,
created=int(time.time()),
model=config.model.name,
choices=[
completions.StreamChoice(
index=resp.get("index", 0),
finish_reason=resp.get("finish_reason"),
text=resp.get("text"),
)
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
@classmethod
def model_to_embeddings(cls, resp, config):
# Response example (https://docs.cohere.com/reference/embed):
# ```
# {
# "id": "bc57846a-3e56-4327-8acc-588ca1a37b8a",
# "texts": [
# "hello world"
# ],
# "embeddings": [
# [
# 3.25,
# 0.7685547,
# 2.65625,
# ...
# -0.30126953,
# -2.3554688,
# 1.2597656
# ]
# ],
# "meta": [
# {
# "api_version": [
# {
# "version": "1"
# }
# ]
# }
# ]
# }
# ```
return embeddings.ResponsePayload(
data=[
embeddings.EmbeddingObject(
embedding=output,
index=idx,
)
for idx, output in enumerate(resp["embeddings"])
],
model=config.model.name,
usage=embeddings.EmbeddingsUsage(
prompt_tokens=None,
total_tokens=None,
),
)
    @classmethod
    def completions_to_model(cls, payload, config):
        # Translate OpenAI-style completion params to Cohere's names:
        # ``stop`` -> ``stop_sequences`` and ``n`` -> ``num_generations``.
        key_mapping = {
            "stop": "stop_sequences",
            "n": "num_generations",
        }
        # NOTE(review): presumably rejects payloads that already use the
        # Cohere-native key names - confirm against ProviderAdapter.
        cls.check_keys_against_mapping(key_mapping, payload)
        # Cohere's temperature range is 0-5 vs the gateway's 0-2.
        payload = cls._scale_temperature(payload)
        return rename_payload_keys(payload, key_mapping)
    @classmethod
    def completions_streaming_to_model(cls, payload, config):
        # Streaming requests use exactly the same parameter translation
        # as non-streaming completions.
        return cls.completions_to_model(payload, config)
@classmethod
def embeddings_to_model(cls, payload, config):
key_mapping = {"input": "texts"}
for k1, k2 in key_mapping.items():
if k2 in payload:
raise AIGatewayException(
status_code=422, detail=f"Invalid parameter {k2}. Use {k1} instead."
)
return rename_payload_keys(payload, key_mapping)
    @classmethod
    def chat_to_model(cls, payload, config):
        """Convert an OpenAI-style chat payload into Cohere's chat request shape.

        The last (user) message becomes ``message``, system messages are joined
        into ``preamble_override``, and prior user/assistant turns become
        ``chat_history``. Raises AIGatewayException (422) for unsupported
        parameters: ``n`` must be 1 and ``stop`` is not supported.
        """
        # NOTE(review): assumes "n" is always present in the payload
        # (presumably populated by an upstream pydantic default) - confirm.
        if payload["n"] != 1:
            raise AIGatewayException(
                status_code=422,
                detail=f"Parameter n must be 1 for Cohere chat, got {payload['n']}.",
            )
        del payload["n"]
        if "stop" in payload:
            raise AIGatewayException(
                status_code=422,
                detail="Parameter stop is not supported for Cohere chat.",
            )
        # Cohere's temperature range is 0-5 vs the gateway's 0-2.
        payload = cls._scale_temperature(payload)
        messages = payload.pop("messages")
        last_message = messages.pop()  # pydantic enforces min_length=1
        if last_message["role"] != "user":
            raise AIGatewayException(
                status_code=422,
                detail=f"Last message must be from user, got {last_message['role']}.",
            )
        payload["message"] = last_message["content"]
        # Cohere uses `preamble_override` to set the system message
        # we concatenate all system messages from the user with a newline
        system_messages = [m for m in messages if m["role"] == "system"]
        if len(system_messages) > 0:
            payload["preamble_override"] = "\n".join(m["content"] for m in system_messages)
        # remaining messages are chat history
        # we want to include only user and assistant messages
        if messages := [m for m in messages if m["role"] in ("user", "assistant")]:
            payload["chat_history"] = [
                {
                    "role": "USER" if m["role"] == "user" else "CHATBOT",
                    "message": m["content"],
                }
                for m in messages
            ]
        return payload
    @classmethod
    def chat_streaming_to_model(cls, payload, config):
        # Streaming chat uses exactly the same payload translation
        # as non-streaming chat.
        return cls.chat_to_model(payload, config)
@classmethod
def model_to_chat(cls, resp, config):
# Response example (https://docs.cohere.com/reference/chat)
# ```
# {
# "response_id": "string",
# "text": "string",
# "generation_id": "string",
# "token_count": {
# "prompt_tokens": 0,
# "response_tokens": 0,
# "total_tokens": 0,
# "billed_tokens": 0
# },
# "meta": {
# "api_version": {
# "version": "1"
# },
# "billed_units": {
# "input_tokens": 0,
# "output_tokens": 0
# }
# },
# "tool_inputs": null
# }
# ```
return chat.ResponsePayload(
id=resp["response_id"],
object="chat.completion",
created=int(time.time()),
model=config.model.name,
choices=[
chat.Choice(
index=0,
message=chat.ResponseMessage(
role="assistant",
content=resp["text"],
),
finish_reason=None,
),
],
usage=chat.ChatUsage(
prompt_tokens=resp["token_count"]["prompt_tokens"],
completion_tokens=resp["token_count"]["response_tokens"],
total_tokens=resp["token_count"]["total_tokens"],
),
)
@classmethod
def model_to_chat_streaming(cls, resp, config):
# Response example (https://docs.cohere.com/reference/chat)
# Streaming chunks:
# ```
# {
# "is_finished":false,
# "event_type":"stream-start",
# "generation_id":"string"
# }
# {"is_finished":false,"event_type":"text-generation","text":"How"}
# {"is_finished":false,"event_type":"text-generation","text":" are"}
# {"is_finished":false,"event_type":"text-generation","text":" you"}
# {
# "is_finished":true,
# "event_type":"stream-end",
# "response":{
# "response_id":"string",
# "text":"How are you",
# "generation_id":"string",
# "token_count":{
# "prompt_tokens":83,"response_tokens":63,"total_tokens":146,"billed_tokens":128
# },
# "tool_inputs":null
# },
# "finish_reason":"COMPLETE"
# }
# ```
response = resp.get("response")
return chat.StreamResponsePayload(
# first chunk has "generation_id" but not "response_id"
id=response["response_id"] if response else None,
created=int(time.time()),
model=config.model.name,
choices=[
chat.StreamChoice(
index=0,
finish_reason=resp.get("finish_reason"),
delta=chat.StreamDelta(
role=None,
content=resp.get("text"),
),
)
],
usage=chat.ChatUsage(
prompt_tokens=response["token_count"]["prompt_tokens"] if response else None,
completion_tokens=response["token_count"]["response_tokens"] if response else None,
total_tokens=response["token_count"]["total_tokens"] if response else None,
),
)
| CohereAdapter |
python | huggingface__transformers | src/transformers/models/textnet/modeling_textnet.py | {
"start": 8111,
"end": 9762
} | class ____(TextNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Stem conv layer feeding the stage-wise encoder; a 2x2 adaptive
        # average pool produces the fixed-size pooled output of forward().
        self.stem = TextNetConvLayer(config)
        self.encoder = TextNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((2, 2))
        self.post_init()
    @auto_docstring
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> Union[tuple[Any, list[Any]], tuple[Any], BaseModelOutputWithPoolingAndNoAttention]:
        # Fall back to the config defaults when flags are not given explicitly.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        hidden_state = self.stem(pixel_values)
        encoder_outputs = self.encoder(
            hidden_state, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        # encoder_outputs[0] is the last hidden state; encoder_outputs[1]
        # (only present when requested) holds the per-stage hidden states.
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            output = (last_hidden_state, pooled_output)
            # Append the hidden-states tuple only when it was requested.
            return output + (encoder_outputs[1],) if output_hidden_states else output
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs[1] if output_hidden_states else None,
        )
@auto_docstring(
custom_intro="""
TextNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
| TextNetModel |
python | altair-viz__altair | altair/utils/data.py | {
"start": 7091,
"end": 14835
} | class ____(TypedDict):
url: str
format: _FormatDict
@overload
def to_json(
    data: None = ...,
    prefix: str = ...,
    extension: str = ...,
    filename: str = ...,
    urlpath: str = ...,
) -> partial: ...
@overload
def to_json(
    data: DataType,
    prefix: str = ...,
    extension: str = ...,
    filename: str = ...,
    urlpath: str = ...,
) -> _ToFormatReturnUrlDict: ...
def to_json(
    data: DataType | None = None,
    prefix: str = "altair-data",
    extension: str = "json",
    filename: str = "{prefix}-{hash}.{extension}",
    urlpath: str = "",
) -> partial | _ToFormatReturnUrlDict:
    """Write the data model to a .json file and return a url based data model.

    When called without ``data``, returns a ``functools.partial`` with the
    remaining keyword arguments bound, suitable for use as a data transformer.
    ``filename`` may reference ``{prefix}``, ``{hash}`` (a hash of the
    serialized content), and ``{extension}``.
    """
    kwds = _to_text_kwds(prefix, extension, filename, urlpath)
    if data is None:
        return partial(to_json, **kwds)
    else:
        data_str = _data_to_json_string(data)
        return _to_text(data_str, **kwds, format=_FormatDict(type="json"))
@overload
def to_csv(
    data: None = ...,
    prefix: str = ...,
    extension: str = ...,
    filename: str = ...,
    urlpath: str = ...,
) -> partial: ...
@overload
def to_csv(
    data: dict | pd.DataFrame | DataFrameLike,
    prefix: str = ...,
    extension: str = ...,
    filename: str = ...,
    urlpath: str = ...,
) -> _ToFormatReturnUrlDict: ...
def to_csv(
    data: dict | pd.DataFrame | DataFrameLike | None = None,
    prefix: str = "altair-data",
    extension: str = "csv",
    filename: str = "{prefix}-{hash}.{extension}",
    urlpath: str = "",
) -> partial | _ToFormatReturnUrlDict:
    """Write the data model to a .csv file and return a url based data model.

    When called without ``data``, returns a ``functools.partial`` with the
    remaining keyword arguments bound, suitable for use as a data transformer.
    ``filename`` may reference ``{prefix}``, ``{hash}`` (a hash of the
    serialized content), and ``{extension}``.
    """
    kwds = _to_text_kwds(prefix, extension, filename, urlpath)
    if data is None:
        return partial(to_csv, **kwds)
    else:
        data_str = _data_to_csv_string(data)
        return _to_text(data_str, **kwds, format=_FormatDict(type="csv"))
def _to_text(
    data: str,
    prefix: str,
    extension: str,
    filename: str,
    urlpath: str,
    format: _FormatDict,
) -> _ToFormatReturnUrlDict:
    """Write ``data`` to a content-addressed file and return its url spec.

    The filename template is expanded with the prefix, a hash of the content,
    and the extension, so identical data always maps to the same file.
    """
    data_hash = _compute_data_hash(data)
    filename = filename.format(prefix=prefix, hash=data_hash, extension=extension)
    Path(filename).write_text(data, encoding="utf-8")
    # The url is a plain path join; an empty urlpath leaves the bare filename.
    url = str(Path(urlpath, filename))
    return _ToFormatReturnUrlDict({"url": url, "format": format})
def _to_text_kwds(prefix: str, extension: str, filename: str, urlpath: str, /) -> dict[str, str]: # fmt: skip
return {"prefix": prefix, "extension": extension, "filename": filename, "urlpath": urlpath} # fmt: skip
def to_values(data: DataType) -> ToValuesReturnType:
    """Replace a DataFrame by a data model with values.

    Returns a ``{"values": ...}`` dict with the data inlined as records.
    Raises TypeError for unsupported data types and KeyError for a dict
    lacking a ``"values"`` entry.
    """
    check_data_type(data)
    # `pass_through=True` passes `data` through as-is if it is not a Narwhals object.
    data_native = nw.to_native(data, pass_through=True)
    if isinstance(data_native, SupportsGeoInterface):
        return {"values": _from_geo_interface(data_native)}
    elif is_pandas_dataframe(data_native):
        # Sanitize pandas-specific values (NaN, Timestamps, ...) before export.
        data_native = sanitize_pandas_dataframe(data_native)
        return {"values": data_native.to_dict(orient="records")}
    elif isinstance(data_native, dict):
        if "values" not in data_native:
            msg = "values expected in data dict, but not present."
            raise KeyError(msg)
        return data_native
    elif isinstance(data, nw.DataFrame):
        data = sanitize_narwhals_dataframe(data)
        return {"values": data.rows(named=True)}
    else:
        # Should never reach this state as tested by check_data_type
        msg = f"Unrecognized data type: {type(data)}"
        raise ValueError(msg)
def check_data_type(data: DataType) -> None:
    """Raise TypeError unless ``data`` is a supported chart data type."""
    if is_data_type(data):
        return
    raise TypeError(
        f"Expected dict, DataFrame or a __geo_interface__ attribute, got: {type(data)}"
    )
# ==============================================================================
# Private utilities
# ==============================================================================
def _compute_data_hash(data_str: str) -> str:
return hashlib.sha256(data_str.encode()).hexdigest()[:32]
def _from_geo_interface(data: SupportsGeoInterface) -> dict[str, Any]:
    """
    Sanitize a ``__geo_interface__`` w/ pre-sanitize step for ``pandas`` if needed.

    Introduces an intersection type::

        geo: <subclass of SupportsGeoInterface and DataFrame> | SupportsGeoInterface
    """
    # pandas-backed geo objects get their columns sanitized first so the
    # resulting __geo_interface__ can be serialized.
    geo = sanitize_pandas_dataframe(data) if is_pandas_dataframe(data) else data
    return sanitize_geo_interface(geo.__geo_interface__)
def _data_to_json_string(data: DataType) -> str:
    """Return a JSON string representation of the input data.

    Supports geo-interface objects, pandas DataFrames, ``{"values": ...}``
    dicts, and anything Narwhals can wrap as an eager DataFrame; raises
    NotImplementedError otherwise.
    """
    check_data_type(data)
    if isinstance(data, SupportsGeoInterface):
        return json.dumps(_from_geo_interface(data))
    elif is_pandas_dataframe(data):
        data = sanitize_pandas_dataframe(data)
        # double_precision=15 keeps floats round-trippable.
        return data.to_json(orient="records", double_precision=15)
    elif isinstance(data, dict):
        if "values" not in data:
            msg = "values expected in data dict, but not present."
            raise KeyError(msg)
        return json.dumps(data["values"], sort_keys=True)
    try:
        data_nw = nw.from_native(data, eager_only=True)
    except TypeError as exc:
        msg = "to_json only works with data expressed as a DataFrame or as a dict"
        raise NotImplementedError(msg) from exc
    data_nw = sanitize_narwhals_dataframe(data_nw)
    return json.dumps(data_nw.rows(named=True))
def _data_to_csv_string(data: DataType) -> str:
    """Return a CSV string representation of the input data.

    Raises
    ------
    NotImplementedError
        If ``data`` exposes ``__geo_interface__`` (unsupported for CSV) or
        cannot be expressed as a DataFrame or dict.
    KeyError
        If a dict ``data`` lacks the required ``"values"`` entry.
    ImportError
        If a dict ``data`` is given but pandas is not installed.
    """
    check_data_type(data)
    if isinstance(data, SupportsGeoInterface):
        # Report the concrete type of `data`. The previous
        # `type(SupportsGeoInterface).__name__` evaluated to the protocol's
        # *metaclass* name, which said nothing about the offending object.
        msg = (
            f"to_csv does not yet work with data that "
            f"is of type {type(data).__name__!r}.\n"
            f"See https://github.com/vega/altair/issues/3441"
        )
        raise NotImplementedError(msg)
    elif is_pandas_dataframe(data):
        data = sanitize_pandas_dataframe(data)
        return data.to_csv(index=False)
    elif isinstance(data, dict):
        if "values" not in data:
            msg = "values expected in data dict, but not present"
            raise KeyError(msg)
        try:
            import pandas as pd
        except ImportError as exc:
            msg = "pandas is required to convert a dict to a CSV string"
            raise ImportError(msg) from exc
        return pd.DataFrame.from_dict(data["values"]).to_csv(index=False)
    try:
        data_nw = nw.from_native(data, eager_only=True)
    except TypeError as exc:
        msg = "to_csv only works with data expressed as a DataFrame or as a dict"
        raise NotImplementedError(msg) from exc
    return data_nw.write_csv()
def arrow_table_from_dfi_dataframe(dfi_df: DataFrameLike) -> pa.Table:
    """Convert a DataFrame Interchange Protocol compatible object to an Arrow Table."""
    import pyarrow as pa

    # First check if the dataframe object has a method to convert to arrow.
    # Give this preference over the pyarrow from_dataframe function since the object
    # has more control over the conversion, and may have broader compatibility.
    # This is the case for Polars, which supports Date32 columns in direct conversion
    # while pyarrow does not yet support this type in from_dataframe
    for convert_method_name in ("arrow", "to_arrow", "to_arrow_table", "to_pyarrow"):
        convert_method = getattr(dfi_df, convert_method_name, None)
        if callable(convert_method):
            result = convert_method()
            # Only accept a genuine pa.Table; otherwise keep trying the
            # remaining method names and finally the interchange fallback.
            if isinstance(result, pa.Table):
                return result
    pi = import_pyarrow_interchange()
    return pi.from_dataframe(dfi_df)
| _ToFormatReturnUrlDict |
python | pandas-dev__pandas | pandas/tests/window/test_numba.py | {
"start": 1387,
"end": 9731
} | class ____:
@pytest.mark.parametrize("jit", [True, False])
def test_numba_vs_cython_apply(self, jit, nogil, parallel, nopython, center, step):
def f(x, *args):
arg_sum = 0
for arg in args:
arg_sum += arg
return np.mean(x) + arg_sum
if jit:
import numba
f = numba.jit(f)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
args = (2,)
s = Series(range(10))
result = s.rolling(2, center=center, step=step).apply(
f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = s.rolling(2, center=center, step=step).apply(
f, engine="cython", args=args, raw=True
)
tm.assert_series_equal(result, expected)
def test_apply_numba_with_kwargs(self, roll_frame):
# GH 58995
# rolling apply
def func(sr, a=0):
return sr.sum() + a
data = DataFrame(range(10))
result = data.rolling(5).apply(func, engine="numba", raw=True, kwargs={"a": 1})
expected = data.rolling(5).sum() + 1
tm.assert_frame_equal(result, expected)
result = data.rolling(5).apply(func, engine="numba", raw=True, args=(1,))
tm.assert_frame_equal(result, expected)
# expanding apply
result = data.expanding().apply(func, engine="numba", raw=True, kwargs={"a": 1})
expected = data.expanding().sum() + 1
tm.assert_frame_equal(result, expected)
result = data.expanding().apply(func, engine="numba", raw=True, args=(1,))
tm.assert_frame_equal(result, expected)
# groupby rolling
result = (
roll_frame.groupby("A")
.rolling(5)
.apply(func, engine="numba", raw=True, kwargs={"a": 1})
)
expected = roll_frame.groupby("A").rolling(5).sum() + 1
tm.assert_frame_equal(result, expected)
result = (
roll_frame.groupby("A")
.rolling(5)
.apply(func, engine="numba", raw=True, args=(1,))
)
tm.assert_frame_equal(result, expected)
# groupby expanding
result = (
roll_frame.groupby("A")
.expanding()
.apply(func, engine="numba", raw=True, kwargs={"a": 1})
)
expected = roll_frame.groupby("A").expanding().sum() + 1
tm.assert_frame_equal(result, expected)
result = (
roll_frame.groupby("A")
.expanding()
.apply(func, engine="numba", raw=True, args=(1,))
)
tm.assert_frame_equal(result, expected)
def test_numba_min_periods(self):
# GH 58868
def last_row(x):
assert len(x) == 3
return x[-1]
df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]])
result = df.rolling(3, method="table", min_periods=3).apply(
last_row, raw=True, engine="numba"
)
expected = DataFrame([[np.nan, np.nan], [np.nan, np.nan], [5, 6], [7, 8]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
DataFrame(np.eye(5)),
DataFrame(
[
[5, 7, 7, 7, np.nan, np.inf, 4, 3, 3, 3],
[5, 7, 7, 7, np.nan, np.inf, 7, 3, 3, 3],
[np.nan, np.nan, 5, 6, 7, 5, 5, 5, 5, 5],
]
).T,
Series(range(5), name="foo"),
Series([20, 10, 10, np.inf, 1, 1, 2, 3]),
Series([20, 10, 10, np.nan, 10, 1, 2, 3]),
],
)
def test_numba_vs_cython_rolling_methods(
self,
data,
nogil,
parallel,
nopython,
arithmetic_numba_supported_operators,
step,
):
method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
roll = data.rolling(3, step=step)
result = getattr(roll, method)(
engine="numba", engine_kwargs=engine_kwargs, **kwargs
)
expected = getattr(roll, method)(engine="cython", **kwargs)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"data", [DataFrame(np.eye(5)), Series(range(5), name="foo")]
)
def test_numba_vs_cython_expanding_methods(
self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
data = DataFrame(np.eye(5))
expand = data.expanding()
result = getattr(expand, method)(
engine="numba", engine_kwargs=engine_kwargs, **kwargs
)
expected = getattr(expand, method)(engine="cython", **kwargs)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("jit", [True, False])
def test_cache_apply(self, jit, nogil, parallel, nopython, step):
# Test that the functions are cached correctly if we switch functions
def func_1(x):
return np.mean(x) + 4
def func_2(x):
return np.std(x) * 5
if jit:
import numba
func_1 = numba.jit(func_1)
func_2 = numba.jit(func_2)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
roll = Series(range(10)).rolling(2, step=step)
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
result = roll.apply(
func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_2, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
# This run should use the cached func_1
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"window,window_kwargs",
[
["rolling", {"window": 3, "min_periods": 0}],
["expanding", {}],
],
)
def test_dont_cache_args(
self, window, window_kwargs, nogil, parallel, nopython, method
):
# GH 42287
def add(values, x):
return np.sum(values) + x
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
df = DataFrame({"value": [0, 0, 0]})
result = getattr(df, window)(method=method, **window_kwargs).apply(
add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(1,)
)
expected = DataFrame({"value": [1.0, 1.0, 1.0]})
tm.assert_frame_equal(result, expected)
result = getattr(df, window)(method=method, **window_kwargs).apply(
add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(2,)
)
expected = DataFrame({"value": [2.0, 2.0, 2.0]})
tm.assert_frame_equal(result, expected)
def test_dont_cache_engine_kwargs(self):
# If the user passes a different set of engine_kwargs don't return the same
# jitted function
nogil = False
parallel = True
nopython = True
def func(x):
return nogil + parallel + nopython
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
df = DataFrame({"value": [0, 0, 0]})
result = df.rolling(1).apply(
func, raw=True, engine="numba", engine_kwargs=engine_kwargs
)
expected = DataFrame({"value": [2.0, 2.0, 2.0]})
tm.assert_frame_equal(result, expected)
parallel = False
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
result = df.rolling(1).apply(
func, raw=True, engine="numba", engine_kwargs=engine_kwargs
)
expected = DataFrame({"value": [1.0, 1.0, 1.0]})
tm.assert_frame_equal(result, expected)
@td.skip_if_no("numba")
| TestEngine |
python | pytorch__pytorch | benchmarks/inductor_backends/cutlass.py | {
"start": 1729,
"end": 2239
} | class ____:
max_autotune: bool = True
coordinate_descent_tuning: bool = True
max_autotune_gemm_backends: str = "ATEN"
@abstractmethod
def name(self) -> str:
pass
def to_options(self) -> dict[str, Any]:
return {
"max_autotune": self.max_autotune,
"coordinate_descent_tuning": self.coordinate_descent_tuning,
"max_autotune_gemm_backends": self.max_autotune_gemm_backends,
}
@dataclass(frozen=True, kw_only=True)
| ExperimentConfig |
python | google__flatbuffers | conanfile.py | {
"start": 166,
"end": 2922
} | class ____(ConanFile):
name = "flatbuffers"
license = "Apache-2.0"
url = "https://github.com/google/flatbuffers"
homepage = "http://google.github.io/flatbuffers/"
author = "Wouter van Oortmerssen"
topics = ("conan", "flatbuffers", "serialization", "rpc", "json-parser")
description = "Memory Efficient Serialization Library"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
generators = "cmake"
exports = "LICENSE"
exports_sources = [
"CMake/*",
"include/*",
"src/*",
"grpc/*",
"CMakeLists.txt",
"conan/CMakeLists.txt",
]
def source(self):
"""Wrap the original CMake file to call conan_basic_setup"""
shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt")
shutil.move(os.path.join("conan", "CMakeLists.txt"), "CMakeLists.txt")
def config_options(self):
"""Remove fPIC option on Windows platform"""
if self.settings.os == "Windows":
self.options.remove("fPIC")
def configure_cmake(self):
"""Create CMake instance and execute configure step"""
cmake = CMake(self)
cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False
cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = self.options.shared
cmake.definitions["FLATBUFFERS_BUILD_FLATLIB"] = not self.options.shared
cmake.configure()
return cmake
def build(self):
"""Configure, build and install FlatBuffers using CMake."""
cmake = self.configure_cmake()
cmake.build()
def package(self):
"""Copy Flatbuffers' artifacts to package folder"""
cmake = self.configure_cmake()
cmake.install()
self.copy(pattern="LICENSE", dst="licenses")
self.copy(
pattern="FindFlatBuffers.cmake",
dst=os.path.join("lib", "cmake", "flatbuffers"),
src="CMake",
)
self.copy(pattern="flathash*", dst="bin", src="bin")
self.copy(pattern="flatc*", dst="bin", src="bin")
if self.settings.os == "Windows" and self.options.shared:
if self.settings.compiler == "Visual Studio":
shutil.move(
os.path.join(self.package_folder, "lib", "%s.dll" % self.name),
os.path.join(self.package_folder, "bin", "%s.dll" % self.name),
)
elif self.settings.compiler == "gcc":
shutil.move(
os.path.join(self.package_folder, "lib", "lib%s.dll" % self.name),
os.path.join(self.package_folder, "bin", "lib%s.dll" % self.name),
)
def package_info(self):
"""Collect built libraries names and solve flatc path."""
self.cpp_info.libs = tools.collect_libs(self)
self.user_info.flatc = os.path.join(self.package_folder, "bin", "flatc")
| FlatbuffersConan |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT000.py | {
"start": 38,
"end": 111
} | class ____(str): # Ok
__slots__ = ["foo"]
from enum import Enum
| Good |
python | langchain-ai__langchain | libs/core/langchain_core/tools/base.py | {
"start": 13550,
"end": 44881
} | class ____(BaseTool):
...
args_schema: Type[BaseModel] = SchemaClass
..."""
name = cls.__name__
msg = (
f"Tool definition for {name} must include valid type annotations"
f" for argument 'args_schema' to behave as expected.\n"
f"Expected annotation of 'Type[BaseModel]'"
f" but got '{args_schema_type}'.\n"
f"Expected class looks like:\n"
f"{typehint_mandate}"
)
raise SchemaAnnotationError(msg)
name: str
"""The unique name of the tool that clearly communicates its purpose."""
description: str
"""Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
"""
args_schema: Annotated[ArgsSchema | None, SkipValidation()] = Field(
default=None, description="The tool schema."
)
"""Pydantic model class to validate and parse the tool's input arguments.
Args schema should be either:
- A subclass of `pydantic.BaseModel`.
- A subclass of `pydantic.v1.BaseModel` if accessing v1 namespace in pydantic 2
- A JSON schema dict
"""
return_direct: bool = False
"""Whether to return the tool's output directly.
Setting this to `True` means that after the tool is called, the `AgentExecutor` will
stop looping.
"""
verbose: bool = False
"""Whether to log the tool's progress."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to be called during tool execution."""
tags: list[str] | None = None
"""Optional list of tags associated with the tool.
These tags will be associated with each call to this tool,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to, e.g., identify a specific instance of a tool with its use
case.
"""
metadata: dict[str, Any] | None = None
"""Optional metadata associated with the tool.
This metadata will be associated with each call to this tool,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to, e.g., identify a specific instance of a tool with its use
case.
"""
handle_tool_error: bool | str | Callable[[ToolException], str] | None = False
"""Handle the content of the `ToolException` thrown."""
handle_validation_error: (
bool | str | Callable[[ValidationError | ValidationErrorV1], str] | None
) = False
"""Handle the content of the `ValidationError` thrown."""
response_format: Literal["content", "content_and_artifact"] = "content"
"""The tool response format.
If `'content'` then the output of the tool is interpreted as the contents of a
`ToolMessage`. If `'content_and_artifact'` then the output is expected to be a
two-tuple corresponding to the `(content, artifact)` of a `ToolMessage`.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the tool.
Raises:
TypeError: If `args_schema` is not a subclass of pydantic `BaseModel` or
`dict`.
"""
if (
"args_schema" in kwargs
and kwargs["args_schema"] is not None
and not is_basemodel_subclass(kwargs["args_schema"])
and not isinstance(kwargs["args_schema"], dict)
):
msg = (
"args_schema must be a subclass of pydantic BaseModel or "
f"a JSON schema dict. Got: {kwargs['args_schema']}."
)
raise TypeError(msg)
super().__init__(**kwargs)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@property
def is_single_input(self) -> bool:
"""Check if the tool accepts only a single input argument.
Returns:
`True` if the tool has only one input argument, `False` otherwise.
"""
keys = {k for k in self.args if k != "kwargs"}
return len(keys) == 1
@property
def args(self) -> dict:
"""Get the tool's input arguments schema.
Returns:
`dict` containing the tool's argument properties.
"""
if isinstance(self.args_schema, dict):
json_schema = self.args_schema
elif self.args_schema and issubclass(self.args_schema, BaseModelV1):
json_schema = self.args_schema.schema()
else:
input_schema = self.get_input_schema()
json_schema = input_schema.model_json_schema()
return json_schema["properties"]
@property
def tool_call_schema(self) -> ArgsSchema:
"""Get the schema for tool calls, excluding injected arguments.
Returns:
The schema that should be used for tool calls from language models.
"""
if isinstance(self.args_schema, dict):
if self.description:
return {
**self.args_schema,
"description": self.description,
}
return self.args_schema
full_schema = self.get_input_schema()
fields = []
for name, type_ in get_all_basemodel_annotations(full_schema).items():
if not _is_injected_arg_type(type_):
fields.append(name)
return _create_subset_model(
self.name, full_schema, fields, fn_description=self.description
)
@functools.cached_property
def _injected_args_keys(self) -> frozenset[str]:
# base implementation doesn't manage injected args
return _EMPTY_SET
# --- Runnable ---
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
"""The tool's input schema.
Args:
config: The configuration for the tool.
Returns:
The input schema for the tool.
"""
if self.args_schema is not None:
if isinstance(self.args_schema, dict):
return super().get_input_schema(config)
return self.args_schema
return create_schema_from_function(self.name, self._run)
@override
def invoke(
self,
input: str | dict | ToolCall,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Any:
tool_input, kwargs = _prep_run_args(input, config, **kwargs)
return self.run(tool_input, **kwargs)
@override
async def ainvoke(
self,
input: str | dict | ToolCall,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Any:
tool_input, kwargs = _prep_run_args(input, config, **kwargs)
return await self.arun(tool_input, **kwargs)
# --- Tool ---
def _parse_input(
self, tool_input: str | dict, tool_call_id: str | None
) -> str | dict[str, Any]:
"""Parse and validate tool input using the args schema.
Args:
tool_input: The raw input to the tool.
tool_call_id: The ID of the tool call, if available.
Returns:
The parsed and validated input.
Raises:
ValueError: If `string` input is provided with JSON schema `args_schema`.
ValueError: If `InjectedToolCallId` is required but `tool_call_id` is not
provided.
TypeError: If `args_schema` is not a Pydantic `BaseModel` or dict.
"""
input_args = self.args_schema
if isinstance(tool_input, str):
if input_args is not None:
if isinstance(input_args, dict):
msg = (
"String tool inputs are not allowed when "
"using tools with JSON schema args_schema."
)
raise ValueError(msg)
key_ = next(iter(get_fields(input_args).keys()))
if issubclass(input_args, BaseModel):
input_args.model_validate({key_: tool_input})
elif issubclass(input_args, BaseModelV1):
input_args.parse_obj({key_: tool_input})
else:
msg = f"args_schema must be a Pydantic BaseModel, got {input_args}"
raise TypeError(msg)
return tool_input
if input_args is not None:
if isinstance(input_args, dict):
return tool_input
if issubclass(input_args, BaseModel):
# Check args_schema for InjectedToolCallId
for k, v in get_all_basemodel_annotations(input_args).items():
if _is_injected_arg_type(v, injected_type=InjectedToolCallId):
if tool_call_id is None:
msg = (
"When tool includes an InjectedToolCallId "
"argument, tool must always be invoked with a full "
"model ToolCall of the form: {'args': {...}, "
"'name': '...', 'type': 'tool_call', "
"'tool_call_id': '...'}"
)
raise ValueError(msg)
tool_input[k] = tool_call_id
result = input_args.model_validate(tool_input)
result_dict = result.model_dump()
elif issubclass(input_args, BaseModelV1):
# Check args_schema for InjectedToolCallId
for k, v in get_all_basemodel_annotations(input_args).items():
if _is_injected_arg_type(v, injected_type=InjectedToolCallId):
if tool_call_id is None:
msg = (
"When tool includes an InjectedToolCallId "
"argument, tool must always be invoked with a full "
"model ToolCall of the form: {'args': {...}, "
"'name': '...', 'type': 'tool_call', "
"'tool_call_id': '...'}"
)
raise ValueError(msg)
tool_input[k] = tool_call_id
result = input_args.parse_obj(tool_input)
result_dict = result.dict()
else:
msg = (
f"args_schema must be a Pydantic BaseModel, got {self.args_schema}"
)
raise NotImplementedError(msg)
validated_input = {
k: getattr(result, k) for k in result_dict if k in tool_input
}
for k in self._injected_args_keys:
if k == "tool_call_id":
if tool_call_id is None:
msg = (
"When tool includes an InjectedToolCallId "
"argument, tool must always be invoked with a full "
"model ToolCall of the form: {'args': {...}, "
"'name': '...', 'type': 'tool_call', "
"'tool_call_id': '...'}"
)
raise ValueError(msg)
validated_input[k] = tool_call_id
if k in tool_input:
injected_val = tool_input[k]
validated_input[k] = injected_val
return validated_input
return tool_input
@abstractmethod
def _run(self, *args: Any, **kwargs: Any) -> Any:
"""Use the tool.
Add `run_manager: CallbackManagerForToolRun | None = None` to child
implementations to enable tracing.
Returns:
The result of the tool execution.
"""
async def _arun(self, *args: Any, **kwargs: Any) -> Any:
"""Use the tool asynchronously.
Add `run_manager: AsyncCallbackManagerForToolRun | None = None` to child
implementations to enable tracing.
Returns:
The result of the tool execution.
"""
if kwargs.get("run_manager") and signature(self._run).parameters.get(
"run_manager"
):
kwargs["run_manager"] = kwargs["run_manager"].get_sync()
return await run_in_executor(None, self._run, *args, **kwargs)
def _filter_injected_args(self, tool_input: dict) -> dict:
"""Filter out injected tool arguments from the input dictionary.
Injected arguments are those annotated with `InjectedToolArg` or its
subclasses, or arguments in `FILTERED_ARGS` like `run_manager` and callbacks.
Args:
tool_input: The tool input dictionary to filter.
Returns:
A filtered dictionary with injected arguments removed.
"""
# Start with filtered args from the constant
filtered_keys = set[str](FILTERED_ARGS)
# If we have an args_schema, use it to identify injected args
if self.args_schema is not None:
try:
annotations = get_all_basemodel_annotations(self.args_schema)
for field_name, field_type in annotations.items():
if _is_injected_arg_type(field_type):
filtered_keys.add(field_name)
except Exception: # noqa: S110
# If we can't get annotations, just use FILTERED_ARGS
pass
# Filter out the injected keys from tool_input
return {k: v for k, v in tool_input.items() if k not in filtered_keys}
def _to_args_and_kwargs(
self, tool_input: str | dict, tool_call_id: str | None
) -> tuple[tuple, dict]:
"""Convert tool input to positional and keyword arguments.
Args:
tool_input: The input to the tool.
tool_call_id: The ID of the tool call, if available.
Returns:
A tuple of `(positional_args, keyword_args)` for the tool.
Raises:
TypeError: If the tool input type is invalid.
"""
if (
self.args_schema is not None
and isinstance(self.args_schema, type)
and is_basemodel_subclass(self.args_schema)
and not get_fields(self.args_schema)
):
# StructuredTool with no args
return (), {}
tool_input = self._parse_input(tool_input, tool_call_id)
# For backwards compatibility, if run_input is a string,
# pass as a positional argument.
if isinstance(tool_input, str):
return (tool_input,), {}
if isinstance(tool_input, dict):
# Make a shallow copy of the input to allow downstream code
# to modify the root level of the input without affecting the
# original input.
# This is used by the tool to inject run time information like
# the callback manager.
return (), tool_input.copy()
# This code path is not expected to be reachable.
msg = f"Invalid tool input type: {type(tool_input)}"
raise TypeError(msg)
def run(
self,
tool_input: str | dict[str, Any],
verbose: bool | None = None, # noqa: FBT001
start_color: str | None = "green",
color: str | None = "green",
callbacks: Callbacks = None,
*,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
run_name: str | None = None,
run_id: uuid.UUID | None = None,
config: RunnableConfig | None = None,
tool_call_id: str | None = None,
**kwargs: Any,
) -> Any:
"""Run the tool.
Args:
tool_input: The input to the tool.
verbose: Whether to log the tool's progress.
start_color: The color to use when starting the tool.
color: The color to use when ending the tool.
callbacks: Callbacks to be called during tool execution.
tags: Optional list of tags associated with the tool.
metadata: Optional metadata associated with the tool.
run_name: The name of the run.
run_id: The id of the run.
config: The configuration for the tool.
tool_call_id: The id of the tool call.
**kwargs: Keyword arguments to be passed to tool callbacks (event handler)
Returns:
The output of the tool.
Raises:
ToolException: If an error occurs during tool execution.
"""
callback_manager = CallbackManager.configure(
callbacks,
self.callbacks,
self.verbose or bool(verbose),
tags,
self.tags,
metadata,
self.metadata,
)
# Filter out injected arguments from callback inputs
filtered_tool_input = (
self._filter_injected_args(tool_input)
if isinstance(tool_input, dict)
else None
)
# Use filtered inputs for the input_str parameter as well
tool_input_str = (
tool_input
if isinstance(tool_input, str)
else str(
filtered_tool_input if filtered_tool_input is not None else tool_input
)
)
run_manager = callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input_str,
color=start_color,
name=run_name,
run_id=run_id,
inputs=filtered_tool_input,
**kwargs,
)
content = None
artifact = None
status = "success"
error_to_raise: Exception | KeyboardInterrupt | None = None
try:
child_config = patch_config(config, callbacks=run_manager.get_child())
with set_config_context(child_config) as context:
tool_args, tool_kwargs = self._to_args_and_kwargs(
tool_input, tool_call_id
)
if signature(self._run).parameters.get("run_manager"):
tool_kwargs |= {"run_manager": run_manager}
if config_param := _get_runnable_config_param(self._run):
tool_kwargs |= {config_param: config}
response = context.run(self._run, *tool_args, **tool_kwargs)
if self.response_format == "content_and_artifact":
msg = (
"Since response_format='content_and_artifact' "
"a two-tuple of the message content and raw tool output is "
f"expected. Instead, generated response is of type: "
f"{type(response)}."
)
if not isinstance(response, tuple):
error_to_raise = ValueError(msg)
else:
try:
content, artifact = response
except ValueError:
error_to_raise = ValueError(msg)
else:
content = response
except (ValidationError, ValidationErrorV1) as e:
if not self.handle_validation_error:
error_to_raise = e
else:
content = _handle_validation_error(e, flag=self.handle_validation_error)
status = "error"
except ToolException as e:
if not self.handle_tool_error:
error_to_raise = e
else:
content = _handle_tool_error(e, flag=self.handle_tool_error)
status = "error"
except (Exception, KeyboardInterrupt) as e:
error_to_raise = e
if error_to_raise:
run_manager.on_tool_error(error_to_raise)
raise error_to_raise
output = _format_output(content, artifact, tool_call_id, self.name, status)
run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
return output
async def arun(
self,
tool_input: str | dict,
verbose: bool | None = None, # noqa: FBT001
start_color: str | None = "green",
color: str | None = "green",
callbacks: Callbacks = None,
*,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
run_name: str | None = None,
run_id: uuid.UUID | None = None,
config: RunnableConfig | None = None,
tool_call_id: str | None = None,
**kwargs: Any,
) -> Any:
"""Run the tool asynchronously.
Args:
tool_input: The input to the tool.
verbose: Whether to log the tool's progress.
start_color: The color to use when starting the tool.
color: The color to use when ending the tool.
callbacks: Callbacks to be called during tool execution.
tags: Optional list of tags associated with the tool.
metadata: Optional metadata associated with the tool.
run_name: The name of the run.
run_id: The id of the run.
config: The configuration for the tool.
tool_call_id: The id of the tool call.
**kwargs: Keyword arguments to be passed to tool callbacks
Returns:
The output of the tool.
Raises:
ToolException: If an error occurs during tool execution.
"""
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose or bool(verbose),
tags,
self.tags,
metadata,
self.metadata,
)
# Filter out injected arguments from callback inputs
filtered_tool_input = (
self._filter_injected_args(tool_input)
if isinstance(tool_input, dict)
else None
)
# Use filtered inputs for the input_str parameter as well
tool_input_str = (
tool_input
if isinstance(tool_input, str)
else str(
filtered_tool_input if filtered_tool_input is not None else tool_input
)
)
run_manager = await callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input_str,
color=start_color,
name=run_name,
run_id=run_id,
inputs=filtered_tool_input,
**kwargs,
)
content = None
artifact = None
status = "success"
error_to_raise: Exception | KeyboardInterrupt | None = None
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id)
child_config = patch_config(config, callbacks=run_manager.get_child())
with set_config_context(child_config) as context:
func_to_check = (
self._run if self.__class__._arun is BaseTool._arun else self._arun # noqa: SLF001
)
if signature(func_to_check).parameters.get("run_manager"):
tool_kwargs["run_manager"] = run_manager
if config_param := _get_runnable_config_param(func_to_check):
tool_kwargs[config_param] = config
coro = self._arun(*tool_args, **tool_kwargs)
response = await coro_with_context(coro, context)
if self.response_format == "content_and_artifact":
msg = (
"Since response_format='content_and_artifact' "
"a two-tuple of the message content and raw tool output is "
f"expected. Instead, generated response is of type: "
f"{type(response)}."
)
if not isinstance(response, tuple):
error_to_raise = ValueError(msg)
else:
try:
content, artifact = response
except ValueError:
error_to_raise = ValueError(msg)
else:
content = response
except ValidationError as e:
if not self.handle_validation_error:
error_to_raise = e
else:
content = _handle_validation_error(e, flag=self.handle_validation_error)
status = "error"
except ToolException as e:
if not self.handle_tool_error:
error_to_raise = e
else:
content = _handle_tool_error(e, flag=self.handle_tool_error)
status = "error"
except (Exception, KeyboardInterrupt) as e:
error_to_raise = e
if error_to_raise:
await run_manager.on_tool_error(error_to_raise)
raise error_to_raise
output = _format_output(content, artifact, tool_call_id, self.name, status)
await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
return output
def _is_tool_call(x: Any) -> bool:
"""Check if the input is a tool call dictionary.
Args:
x: The input to check.
Returns:
`True` if the input is a tool call, `False` otherwise.
"""
return isinstance(x, dict) and x.get("type") == "tool_call"
def _handle_validation_error(
e: ValidationError | ValidationErrorV1,
*,
flag: Literal[True] | str | Callable[[ValidationError | ValidationErrorV1], str],
) -> str:
"""Handle validation errors based on the configured flag.
Args:
e: The validation error that occurred.
flag: How to handle the error (`bool`, `str`, or `Callable`).
Returns:
The error message to return.
Raises:
ValueError: If the flag type is unexpected.
"""
if isinstance(flag, bool):
content = "Tool input validation error"
elif isinstance(flag, str):
content = flag
elif callable(flag):
content = flag(e)
else:
msg = (
f"Got unexpected type of `handle_validation_error`. Expected bool, "
f"str or callable. Received: {flag}"
)
raise ValueError(msg) # noqa: TRY004
return content
def _handle_tool_error(
e: ToolException,
*,
flag: Literal[True] | str | Callable[[ToolException], str] | None,
) -> str:
"""Handle tool execution errors based on the configured flag.
Args:
e: The tool exception that occurred.
flag: How to handle the error (`bool`, `str`, or `Callable`).
Returns:
The error message to return.
Raises:
ValueError: If the flag type is unexpected.
"""
if isinstance(flag, bool):
content = e.args[0] if e.args else "Tool execution error"
elif isinstance(flag, str):
content = flag
elif callable(flag):
content = flag(e)
else:
msg = (
f"Got unexpected type of `handle_tool_error`. Expected bool, str "
f"or callable. Received: {flag}"
)
raise ValueError(msg) # noqa: TRY004
return content
def _prep_run_args(
value: str | dict | ToolCall,
config: RunnableConfig | None,
**kwargs: Any,
) -> tuple[str | dict, dict]:
"""Prepare arguments for tool execution.
Args:
value: The input value (`str`, `dict`, or `ToolCall`).
config: The runnable configuration.
**kwargs: Additional keyword arguments.
Returns:
A tuple of `(tool_input, run_kwargs)`.
"""
config = ensure_config(config)
if _is_tool_call(value):
tool_call_id: str | None = cast("ToolCall", value)["id"]
tool_input: str | dict = cast("ToolCall", value)["args"].copy()
else:
tool_call_id = None
tool_input = cast("str | dict", value)
return (
tool_input,
dict(
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
run_id=config.pop("run_id", None),
config=config,
tool_call_id=tool_call_id,
**kwargs,
),
)
def _format_output(
content: Any,
artifact: Any,
tool_call_id: str | None,
name: str,
status: str,
) -> ToolOutputMixin | Any:
"""Format tool output as a `ToolMessage` if appropriate.
Args:
content: The main content of the tool output.
artifact: Any artifact data from the tool.
tool_call_id: The ID of the tool call.
name: The name of the tool.
status: The execution status.
Returns:
The formatted output, either as a `ToolMessage` or the original content.
"""
if isinstance(content, ToolOutputMixin) or tool_call_id is None:
return content
if not _is_message_content_type(content):
content = _stringify(content)
return ToolMessage(
content,
artifact=artifact,
tool_call_id=tool_call_id,
name=name,
status=status,
)
def _is_message_content_type(obj: Any) -> bool:
"""Check if object is valid message content format.
Validates content for OpenAI or Anthropic format tool messages.
Args:
obj: The object to check.
Returns:
`True` if the object is valid message content, `False` otherwise.
"""
return isinstance(obj, str) or (
isinstance(obj, list) and all(_is_message_content_block(e) for e in obj)
)
def _is_message_content_block(obj: Any) -> bool:
"""Check if object is a valid message content block.
Validates content blocks for OpenAI or Anthropic format.
Args:
obj: The object to check.
Returns:
`True` if the object is a valid content block, `False` otherwise.
"""
if isinstance(obj, str):
return True
if isinstance(obj, dict):
return obj.get("type", None) in TOOL_MESSAGE_BLOCK_TYPES
return False
def _stringify(content: Any) -> str:
"""Convert content to string, preferring JSON format.
Args:
content: The content to stringify.
Returns:
String representation of the content.
"""
try:
return json.dumps(content, ensure_ascii=False)
except Exception:
return str(content)
def _get_type_hints(func: Callable) -> dict[str, type] | None:
"""Get type hints from a function, handling partial functions.
Args:
func: The function to get type hints from.
Returns:
`dict` of type hints, or `None` if extraction fails.
"""
if isinstance(func, functools.partial):
func = func.func
try:
return get_type_hints(func)
except Exception:
return None
def _get_runnable_config_param(func: Callable) -> str | None:
"""Find the parameter name for `RunnableConfig` in a function.
Args:
func: The function to check.
Returns:
The parameter name for `RunnableConfig`, or `None` if not found.
"""
type_hints = _get_type_hints(func)
if not type_hints:
return None
for name, type_ in type_hints.items():
if type_ is RunnableConfig:
return name
return None
| ChildTool |
python | Textualize__textual | src/textual/widgets/_progress_bar.py | {
"start": 5791,
"end": 6656
} | class ____(Label):
"""A label to display the estimated time until completion of the progress bar."""
DEFAULT_CSS = """
ETAStatus {
width: 9;
content-align-horizontal: right;
}
"""
eta: reactive[float | None] = reactive[Optional[float]](None)
"""Estimated number of seconds till completion, or `None` if no estimate is available."""
def render(self) -> RenderResult:
"""Render the ETA display."""
eta = self.eta
if eta is None:
return "--:--:--"
else:
minutes, seconds = divmod(round(eta), 60)
hours, minutes = divmod(minutes, 60)
if hours > 999999:
return "+999999h"
elif hours > 99:
return f"{hours}h"
else:
return f"{hours:02}:{minutes:02}:{seconds:02}"
| ETAStatus |
python | PrefectHQ__prefect | tests/server/models/test_saved_searches.py | {
"start": 84,
"end": 2174
} | class ____:
async def test_create_saved_search_succeeds(self, session):
filters = [
{
"object": "flow",
"property": "name",
"type": "string",
"operation": "equals",
"value": "foo",
},
{
"object": "flow_run",
"property": "name",
"type": "string",
"operation": "equals",
"value": "bar",
},
]
saved_search = await models.saved_searches.create_saved_search(
session=session,
saved_search=schemas.core.SavedSearch(
name="My SavedSearch", filters=filters
),
)
assert saved_search.name == "My SavedSearch"
assert saved_search.filters == filters
async def test_create_saved_search_updates_existing_saved_search(
self,
session,
):
filters = [
{
"object": "flow",
"property": "name",
"type": "string",
"operation": "equals",
"value": "foo",
},
{
"object": "flow_run",
"property": "name",
"type": "string",
"operation": "equals",
"value": "bar",
},
]
saved_search = await models.saved_searches.create_saved_search(
session=session,
saved_search=schemas.core.SavedSearch(
name="My SavedSearch", filters=filters
),
)
assert saved_search.name == "My SavedSearch"
assert saved_search.filters == filters
saved_search = await models.saved_searches.create_saved_search(
session=session,
saved_search=schemas.core.SavedSearch(
name="My SavedSearch",
),
)
assert saved_search.name == "My SavedSearch"
# should be removed
assert not saved_search.filters
| TestCreateSavedSearch |
python | getsentry__sentry | tests/sentry/backup/test_rpc.py | {
"start": 12559,
"end": 18101
} | class ____(TestCase):
"""
Validate errors related to the `import_by_model()` RPC method.
"""
@staticmethod
def is_user_model(model: Any) -> bool:
return NormalizedModelName(model["model"]) == USER_MODEL_NAME
@cached_property
def _json_of_exhaustive_user_with_minimum_privileges(self) -> Any:
with open(
get_fixture_path("backup", "user-with-minimum-privileges.json"), "rb"
) as backup_file:
return orjson.loads(backup_file.read())
def json_of_exhaustive_user_with_minimum_privileges(self) -> Any:
return deepcopy(self._json_of_exhaustive_user_with_minimum_privileges)
def test_bad_invalid_min_ordinal(self) -> None:
result = import_export_service.import_by_model(
import_model_name=str(USER_MODEL_NAME),
scope=RpcImportScope.Global,
flags=RpcImportFlags(import_uuid=str(uuid4().hex)),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data="[]",
min_ordinal=0,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.InvalidMinOrdinal
def test_bad_unknown_model(self) -> None:
result = import_export_service.import_by_model(
import_model_name="sentry.doesnotexist",
scope=RpcImportScope.Global,
flags=RpcImportFlags(import_uuid=str(uuid4().hex)),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data="[]",
min_ordinal=1,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.UnknownModel
@assume_test_silo_mode(SiloMode.CONTROL, can_be_monolith=False)
def test_bad_incorrect_silo_mode_for_model(self) -> None:
result = import_export_service.import_by_model(
import_model_name=str(PROJECT_MODEL_NAME),
scope=RpcImportScope.Global,
flags=RpcImportFlags(import_uuid=str(uuid4().hex)),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data="[]",
min_ordinal=1,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.IncorrectSiloModeForModel
def test_bad_unspecified_scope(self) -> None:
result = import_export_service.import_by_model(
import_model_name=str(USER_MODEL_NAME),
flags=RpcImportFlags(import_uuid=str(uuid4().hex)),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data="[]",
min_ordinal=1,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.UnspecifiedScope
def test_bad_missing_import_uuid(self) -> None:
result = import_export_service.import_by_model(
import_model_name=str(USER_MODEL_NAME),
scope=RpcImportScope.Global,
flags=RpcImportFlags(),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data="[]",
min_ordinal=1,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.MissingImportUUID
def test_bad_invalid_json(self) -> None:
result = import_export_service.import_by_model(
import_model_name=str(USER_MODEL_NAME),
scope=RpcImportScope.Global,
flags=RpcImportFlags(import_uuid=str(uuid4().hex)),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data="_",
min_ordinal=1,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.DeserializationFailed
def test_bad_validation(self) -> None:
models = self.json_of_exhaustive_user_with_minimum_privileges()
# Username too long - will fail deserialization.
for model in models:
if self.is_user_model(model):
model["fields"]["username"] = "a" * (MAX_USERNAME_LENGTH + 1)
json_data = orjson.dumps(
[m for m in models if self.is_user_model(m)],
option=orjson.OPT_UTC_Z | orjson.OPT_NON_STR_KEYS,
).decode()
result = import_export_service.import_by_model(
import_model_name=str(USER_MODEL_NAME),
scope=RpcImportScope.Global,
flags=RpcImportFlags(import_uuid=str(uuid4().hex)),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data=json_data,
min_ordinal=1,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.ValidationError
def test_bad_unexpected_model(self) -> None:
models = self.json_of_exhaustive_user_with_minimum_privileges()
json_data = orjson.dumps(
[m for m in models if self.is_user_model(m)],
option=orjson.OPT_UTC_Z | orjson.OPT_NON_STR_KEYS,
).decode()
result = import_export_service.import_by_model(
import_model_name="sentry.option",
scope=RpcImportScope.Global,
flags=RpcImportFlags(import_uuid=str(uuid4().hex)),
filter_by=[],
pk_map=RpcPrimaryKeyMap(),
json_data=json_data,
min_ordinal=1,
)
assert isinstance(result, RpcImportError)
assert result.get_kind() == RpcImportErrorKind.UnexpectedModel
@no_silo_test
| RpcImportErrorTests |
python | django__django | tests/aggregation_regress/models.py | {
"start": 172,
"end": 335
} | class ____(models.Model):
name = models.CharField(max_length=100)
age = models.IntegerField()
friends = models.ManyToManyField("self", blank=True)
| Author |
python | getsentry__sentry | src/sentry/ingest/transaction_clusterer/__init__.py | {
"start": 150,
"end": 744
} | class ____:
name: str
"""Human-friendly name of the namespace. For example, logging purposes."""
data: str
"""Prefix to store input data to the clusterer."""
rules: str
"""Prefix to store produced rules in the clusterer, in non-persistent storage."""
persistent_storage: str
"""Option name to store produced rules in the clusterer, in persistent storage."""
tracker: str
"""Option name to emit tracking data of this namespace, such as metrics."""
meta_store: str
"""Option name to emit store metadata belonging to this namespace."""
| NamespaceOption |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/scopes.py | {
"start": 2979,
"end": 6490
} | class ____:
# define default logger
logger = logging.getLogger("airbyte")
def __init__(self, config: Mapping[str, Any]) -> None:
self.permitted_streams: List[str] = list(ALWAYS_PERMITTED_STREAMS)
self.not_permitted_streams: List[set[str, str]] = []
self._error_handler = ShopifyErrorHandler()
self._http_client = HttpClient("ShopifyScopes", self.logger, self._error_handler, session=requests.Session())
self.user_scopes = self.get_user_scopes(config)
# for each stream check the authenticated user has all scopes required
self.get_streams_from_user_scopes()
# log if there are streams missing scopes and should be omitted
self.emit_missing_scopes()
# template for the log message
missing_scope_message: str = (
"The stream `{stream}` could not be synced without the `{scope}` scope. Please check the `{scope}` is granted."
)
def get_user_scopes(self, config) -> list[Any]:
url = f"https://{config['shop']}.myshopify.com/admin/oauth/access_scopes.json"
headers = config["authenticator"].get_auth_header()
try:
_, response = self._http_client.send_request("GET", url, headers=headers, request_kwargs={})
access_scopes = [scope.get("handle") for scope in response.json().get("access_scopes")]
except InvalidURL:
raise ShopifyWrongShopNameError(url)
except JSONDecodeError as json_error:
raise ShopifyBadJsonError(json_error)
if access_scopes:
return access_scopes
else:
raise ShopifyAccessScopesError(response)
def log_missing_scope(self, not_permitted_stream: Mapping[str, Any]) -> str:
stream_name, scope = not_permitted_stream
self.logger.warning(self.missing_scope_message.format(stream=stream_name, scope=scope))
def emit_missing_scopes(self) -> Optional[Iterable[List[str]]]:
if len(self.not_permitted_streams) > 0:
for not_permitted_stream in self.not_permitted_streams:
self.log_missing_scope(not_permitted_stream)
def get_permitted_streams(self) -> List[str]:
# return the list of validated streams
return self.permitted_streams
def not_permitted_streams_names(self) -> List[str]:
return [not_permitted[0] for not_permitted in self.not_permitted_streams]
def stream_has_no_missing_scopes(self, stream_name: str) -> bool:
return stream_name not in self.not_permitted_streams_names()
def check_user_has_stream_scope(self, stream_name: str, scope: str) -> None:
if scope not in self.user_scopes:
self.not_permitted_streams.append((stream_name, scope))
def register_permitted_stream(self, stream_name: str) -> None:
# allow stream only if there is a complete match with required scopes
if self.stream_has_no_missing_scopes(stream_name):
self.permitted_streams.append(stream_name)
def validate_stream_scopes(self, stream_name: str, scopes_required: str) -> None:
for scope in scopes_required:
self.check_user_has_stream_scope(stream_name, scope)
def get_streams_from_user_scopes(self) -> None:
# for each stream check the authenticated user has all scopes required
for stream_name, stream_scopes in SCOPES_MAPPING.items():
self.validate_stream_scopes(stream_name, stream_scopes)
self.register_permitted_stream(stream_name)
| ShopifyScopes |
python | getsentry__sentry | tests/sentry/snuba/test_query_subscription_consumer.py | {
"start": 2510,
"end": 4869
} | class ____(BaseQuerySubscriptionTest, TestCase):
@pytest.fixture(autouse=True)
def _setup_metrics(self):
with mock.patch("sentry.utils.metrics") as self.metrics:
yield
def test_arroyo_consumer(self) -> None:
topic_defn = get_topic_definition(Topic.EVENTS)
create_topics(topic_defn["cluster"], [topic_defn["real_topic_name"]])
registration_key = "registered_test_2"
mock_callback = mock.Mock()
register_subscriber(registration_key)(mock_callback)
with self.tasks():
snuba_query = create_snuba_query(
SnubaQuery.Type.ERROR,
Dataset.Events,
"hello",
"count()",
timedelta(minutes=10),
timedelta(minutes=1),
None,
)
sub = create_snuba_subscription(self.project, registration_key, snuba_query)
sub.refresh_from_db()
data = self.valid_wrapper
data["payload"]["subscription_id"] = sub.subscription_id
commit = mock.Mock()
partition = Partition(ArroyoTopic("test"), 0)
strategy = QuerySubscriptionStrategyFactory(
self.dataset.value,
1,
1,
1,
DEFAULT_BLOCK_SIZE,
DEFAULT_BLOCK_SIZE,
# We have to disable multi_proc here, otherwise the consumer attempts to access the dev
# database rather than the test one due to reinitialising Django
multi_proc=False,
).create_with_partitions(commit, {partition: 0})
message = self.build_mock_message(data, topic=self.topic)
strategy.submit(
Message(
BrokerValue(
KafkaPayload(b"key", message.value().encode("utf-8"), [("should_drop", b"1")]),
partition,
1,
datetime.now(),
)
)
)
data = deepcopy(data)
data["payload"]["values"] = data["payload"]["result"]
data["payload"].pop("result")
data["payload"].pop("request")
data["payload"]["timestamp"] = datetime.fromisoformat(data["payload"]["timestamp"]).replace(
tzinfo=timezone.utc
)
mock_callback.assert_called_once_with(data["payload"], sub)
| HandleMessageTest |
python | pypa__setuptools | setuptools/tests/test_config_discovery.py | {
"start": 12325,
"end": 15237
} | class ____:
def _simulate_package_with_extension(self, tmp_path):
# This example is based on: https://github.com/nucleic/kiwi/tree/1.4.0
files = [
"benchmarks/file.py",
"docs/Makefile",
"docs/requirements.txt",
"docs/source/conf.py",
"proj/header.h",
"proj/file.py",
"py/proj.cpp",
"py/other.cpp",
"py/file.py",
"py/py.typed",
"py/tests/test_proj.py",
"README.rst",
]
_populate_project_dir(tmp_path, files, {})
setup_script = """
from setuptools import Extension, setup
ext_modules = [
Extension(
"proj",
["py/proj.cpp", "py/other.cpp"],
include_dirs=["."],
language="c++",
),
]
setup(ext_modules=ext_modules)
"""
(tmp_path / "setup.py").write_text(DALS(setup_script), encoding="utf-8")
def test_skip_discovery_with_setupcfg_metadata(self, tmp_path):
"""Ensure that auto-discovery is not triggered when the project is based on
C-extensions only, for backward compatibility.
"""
self._simulate_package_with_extension(tmp_path)
pyproject = """
[build-system]
requires = []
build-backend = 'setuptools.build_meta'
"""
(tmp_path / "pyproject.toml").write_text(DALS(pyproject), encoding="utf-8")
setupcfg = """
[metadata]
name = proj
version = 42
"""
(tmp_path / "setup.cfg").write_text(DALS(setupcfg), encoding="utf-8")
dist = _get_dist(tmp_path, {})
assert dist.get_name() == "proj"
assert dist.get_version() == "42"
assert dist.py_modules is None
assert dist.packages is None
assert len(dist.ext_modules) == 1
assert dist.ext_modules[0].name == "proj"
def test_dont_skip_discovery_with_pyproject_metadata(self, tmp_path):
"""When opting-in to pyproject.toml metadata, auto-discovery will be active if
the package lists C-extensions, but does not configure py-modules or packages.
This way we ensure users with complex package layouts that would lead to the
discovery of multiple top-level modules/packages see errors and are forced to
explicitly set ``packages`` or ``py-modules``.
"""
self._simulate_package_with_extension(tmp_path)
pyproject = """
[project]
name = 'proj'
version = '42'
"""
(tmp_path / "pyproject.toml").write_text(DALS(pyproject), encoding="utf-8")
with pytest.raises(PackageDiscoveryError, match="multiple (packages|modules)"):
_get_dist(tmp_path, {})
| TestWithCExtension |
python | pandas-dev__pandas | asv_bench/benchmarks/strings.py | {
"start": 7425,
"end": 7521
} | class ____(Dtypes):
def time_iter(self, dtype):
for i in self.s:
pass
| Iter |
python | huggingface__transformers | tests/models/chameleon/test_image_processing_chameleon.py | {
"start": 3667,
"end": 10003
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = ChameleonImageProcessor if is_vision_available() else None
fast_image_processing_class = ChameleonImageProcessorFast if is_torchvision_available() else None
# Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->Chameleon
def setUp(self):
super().setUp()
self.image_processor_tester = ChameleonImageProcessingTester(self)
@property
# Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def test_call_pil(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = (1, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = (7, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
def test_call_numpy(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = (1, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = (7, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
def test_call_pytorch(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = (1, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = (7, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
def test_nested_input(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
# Test batched as a list of images
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = (7, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
# Test batched as a nested list of images, where each sublist is one batch
image_inputs_nested = [image_inputs[:3], image_inputs[3:]]
encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values
expected_output_image_shape = (7, 3, 18, 18)
self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape)
# Image processor should return same pixel values, independently of input format
self.assertTrue((encoded_images_nested == encoded_images).all())
| ChameleonImageProcessingTest |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 112442,
"end": 112554
} | class ____:
xlByColumns = 2 # from enum XlSearchOrder
xlByRows = 1 # from enum XlSearchOrder
| SearchOrder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 275144,
"end": 275830
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of RemoveEnterpriseMember"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "user_id", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise from which the user should be removed."""
user_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="userId")
"""The ID of the user to remove from the enterprise."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| RemoveEnterpriseMemberInput |
python | paramiko__paramiko | paramiko/auth_strategy.py | {
"start": 3348,
"end": 3918
} | class ____(PrivateKey):
"""
An in-memory, decrypted `.PKey` object.
"""
def __init__(self, username, pkey):
super().__init__(username=username)
# No decryption (presumably) necessary!
self.pkey = pkey
def __repr__(self):
# NOTE: most of interesting repr-bits for private keys is in PKey.
# TODO: tacking on agent-ness like this is a bit awkward, but, eh?
rep = super()._repr(pkey=self.pkey)
if isinstance(self.pkey, AgentKey):
rep += " [agent]"
return rep
| InMemoryPrivateKey |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 32474,
"end": 35417
} | class ____(DashboardComponent):
"""How many tasks are on each worker"""
@log_errors
def __init__(self, scheduler, **kwargs):
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"bandwidth": [1, 2],
"source": ["a", "b"],
"destination": ["a", "b"],
"bandwidth_text": ["1", "2"],
}
)
values = [hex(x)[2:] for x in range(64, 256)][::-1]
mapper = linear_cmap(
field_name="bandwidth",
palette=["#" + x + x + "FF" for x in values],
low=0,
high=1,
)
self.root = figure(
title="Bandwidth by Worker",
tools="",
name="bandwidth_worker_heatmap",
x_range=["a", "b"],
y_range=["a", "b"],
**kwargs,
)
self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION
self.root.rect(
source=self.source,
x="source",
y="destination",
color=mapper,
height=1,
width=1,
)
self.color_map = mapper["transform"]
color_bar = ColorBar(
color_mapper=self.color_map,
label_standoff=12,
border_line_color=None,
location=(0, 0),
)
color_bar.formatter = NumeralTickFormatter(format="0.0 b")
color_bar.ticker = AdaptiveTicker(**TICKS_1024)
self.root.add_layout(color_bar, "right")
self.root.toolbar_location = None
hover = HoverTool()
hover.tooltips = """
<div>
<p><b>Source:</b> @source </p>
<p><b>Destination:</b> @destination </p>
<p><b>Bandwidth:</b> @bandwidth_text / s</p>
</div>
"""
hover.point_policy = "follow_mouse"
self.root.add_tools(hover)
@without_property_validation
@log_errors
def update(self):
bw = self.scheduler.bandwidth_workers
if not bw:
return
def name(address):
try:
ws = self.scheduler.workers[address]
except KeyError:
return address
if ws.name is not None:
return str(ws.name)
return address
x, y, value = zip(*((name(a), name(b), c) for (a, b), c in bw.items()))
self.color_map.high = max(value)
factors = list(sorted(set(x + y)))
self.root.x_range.factors = factors
self.root.y_range.factors = factors[::-1]
result = {
"source": x,
"destination": y,
"bandwidth": value,
"bandwidth_text": list(map(format_bytes, value)),
}
self.root.title.text = "Bandwidth: " + format_bytes(self.scheduler.bandwidth)
update(self.source, result)
| BandwidthWorkers |
python | pypa__setuptools | setuptools/_vendor/wheel/cli/convert.py | {
"start": 3054,
"end": 5003
} | class ____(ConvertSource):
def __init__(self, path: Path):
if not (match := egg_filename_re.match(path.name)):
raise ValueError(f"Invalid egg file name: {path.name}")
# Binary wheels are assumed to be for CPython
self.path = path
self.name = normalize(match.group("name"))
self.version = match.group("ver")
if pyver := match.group("pyver"):
self.pyver = pyver.replace(".", "")
if arch := match.group("arch"):
self.abi = self.pyver.replace("py", "cp")
self.platform = normalize(arch)
self.metadata = Message()
def generate_contents(self) -> Iterator[tuple[str, bytes]]:
with ZipFile(self.path, "r") as zip_file:
for filename in sorted(zip_file.namelist()):
# Skip pure directory entries
if filename.endswith("/"):
continue
# Handle files in the egg-info directory specially, selectively moving
# them to the dist-info directory while converting as needed
if filename.startswith("EGG-INFO/"):
if filename == "EGG-INFO/requires.txt":
requires = zip_file.read(filename).decode("utf-8")
convert_requires(requires, self.metadata)
elif filename == "EGG-INFO/PKG-INFO":
pkginfo = zip_file.read(filename).decode("utf-8")
convert_pkg_info(pkginfo, self.metadata)
elif filename == "EGG-INFO/entry_points.txt":
yield (
f"{self.dist_info_dir}/entry_points.txt",
zip_file.read(filename),
)
continue
# For any other file, just pass it through
yield filename, zip_file.read(filename)
| EggFileSource |
python | modin-project__modin | modin/tests/pandas/extensions/test_series_extensions.py | {
"start": 5808,
"end": 10304
} | class ____:
def test_override_index(self, Backend1):
series = pd.Series(["a", "b"])
def set_index(self, new_index):
self._query_compiler.index = [f"{v}_custom" for v in new_index]
register_series_accessor(name="index", backend=Backend1)(
property(fget=lambda self: self._query_compiler.index[::-1], fset=set_index)
)
assert list(series.index) == [0, 1]
backend_series = series.set_backend(Backend1)
assert list(backend_series.index) == [1, 0]
backend_series.index = [2, 3]
assert list(backend_series.index) == ["3_custom", "2_custom"]
def test_add_deletable_property(self, Backend1):
# register a public property `public_property_name` that is backed by
# a private attribute `private_property_name`.
public_property_name = "property_name"
private_property_name = "_property_name"
def get_property(self):
return getattr(self, private_property_name)
def set_property(self, value):
setattr(self, private_property_name, value)
def del_property(self):
delattr(self, private_property_name)
register_series_accessor(name=public_property_name, backend=Backend1)(
property(get_property, set_property, del_property)
)
series = pd.Series([0])
assert not hasattr(series, public_property_name)
backend_series = series.set_backend(Backend1)
setattr(backend_series, public_property_name, "value")
assert hasattr(backend_series, private_property_name)
assert getattr(backend_series, public_property_name) == "value"
delattr(backend_series, public_property_name)
# check that the deletion works.
assert not hasattr(backend_series, private_property_name)
def test_non_settable_extension_property(self, Backend1):
property_name = "property_name"
register_series_accessor(name=property_name, backend=Backend1)(
property(fget=(lambda self: 4))
)
series = pd.Series([0])
assert not hasattr(series, property_name)
backend_series = series.set_backend(Backend1)
assert getattr(backend_series, property_name) == 4
with pytest.raises(AttributeError):
setattr(backend_series, property_name, "value")
def test_delete_non_deletable_extension_property(self, Backend1):
property_name = "property_name"
register_series_accessor(name=property_name, backend=Backend1)(
property(fget=(lambda self: "value"))
)
series = pd.Series([0])
assert not hasattr(series, property_name)
backend_series = series.set_backend(Backend1)
with pytest.raises(AttributeError):
delattr(backend_series, property_name)
def test_deleting_extension_that_is_not_property_raises_attribute_error(Backend1):
expected_string_val = "Some string value"
method_name = "new_method"
series = pd.Series([1, 2, 3]).set_backend(Backend1)
@register_series_accessor(name=method_name, backend=Backend1)
def my_method_implementation(self):
return expected_string_val
assert hasattr(pd.Series, method_name)
assert series.new_method() == expected_string_val
with pytest.raises(AttributeError):
delattr(series, method_name)
def test_disallowed_extensions(Backend1, non_extendable_attribute_name):
with pytest.raises(
ValueError,
match=re.escape(
f"Cannot register an extension with the reserved name {non_extendable_attribute_name}."
),
):
register_series_accessor(name=non_extendable_attribute_name, backend=Backend1)(
"unused_value"
)
def test_wrapped_extension(Backend1):
"""
Tests using the extensions system to overwrite a method with a wrapped version of the original method
obtained via getattr.
Because the QueryCompilerCaster ABC automatically wraps all methods with a dispatch to the appropriate
backend, we must use the __wrapped__ property of the originally-defined attribute to avoid
infinite recursion.
"""
original_item = pd.Series.item.__wrapped__
@register_series_accessor(name="item", backend=Backend1)
def item_implementation(self):
return (original_item(self) + 2) * 5
series = pd.Series([3])
assert series.item() == 3
assert series.set_backend(Backend1).item() == 25
| TestProperty |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 11317,
"end": 11619
} | class ____(_GenerativeProvider):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.NVIDIA, frozen=True, exclude=True
)
temperature: Optional[float]
model: Optional[str]
maxTokens: Optional[int]
baseURL: Optional[str]
| _GenerativeNvidia |
python | Textualize__textual | src/textual/widgets/_static.py | {
"start": 223,
"end": 3180
} | class ____(Widget, inherit_bindings=False):
"""A widget to display simple static content, or use as a base class for more complex widgets.
Args:
content: A Content object, Rich renderable, or string containing console markup.
expand: Expand content if required to fill container.
shrink: Shrink content if required to fill container.
markup: True if markup should be parsed and rendered.
name: Name of widget.
id: ID of Widget.
classes: Space separated list of class names.
disabled: Whether the static is disabled or not.
"""
DEFAULT_CSS = """
Static {
height: auto;
}
"""
def __init__(
self,
content: VisualType = "",
*,
expand: bool = False,
shrink: bool = False,
markup: bool = True,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
super().__init__(
name=name, id=id, classes=classes, disabled=disabled, markup=markup
)
self.expand = expand
self.shrink = shrink
self.__content = content
self.__visual: Visual | None = None
@property
def visual(self) -> Visual:
"""The visual to be displayed.
Note that the visual is what is ultimately rendered in the widget, but may not be the
same object set with the `update` method or `content` property. For instance, if you
update with a string, then the visual will be a [Content][textual.content.Content] instance.
"""
if self.__visual is None:
self.__visual = visualize(self, self.__content, markup=self._render_markup)
return self.__visual
@property
def content(self) -> VisualType:
"""The original content set in the constructor."""
return self.__content
@content.setter
def content(self, content: VisualType) -> None:
self.__content = content
self.__visual = visualize(self, content, markup=self._render_markup)
self.clear_cached_dimensions()
self.refresh(layout=True)
def render(self) -> RenderResult:
"""Get a rich renderable for the widget's content.
Returns:
A rich renderable.
"""
return self.visual
def update(self, content: VisualType = "", *, layout: bool = True) -> None:
"""Update the widget's content area with a string, a Visual (such as [Content][textual.content.Content]), or a [Rich renderable](https://rich.readthedocs.io/en/latest/protocol.html).
Args:
content: New content.
layout: Also perform a layout operation (set to `False` if you are certain the size won't change).
"""
self.__content = content
self.__visual = visualize(self, content, markup=self._render_markup)
self.refresh(layout=layout)
| Static |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.