language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 403493,
"end": 428496
} | class ____(Response):
"""
Response of frames.get_snippets_for_dataview2 endpoint.
:param frames: List of frames for the requested page. The amount of frames
returned is not guaranteed to be equal to the requested page size.
:type frames: Sequence[Snippet]
:param frames_total: The total number of first frames per unique URI
:type frames_total: int
:param search_after: The key for querying next batch of frames
:type search_after: str
:param total_in_versions: The total number of snippets for the dataview
versions (without applying the dataview filters)
:type total_in_versions: int
"""
_service = "frames"
_action = "get_snippets_for_dataview2"
_version = "2.23"
_schema = {
"definitions": {
"augmentation": {
"properties": {
"arguments": {
"additionalProperties": True,
"description": "Arguments dictionary, passed to custom augmentations.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class (see global definitions)",
"type": ["string", "null"],
},
"params": {
"description": (
"Transform parameters, an array ot 3 randomly generated values. Fixed values are passed in"
" case of affine reflect augmentation."
),
"items": {"type": "number"},
"type": ["array", "null"],
},
"strength": {
"description": "Transform strength. Required for pixel transforms.",
"type": ["number", "null"],
},
"trans_mat": {
"description": "Transform matrix (list of lists). Required for affine transforms.",
"items": {"items": {"type": "number"}, "type": "array"},
"type": ["array", "null"],
},
"type": {
"description": "Augmentation type (see global definitions)",
"type": ["string", "null"],
},
},
"type": "object",
},
"dataset_version": {
"properties": {
"id": {"description": "Dataset id", "type": ["string", "null"]},
"version": {
"description": "Dataset version id",
"type": ["string", "null"],
},
},
"type": "object",
},
"frame": {
"properties": {
"augmentation": {
"description": "List of augmentations",
"items": {"$ref": "#/definitions/augmentation"},
"type": ["array", "null"],
},
"blob": {
"description": "Raw data (blob) for the frame",
"type": ["string", "null"],
},
"context_id": {
"description": (
"Context ID. Used for the default frames sorting. If not set then it is filled from the uri"
" of the first source."
),
"type": ["string", "null"],
},
"dataset": {
"description": "Frame's dataset version",
"oneOf": [
{"$ref": "#/definitions/dataset_version"},
{"type": "null"},
],
},
"id": {"description": "Frame id", "type": ["string", "null"]},
"is_key_frame": {
"description": "Is this a key frame (only applicable in frames who'se src is a video)",
"type": ["boolean", "null"],
},
"key_frame": {
"description": "ID of the key frame that this frame belongs to",
"type": ["string", "null"],
},
"label_rule_counts": {
"additionalProperties": True,
"description": "The number of matched roi per lable rule",
"type": ["object", "null"],
},
"labels_size": {
"description": "Number of labels returned",
"type": ["integer", "null"],
},
"meta": {
"additionalProperties": True,
"description": (
"Additional metadata dictionary for the frame. Please note that using this field"
" effectively defines a schema (dictionary structure and types used as values) - frames"
" within the same dataset cannot use conflicting schemas for this field (see documentation"
" for more details)."
),
"type": ["object", "null"],
},
"meta_blob": {
"additionalProperties": True,
"description": (
"Non searchable metadata dictionary for the frame. The fields in this object cannot be"
" searched by and are not added to the frame schema"
),
"type": ["object", "null"],
},
"new_ver": {
"description": "Newer version of this frame, if asked to merge",
"oneOf": [{"$ref": "#/definitions/frame"}, {"type": "null"}],
},
"rois": {
"description": "Frame regions of interest",
"items": {"$ref": "#/definitions/roi"},
"type": ["array", "null"],
},
"rule_name": {
"description": (
"Name of the filtering rule according to which this frame was provided (if applicable)"
),
"type": ["string", "null"],
},
"saved": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"saved_in_version": {
"description": "Last version this frame was saved in (version ID)",
"type": ["string", "null"],
},
"sources": {
"description": "Sources of this frame",
"items": {"$ref": "#/definitions/source"},
"type": ["array", "null"],
},
"timestamp": {
"description": (
"Frame's offset in milliseconds, used primarily for video content. Used for the default"
" frames sorting as the secondary key (with the primary key being 'context_id'). For"
" images, this value should typically be 0. If not set, value is filled from the timestamp"
" of the first source. We recommend using this field only in cases concerning the default"
" sorting behavior."
),
"type": ["integer", "null"],
},
"updated": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"updated_in_version": {
"description": "Last version this frame was updated in (version ID)",
"type": ["string", "null"],
},
"video_gop": {
"description": (
"Video encoding GOP value for the source of this frame. Only valid for video frames"
),
"type": ["number", "null"],
},
},
"type": "object",
},
"mask": {
"properties": {
"content_type": {
"description": "Content type (e.g. 'image/jpeg', 'image/png')",
"type": ["string", "null"],
},
"height": {
"description": "Height in pixels",
"type": ["integer", "null"],
},
"id": {
"description": "unique ID (in this frame)",
"type": ["string", "null"],
},
"timestamp": {
"default": 0,
"description": (
"Timestamp in the source data (for video content. for images, this value should be 0)"
),
"type": ["integer", "null"],
},
"uri": {"description": "Data URI", "type": ["string", "null"]},
"width": {
"description": "Width in pixels",
"type": ["integer", "null"],
},
},
"type": "object",
},
"preview": {
"properties": {
"content_type": {
"description": "Content type (e.g. 'image/jpeg', 'image/png')",
"type": ["string", "null"],
},
"height": {
"description": "Height in pixels",
"type": ["integer", "null"],
},
"timestamp": {
"default": 0,
"description": (
"Timestamp in the source data (for video content. for images, this value should be 0)"
),
"type": ["integer", "null"],
},
"uri": {"description": "Data URI", "type": ["string", "null"]},
"width": {
"description": "Width in pixels",
"type": ["integer", "null"],
},
},
"type": "object",
},
"roi": {
"properties": {
"area": {
"description": "ROI area (not used)",
"type": ["integer", "null"],
},
"confidence": {
"description": "ROI confidence",
"type": ["number", "null"],
},
"id": {"description": "ROI id", "type": ["string", "null"]},
"label": {
"description": "ROI labels",
"items": {"type": "string"},
"type": ["array", "null"],
},
"label_num": {
"description": (
"Label number according to the specified labels mapping Used only when ROI is returned as"
" part of a task's frame."
),
"type": ["integer", "null"],
},
"mask": {
"description": "Mask info for this ROI",
"oneOf": [{"$ref": "#/definitions/roi_mask"}, {"type": "null"}],
},
"meta": {
"additionalProperties": True,
"description": "Additional metadata dictionary for the roi",
"type": ["object", "null"],
},
"poly": {
"description": "ROI polygon (x0, y0, ..., xn, yn)",
"items": {"type": "number"},
"type": ["array", "null"],
},
"sources": {
"description": "Sources that this ROI belongs to",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"roi_mask": {
"properties": {
"id": {"description": "Mask ID", "type": "string"},
"value": {
"description": "Mask value",
"items": {"type": "integer"},
"type": "array",
},
},
"required": ["id", "value"],
"type": "object",
},
"snippet": {
"properties": {
"augmentation": {
"description": "List of augmentations",
"items": {"$ref": "#/definitions/augmentation"},
"type": ["array", "null"],
},
"blob": {
"description": "Raw data (blob) for the frame",
"type": ["string", "null"],
},
"context_id": {
"description": (
"Context ID. Used for the default frames sorting. If not set then it is filled from the uri"
" of the first source."
),
"type": ["string", "null"],
},
"dataset": {
"description": "Frame's dataset version",
"oneOf": [
{"$ref": "#/definitions/dataset_version"},
{"type": "null"},
],
},
"id": {"description": "Frame id", "type": ["string", "null"]},
"is_key_frame": {
"description": "Is this a key frame (only applicable in frames who'se src is a video)",
"type": ["boolean", "null"],
},
"key_frame": {
"description": "ID of the key frame that this frame belongs to",
"type": ["string", "null"],
},
"label_rule_counts": {
"additionalProperties": True,
"description": "The number of matched roi per lable rule",
"type": ["object", "null"],
},
"labels_size": {
"description": "Number of labels returned",
"type": ["integer", "null"],
},
"meta": {
"additionalProperties": True,
"description": (
"Additional metadata dictionary for the frame. Please note that using this field"
" effectively defines a schema (dictionary structure and types used as values) - frames"
" within the same dataset cannot use conflicting schemas for this field (see documentation"
" for more details)."
),
"type": ["object", "null"],
},
"meta_blob": {
"additionalProperties": True,
"description": (
"Non searchable metadata dictionary for the frame. The fields in this object cannot be"
" searched by and are not added to the frame schema"
),
"type": ["object", "null"],
},
"new_ver": {
"description": "Newer version of this frame, if asked to merge",
"oneOf": [{"$ref": "#/definitions/frame"}, {"type": "null"}],
},
"num_frames": {
"description": "Number of frames represented by this snippet",
"type": ["integer", "null"],
},
"rois": {
"description": "Frame regions of interest",
"items": {"$ref": "#/definitions/roi"},
"type": ["array", "null"],
},
"rule_name": {
"description": (
"Name of the filtering rule according to which this frame was provided (if applicable)"
),
"type": ["string", "null"],
},
"saved": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"saved_in_version": {
"description": "Last version this frame was saved in (version ID)",
"type": ["string", "null"],
},
"sources": {
"description": "Sources of this frame",
"items": {"$ref": "#/definitions/source"},
"type": ["array", "null"],
},
"timestamp": {
"description": (
"Frame's offset in milliseconds, used primarily for video content. Used for the default"
" frames sorting as the secondary key (with the primary key being 'context_id'). For"
" images, this value should typically be 0. If not set, value is filled from the timestamp"
" of the first source. We recommend using this field only in cases concerning the default"
" sorting behavior."
),
"type": ["integer", "null"],
},
"updated": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"updated_in_version": {
"description": "Last version this frame was updated in (version ID)",
"type": ["string", "null"],
},
"video_gop": {
"description": (
"Video encoding GOP value for the source of this frame. Only valid for video frames"
),
"type": ["number", "null"],
},
},
"type": "object",
},
"source": {
"properties": {
"content_type": {
"description": "Content type (e.g. 'image/jpeg', 'image/png')",
"type": ["string", "null"],
},
"height": {
"description": "Height in pixels",
"type": ["integer", "null"],
},
"id": {
"description": "unique ID (in this frame)",
"type": ["string", "null"],
},
"masks": {
"items": {"$ref": "#/definitions/mask"},
"type": ["array", "null"],
},
"meta": {
"additionalProperties": True,
"description": "Additional metadata dictionary for the source",
"type": ["object", "null"],
},
"preview": {
"oneOf": [{"$ref": "#/definitions/preview"}, {"type": "null"}]
},
"timestamp": {
"default": 0,
"description": (
"Timestamp in the source data (for video content. for images, this value should be 0)"
),
"type": ["integer", "null"],
},
"uri": {"description": "Data URI", "type": ["string", "null"]},
"width": {
"description": "Width in pixels",
"type": ["integer", "null"],
},
},
"type": "object",
},
},
"properties": {
"frames": {
"description": (
"List of frames for the requested page. The amount of frames returned is not guaranteed to be equal"
" to the requested page size."
),
"items": {"$ref": "#/definitions/snippet"},
"type": ["array", "null"],
},
"frames_total": {
"description": "The total number of first frames per unique URI",
"type": ["integer", "null"],
},
"search_after": {
"description": "The key for querying next batch of frames",
"type": ["string", "null"],
},
"total_in_versions": {
"description": (
"The total number of snippets for the dataview versions (without applying the dataview filters)"
),
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
frames=None,
frames_total=None,
search_after=None,
total_in_versions=None,
**kwargs
):
super(GetSnippetsForDataview2Response, self).__init__(**kwargs)
self.frames = frames
self.frames_total = frames_total
self.search_after = search_after
self.total_in_versions = total_in_versions
@schema_property("frames")
def frames(self):
return self._property_frames
@frames.setter
def frames(self, value):
if value is None:
self._property_frames = None
return
self.assert_isinstance(value, "frames", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Snippet.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "frames", Snippet, is_array=True)
self._property_frames = value
@schema_property("frames_total")
def frames_total(self):
return self._property_frames_total
@frames_total.setter
def frames_total(self, value):
if value is None:
self._property_frames_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "frames_total", six.integer_types)
self._property_frames_total = value
@schema_property("search_after")
def search_after(self):
return self._property_search_after
@search_after.setter
def search_after(self, value):
if value is None:
self._property_search_after = None
return
self.assert_isinstance(value, "search_after", six.string_types)
self._property_search_after = value
@schema_property("total_in_versions")
def total_in_versions(self):
return self._property_total_in_versions
@total_in_versions.setter
def total_in_versions(self, value):
if value is None:
self._property_total_in_versions = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "total_in_versions", six.integer_types)
self._property_total_in_versions = value
| GetSnippetsForDataview2Response |
python | cherrypy__cherrypy | cherrypy/test/test_tools.py | {
"start": 449,
"end": 16246
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
# Put check_access in a custom toolbox with its own namespace
myauthtools = cherrypy._cptools.Toolbox('myauth')
def check_access(default=False):
if not getattr(cherrypy.request, 'userid', default):
raise cherrypy.HTTPError(401)
myauthtools.check_access = cherrypy.Tool(
'before_request_body',
check_access,
)
def numerify():
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
class NumTool(cherrypy.Tool):
def _setup(self):
def makemap():
m = self._merged_args().get('map', {})
cherrypy.request.numerify_map = list(m.items())
cherrypy.request.hooks.attach('on_start_resource', makemap)
def critical():
cherrypy.request.error_response = cherrypy.HTTPError(
502,
).set_response
critical.failsafe = True
cherrypy.request.hooks.attach('on_start_resource', critical)
cherrypy.request.hooks.attach(self._point, self.callable)
tools.numerify = NumTool('before_finalize', numerify)
# It's not mandatory to inherit from cherrypy.Tool.
class NadsatTool:
def __init__(self):
self.ended = {}
self._name = 'nadsat'
def nadsat(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace(b'good', b'horrorshow')
chunk = chunk.replace(b'piece', b'lomtick')
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
nadsat.priority = 0
def cleanup(self):
# This runs after the request has been completely written out.
cherrypy.response.body = [b'razdrez']
id = cherrypy.request.params.get('id')
if id:
self.ended[id] = True
cleanup.failsafe = True
def _setup(self):
cherrypy.request.hooks.attach('before_finalize', self.nadsat)
cherrypy.request.hooks.attach('on_end_request', self.cleanup)
tools.nadsat = NadsatTool()
def pipe_body():
cherrypy.request.process_request_body = False
clen = int(cherrypy.request.headers['Content-Length'])
cherrypy.request.body = cherrypy.request.rfile.read(clen)
# Assert that we can use a callable object instead of a function.
class Rotator(object):
def __call__(self, scale):
r = cherrypy.response
r.collapse_body()
r.body = [bytes([(x + scale) % 256 for x in r.body[0]])]
cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())
def stream_handler(next_handler, *args, **kwargs):
actual = cherrypy.request.config.get('tools.streamer.arg')
assert actual == 'arg value'
cherrypy.response.output = o = io.BytesIO()
try:
next_handler(*args, **kwargs)
# Ignore the response and return our accumulated output
# instead.
return o.getvalue()
finally:
o.close()
cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(
stream_handler,
)
class Root:
@cherrypy.expose
def index(self):
return 'Howdy earth!'
@cherrypy.expose
@cherrypy.config(
**{
'tools.streamer.on': True,
'tools.streamer.arg': 'arg value',
},
)
def tarfile(self):
actual = cherrypy.request.config.get('tools.streamer.arg')
assert actual == 'arg value'
cherrypy.response.output.write(b'I am ')
cherrypy.response.output.write(b'a tarfile')
@cherrypy.expose
def euro(self):
hooks = list(cherrypy.request.hooks['before_finalize'])
hooks.sort()
cbnames = [x.callback.__name__ for x in hooks]
assert cbnames == ['gzip'], cbnames
priorities = [x.priority for x in hooks]
assert priorities == [80], priorities
yield ntou('Hello,')
yield ntou('world')
yield europoundUnicode
# Bare hooks
@cherrypy.expose
@cherrypy.config(**{'hooks.before_request_body': pipe_body})
def pipe(self):
return cherrypy.request.body
# Multiple decorators; include kwargs just for fun.
# Note that rotator must run before gzip.
@cherrypy.expose
def decorated_euro(self, *vpath):
yield ntou('Hello,')
yield ntou('world')
yield europoundUnicode
decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
decorated_euro = tools.rotator(scale=3)(decorated_euro)
root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each
subclass, and adds an instance of the subclass as an attribute
of root.
"""
def __init__(cls, name, bases, dct):
type.__init__(cls, name, bases, dct)
for value in dct.values():
if isinstance(value, types.FunctionType):
cherrypy.expose(value)
setattr(root, name.lower(), cls())
Test = TestType('Test', (object,), {})
# METHOD ONE:
# Declare Tools in _cp_config
@cherrypy.config(**{'tools.nadsat.on': True})
class Demo(Test):
def index(self, id=None):
return 'A good piece of cherry pie'
def ended(self, id):
return repr(tools.nadsat.ended[id])
def err(self, id=None):
raise ValueError()
def errinstream(self, id=None):
yield 'nonconfidential'
raise ValueError()
yield 'confidential'
# METHOD TWO: decorator using Tool()
# We support Python 2.3, but the @-deco syntax would look like
# this:
# @tools.check_access()
def restricted(self):
return 'Welcome!'
restricted = myauthtools.check_access()(restricted)
userid = restricted
def err_in_onstart(self):
return 'success!'
@cherrypy.config(**{'response.stream': True})
def stream(self, id=None):
for x in range(100000000):
yield str(x)
conf = {
# METHOD THREE:
# Declare Tools in detached config
'/demo': {
'tools.numerify.on': True,
'tools.numerify.map': {b'pie': b'3.14159'},
},
'/demo/restricted': {
'request.show_tracebacks': False,
},
'/demo/userid': {
'request.show_tracebacks': False,
'myauth.check_access.default': True,
},
'/demo/errinstream': {
'response.stream': True,
},
'/demo/err_in_onstart': {
# Because this isn't a dict, on_start_resource will error.
'tools.numerify.map': 'pie->3.14159',
},
# Combined tools
'/euro': {
'tools.gzip.on': True,
'tools.encode.on': True,
},
# Priority specified in config
'/decorated_euro/subpath': {
'tools.gzip.priority': 10,
},
# Handler wrappers
'/tarfile': {'tools.streamer.on': True},
}
app = cherrypy.tree.mount(root, config=conf)
app.request_class.namespaces['myauth'] = myauthtools
root.tooldecs = _test_decorators.ToolExamples()
def testHookErrors(self):
self.getPage('/demo/?id=1')
# If body is "razdrez", then on_end_request is being called too early.
self.assertBody('A horrorshow lomtick of cherry 3.14159')
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/1')
self.assertBody('True')
valerr = '\n raise ValueError()\nValueError'
self.getPage('/demo/err?id=3')
# If body is "razdrez", then on_end_request is being called too early.
self.assertErrorPage(502, pattern=valerr)
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/3')
self.assertBody('True')
# If body is "razdrez", then on_end_request is being called too early.
if cherrypy.server.protocol_version == 'HTTP/1.0' or getattr(
cherrypy.server,
'using_apache',
False,
):
self.getPage('/demo/errinstream?id=5')
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus('200 OK')
self.assertBody('nonconfidential')
else:
# Because this error is raised after the response body has
# started, and because it's chunked output, an error is raised by
# the HTTP client when it encounters incomplete output.
self.assertRaises(
(ValueError, IncompleteRead),
self.getPage,
'/demo/errinstream?id=5',
)
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/5')
self.assertBody('True')
# Test the "__call__" technique (compile-time decorator).
self.getPage('/demo/restricted')
self.assertErrorPage(401)
# Test compile-time decorator with kwargs from config.
self.getPage('/demo/userid')
self.assertBody('Welcome!')
def testEndRequestOnDrop(self):
old_timeout = None
try:
httpserver = cherrypy.server.httpserver
old_timeout = httpserver.timeout
except (AttributeError, IndexError):
return self.skip()
try:
httpserver.timeout = timeout
# Test that on_end_request is called even if the client drops.
self.persistent = True
try:
conn = self.HTTP_CONN
conn.putrequest('GET', '/demo/stream?id=9', skip_host=True)
conn.putheader('Host', self.HOST)
conn.endheaders()
# Skip the rest of the request and close the conn. This will
# cause the server's active socket to error, which *should*
# result in the request being aborted, and request.close being
# called all the way up the stack (including WSGI middleware),
# eventually calling our on_end_request hook.
finally:
self.persistent = False
time.sleep(timeout * 2)
# Test that the on_end_request hook was called.
self.getPage('/demo/ended/9')
self.assertBody('True')
finally:
if old_timeout is not None:
httpserver.timeout = old_timeout
def testGuaranteedHooks(self):
# The 'critical' on_start_resource hook is 'failsafe' (guaranteed
# to run even if there are failures in other on_start methods).
# This is NOT true of the other hooks.
# Here, we have set up a failure in NumerifyTool.numerify_map,
# but our 'critical' hook should run and set the error to 502.
self.getPage('/demo/err_in_onstart')
self.assertErrorPage(502)
tmpl = "AttributeError: 'str' object has no attribute '{attr}'"
expected_msg = tmpl.format(attr='items')
self.assertInBody(expected_msg)
def testCombinedTools(self):
expectedResult = (ntou('Hello,world') + europoundUnicode).encode(
'utf-8',
)
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(expectedResult)
zfile.close()
self.getPage(
'/euro',
headers=[
('Accept-Encoding', 'gzip'),
('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'),
],
)
self.assertInBody(zbuf.getvalue()[:3])
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6)
zfile.write(expectedResult)
zfile.close()
self.getPage('/decorated_euro', headers=[('Accept-Encoding', 'gzip')])
self.assertInBody(zbuf.getvalue()[:3])
# This returns a different value because gzip's priority was
# lowered in conf, allowing the rotator to run after gzip.
# Of course, we don't want breakage in production apps,
# but it proves the priority was changed.
self.getPage(
'/decorated_euro/subpath',
headers=[('Accept-Encoding', 'gzip')],
)
self.assertInBody(bytes([(x + 3) % 256 for x in zbuf.getvalue()]))
def testBareHooks(self):
content = 'bit of a pain in me gulliver'
self.getPage(
'/pipe',
headers=[
('Content-Length', str(len(content))),
('Content-Type', 'text/plain'),
],
method='POST',
body=content,
)
self.assertBody(content)
def testHandlerWrapperTool(self):
self.getPage('/tarfile')
self.assertBody('I am a tarfile')
def testToolWithConfig(self):
self.getPage('/tooldecs/blah')
self.assertHeader('Content-Type', 'application/data')
def testWarnToolOn(self):
# get
try:
cherrypy.tools.numerify.on
except AttributeError:
pass
else:
raise AssertionError('Tool.on did not error as it should have.')
# set
try:
cherrypy.tools.numerify.on = True
except AttributeError:
pass
else:
raise AssertionError('Tool.on did not error as it should have.')
def testDecorator(self):
@cherrypy.tools.register('on_start_resource')
def example():
pass
self.assertTrue(isinstance(cherrypy.tools.example, cherrypy.Tool))
self.assertEqual(cherrypy.tools.example._point, 'on_start_resource')
@cherrypy.tools.register( # noqa: F811
'before_finalize',
name='renamed',
priority=60,
)
def example(): # noqa: F811
pass
self.assertTrue(isinstance(cherrypy.tools.renamed, cherrypy.Tool))
self.assertEqual(cherrypy.tools.renamed._point, 'before_finalize')
self.assertEqual(cherrypy.tools.renamed._name, 'renamed')
self.assertEqual(cherrypy.tools.renamed._priority, 60)
| ToolTests |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-takes-to-reach-destination-without-drowning.py | {
"start": 55,
"end": 1394
} | class ____(object):
def minimumSeconds(self, land):
"""
:type land: List[List[str]]
:rtype: int
"""
DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
lookup = [[-1 if land[i][j] == "*" else 0 for j in xrange(len(land[0]))] for i in xrange(len(land))]
q = [(i, j, -1) for i in xrange(len(land)) for j in xrange(len(land[0])) if land[i][j] == "*"]
q.append(next((i, j, 1) for i in xrange(len(land)) for j in xrange(len(land[0])) if land[i][j] == "S"))
lookup[q[-1][0]][q[-1][1]] = 1
while q:
new_q = []
for i, j, d in q:
if land[i][j] == "D":
return d-1
for di, dj in DIRECTIONS:
ni, nj = i+di, j+dj
if not (0 <= ni < len(land) and 0 <= nj < len(land[0]) and land[ni][nj] != "X" and lookup[ni][nj] != -1):
continue
if d != -1 and lookup[ni][nj] == 0:
lookup[ni][nj] = 1
new_q.append((ni, nj, d+1))
elif d == -1 and land[ni][nj] != "D":
lookup[ni][nj] = -1
new_q.append((ni, nj, -1))
q = new_q
return -1
# Time: O(m * n)
# Space: O(m * n)
# simulation, bfs
| Solution |
python | facebookresearch__faiss | tests/test_index_binary_from_float.py | {
"start": 4548,
"end": 5663
} | class ____(unittest.TestCase):
def test_override(self):
d = 256
nt = 3500
nb = 10000
nq = 500
(xt, xb, xq) = make_binary_dataset(d, nb, nt, nq)
def train_and_get_centroids(override_kmeans_index):
index = faiss.index_binary_factory(d, "BIVF10")
index.verbose = True
if override_kmeans_index is not None:
index.clustering_index = override_kmeans_index
index.train(xt)
centroids = faiss.downcast_IndexBinary(index.quantizer).xb
return faiss.vector_to_array(centroids).reshape(-1, d // 8)
centroids_ref = train_and_get_centroids(None)
# should do the exact same thing
centroids_new = train_and_get_centroids(faiss.IndexFlatL2(d))
assert np.all(centroids_ref == centroids_new)
# will do less accurate assignment... Sanity check that the
# index is indeed used by kmeans
centroids_new = train_and_get_centroids(faiss.IndexLSH(d, 16))
assert not np.all(centroids_ref == centroids_new)
| TestOverrideKmeansQuantizer |
python | getsentry__sentry | src/sentry/grouping/fingerprinting/matchers.py | {
"start": 908,
"end": 4713
} | class ____:
def __init__(
self,
key: str, # The event attribute on which to match
pattern: str, # The value to match (or to not match, depending on `negated`)
negated: bool = False, # If True, match when `event[key]` does NOT equal `pattern`
) -> None:
if key.startswith("tags."):
self.key = key
else:
try:
self.key = MATCHERS[key]
except KeyError:
raise InvalidFingerprintingConfig("Unknown matcher '%s'" % key)
self.pattern = pattern
self.negated = negated
@property
def match_type(self) -> str:
if self.key == "message":
return "toplevel"
if self.key in ("logger", "level"):
return "log_info"
if self.key in ("type", "value"):
return "exceptions"
if self.key.startswith("tags."):
return "tags"
if self.key == "sdk":
return "sdk"
if self.key == "family":
return "family"
if self.key == "release":
return "release"
return "frames"
def matches(self, event_values: dict[str, Any]) -> bool:
match_found = self._positive_match(event_values)
return not match_found if self.negated else match_found
def _positive_path_match(self, value: str | None) -> bool:
if value is None:
return False
if glob_match(value, self.pattern, ignorecase=True, doublestar=True, path_normalize=True):
return True
if not value.startswith("/") and glob_match(
"/" + value, self.pattern, ignorecase=True, doublestar=True, path_normalize=True
):
return True
return False
def _positive_match(self, event_values: dict[str, Any]) -> bool:
# Handle cases where `self.key` isn't 1-to-1 with the corresponding key in `event_values`
if self.key == "path":
return any(
self._positive_path_match(value)
# Use a set so that if the values match, we don't needlessly check both
for value in {event_values.get("abs_path"), event_values.get("filename")}
)
if self.key == "message":
return any(
value is not None and glob_match(value, self.pattern, ignorecase=True)
# message tests against exception value also, as this is what users expect
for value in [event_values.get("message"), event_values.get("value")]
)
# For the rest, `self.key` matches the key in `event_values`
value = event_values.get(self.key)
if value is None:
return False
if self.key in ["package", "release"]:
return self._positive_path_match(value)
if self.key in ["family", "sdk"]:
flags = self.pattern.split(",")
return "all" in flags or value in flags
if self.key == "app":
return value == bool_from_string(self.pattern)
if self.key in ["level", "value"]:
return glob_match(value, self.pattern, ignorecase=True)
return glob_match(value, self.pattern, ignorecase=False)
def _to_config_structure(self) -> list[str]:
key = self.key
if self.negated:
key = "!" + key
return [key, self.pattern]
@classmethod
def _from_config_structure(cls, matcher: list[str]) -> Self:
key, pattern = matcher
negated = key.startswith("!")
key = key.lstrip("!")
return cls(key, pattern, negated)
@property
def text(self) -> str:
return '{}{}:"{}"'.format(
"!" if self.negated else "",
self.key,
self.pattern,
)
| FingerprintMatcher |
python | mlflow__mlflow | tests/gateway/tools.py | {
"start": 3392,
"end": 4068
} | class ____:
def __init__(self, data: list[bytes], headers: dict[str, str] | None = None, status: int = 200):
self.status = status
self.headers = headers
self._content = data
def raise_for_status(self) -> None:
if 400 <= self.status < 600:
raise aiohttp.ClientResponseError(None, None, status=self.status)
    async def _async_content(self):
        # Async generator yielding the canned chunks one at a time,
        # mimicking a streamed aiohttp response body.
        for line in self._content:
            yield line
    @property
    def content(self):
        """Return a fresh async iterator over the body (like aiohttp's ``.content``)."""
        return self._async_content()
    async def __aenter__(self):
        # Support `async with`: the response object itself is the context value.
        return self
    async def __aexit__(self, exc_type, exc, traceback):
        # Nothing to release for the in-memory fake.
        pass
| MockAsyncStreamingResponse |
python | jazzband__django-model-utils | model_utils/managers.py | {
"start": 7301,
"end": 8459
} | class ____(InheritanceQuerySetMixin[ModelT], QuerySet[ModelT]): # type: ignore[misc]
    def instance_of(self, *models: type[ModelT]) -> InheritanceQuerySet[ModelT]:
        """
        Fetch only objects that are instances of the provided model(s).
        """
        # If we aren't already selecting the subclasses, we need
        # to in order to get this to work.
        # How can we tell if we are not selecting subclasses?
        # Is it safe to just apply .select_subclasses(*models)?
        # Due to https://code.djangoproject.com/ticket/16572, we
        # can't really do this for anything other than children (ie,
        # no grandchildren+).
        # Build one WHERE clause per model: a row is an instance of a child
        # model iff all of its parent-link columns are non-NULL. The quoted
        # identifiers come from model _meta (table/column names, not user
        # input), so the string-built SQL is not an injection vector.
        where_queries = []
        for model in models:
            where_queries.append('(' + ' AND '.join([
                '"{}"."{}" IS NOT NULL'.format(
                    model._meta.db_table,
                    field.column,
                ) for field in model._meta.parents.values()
            ]) + ')')
        # OR the per-model clauses so a row matching any requested subclass passes.
        return cast(
            'InheritanceQuerySet[ModelT]',
            self.select_subclasses(*models).extra(where=[' OR '.join(where_queries)])
        )
| InheritanceQuerySet |
python | cherrypy__cherrypy | cherrypy/test/test_wsgi_unix_socket.py | {
"start": 1045,
"end": 2196
} | class ____(helper.CPWebCase):
"""
Test basic behavior on a cherrypy wsgi server listening
on a unix socket.
It exercises the config option `server.socket_file`.
"""
HTTP_CONN = USocketHTTPConnection(USOCKET_PATH)
    @staticmethod
    def setup_server():
        # Minimal app with one working page and one failing page, mounted on
        # a server configured to listen on a Unix domain socket.
        class Root(object):
            @cherrypy.expose
            def index(self):
                return 'Test OK'
            @cherrypy.expose
            def error(self):
                # Deliberately raises so 500 handling can be exercised.
                raise Exception('Invalid page')
        config = {'server.socket_file': USOCKET_PATH}
        cherrypy.config.update(config)
        cherrypy.tree.mount(Root())
    def tearDown(self):
        # Reset to a TCP socket so subsequent test cases are unaffected.
        cherrypy.config.update({'server.socket_file': None})
    def test_simple_request(self):
        # A plain GET over the Unix socket serves the index page.
        self.getPage('/')
        self.assertStatus('200 OK')
        self.assertInBody('Test OK')
    def test_not_found(self):
        self.getPage('/invalid_path')
        self.assertStatus('404 Not Found')
    def test_internal_error(self):
        # The /error handler raises, which must surface as a 500 response.
        self.getPage('/error')
        self.assertStatus('500 Internal Server Error')
        self.assertInBody('Invalid page')
| WSGI_UnixSocket_Test |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 822670,
"end": 823424
} | class ____(sgqlc.types.relay.Connection):
"""A list of organizations managed by an enterprise."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("OrganizationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Organization"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| OrganizationConnection |
python | ethereum__web3.py | ens/exceptions.py | {
"start": 97,
"end": 257
} | class ____(ENSException, ValueError):
"""
An ENS exception wrapper for `ValueError`, for better control over
exception handling.
"""
| ENSValueError |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 111786,
"end": 111964
} | class ____:
xlExclusive = 3 # from enum XlSaveAsAccessMode
xlNoChange = 1 # from enum XlSaveAsAccessMode
xlShared = 2 # from enum XlSaveAsAccessMode
| SaveAsAccessMode |
python | doocs__leetcode | solution/1700-1799/1734.Decode XORed Permutation/Solution.py | {
"start": 0,
"end": 390
} | class ____:
def decode(self, encoded: List[int]) -> List[int]:
n = len(encoded) + 1
a = b = 0
for i in range(0, n - 1, 2):
a ^= encoded[i]
for i in range(1, n + 1):
b ^= i
perm = [0] * n
perm[-1] = a ^ b
for i in range(n - 2, -1, -1):
perm[i] = encoded[i] ^ perm[i + 1]
return perm
| Solution |
python | lxml__lxml | src/lxml/html/_html5builder.py | {
"start": 353,
"end": 516
} | class ____:
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
| DocumentType |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_ec2.py | {
"start": 1189,
"end": 2144
} | class ____(BaseAwsLinksTestCase):
link_class = EC2InstanceLink
INSTANCE_ID = "i-xxxxxxxxxxxx"
    def test_extra_link(self, mock_supervisor_comms):
        # On Airflow 3 the link value arrives over supervisor comms as an
        # XCom payload, so pre-seed the mocked channel with the expected dict.
        if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
            mock_supervisor_comms.send.return_value = XComResult(
                key=self.link_class.key,
                value={
                    "region_name": "eu-west-1",
                    "aws_domain": self.link_class.get_aws_domain("aws"),
                    "aws_partition": "aws",
                    "instance_id": self.INSTANCE_ID,
                },
            )
        # The generated console URL must deep-link to the instance details page.
        self.assert_extra_link_url(
            expected_url=(
                "https://console.aws.amazon.com/ec2/home"
                f"?region=eu-west-1#InstanceDetails:instanceId={self.INSTANCE_ID}"
            ),
            region_name="eu-west-1",
            aws_partition="aws",
            instance_id=self.INSTANCE_ID,
        )
| TestEC2InstanceLink |
python | doocs__leetcode | solution/3100-3199/3100.Water Bottles II/Solution.py | {
"start": 0,
"end": 290
} | class ____:
def maxBottlesDrunk(self, numBottles: int, numExchange: int) -> int:
ans = numBottles
while numBottles >= numExchange:
numBottles -= numExchange
numExchange += 1
ans += 1
numBottles += 1
return ans
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/hashability1.py | {
"start": 475,
"end": 1010
} | class ____(list[str]):
def __hash__(self) -> int: ...
s3 = {StrList()}
# This should generate two errors because {} and [] are not hashable.
d1 = {{}: None, None: 2, dict: 3, frozenset(): 4, []: ""}
# This should generate two errors because {} and [] are not hashable.
d2: dict[Any, Any] = {{}: None, None: 2, dict: 3, frozenset(): 4, []: ""}
def func1(x: str | dict[Any, Any], y: Any, z: None):
# This should generate an error because dict isn't hashable
d3 = {x: "hi"}
d4 = {y: "hi", z: "hi"}
@dataclass
| StrList |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/fsevents2.py | {
"start": 8862,
"end": 9043
} | class ____(BaseObserver):
    def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
        # Vendored watchdog observer: wire the macOS FSEvents emitter into
        # the generic BaseObserver machinery.
        BaseObserver.__init__(self, emitter_class=FSEventsEmitter, timeout=timeout)
| FSEventsObserver2 |
python | pypa__warehouse | tests/unit/organizations/test_services.py | {
"start": 1552,
"end": 39304
} | class ____:
    def test_verify_service(self):
        # The concrete service must satisfy the IOrganizationService interface.
        assert verifyClass(IOrganizationService, services.DatabaseOrganizationService)
    def test_service_creation(self):
        # The service keeps the session it was constructed with as its db handle.
        session = pretend.stub()
        service = services.DatabaseOrganizationService(session)
        assert service.db is session
def test_get_organization(self, organization_service):
organization = OrganizationFactory.create()
assert organization_service.get_organization(organization.id) == organization
def test_get_organization_application(self, organization_service):
organization_application = OrganizationApplicationFactory.create()
assert (
organization_service.get_organization_application(
organization_application.id
)
== organization_application
)
def test_get_organization_by_name(self, organization_service):
organization = OrganizationFactory.create()
assert (
organization_service.get_organization_by_name(organization.name)
== organization
)
def test_get_organization_applications_by_name(self, organization_service):
app0 = OrganizationApplicationFactory.create(name="pypi")
app1 = OrganizationApplicationFactory.create(name="PyPI")
assert sorted(
organization_service.get_organization_applications_by_name("pypi")
) == sorted(
[
app0,
app1,
]
)
def test_get_organization_applications_by_name_and_submitter(
self, organization_service
):
app = OrganizationApplicationFactory.create()
assert organization_service.get_organization_applications_by_name(
app.name, submitted_by=app.submitted_by
) == [app]
def test_approve_organization_application(
self, db_request, organization_service, monkeypatch
):
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
services, "send_new_organization_approved_email", send_email
)
monkeypatch.setattr(
services, "send_new_organization_declined_email", send_email
)
admin = UserFactory(username="admin", is_superuser=True)
db_request.user = admin
organization_application = OrganizationApplicationFactory.create()
competing_organization_application = OrganizationApplicationFactory.create(
name=organization_application.name.lower()
)
assert (
organization_application.status == OrganizationApplicationStatus.Submitted
)
assert (
competing_organization_application.status
== OrganizationApplicationStatus.Submitted
)
assert sorted(
organization_service.get_organization_applications_by_name(
organization_application.name
)
) == sorted([organization_application, competing_organization_application])
assert sorted(
organization_service.get_organization_applications_by_name(
organization_application.name, undecided=True
)
) == sorted([organization_application, competing_organization_application])
assert (
organization_service.get_organization_by_name(organization_application.name)
is None
)
organization_service.approve_organization_application(
organization_application.id, db_request
)
organization = organization_service.get_organization_by_name(
organization_application.name
)
assert organization is not None
assert organization.is_active is True
assert (
organization_service.get_organization_applications_by_name(
organization_application.name, undecided=True
)
== []
)
create_event = (
db_request.db.query(Organization.Event)
.filter_by(
tag=EventTag.Organization.OrganizationCreate, source_id=organization.id
)
.one()
)
assert create_event.additional["created_by_user_id"] == str(
organization_application.submitted_by_id
)
assert create_event.additional["redact_ip"] is True
assert organization_application.status == OrganizationApplicationStatus.Approved
assert organization_application.organization == organization
assert (
competing_organization_application.status
== OrganizationApplicationStatus.Declined
)
assert competing_organization_application.organization is None
assert send_email.calls == [
pretend.call(
db_request,
organization_application.submitted_by,
organization_name=organization.name,
message="",
),
pretend.call(
db_request,
competing_organization_application.submitted_by,
organization_name=competing_organization_application.name,
message="",
),
]
catalog_entry = (
db_request.db.query(OrganizationNameCatalog)
.filter_by(normalized_name=organization_application.normalized_name)
.one()
)
assert catalog_entry.organization_id == organization.id
roles = organization.users
assert len(roles) == 1
assert organization.owners == [organization_application.submitted_by]
org_role_add_event = (
db_request.db.query(Organization.Event)
.filter_by(
tag=EventTag.Organization.OrganizationRoleAdd, source_id=organization.id
)
.one()
)
assert org_role_add_event.additional == {
"submitted_by_user_id": str(organization_application.submitted_by.id),
"organization_name": organization.name,
"role_name": "Owner",
"target_user_id": str(organization_application.submitted_by.id),
"redact_ip": True,
}
account_role_add_event = (
db_request.db.query(User.Event)
.filter_by(
tag=EventTag.Account.OrganizationRoleAdd,
source_id=organization_application.submitted_by_id,
)
.one()
)
assert account_role_add_event.additional == {
"submitted_by_user_id": str(organization_application.submitted_by.id),
"organization_name": organization.name,
"role_name": "Owner",
"redact_ip": True,
}
def test_defer_organization_application(self, db_request, organization_service):
admin = UserFactory(username="admin", is_superuser=True)
db_request.user = admin
organization_application = OrganizationApplicationFactory.create()
organization_service.defer_organization_application(
organization_application.id, db_request
)
assert organization_application.status == OrganizationApplicationStatus.Deferred
def test_request_more_information_organization_application(
self, db_request, organization_service, monkeypatch
):
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
services, "send_new_organization_moreinformationneeded_email", send_email
)
admin = UserFactory(username="admin", is_superuser=True)
db_request.user = admin
db_request.params["message"] = "some message"
organization_application = OrganizationApplicationFactory.create()
organization_service.request_more_information(
organization_application.id, db_request
)
assert len(organization_application.observations) == 1
assert (
organization_application.status
== OrganizationApplicationStatus.MoreInformationNeeded
)
assert send_email.calls == [
pretend.call(
db_request,
organization_application.submitted_by,
organization_name=organization_application.name,
organization_application_id=organization_application.id,
message="some message",
),
]
    def test_request_more_information_organization_application_no_message(
        self, db_request, organization_service, monkeypatch
    ):
        send_email = pretend.call_recorder(lambda *a, **kw: None)
        monkeypatch.setattr(
            services, "send_new_organization_moreinformationneeded_email", send_email
        )
        admin = UserFactory(username="admin", is_superuser=True)
        db_request.user = admin
        organization_application = OrganizationApplicationFactory.create()
        # No "message" request param: the service must refuse to proceed.
        with pytest.raises(ValueError):  # noqa: PT011
            organization_service.request_more_information(
                organization_application.id, db_request
            )
        # Nothing recorded and no notification sent.
        assert len(organization_application.observations) == 0
        assert send_email.calls == []
def test_decline_organization_application(
self, db_request, organization_service, monkeypatch
):
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
services, "send_new_organization_declined_email", send_email
)
admin = UserFactory(username="admin", is_superuser=True)
db_request.user = admin
organization_application = OrganizationApplicationFactory.create()
organization_service.decline_organization_application(
organization_application.id, db_request
)
assert organization_application.status == OrganizationApplicationStatus.Declined
assert send_email.calls == [
pretend.call(
db_request,
organization_application.submitted_by,
organization_name=organization_application.name,
message="",
),
]
def test_find_organizationid(self, organization_service):
organization = OrganizationFactory.create()
assert (
organization_service.find_organizationid(organization.name)
== organization.id
)
def test_find_organizationid_nonexistent_org(self, organization_service):
assert organization_service.find_organizationid("a_spoon_in_the_matrix") is None
def test_get_organizations(self, organization_service):
organization = OrganizationFactory.create(name="org")
another_organization = OrganizationFactory.create(name="another_org")
orgs = organization_service.get_organizations()
assert organization in orgs
assert another_organization in orgs
def test_get_organizations_by_user(self, organization_service, user_service):
user_organization = OrganizationFactory.create()
user = UserFactory.create()
organization_service.add_organization_role(
user_organization.id,
user.id,
OrganizationRoleType.Owner.value,
)
another_user_organization = OrganizationFactory.create()
another_user = UserFactory.create()
organization_service.add_organization_role(
another_user_organization.id,
another_user.id,
OrganizationRoleType.Owner.value,
)
user_orgs = organization_service.get_organizations_by_user(user.id)
another_user_orgs = organization_service.get_organizations_by_user(
another_user.id
)
assert user_organization in user_orgs
assert user_organization not in another_user_orgs
assert another_user_organization in another_user_orgs
assert another_user_organization not in user_orgs
def test_get_organization_role(self, organization_service, user_service):
organization_role = OrganizationRoleFactory.create()
assert (
organization_service.get_organization_role(organization_role.id)
== organization_role
)
def test_get_organization_role_by_user(self, organization_service, user_service):
organization_role = OrganizationRoleFactory.create()
assert (
organization_service.get_organization_role_by_user(
organization_role.organization_id,
organization_role.user_id,
)
== organization_role
)
def test_get_organization_role_by_user_nonexistent_role(self, organization_service):
user = UserFactory.create()
organization = OrganizationFactory.create()
assert (
organization_service.get_organization_role_by_user(organization.id, user.id)
is None
)
def test_get_organization_roles(self, organization_service, user_service):
organization = OrganizationFactory.create()
user = UserFactory.create()
another_user = UserFactory.create()
added_owner = organization_service.add_organization_role(
organization.id,
user.id,
OrganizationRoleType.Owner.value,
)
added_member = organization_service.add_organization_role(
organization.id,
another_user.id,
OrganizationRoleType.Member.value,
)
org_roles = organization_service.get_organization_roles(organization.id)
assert added_owner in org_roles
assert added_member in org_roles
def test_add_organization_role(self, organization_service, user_service):
user = UserFactory.create()
organization = OrganizationFactory.create()
added_role = organization_service.add_organization_role(
organization.id,
user.id,
OrganizationRoleType.Owner.value,
)
assert added_role.role_name == OrganizationRoleType.Owner.value
assert added_role.user_id == user.id
assert added_role.organization_id == organization.id
def test_delete_organization_role(self, organization_service, user_service):
organization_role = OrganizationRoleFactory.create()
organization_service.delete_organization_role(organization_role.id)
assert (
organization_service.get_organization_role_by_user(
organization_role.organization_id,
organization_role.user_id,
)
is None
)
def test_delete_organization_role_deletes_team_roles(
self, organization_service, user_service
):
user = UserFactory.create()
organization = OrganizationFactory.create()
organization_role = OrganizationRoleFactory.create(
organization=organization, user=user
)
team = TeamFactory.create(organization=organization)
TeamRoleFactory.create(team=team, user=user)
organization_service.delete_organization_role(organization_role.id)
assert (
organization_service.get_organization_role_by_user(
organization_role.organization_id,
user.id,
)
is None
)
assert (
organization_service.get_organization_team_roles_by_user(
organization.id,
user.id,
)
== []
)
def test_get_organization_invite(self, organization_service):
organization_invite = OrganizationInvitationFactory.create()
assert (
organization_service.get_organization_invite(organization_invite.id)
is not None
)
def test_get_organization_invite_by_user(self, organization_service):
organization_invite = OrganizationInvitationFactory.create()
assert (
organization_service.get_organization_invite_by_user(
organization_invite.organization_id, organization_invite.user_id
)
is not None
)
def test_get_organization_invite_by_user_nonexistent_invite(
self, organization_service
):
user = UserFactory.create()
organization = OrganizationFactory.create()
assert (
organization_service.get_organization_invite_by_user(
organization.id, user.id
)
is None
)
def test_get_organization_invites(self, organization_service, user_service):
user = UserFactory.create()
organization = OrganizationFactory.create()
another_organization = OrganizationFactory.create()
invite = organization_service.add_organization_invite(
organization.id,
user.id,
"some_token",
)
another_invite = organization_service.add_organization_invite(
another_organization.id,
user.id,
"some_token",
)
invites = organization_service.get_organization_invites_by_user(user.id)
assert invite in invites
assert another_invite in invites
def test_add_organization_invite(self, organization_service, user_service):
user = UserFactory.create()
organization = OrganizationFactory.create()
added_invite = organization_service.add_organization_invite(
organization.id,
user.id,
"some_token",
)
assert added_invite.user_id == user.id
assert added_invite.organization_id == organization.id
assert added_invite.token == "some_token"
def test_delete_organization_invite(self, organization_service):
organization_invite = OrganizationInvitationFactory.create()
organization_service.delete_organization_invite(organization_invite.id)
organization_service.db.flush()
assert (
organization_service.get_organization_invite(organization_invite.id) is None
)
def test_delete_organization(self, organization_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
OrganizationStripeSubscriptionFactory.create(
organization=organization, subscription=subscription
)
TeamFactory.create(organization=organization)
organization_service.delete_organization(organization.id)
assert not (
db_request.db.query(OrganizationInvitation)
.filter_by(organization=organization)
.count()
)
assert not (
db_request.db.query(OrganizationNameCatalog)
.filter(OrganizationNameCatalog.organization_id == organization.id)
.count()
)
assert not (
db_request.db.query(OrganizationProject)
.filter_by(organization=organization)
.count()
)
assert not (
db_request.db.query(OrganizationRole)
.filter_by(organization=organization)
.count()
)
assert not (
db_request.db.query(OrganizationStripeSubscription)
.filter_by(organization=organization, subscription=subscription)
.count()
)
assert not (
db_request.db.query(OrganizationStripeCustomer)
.filter_by(organization=organization, customer=stripe_customer)
.count()
)
assert not (
db_request.db.query(StripeSubscription)
.filter(StripeSubscription.id == subscription.id)
.count()
)
assert not (
db_request.db.query(Team).filter_by(organization=organization).count()
)
assert organization_service.get_organization(organization.id) is None
def test_delete_organization_without_subscription(
self, organization_service, db_request
):
organization = OrganizationFactory.create()
TeamFactory.create(organization=organization)
organization_service.delete_organization(organization.id)
assert not (
db_request.db.query(OrganizationInvitation)
.filter_by(organization=organization)
.count()
)
assert not (
db_request.db.query(OrganizationNameCatalog)
.filter(OrganizationNameCatalog.organization_id == organization.id)
.count()
)
assert not (
db_request.db.query(OrganizationProject)
.filter_by(organization=organization)
.count()
)
assert not (
db_request.db.query(OrganizationRole)
.filter_by(organization=organization)
.count()
)
assert not (
db_request.db.query(OrganizationStripeSubscription)
.filter_by(organization=organization)
.count()
)
assert not (
db_request.db.query(Team).filter_by(organization=organization).count()
)
assert organization_service.get_organization(organization.id) is None
def test_rename_organization(self, organization_service, db_request):
organization = OrganizationFactory.create()
organization_service.rename_organization(organization.id, "some_new_name")
assert organization.name == "some_new_name"
db_organization = organization_service.get_organization(organization.id)
assert db_organization.name == "some_new_name"
organization_service.db.flush()
assert (
db_request.db.query(OrganizationNameCatalog)
.filter(
OrganizationNameCatalog.normalized_name == organization.normalized_name
)
.count()
)
def test_rename_organization_back(self, organization_service, db_request):
organization = OrganizationFactory.create()
original_name = organization.name
organization_service.rename_organization(organization.id, "some_new_name")
assert organization.name == "some_new_name"
db_organization = organization_service.get_organization(organization.id)
assert db_organization.name == "some_new_name"
organization_service.db.flush()
assert (
db_request.db.query(OrganizationNameCatalog)
.filter(
OrganizationNameCatalog.normalized_name == organization.normalized_name
)
.count()
) == 1
organization_service.rename_organization(organization.id, original_name)
assert organization.name == original_name
db_organization = organization_service.get_organization(organization.id)
assert db_organization.name == original_name
organization_service.db.flush()
assert (
db_request.db.query(OrganizationNameCatalog)
.filter(
OrganizationNameCatalog.normalized_name == organization.normalized_name
)
.count()
) == 1
def test_rename_fails_if_organization_name_in_use(
self, organization_service, db_request
):
conflicting_org = OrganizationFactory.create()
organization = OrganizationFactory.create()
with pytest.raises(ValueError): # noqa: PT011
organization_service.rename_organization(
organization.id, conflicting_org.name
)
def test_rename_fails_if_organization_name_previously_used(
self, organization_service, db_request
):
conflicting_org = OrganizationFactory.create()
original_name = conflicting_org.name
organization_service.rename_organization(conflicting_org.id, "some_new_name")
organization = OrganizationFactory.create()
with pytest.raises(ValueError): # noqa: PT011
organization_service.rename_organization(organization.id, original_name)
def test_update_organization(self, organization_service, db_request):
organization = OrganizationFactory.create()
organization_service.update_organization(
organization.id,
name="some_new_name",
display_name="Some New Name",
orgtype=OrganizationType.Company.value,
)
assert organization.name == "some_new_name"
assert organization.display_name == "Some New Name"
assert organization.orgtype == OrganizationType.Company
db_organization = organization_service.get_organization(organization.id)
assert db_organization.name == "some_new_name"
assert db_organization.display_name == "Some New Name"
assert db_organization.orgtype == OrganizationType.Company
organization_service.db.flush()
assert (
db_request.db.query(OrganizationNameCatalog)
.filter(
OrganizationNameCatalog.normalized_name
== db_organization.normalized_name
)
.count()
)
def test_get_organization_project(self, organization_service):
organization = OrganizationFactory.create()
project = ProjectFactory.create()
organization_project = OrganizationProjectFactory.create(
organization=organization, project=project
)
assert (
organization_service.get_organization_project(organization.id, project.id)
== organization_project
)
def test_add_organization_project(self, organization_service, db_request):
organization = OrganizationFactory.create()
project = ProjectFactory.create()
organization_service.add_organization_project(organization.id, project.id)
assert (
db_request.db.query(OrganizationProject)
.filter(
OrganizationProject.organization_id == organization.id,
OrganizationProject.project_id == project.id,
)
.count()
)
def test_delete_organization_project(self, organization_service, db_request):
organization = OrganizationFactory.create()
project = ProjectFactory.create()
OrganizationProjectFactory.create(organization=organization, project=project)
organization_service.delete_organization_project(organization.id, project.id)
assert not (
db_request.db.query(OrganizationProject)
.filter(
OrganizationProject.organization_id == organization.id,
OrganizationProject.project_id == project.id,
)
.count()
)
def test_record_tos_engagement_invalid_engagement(
self, organization_service, db_request
):
organization = OrganizationFactory.create()
assert organization.terms_of_service_engagements == []
with pytest.raises(ValueError): # noqa: PT011
organization_service.record_tos_engagement(
organization.id,
"initial",
None,
)
@pytest.mark.parametrize(
"engagement",
[
TermsOfServiceEngagement.Flashed,
TermsOfServiceEngagement.Notified,
TermsOfServiceEngagement.Viewed,
TermsOfServiceEngagement.Agreed,
],
)
def test_record_tos_engagement(self, organization_service, db_request, engagement):
organization = OrganizationFactory.create()
assert organization.terms_of_service_engagements == []
organization_service.record_tos_engagement(
organization.id,
"initial",
engagement=engagement,
)
assert (
db_request.db.query(OrganizationTermsOfServiceEngagement)
.filter(
OrganizationTermsOfServiceEngagement.organization_id == organization.id,
OrganizationTermsOfServiceEngagement.revision == "initial",
OrganizationTermsOfServiceEngagement.engagement == engagement,
)
.count()
) == 1
def test_add_organization_subscription(self, organization_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
organization_service.add_organization_subscription(
organization.id, subscription.id
)
assert (
db_request.db.query(OrganizationStripeSubscription)
.filter(
OrganizationStripeSubscription.organization_id == organization.id,
OrganizationStripeSubscription.subscription_id == subscription.id,
)
.count()
)
def test_delete_organization_subscription(self, organization_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
OrganizationStripeSubscriptionFactory.create(
organization=organization, subscription=subscription
)
organization_service.delete_organization_subscription(
organization.id, subscription.id
)
assert not (
db_request.db.query(OrganizationStripeSubscription)
.filter(
OrganizationStripeSubscription.organization_id == organization.id,
OrganizationStripeSubscription.subscription_id == subscription.id,
)
.count()
)
def test_get_organization_stripe_customer(self, organization_service):
organization = OrganizationFactory.create()
organization_stripe_customer = OrganizationStripeCustomerFactory.create(
organization=organization
)
assert (
organization_service.get_organization_stripe_customer(organization.id)
== organization_stripe_customer
)
def test_add_organization_stripe_customer(self, organization_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
organization_service.add_organization_stripe_customer(
organization.id, stripe_customer.id
)
assert (
db_request.db.query(OrganizationStripeCustomer)
.filter(
OrganizationStripeCustomer.organization_id == organization.id,
OrganizationStripeCustomer.stripe_customer_id == stripe_customer.id,
)
.count()
)
def test_get_teams_by_organization(self, organization_service):
organization = OrganizationFactory.create()
team = TeamFactory.create(organization=organization)
teams = organization_service.get_teams_by_organization(organization.id)
assert len(teams) == 1
assert team in teams
team2 = TeamFactory.create(organization=organization)
teams = organization_service.get_teams_by_organization(organization.id)
assert len(teams) == 2
assert team in teams
assert team2 in teams
def test_get_team(self, organization_service):
team = TeamFactory.create()
assert organization_service.get_team(team.id) == team
def test_find_teamid(self, organization_service):
organization = OrganizationFactory.create()
team = TeamFactory.create(organization=organization)
assert organization_service.find_teamid(organization.id, team.name) == team.id
def test_find_teamid_nonexistent_org(self, organization_service):
organization = OrganizationFactory.create()
assert (
organization_service.find_teamid(organization.id, "a_spoon_in_the_matrix")
is None
)
def test_get_teams_by_user(self, organization_service):
team = TeamFactory.create()
user = UserFactory.create()
TeamRoleFactory.create(team=team, user=user)
teams = organization_service.get_teams_by_user(user.id)
assert team in teams
team2 = TeamFactory.create()
TeamRoleFactory.create(team=team2, user=user)
teams = organization_service.get_teams_by_user(user.id)
assert team in teams
assert team2 in teams
def test_test_add_team(self, organization_service):
team = TeamFactory.create()
new_team = organization_service.add_team(
name=team.name,
organization_id=team.organization.id,
)
organization_service.db.flush()
team_from_db = organization_service.get_team(new_team.id)
assert team_from_db.name == team.name
assert team_from_db.organization_id == team.organization_id
def test_rename_team(self, organization_service):
team = TeamFactory.create()
organization_service.rename_team(team.id, "some_new_name")
assert team.name == "some_new_name"
db_team = organization_service.get_team(team.id)
assert db_team.name == "some_new_name"
def test_delete_team(self, organization_service):
team = TeamFactory.create()
user = UserFactory.create()
project = ProjectFactory.create()
team_role = TeamRoleFactory.create(team=team, user=user)
team_project_role = TeamProjectRoleFactory.create(team=team, project=project)
assert organization_service.get_team_role(team_role.id) is not None
assert (
organization_service.get_team_project_role(team_project_role.id) is not None
)
team_role_id = team_role.id
team_project_role_id = team_project_role.id
organization_service.delete_team(team.id)
assert organization_service.get_team_role(team_role_id) is None
assert organization_service.get_team_project_role(team_project_role_id) is None
assert organization_service.get_team(team.id) is None
def test_delete_teams_by_organization(self, organization_service):
organization = OrganizationFactory.create()
team = TeamFactory.create(organization=organization)
team2 = TeamFactory.create(organization=organization)
teams = organization_service.get_teams_by_organization(organization.id)
assert len(teams) == 2
assert team in teams
assert team2 in teams
organization_service.delete_teams_by_organization(organization.id)
teams = organization_service.get_teams_by_organization(organization.id)
assert len(teams) == 0
assert team not in teams
assert team2 not in teams
def test_get_team_role(self, organization_service):
team = TeamFactory.create()
user = UserFactory.create()
team_role = TeamRoleFactory.create(team=team, user=user)
assert organization_service.get_team_role(team_role.id) == team_role
def test_add_team_role(self, organization_service, db_request):
team = TeamFactory.create()
user = UserFactory.create()
organization_service.add_team_role(team.id, user.id, "Member")
assert (
db_request.db.query(TeamRole)
.filter(
TeamRole.team_id == team.id,
TeamRole.user_id == user.id,
TeamRole.role_name == "Member",
)
.count()
)
def test_delete_team_role(self, organization_service):
team = TeamFactory.create()
user = UserFactory.create()
team_role = TeamRoleFactory.create(team=team, user=user)
team_role_id = team_role.id
organization_service.delete_team_role(team_role.id)
organization_service.db.flush()
assert organization_service.get_team_role(team_role_id) is None
def test_get_team_project_role(self, organization_service):
team = TeamFactory.create()
project = ProjectFactory.create()
team_project_role = TeamProjectRoleFactory.create(team=team, project=project)
assert (
organization_service.get_team_project_role(team_project_role.id)
== team_project_role
)
def test_add_team_project_role(self, organization_service, db_request):
team = TeamFactory.create()
project = ProjectFactory.create()
organization_service.add_team_project_role(team.id, project.id, "Owner")
assert (
db_request.db.query(TeamProjectRole)
.filter(
TeamProjectRole.team_id == team.id,
TeamProjectRole.project_id == project.id,
TeamProjectRole.role_name == "Owner",
)
.count()
)
def test_delete_team_project_role(self, organization_service):
team = TeamFactory.create()
project = ProjectFactory.create()
team_project_role = TeamProjectRoleFactory.create(team=team, project=project)
team_project_role_id = team_project_role.id
organization_service.delete_team_project_role(team_project_role.id)
assert organization_service.get_team_role(team_project_role_id) is None
| TestDatabaseOrganizationService |
python | google__python-fire | fire/console/platforms.py | {
"start": 8218,
"end": 12551
} | class ____(object):
"""Holds an operating system and architecture."""
def __init__(self, operating_system, architecture):
"""Constructs a new platform.
Args:
operating_system: OperatingSystem, The OS
architecture: Architecture, The machine architecture.
"""
self.operating_system = operating_system
self.architecture = architecture
def __str__(self):
return '{}-{}'.format(self.operating_system, self.architecture)
@staticmethod
def Current(os_override=None, arch_override=None):
"""Determines the current platform you are running on.
Args:
os_override: OperatingSystem, A value to use instead of the current.
arch_override: Architecture, A value to use instead of the current.
Returns:
Platform, The platform tuple of operating system and architecture. Either
can be None if it could not be determined.
"""
return Platform(
os_override if os_override else OperatingSystem.Current(),
arch_override if arch_override else Architecture.Current())
def UserAgentFragment(self):
"""Generates the fragment of the User-Agent that represents the OS.
Examples:
(Linux 3.2.5-gg1236)
(Windows NT 6.1.7601)
(Macintosh; PPC Mac OS X 12.4.0)
(Macintosh; Intel Mac OS X 12.4.0)
Returns:
str, The fragment of the User-Agent string.
"""
# Below, there are examples of the value of platform.uname() per platform.
# platform.release() is uname[2], platform.version() is uname[3].
if self.operating_system == OperatingSystem.LINUX:
# ('Linux', '<hostname goes here>', '3.2.5-gg1236',
# '#1 SMP Tue May 21 02:35:06 PDT 2013', 'x86_64', 'x86_64')
return '({name} {version})'.format(
name=self.operating_system.name, version=platform.release())
elif self.operating_system == OperatingSystem.WINDOWS:
# ('Windows', '<hostname goes here>', '7', '6.1.7601', 'AMD64',
# 'Intel64 Family 6 Model 45 Stepping 7, GenuineIntel')
return '({name} NT {version})'.format(
name=self.operating_system.name, version=platform.version())
elif self.operating_system == OperatingSystem.MACOSX:
# ('Darwin', '<hostname goes here>', '12.4.0',
# 'Darwin Kernel Version 12.4.0: Wed May 1 17:57:12 PDT 2013;
# root:xnu-2050.24.15~1/RELEASE_X86_64', 'x86_64', 'i386')
format_string = '(Macintosh; {name} Mac OS X {version})'
arch_string = (self.architecture.name
if self.architecture == Architecture.ppc else 'Intel')
return format_string.format(
name=arch_string, version=platform.release())
else:
return '()'
def AsyncPopenArgs(self):
"""Returns the args for spawning an async process using Popen on this OS.
Make sure the main process does not wait for the new process. On windows
this means setting the 0x8 creation flag to detach the process.
Killing a group leader kills the whole group. Setting creation flag 0x200 on
Windows or running setsid on *nix makes sure the new process is in a new
session with the new process the group leader. This means it can't be killed
if the parent is killed.
Finally, all file descriptors (FD) need to be closed so that waiting for the
output of the main process does not inadvertently wait for the output of the
new process, which means waiting for the termination of the new process.
If the new process wants to write to a file, it can open new FDs.
Returns:
{str:}, The args for spawning an async process using Popen on this OS.
"""
args = {}
if self.operating_system == OperatingSystem.WINDOWS:
args['close_fds'] = True # This is enough to close _all_ FDs on windows.
detached_process = 0x00000008
create_new_process_group = 0x00000200
# 0x008 | 0x200 == 0x208
args['creationflags'] = detached_process | create_new_process_group
else:
# Killing a group leader kills the whole group.
# Create a new session with the new process the group leader.
args['preexec_fn'] = os.setsid
args['close_fds'] = True # This closes all FDs _except_ 0, 1, 2 on *nix.
args['stdin'] = subprocess.PIPE
args['stdout'] = subprocess.PIPE
args['stderr'] = subprocess.PIPE
return args
| Platform |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wx.py | {
"start": 1460,
"end": 2160
} | class ____(TimerBase):
"""Subclass of `.TimerBase` using wx.Timer events."""
def __init__(self, *args, **kwargs):
self._timer = wx.Timer()
self._timer.Notify = self._on_timer
super().__init__(*args, **kwargs)
def _timer_start(self):
self._timer.Start(self._interval, self._single)
def _timer_stop(self):
self._timer.Stop()
def _timer_set_interval(self):
if self._timer.IsRunning():
self._timer_start() # Restart with new interval.
@_api.deprecated(
"2.0", name="wx", obj_type="backend", removal="the future",
alternative="wxagg",
addendum="See the Matplotlib usage FAQ for more info on backends.")
| TimerWx |
python | django__django | tests/generic_inline_admin/tests.py | {
"start": 11631,
"end": 17215
} | class ____(SimpleTestCase):
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertIs(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertIs(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
The custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ["url"]
class MediaInline(GenericTabularInline):
readonly_fields = ["description"]
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [MediaInline]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
["keywords", "id", "DELETE"],
)
def test_custom_form_meta_exclude(self):
"""
The custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ["url"]
class MediaInline(GenericTabularInline):
exclude = ["description"]
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [MediaInline]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
["url", "keywords", "id", "DELETE"],
)
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [MediaInline]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
["description", "keywords", "id", "DELETE"],
)
def test_get_fieldsets(self):
# get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = "__all__"
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {"fields": ["url", "description"]})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ["url", "description"])
def test_get_formsets_with_inlines_returns_tuples(self):
"""
get_formsets_with_inlines() returns the correct tuples.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ["url"]
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class AlternateInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [AlternateInline, MediaInline]
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
for (formset, inline), other_inline in zip(
ma.get_formsets_with_inlines(request), inlines
):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
def test_get_inline_instances_override_get_inlines(self):
class MediaInline(GenericTabularInline):
model = Media
class AlternateInline(GenericTabularInline):
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = (AlternateInline, MediaInline)
def get_inlines(self, request, obj):
if hasattr(request, "name"):
if request.name == "alternate":
return self.inlines[:1]
elif request.name == "media":
return self.inlines[1:2]
return []
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(ma.get_inlines(request, None), [])
self.assertEqual(ma.get_inline_instances(request), [])
for name, inline_class in (
("alternate", AlternateInline),
("media", MediaInline),
):
request.name = name
self.assertEqual(ma.get_inlines(request, None), (inline_class,))
self.assertEqual(type(ma.get_inline_instances(request)[0]), inline_class)
| GenericInlineModelAdminTest |
python | apache__airflow | providers/google/src/airflow/providers/google/suite/operators/sheets.py | {
"start": 1017,
"end": 3440
} | class ____(BaseOperator):
"""
Creates a new spreadsheet.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSheetsCreateSpreadsheetOperator`
:param spreadsheet: an instance of Spreadsheet
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param api_endpoint: Optional. Custom API endpoint, e.g: private.googleapis.com.
This can be used to target private VPC or restricted access endpoints.
"""
template_fields: Sequence[str] = (
"spreadsheet",
"impersonation_chain",
)
def __init__(
self,
*,
spreadsheet: dict[str, Any],
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
api_endpoint: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.spreadsheet = spreadsheet
self.impersonation_chain = impersonation_chain
self.api_endpoint = api_endpoint
def execute(self, context: Any) -> dict[str, Any]:
hook = GSheetsHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
api_endpoint=self.api_endpoint,
)
spreadsheet = hook.create_spreadsheet(spreadsheet=self.spreadsheet)
context["task_instance"].xcom_push(key="spreadsheet_id", value=spreadsheet["spreadsheetId"])
context["task_instance"].xcom_push(key="spreadsheet_url", value=spreadsheet["spreadsheetUrl"])
return spreadsheet
| GoogleSheetsCreateSpreadsheetOperator |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 127787,
"end": 135117
} | class ____:
'''Test ones_like, zeros_like, empty_like and full_like'''
def compare_array_value(self, dz, value, fill_value):
if value is not None:
if fill_value:
# Conversion is close to what np.full_like uses
# but we may want to convert directly in the future
# which may result in errors (where this does not).
z = np.array(value).astype(dz.dtype)
assert_(np.all(dz == z))
else:
assert_(np.all(dz == value))
def check_like_function(self, like_function, value, fill_value=False):
data = [
# Array scalars
(np.array(3.), None),
(np.array(3), 'f8'),
# 1D arrays
(np.arange(6, dtype='f4'), None),
(np.arange(6), 'c16'),
# 2D C-layout arrays
(np.arange(6).reshape(2, 3), None),
(np.arange(6).reshape(3, 2), 'i1'),
# 2D F-layout arrays
(np.arange(6).reshape((2, 3), order='F'), None),
(np.arange(6).reshape((3, 2), order='F'), 'i1'),
# 3D C-layout arrays
(np.arange(24).reshape(2, 3, 4), None),
(np.arange(24).reshape(4, 3, 2), 'f4'),
# 3D F-layout arrays
(np.arange(24).reshape((2, 3, 4), order='F'), None),
(np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
# 3D non-C/F-layout arrays
(np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
(np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
]
shapes = [(), (5,), (5, 6,), (5, 6, 7,)]
if fill_value:
fill_kwarg = {'fill_value': value}
else:
fill_kwarg = {}
for d, dtype in data:
# default (K) order, dtype
dz = like_function(d, dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_equal(np.array(dz.strides) * d.dtype.itemsize,
np.array(d.strides) * dz.dtype.itemsize)
assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# C order, default dtype
dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# F order, default dtype
dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# A order
dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
if d.flags.f_contiguous:
assert_(dz.flags.f_contiguous)
else:
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# Test the 'shape' parameter
for s in shapes:
for o in 'CFA':
sz = like_function(d, dtype=dtype, shape=s, order=o,
**fill_kwarg)
assert_equal(sz.shape, s)
if dtype is None:
assert_equal(sz.dtype, d.dtype)
else:
assert_equal(sz.dtype, np.dtype(dtype))
if o == 'C' or (o == 'A' and d.flags.c_contiguous):
assert_(sz.flags.c_contiguous)
elif o == 'F' or (o == 'A' and d.flags.f_contiguous):
assert_(sz.flags.f_contiguous)
self.compare_array_value(sz, value, fill_value)
if (d.ndim != len(s)):
assert_equal(np.argsort(like_function(d, dtype=dtype,
shape=s, order='K',
**fill_kwarg).strides),
np.argsort(np.empty(s, dtype=dtype,
order='C').strides))
else:
assert_equal(np.argsort(like_function(d, dtype=dtype,
shape=s, order='K',
**fill_kwarg).strides),
np.argsort(d.strides))
# Test the 'subok' parameter
class MyNDArray(np.ndarray):
pass
a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
assert_(type(b) is not MyNDArray)
# Test invalid dtype
with assert_raises(TypeError):
a = np.array(b"abc")
like_function(a, dtype="S-1", **fill_kwarg)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
def test_zeros_like(self):
self.check_like_function(np.zeros_like, 0)
def test_empty_like(self):
self.check_like_function(np.empty_like, None)
def test_filled_like(self):
self.check_like_function(np.full_like, 0, True)
self.check_like_function(np.full_like, 1, True)
# Large integers may overflow, but using int64 is OK (casts)
# see also gh-27075
with pytest.raises(OverflowError):
np.full_like(np.ones(3, dtype=np.int8), 1000)
self.check_like_function(np.full_like, np.int64(1000), True)
self.check_like_function(np.full_like, 123.456, True)
# Inf to integer casts cause invalid-value errors: ignore them.
with np.errstate(invalid="ignore"):
self.check_like_function(np.full_like, np.inf, True)
@pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like,
np.zeros_like, np.ones_like])
@pytest.mark.parametrize('dtype', [str, bytes])
def test_dtype_str_bytes(self, likefunc, dtype):
# Regression test for gh-19860
a = np.arange(16).reshape(2, 8)
b = a[:, ::2] # Ensure b is not contiguous.
kwargs = {'fill_value': ''} if likefunc == np.full_like else {}
result = likefunc(b, dtype=dtype, **kwargs)
if dtype == str:
assert result.strides == (16, 4)
else:
# dtype is bytes
assert result.strides == (4, 1)
| TestLikeFuncs |
python | pypa__warehouse | warehouse/utils/static.py | {
"start": 112,
"end": 619
} | class ____(_ManifestCacheBuster):
def __init__(self, *args, strict=True, **kwargs):
super().__init__(*args, **kwargs)
self.strict = strict
def __call__(self, request, subpath, kw):
try:
return self.manifest[subpath], kw
except KeyError:
# If we're not in strict mode, then we'll allow missing files to
# just fall back to the un-cachebusted path.
if not self.strict:
return subpath, kw
| ManifestCacheBuster |
python | lazyprogrammer__machine_learning_examples | ab_testing/comparing_epsilons.py | {
"start": 386,
"end": 2156
} | class ____:
def __init__(self, m):
self.m = m
self.m_estimate = 0
self.N = 0
def pull(self):
return np.random.randn() + self.m
def update(self, x):
self.N += 1
self.m_estimate = (1 - 1.0/self.N)*self.m_estimate + 1.0/self.N*x
def run_experiment(m1, m2, m3, eps, N):
bandits = [BanditArm(m1), BanditArm(m2), BanditArm(m3)]
# count number of suboptimal choices
means = np.array([m1, m2, m3])
true_best = np.argmax(means)
count_suboptimal = 0
data = np.empty(N)
for i in range(N):
# epsilon greedy
p = np.random.random()
if p < eps:
j = np.random.choice(len(bandits))
else:
j = np.argmax([b.m_estimate for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
if j != true_best:
count_suboptimal += 1
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
for b in bandits:
print(b.m_estimate)
print("percent suboptimal for epsilon = %s:" % eps, float(count_suboptimal) / N)
return cumulative_average
if __name__ == '__main__':
m1, m2, m3 = 1.5, 2.5, 3.5
c_1 = run_experiment(m1, m2, m3, 0.1, 100000)
c_05 = run_experiment(m1, m2, m3, 0.05, 100000)
c_01 = run_experiment(m1, m2, m3, 0.01, 100000)
# log scale plot
plt.plot(c_1, label='eps = 0.1')
plt.plot(c_05, label='eps = 0.05')
plt.plot(c_01, label='eps = 0.01')
plt.legend()
plt.xscale('log')
plt.show()
# linear plot
plt.plot(c_1, label='eps = 0.1')
plt.plot(c_05, label='eps = 0.05')
plt.plot(c_01, label='eps = 0.01')
plt.legend()
plt.show()
| BanditArm |
python | numpy__numpy | numpy/distutils/fcompiler/lahey.py | {
"start": 92,
"end": 1327
} | class ____(FCompiler):
compiler_type = 'lahey'
description = 'Lahey/Fujitsu Fortran 95 Compiler'
version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["lf95", "--fix"],
'compiler_fix' : ["lf95", "--fix"],
'compiler_f90' : ["lf95"],
'linker_so' : ["lf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g', '--chk', '--chkglobal']
def get_library_dirs(self):
opt = []
d = os.environ.get('LAHEY')
if d:
opt.append(os.path.join(d, 'lib'))
return opt
def get_libraries(self):
opt = []
opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='lahey').get_version())
| LaheyFCompiler |
python | RaRe-Technologies__gensim | gensim/models/ldamodel.py | {
"start": 10882,
"end": 75341
} | class ____(interfaces.TransformationABC, basemodel.BaseTopicModel):
"""Train and use Online Latent Dirichlet Allocation model as presented in
`'Online Learning for LDA' by Hoffman et al.`_
Examples
-------
Initialize a model using a Gensim corpus
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus
>>>
>>> lda = LdaModel(common_corpus, num_topics=10)
You can then infer topic distributions on new, unseen documents.
.. sourcecode:: pycon
>>> doc_bow = [(1, 0.3), (2, 0.1), (0, 0.09)]
>>> doc_lda = lda[doc_bow]
The model can be updated (trained) with new documents.
.. sourcecode:: pycon
>>> # In practice (corpus =/= initial training corpus), but we use the same here for simplicity.
>>> other_corpus = common_corpus
>>>
>>> lda.update(other_corpus)
Model persistency is achieved through :meth:`~gensim.models.ldamodel.LdaModel.load` and
:meth:`~gensim.models.ldamodel.LdaModel.save` methods.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None,
distributed=False, chunksize=2000, passes=1, update_every=1,
alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10,
iterations=50, gamma_threshold=0.001, minimum_probability=0.01,
random_state=None, ns_conf=None, minimum_phi_value=0.01,
per_word_topics=False, callbacks=None, dtype=np.float32):
"""
Parameters
----------
corpus : iterable of list of (int, float), optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
If you have a CSC in-memory matrix, you can convert it to a
streamed corpus with the help of gensim.matutils.Sparse2Corpus.
If not given, the model is left untrained (presumably because you want to call
:meth:`~gensim.models.ldamodel.LdaModel.update` manually).
num_topics : int, optional
The number of requested latent topics to be extracted from the training corpus.
id2word : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}
Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for
debugging and topic printing.
distributed : bool, optional
Whether distributed computing should be used to accelerate training.
chunksize : int, optional
Number of documents to be used in each training chunk.
passes : int, optional
Number of passes through the corpus during training.
update_every : int, optional
Number of documents to be iterated through for each update.
Set to 0 for batch learning, > 1 for online iterative learning.
alpha : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on document-topic distribution, this can be:
* scalar for a symmetric prior over document-topic distribution,
* 1D array of length equal to num_topics to denote an asymmetric user defined prior for each topic.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / (topic_index + sqrt(num_topics))`,
* 'auto': Learns an asymmetric prior from the corpus (not available if `distributed==True`).
eta : {float, numpy.ndarray of float, list of float, str}, optional
A-priori belief on topic-word distribution, this can be:
* scalar for a symmetric prior over topic-word distribution,
* 1D array of length equal to num_words to denote an asymmetric user defined prior for each word,
* matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination.
Alternatively default prior selecting strategies can be employed by supplying a string:
* 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
* 'auto': Learns an asymmetric prior from the corpus.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined.
Corresponds to :math:`\\kappa` from `'Online Learning for LDA' by Hoffman et al.`_
offset : float, optional
Hyper-parameter that controls how much we will slow down the first steps the first few iterations.
Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
eval_every : int, optional
Log perplexity is estimated every that many updates. Setting this to one slows down training by ~2x.
iterations : int, optional
Maximum number of iterations through the corpus when inferring the topic distribution of a corpus.
gamma_threshold : float, optional
Minimum change in the value of the gamma parameters to continue iterating.
minimum_probability : float, optional
Topics with a probability lower than this threshold will be filtered out.
random_state : {np.random.RandomState, int}, optional
Either a randomState object or a seed to generate one. Useful for reproducibility.
ns_conf : dict of (str, object), optional
Key word parameters propagated to :func:`gensim.utils.getNS` to get a Pyro4 nameserver.
Only used if `distributed` is set to True.
minimum_phi_value : float, optional
if `per_word_topics` is True, this represents a lower bound on the term probabilities.
per_word_topics : bool
If True, the model also computes a list of topics, sorted in descending order of most likely topics for
each word, along with their phi values multiplied by the feature length (i.e. word count).
callbacks : list of :class:`~gensim.models.callbacks.Callback`
Metric callbacks to log and visualize evaluation metrics of the model during training.
dtype : {numpy.float16, numpy.float32, numpy.float64}, optional
Data-type to use during calculations inside model. All inputs are also converted.
"""
self.dtype = np.finfo(dtype).dtype
# store user-supplied parameters
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError(
'at least one of corpus/id2word must be specified, to establish input space dimensionality'
)
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif len(self.id2word) > 0:
self.num_terms = 1 + max(self.id2word.keys())
else:
self.num_terms = 0
if self.num_terms == 0:
raise ValueError("cannot compute LDA over an empty collection (no terms)")
self.distributed = bool(distributed)
self.num_topics = int(num_topics)
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.minimum_phi_value = minimum_phi_value
self.per_word_topics = per_word_topics
self.callbacks = callbacks
self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')
assert self.alpha.shape == (self.num_topics,), \
"Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)
self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')
assert self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms), (
"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
(str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))
self.random_state = utils.get_random_state(random_state)
# VB constants
self.iterations = iterations
self.gamma_threshold = gamma_threshold
# set up distributed environment if necessary
if not distributed:
logger.info("using serial LDA version on this node")
self.dispatcher = None
self.numworkers = 1
else:
if self.optimize_alpha:
raise NotImplementedError("auto-optimizing alpha not implemented in distributed LDA")
# set up distributed version
try:
import Pyro4
if ns_conf is None:
ns_conf = {}
with utils.getNS(**ns_conf) as ns:
from gensim.models.lda_dispatcher import LDA_DISPATCHER_PREFIX
self.dispatcher = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
logger.debug("looking for dispatcher at %s" % str(self.dispatcher._pyroUri))
self.dispatcher.initialize(
id2word=self.id2word, num_topics=self.num_topics, chunksize=chunksize,
alpha=alpha, eta=eta, distributed=False
)
self.numworkers = len(self.dispatcher.getworkers())
logger.info("using distributed version with %i workers", self.numworkers)
except Exception as err:
logger.error("failed to initialize distributed LDA (%s)", err)
raise RuntimeError("failed to initialize distributed LDA (%s)" % err)
# Initialize the variational distribution q(beta|lambda)
self.state = LdaState(self.eta, (self.num_topics, self.num_terms), dtype=self.dtype)
self.state.sstats[...] = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
# Check that we haven't accidentally fallen back to np.float64
assert self.eta.dtype == self.dtype
assert self.expElogbeta.dtype == self.dtype
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
use_numpy = self.dispatcher is not None
start = time.time()
self.update(corpus, chunks_as_numpy=use_numpy)
self.add_lifecycle_event(
"created",
msg=f"trained {self} in {time.time() - start:.2f}s",
)
def init_dir_prior(self, prior, name):
    """Build the initial Dirichlet prior vector for ``alpha`` or ``eta``.

    Parameters
    ----------
    prior : {float, numpy.ndarray of float, list of float, str}
        Either a concrete prior (scalar, list, or array) or a selection
        strategy name: 'symmetric' (the default when `prior` is None),
        'asymmetric' (alpha only), or 'auto' (hyperparameter optimization).
    name : {'alpha', 'eta'}
        Which prior is being initialized: 'alpha' is one value per topic,
        'eta' is one value per vocabulary term.

    Returns
    -------
    init_prior : numpy.ndarray
        The initialized prior, shape (num_topics,) for 'alpha' or
        (num_terms,) for 'eta' (unless the caller supplied a matrix).
    is_auto : bool
        True only for the 'auto' strategy, i.e. the prior should be
        optimized during training.

    Raises
    ------
    ValueError
        If `name` is unknown, the strategy string is unsupported, or
        `prior` has an unsupported type.
    """
    if prior is None:
        prior = 'symmetric'

    if name == 'alpha':
        prior_shape = self.num_topics
    elif name == 'eta':
        prior_shape = self.num_terms
    else:
        raise ValueError("'name' must be 'alpha' or 'eta'")

    is_auto = False

    if isinstance(prior, str):
        if prior == 'symmetric':
            logger.info("using symmetric %s at %s", name, 1.0 / self.num_topics)
            # NOTE: the symmetric value is 1/num_topics for eta as well (matches historic behavior).
            init_prior = np.full(prior_shape, 1.0 / self.num_topics, dtype=self.dtype)
        elif prior == 'asymmetric':
            if name == 'eta':
                raise ValueError("The 'asymmetric' option cannot be used for eta")
            # 1 / (rank + sqrt(K)), computed in float64 then cast, then normalized to sum to 1.
            ranks = np.arange(prior_shape, dtype=np.float64)
            init_prior = (1.0 / (ranks + np.sqrt(prior_shape))).astype(self.dtype)
            init_prior /= init_prior.sum()
            logger.info("using asymmetric %s %s", name, list(init_prior))
        elif prior == 'auto':
            is_auto = True
            # start from the symmetric prior; it will be optimized during training
            init_prior = np.full(prior_shape, 1.0 / self.num_topics, dtype=self.dtype)
            if name == 'alpha':
                logger.info("using autotuned %s, starting with %s", name, list(init_prior))
        else:
            raise ValueError("Unable to determine proper %s value given '%s'" % (name, prior))
    elif isinstance(prior, list):
        init_prior = np.asarray(prior, dtype=self.dtype)
    elif isinstance(prior, np.ndarray):
        init_prior = prior.astype(self.dtype, copy=False)
    elif isinstance(prior, (np.number, numbers.Real)):
        init_prior = np.full(prior_shape, prior, dtype=self.dtype)
    else:
        raise ValueError("%s must be either a np array of scalars, list of scalars, or scalar" % name)

    return init_prior, is_auto
def __str__(self):
    """Return a short human-readable summary of the model's key parameters.

    Returns
    -------
    str
        Class name plus the number of terms, number of topics, decay, and chunksize.
    """
    return (
        f"{self.__class__.__name__}"
        f"<num_terms={self.num_terms!s}, num_topics={self.num_topics!s}, "
        f"decay={self.decay!s}, chunksize={self.chunksize!s}>"
    )
def sync_state(self, current_Elogbeta=None):
    """Recompute ``self.expElogbeta`` from the state's topic log-probabilities.

    Parameters
    ----------
    current_Elogbeta : numpy.ndarray, optional
        Log topic-word probabilities to exponentiate. When omitted, they are
        fetched from ``self.state.get_Elogbeta()``.
    """
    source = self.state.get_Elogbeta() if current_Elogbeta is None else current_Elogbeta
    self.expElogbeta = np.exp(source)
    # guard against silently falling back to a different float dtype
    assert self.expElogbeta.dtype == self.dtype
def clear(self):
    """Drop the model's state and cached log-probabilities to free memory.

    Used by the distributed implementation, where workers discard local state
    after shipping sufficient statistics back to the dispatcher.
    """
    self.state, self.Elogbeta = None, None
def inference(self, chunk, collect_sstats=False):
    """Given a chunk of sparse document vectors, estimate gamma (parameters controlling the topic weights)
    for each document in the chunk.

    This function does not modify the model. The whole input chunk of document is assumed to fit in RAM;
    chunking of a large corpus must be done earlier in the pipeline. Avoids computing the `phi` variational
    parameter directly using the optimization presented in
    `Lee, Seung: Algorithms for non-negative matrix factorization"
    <https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf>`_.

    Parameters
    ----------
    chunk : list of list of (int, float)
        The corpus chunk on which the inference step will be performed.
    collect_sstats : bool, optional
        If set to True, also collect (and return) sufficient statistics needed to update the model's topic-word
        distributions.

    Returns
    -------
    (numpy.ndarray, {numpy.ndarray, None})
        The first element is always returned and it corresponds to the states gamma matrix. The second element is
        only returned if `collect_sstats` == True and corresponds to the sufficient statistics for the M step.

    """
    try:
        len(chunk)
    except TypeError:
        # convert iterators/generators to plain list, so we have len() etc.
        chunk = list(chunk)
    if len(chunk) > 1:
        logger.debug("performing inference on a chunk of %i documents", len(chunk))

    # Initialize the variational distribution q(theta|gamma) for the chunk
    gamma = self.random_state.gamma(100., 1. / 100., (len(chunk), self.num_topics)).astype(self.dtype, copy=False)
    Elogtheta = dirichlet_expectation(gamma)
    expElogtheta = np.exp(Elogtheta)
    # dtype checks: everything below must stay in self.dtype (no accidental float64 upcast)
    assert Elogtheta.dtype == self.dtype
    assert expElogtheta.dtype == self.dtype

    if collect_sstats:
        sstats = np.zeros_like(self.expElogbeta, dtype=self.dtype)
    else:
        sstats = None
    # counts how many documents hit the gamma-convergence threshold before exhausting iterations
    converged = 0

    # Now, for each document d update that document's gamma and phi
    # Inference code copied from Hoffman's `onlineldavb.py` (esp. the
    # Lee&Seung trick which speeds things up by an order of magnitude, compared
    # to Blei's original LDA-C code, cool!).
    integer_types = (int, np.integer,)
    # epsilon guards the phinorm denominator against exact zeros
    epsilon = np.finfo(self.dtype).eps
    for d, doc in enumerate(chunk):
        if len(doc) > 0 and not isinstance(doc[0][0], integer_types):
            # make sure the term IDs are ints, otherwise np will get upset
            ids = [int(idx) for idx, _ in doc]
        else:
            ids = [idx for idx, _ in doc]
        # cts = word counts for this document, aligned with ids
        cts = np.fromiter((cnt for _, cnt in doc), dtype=self.dtype, count=len(doc))
        gammad = gamma[d, :]
        Elogthetad = Elogtheta[d, :]
        expElogthetad = expElogtheta[d, :]
        # restrict the topic-word matrix to the terms present in this document
        expElogbetad = self.expElogbeta[:, ids]

        # The optimal phi_{dwk} is proportional to expElogthetad_k * expElogbetad_kw.
        # phinorm is the normalizer.
        # TODO treat zeros explicitly, instead of adding epsilon?
        phinorm = np.dot(expElogthetad, expElogbetad) + epsilon

        # Iterate between gamma and phi until convergence
        for _ in range(self.iterations):
            lastgamma = gammad
            # We represent phi implicitly to save memory and time.
            # Substituting the value of the optimal phi back into
            # the update for gamma gives this update. Cf. Lee&Seung 2001.
            gammad = self.alpha + expElogthetad * np.dot(cts / phinorm, expElogbetad.T)
            Elogthetad = dirichlet_expectation(gammad)
            expElogthetad = np.exp(Elogthetad)
            phinorm = np.dot(expElogthetad, expElogbetad) + epsilon
            # If gamma hasn't changed much, we're done.
            meanchange = mean_absolute_difference(gammad, lastgamma)
            if meanchange < self.gamma_threshold:
                converged += 1
                break
        gamma[d, :] = gammad
        assert gammad.dtype == self.dtype
        if collect_sstats:
            # Contribution of document d to the expected sufficient
            # statistics for the M step.
            sstats[:, ids] += np.outer(expElogthetad.T, cts / phinorm)

    if len(chunk) > 1:
        logger.debug("%i/%i documents converged within %i iterations", converged, len(chunk), self.iterations)

    if collect_sstats:
        # This step finishes computing the sufficient statistics for the
        # M step, so that
        # sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
        # = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
        sstats *= self.expElogbeta
        assert sstats.dtype == self.dtype
    assert gamma.dtype == self.dtype
    return gamma, sstats
def do_estep(self, chunk, state=None):
    """Run inference on a chunk and fold the sufficient statistics into a state.

    Parameters
    ----------
    chunk : list of list of (int, float)
        The corpus chunk on which the inference step will be performed.
    state : :class:`~gensim.models.ldamodel.LdaState`, optional
        Accumulator for the collected sufficient statistics; defaults to
        ``self.state`` when None.

    Returns
    -------
    numpy.ndarray
        Gamma parameters controlling the topic weights, shape (`len(chunk)`, `self.num_topics`).
    """
    target = self.state if state is None else state
    gamma, sstats = self.inference(chunk, collect_sstats=True)
    target.sstats += sstats
    # count documents via gamma's rows, which works even when `chunk` was a generator
    target.numdocs += gamma.shape[0]
    assert gamma.dtype == self.dtype
    return gamma
def update_alpha(self, gammat, rho):
    """Optimize the Dirichlet prior on per-document topic weights (alpha).

    Parameters
    ----------
    gammat : numpy.ndarray
        Gamma (topic-weight) parameters collected during the last E step,
        one row per document.
    rho : float
        Learning rate.

    Returns
    -------
    numpy.ndarray
        The updated alpha vector (also stored on ``self.alpha``).
    """
    num_docs = float(len(gammat))
    # average log-expectation of the per-document topic distributions
    logphat = sum(dirichlet_expectation(row) for row in gammat) / num_docs
    assert logphat.dtype == self.dtype
    self.alpha = update_dir_prior(self.alpha, num_docs, logphat, rho)
    logger.info("optimized alpha %s", list(self.alpha))
    assert self.alpha.dtype == self.dtype
    return self.alpha
def update_eta(self, lambdat, rho):
    """Optimize the Dirichlet prior on per-topic word weights (eta).

    Parameters
    ----------
    lambdat : numpy.ndarray
        Lambda (topic-word) parameters, one row per topic.
    rho : float
        Learning rate.

    Returns
    -------
    numpy.ndarray
        The updated eta parameters (also stored on ``self.eta``).
    """
    num_rows = float(lambdat.shape[0])
    # average log-expectation over topics, flattened to one value per term
    logphat = (sum(dirichlet_expectation(row) for row in lambdat) / num_rows).reshape((self.num_terms,))
    assert logphat.dtype == self.dtype
    self.eta = update_dir_prior(self.eta, num_rows, logphat, rho)
    assert self.eta.dtype == self.dtype
    return self.eta
def log_perplexity(self, chunk, total_docs=None):
    """Compute the per-word likelihood bound on a held-out chunk and log it.

    The perplexity estimate 2^(-bound) is logged at INFO level alongside the bound.

    Parameters
    ----------
    chunk : list of list of (int, float)
        Held-out documents in BoW format used for the evaluation.
    total_docs : int, optional
        Total corpus size the chunk is a sample of; defaults to ``len(chunk)``
        (i.e. no subsampling correction).

    Returns
    -------
    float
        The variational bound per word.
    """
    n_eval_docs = len(chunk)
    if total_docs is None:
        total_docs = n_eval_docs
    word_count = sum(cnt for doc in chunk for _, cnt in doc)
    # scale the bound up when `chunk` is only a sample of the full corpus
    subsample_ratio = 1.0 * total_docs / n_eval_docs
    per_word_bound = self.bound(chunk, subsample_ratio=subsample_ratio) / (subsample_ratio * word_count)
    logger.info(
        "%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words",
        per_word_bound, np.exp2(-per_word_bound), n_eval_docs, word_count
    )
    return per_word_bound
def update(self, corpus, chunksize=None, decay=None, offset=None,
           passes=None, update_every=None, eval_every=None, iterations=None,
           gamma_threshold=None, chunks_as_numpy=False):
    """Train the model with new documents, by EM-iterating over the corpus until the topics converge, or until
    the maximum number of allowed iterations is reached. `corpus` must be an iterable.

    In distributed mode, the E step is distributed over a cluster of machines.

    Notes
    -----
    This update also supports updating an already trained model (`self`) with new documents from `corpus`;
    the two models are then merged in proportion to the number of old vs. new documents.
    This feature is still experimental for non-stationary input streams.

    For stationary input (no topic drift in new documents), on the other hand,
    this equals the online update of `'Online Learning for LDA' by Hoffman et al.`_
    and is guaranteed to converge for any `decay` in (0.5, 1].
    Additionally, for smaller corpus sizes,
    an increasing `offset` may be beneficial (see Table 1 in the same paper).

    Parameters
    ----------
    corpus : iterable of list of (int, float), optional
        Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`) used to update the
        model.
    chunksize : int, optional
        Number of documents to be used in each training chunk.
    decay : float, optional
        A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
        when each new document is examined. Corresponds to :math:`\\kappa` from
        `'Online Learning for LDA' by Hoffman et al.`_
    offset : float, optional
        Hyper-parameter that controls how much we will slow down the first steps the first few iterations.
        Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
    passes : int, optional
        Number of passes through the corpus during training.
    update_every : int, optional
        Number of documents to be iterated through for each update.
        Set to 0 for batch learning, > 1 for online iterative learning.
    eval_every : int, optional
        Log perplexity is estimated every that many updates. Setting this to one slows down training by ~2x.
    iterations : int, optional
        Maximum number of iterations through the corpus when inferring the topic distribution of a corpus.
    gamma_threshold : float, optional
        Minimum change in the value of the gamma parameters to continue iterating.
    chunks_as_numpy : bool, optional
        Whether each chunk passed to the inference step should be a numpy.ndarray or not. Numpy can in some settings
        turn the term IDs into floats, these will be converted back into integers in inference, which incurs a
        performance hit. For distributed computing it may be desirable to keep the chunks as `numpy.ndarray`.

    """
    # use parameters given in constructor, unless user explicitly overrode them
    if decay is None:
        decay = self.decay
    if offset is None:
        offset = self.offset
    if passes is None:
        passes = self.passes
    if update_every is None:
        update_every = self.update_every
    if eval_every is None:
        eval_every = self.eval_every
    if iterations is None:
        iterations = self.iterations
    if gamma_threshold is None:
        gamma_threshold = self.gamma_threshold

    try:
        lencorpus = len(corpus)
    except Exception:
        # NOTE: counting consumes a pure generator; the length check at the end of each pass
        # will then fail with a RuntimeError pointing this out.
        logger.warning("input corpus stream has no len(); counting documents")
        lencorpus = sum(1 for _ in corpus)
    if lencorpus == 0:
        logger.warning("LdaModel.update() called with an empty corpus")
        return

    if chunksize is None:
        chunksize = min(lencorpus, self.chunksize)

    self.state.numdocs += lencorpus

    if update_every:
        updatetype = "online"
        if passes == 1:
            updatetype += " (single-pass)"
        else:
            updatetype += " (multi-pass)"
        updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
    else:
        updatetype = "batch"
        updateafter = lencorpus
    # evalafter is informational only (logged below); the actual evaluation trigger is computed per chunk
    evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)

    updates_per_pass = max(1, lencorpus / updateafter)
    logger.info(
        "running %s LDA training, %s topics, %i passes over "
        "the supplied corpus of %i documents, updating model once "
        "every %i documents, evaluating perplexity every %i documents, "
        "iterating %ix with a convergence threshold of %f",
        updatetype, self.num_topics, passes, lencorpus,
        updateafter, evalafter, iterations,
        gamma_threshold
    )

    if updates_per_pass * passes < 10:
        logger.warning(
            "too few updates, training might not converge; "
            "consider increasing the number of passes or iterations to improve accuracy"
        )

    # rho is the "speed" of updating; TODO try other fncs
    # pass_ + num_updates handles increasing the starting t for each pass,
    # while allowing it to "reset" on the first pass of each update
    def rho():
        # closes over pass_/offset/decay/chunksize from the enclosing scope
        return pow(offset + pass_ + (self.num_updates / chunksize), -decay)

    if self.callbacks:
        # pass the list of input callbacks to Callback class
        callback = Callback(self.callbacks)
        callback.set_model(self)
        # initialize metrics list to store metric values after every epoch
        self.metrics = defaultdict(list)

    for pass_ in range(passes):
        if self.dispatcher:
            logger.info('initializing %s workers', self.numworkers)
            self.dispatcher.reset(self.state)
        else:
            # `other` accumulates sufficient statistics until the next M step
            other = LdaState(self.eta, self.state.sstats.shape, self.dtype)
        dirty = False

        reallen = 0
        chunks = utils.grouper(corpus, chunksize, as_numpy=chunks_as_numpy, dtype=self.dtype)
        for chunk_no, chunk in enumerate(chunks):
            reallen += len(chunk)  # keep track of how many documents we've processed so far

            if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
                self.log_perplexity(chunk, total_docs=lencorpus)

            if self.dispatcher:
                # add the chunk to dispatcher's job queue, so workers can munch on it
                logger.info(
                    "PROGRESS: pass %i, dispatching documents up to #%i/%i",
                    pass_, chunk_no * chunksize + len(chunk), lencorpus
                )
                # this will eventually block until some jobs finish, because the queue has a small finite length
                self.dispatcher.putjob(chunk)
            else:
                logger.info(
                    "PROGRESS: pass %i, at document #%i/%i",
                    pass_, chunk_no * chunksize + len(chunk), lencorpus
                )
                gammat = self.do_estep(chunk, other)

                if self.optimize_alpha:
                    self.update_alpha(gammat, rho())

            # mark that `other` holds statistics not yet folded into the model
            dirty = True
            del chunk

            # perform an M step. determine when based on update_every, don't do this after every chunk
            if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
                if self.dispatcher:
                    # distributed mode: wait for all workers to finish
                    logger.info("reached the end of input; now waiting for all remaining jobs to finish")
                    other = self.dispatcher.getstate()
                self.do_mstep(rho(), other, pass_ > 0)
                del other  # frees up memory

                if self.dispatcher:
                    logger.info('initializing workers')
                    self.dispatcher.reset(self.state)
                else:
                    other = LdaState(self.eta, self.state.sstats.shape, self.dtype)
                dirty = False

        if reallen != lencorpus:
            raise RuntimeError("input corpus size changed during training (don't use generators as input)")

        # append current epoch's metric values
        if self.callbacks:
            current_metrics = callback.on_epoch_end(pass_)
            for metric, value in current_metrics.items():
                self.metrics[metric].append(value)

        if dirty:
            # finish any remaining updates
            if self.dispatcher:
                # distributed mode: wait for all workers to finish
                logger.info("reached the end of input; now waiting for all remaining jobs to finish")
                other = self.dispatcher.getstate()
            self.do_mstep(rho(), other, pass_ > 0)
            del other
            dirty = False
def do_mstep(self, rho, other, extra_pass=False):
    """Maximization step: blend the collected sufficient statistics into the topics.

    Linearly interpolates (at rate `rho`) between the existing topics and the
    statistics accumulated in `other`, then refreshes the cached probabilities.

    Parameters
    ----------
    rho : float
        Learning rate.
    other : :class:`~gensim.models.ldamodel.LdaState`
        Sufficient-statistics accumulator blended into ``self.state``
        (it is passed to ``state.blend`` and its ``numdocs`` is read).
    extra_pass : bool, optional
        Whether this step required an additional pass over the corpus; if so,
        the document counter is not advanced.
    """
    logger.debug("updating topics")
    # blend in the new statistics, and track how much the topics moved
    # through this update, as a convergence signal
    before_Elogbeta = self.state.get_Elogbeta()
    self.state.blend(rho, other)
    after_Elogbeta = self.state.get_Elogbeta()
    self.sync_state(after_Elogbeta)

    # print out some debug info at the end of each EM iteration
    self.print_topics(5)
    topic_diff = mean_absolute_difference(before_Elogbeta.ravel(), after_Elogbeta.ravel())
    logger.info("topic diff=%f, rho=%f", topic_diff, rho)

    if self.optimize_eta:
        self.update_eta(self.state.get_lambda(), rho)

    if not extra_pass:
        # only update if this isn't an additional pass
        self.num_updates += other.numdocs
def bound(self, corpus, gamma=None, subsample_ratio=1.0) -> float:
    """Estimate the variational bound of documents from the corpus as E_q[log p(corpus)] - E_q[log q(corpus)].

    Parameters
    ----------
    corpus : iterable of list of (int, float), optional
        Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`) used to estimate the
        variational bounds.
    gamma : numpy.ndarray, optional
        Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
    subsample_ratio : float, optional
        Percentage of the whole corpus represented by the passed `corpus` argument (in case this was a sample).
        Set to 1.0 if the whole corpus was passed.This is used as a multiplicative factor to scale the likelihood
        appropriately.

    Returns
    -------
    float
        The variational bound score, accumulated over the whole corpus
        (a single scalar, not one value per document).

    """
    # accumulate in float64 for numeric stability regardless of self.dtype
    score = np.float64(0.0)
    _lambda = self.state.get_lambda()
    Elogbeta = dirichlet_expectation(_lambda)

    for d, doc in enumerate(corpus):  # stream the input doc-by-doc, in case it's too large to fit in RAM
        if d % self.chunksize == 0:
            logger.debug("bound: at document #%i", d)
        if gamma is None:
            # infer per-document topic weights on the fly (discard sstats)
            gammad, _ = self.inference([doc])
        else:
            gammad = gamma[d]
        Elogthetad = dirichlet_expectation(gammad)

        assert gammad.dtype == self.dtype
        assert Elogthetad.dtype == self.dtype

        # E[log p(doc | theta, beta)]
        score += sum(cnt * logsumexp(Elogthetad + Elogbeta[:, int(id)]) for id, cnt in doc)

        # E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
        score += np.sum((self.alpha - gammad) * Elogthetad)
        score += np.sum(gammaln(gammad) - gammaln(self.alpha))
        score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gammad))

    # Compensate likelihood for when `corpus` above is only a sample of the whole corpus. This ensures
    # that the likelihood is always roughly on the same scale.
    score *= subsample_ratio

    # E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
    score += np.sum((self.eta - _lambda) * Elogbeta)
    score += np.sum(gammaln(_lambda) - gammaln(self.eta))

    if np.ndim(self.eta) == 0:
        sum_eta = self.eta * self.num_terms
    else:
        sum_eta = np.sum(self.eta)

    score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))

    return score
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
    """Get a representation for selected topics.

    Parameters
    ----------
    num_topics : int, optional
        Number of topics to return. There is no natural topic ordering in LDA,
        so the selected subset is arbitrary and may change between training runs.
        Negative values or values >= `self.num_topics` select all topics.
    num_words : int, optional
        Number of most relevant (highest-probability) words shown per topic.
    log : bool, optional
        Whether to also log each topic at INFO level.
    formatted : bool, optional
        If True, each topic is a formatted string; otherwise a list of
        (word, probability) pairs.

    Returns
    -------
    list of (int, {str, list of (str, float)})
        Pairs of topic id and topic representation.
    """
    if num_topics < 0 or num_topics >= self.num_topics:
        num_topics = self.num_topics
        chosen_topics = range(num_topics)
    else:
        num_topics = min(num_topics, self.num_topics)

        # add a little random jitter, to randomize results around the same alpha
        # (random_state.rand returns float64, but converting back to dtype won't speed up anything)
        jittered_alpha = self.alpha + 0.0001 * self.random_state.rand(len(self.alpha))
        sorted_topics = list(matutils.argsort(jittered_alpha))
        # take topics from both ends of the alpha ordering
        chosen_topics = sorted_topics[:num_topics // 2] + sorted_topics[-num_topics // 2:]

    shown = []
    lambdas = self.state.get_lambda()
    for topic_id in chosen_topics:
        weights = lambdas[topic_id]
        weights = weights / weights.sum()  # normalize to probability distribution
        top_ids = matutils.argsort(weights, num_words, reverse=True)
        pairs = [(self.id2word[word_id], weights[word_id]) for word_id in top_ids]
        if formatted:
            representation = ' + '.join('%.3f*"%s"' % (v, k) for k, v in pairs)
        else:
            representation = pairs

        shown.append((topic_id, representation))
        if log:
            logger.info("topic #%i (%.3f): %s", topic_id, self.alpha[topic_id], representation)

    return shown
def show_topic(self, topicid, topn=10):
    """Get a single topic as (word string, probability) pairs.

    Unlike :meth:`~gensim.models.ldamodel.LdaModel.get_topic_terms`, words are
    returned as actual strings rather than vocabulary IDs.

    Parameters
    ----------
    topicid : int
        The ID of the topic to be returned.
    topn : int, optional
        Number of the most significant words associated with the topic.

    Returns
    -------
    list of (str, float)
        Word - probability pairs for the most relevant words generated by the topic.
    """
    return [
        (self.id2word[word_id], prob)
        for word_id, prob in self.get_topic_terms(topicid, topn)
    ]
def get_topics(self):
    """Get the term-topic matrix learned during inference.

    Returns
    -------
    numpy.ndarray
        Per-topic word probabilities, shape (`num_topics`, `vocabulary_size`);
        each row sums to 1.
    """
    lambdas = self.state.get_lambda()
    # normalize each topic's row into a probability distribution
    row_sums = lambdas.sum(axis=1)[:, None]
    return lambdas / row_sums
def get_topic_terms(self, topicid, topn=10):
    """Get a single topic as (word ID, probability) pairs.

    Unlike :meth:`~gensim.models.ldamodel.LdaModel.show_topic`, words are
    returned as integer vocabulary IDs rather than strings.

    Parameters
    ----------
    topicid : int
        The ID of the topic to be returned.
    topn : int, optional
        Number of the most significant words associated with the topic.

    Returns
    -------
    list of (int, float)
        Word ID - probability pairs for the most relevant words generated by the topic.
    """
    topic_dist = self.get_topics()[topicid]
    topic_dist = topic_dist / topic_dist.sum()  # normalize to probability distribution
    top_ids = matutils.argsort(topic_dist, topn, reverse=True)
    return [(word_id, topic_dist[word_id]) for word_id in top_ids]
def top_topics(self, corpus=None, texts=None, dictionary=None, window_size=None,
               coherence='u_mass', topn=20, processes=-1):
    """Rank all topics by coherence score, highest first.

    Parameters
    ----------
    corpus : iterable of list of (int, float), optional
        Corpus in BoW format (required for 'u_mass' coherence).
    texts : list of list of str, optional
        Tokenized texts, needed for sliding-window coherence measures
        ('c_v', 'c_uci', 'c_npmi').
    dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
        Mapping of word id to word; falls back to `model.id2word` when absent.
    window_size : int, optional
        Sliding-window size for window-based measures; ignored for 'u_mass'.
        If None, per-measure defaults apply ('c_v' - 110, 'c_uci' - 10, 'c_npmi' - 10).
    coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional
        Coherence measure to use ('u_mass' is fastest; 'c_uci' is also known as 'c_pmi').
    topn : int, optional
        Number of top words extracted from each topic.
    processes : int, optional
        Number of processes for the probability-estimation phase; values < 1
        mean num_cpus - 1.

    Returns
    -------
    list of (list of (float, str), float)
        Pairs of topic representation (as (probability, word) pairs) and
        coherence score, sorted by score descending.
    """
    coherence_model = CoherenceModel(
        model=self, corpus=corpus, texts=texts, dictionary=dictionary,
        window_size=window_size, coherence=coherence, topn=topn,
        processes=processes
    )
    scores = coherence_model.get_coherence_per_topic()

    # build each topic's top-word representation: topn (membership, token) pairs
    represented = []
    for term_dist in self.get_topics():  # one vocab-sized probability vector per topic
        top_ids = matutils.argsort(term_dist, topn=topn, reverse=True)
        represented.append([(term_dist[word_id], self.id2word[word_id]) for word_id in top_ids])

    return sorted(zip(represented, scores), key=lambda pair: pair[1], reverse=True)
def get_document_topics(self, bow, minimum_probability=None, minimum_phi_value=None,
                        per_word_topics=False):
    """Get the topic distribution for the given document.

    Parameters
    ----------
    bow : list of (int, float)
        The document in BoW format (or a whole corpus, in which case a
        transformed corpus is returned).
    minimum_probability : float
        Topics with an assigned probability below this threshold are discarded;
        defaults to ``self.minimum_probability``, floored at 1e-8.
    minimum_phi_value : float
        Lower bound on the per-word term probabilities included when
        `per_word_topics` is True; defaults to ``self.minimum_probability``,
        floored at 1e-8.
    per_word_topics : bool
        If True, also return the two per-word lists described below.

    Returns
    -------
    list of (int, float)
        (topic id, probability) pairs for the whole document.
    list of (int, list of int), optional
        Per word: its id and the topic ids sorted by relevance to that word.
        Only returned if `per_word_topics` is True.
    list of (int, list of (int, float)), optional
        Per word: its id and (topic id, phi value) pairs (phi values scaled by
        feature length). Only returned if `per_word_topics` is True.
    """
    if minimum_probability is None:
        minimum_probability = self.minimum_probability
    minimum_probability = max(minimum_probability, 1e-8)  # never allow zero values in sparse output

    if minimum_phi_value is None:
        minimum_phi_value = self.minimum_probability
    minimum_phi_value = max(minimum_phi_value, 1e-8)  # never allow zero values in sparse output

    # a whole corpus was passed in: return a lazily-transformed corpus instead
    is_corpus, corpus = utils.is_corpus(bow)
    if is_corpus:
        return self._apply(
            corpus,
            per_word_topics=per_word_topics,
            minimum_probability=minimum_probability,
            minimum_phi_value=minimum_phi_value,
        )

    gamma, phis = self.inference([bow], collect_sstats=per_word_topics)
    topic_dist = gamma[0] / sum(gamma[0])  # normalize distribution

    document_topics = [
        (topic_id, proba) for topic_id, proba in enumerate(topic_dist)
        if proba >= minimum_probability
    ]

    if not per_word_topics:
        return document_topics

    word_topic = []  # per word: topic ids sorted by relevance
    word_phi = []  # per word: raw (topic id, phi) pairs
    for word_type, weight in bow:
        phi_values = []  # (phi, topic id) pairs, for sorting below
        phi_topic = []  # (topic id, phi) pairs, returned 'raw' to the caller
        for topic_id in range(0, self.num_topics):
            phi = phis[topic_id][word_type]
            if phi >= minimum_phi_value:
                # phi values are scaled by feature length
                phi_values.append((phi, topic_id))
                phi_topic.append((topic_id, phi))

        word_phi.append((word_type, phi_topic))
        # topics ordered most-likely first for this word
        sorted_topic_ids = [tid for _, tid in sorted(phi_values, reverse=True)]
        word_topic.append((word_type, sorted_topic_ids))

    return document_topics, word_topic, word_phi  # 3-tuple when per_word_topics is True
def get_term_topics(self, word_id, minimum_probability=None):
"""Get the most relevant topics to the given word.
Parameters
----------
word_id : int
The word for which the topic distribution will be computed.
minimum_probability : float, optional
Topics with an assigned probability below this threshold will be discarded.
Returns
-------
list of (int, float)
The relevant topics represented as pairs of their ID and their assigned probability, sorted
by relevance to the given word.
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
# if user enters word instead of id in vocab, change to get id
if isinstance(word_id, str):
word_id = self.id2word.doc2bow([word_id])[0][0]
values = []
for topic_id in range(0, self.num_topics):
if self.expElogbeta[topic_id][word_id] >= minimum_probability:
values.append((topic_id, self.expElogbeta[topic_id][word_id]))
return values
def diff(self, other, distance="kullback_leibler", num_words=100,
n_ann_terms=10, diagonal=False, annotation=True, normed=True):
"""Calculate the difference in topic distributions between two models: `self` and `other`.
Parameters
----------
other : :class:`~gensim.models.ldamodel.LdaModel`
The model which will be compared against the current object.
distance : {'kullback_leibler', 'hellinger', 'jaccard', 'jensen_shannon'}
The distance metric to calculate the difference with.
num_words : int, optional
The number of most relevant words used if `distance == 'jaccard'`. Also used for annotating topics.
n_ann_terms : int, optional
Max number of words in intersection/symmetric difference between topics. Used for annotation.
diagonal : bool, optional
Whether we need the difference between identical topics (the diagonal of the difference matrix).
annotation : bool, optional
Whether the intersection or difference of words between two topics should be returned.
normed : bool, optional
Whether the matrix should be normalized or not.
Returns
-------
numpy.ndarray
A difference matrix. Each element corresponds to the difference between the two topics,
shape (`self.num_topics`, `other.num_topics`)
numpy.ndarray, optional
Annotation matrix where for each pair we include the word from the intersection of the two topics,
and the word from the symmetric difference of the two topics. Only included if `annotation == True`.
Shape (`self.num_topics`, `other_model.num_topics`, 2).
Examples
--------
Get the differences between each pair of topics inferred by two models
.. sourcecode:: pycon
>>> from gensim.models.ldamulticore import LdaMulticore
>>> from gensim.test.utils import datapath
>>>
>>> m1 = LdaMulticore.load(datapath("lda_3_0_1_model"))
>>> m2 = LdaMulticore.load(datapath("ldamodel_python_3_5"))
>>> mdiff, annotation = m1.diff(m2)
>>> topic_diff = mdiff # get matrix with difference for each topic pair from `m1` and `m2`
"""
distances = {
"kullback_leibler": kullback_leibler,
"hellinger": hellinger,
"jaccard": jaccard_distance,
"jensen_shannon": jensen_shannon
}
if distance not in distances:
valid_keys = ", ".join("`{}`".format(x) for x in distances.keys())
raise ValueError("Incorrect distance, valid only {}".format(valid_keys))
if not isinstance(other, self.__class__):
raise ValueError("The parameter `other` must be of type `{}`".format(self.__name__))
distance_func = distances[distance]
d1, d2 = self.get_topics(), other.get_topics()
t1_size, t2_size = d1.shape[0], d2.shape[0]
annotation_terms = None
fst_topics = [{w for (w, _) in self.show_topic(topic, topn=num_words)} for topic in range(t1_size)]
snd_topics = [{w for (w, _) in other.show_topic(topic, topn=num_words)} for topic in range(t2_size)]
if distance == "jaccard":
d1, d2 = fst_topics, snd_topics
if diagonal:
assert t1_size == t2_size, \
"Both input models should have same no. of topics, " \
"as the diagonal will only be valid in a square matrix"
# initialize z and annotation array
z = np.zeros(t1_size)
if annotation:
annotation_terms = np.zeros(t1_size, dtype=list)
else:
# initialize z and annotation matrix
z = np.zeros((t1_size, t2_size))
if annotation:
annotation_terms = np.zeros((t1_size, t2_size), dtype=list)
# iterate over each cell in the initialized z and annotation
for topic in np.ndindex(z.shape):
topic1 = topic[0]
if diagonal:
topic2 = topic1
else:
topic2 = topic[1]
z[topic] = distance_func(d1[topic1], d2[topic2])
if annotation:
pos_tokens = fst_topics[topic1] & snd_topics[topic2]
neg_tokens = fst_topics[topic1].symmetric_difference(snd_topics[topic2])
pos_tokens = list(pos_tokens)[:min(len(pos_tokens), n_ann_terms)]
neg_tokens = list(neg_tokens)[:min(len(neg_tokens), n_ann_terms)]
annotation_terms[topic] = [pos_tokens, neg_tokens]
if normed:
if np.abs(np.max(z)) > 1e-8:
z /= np.max(z)
return z, annotation_terms
def __getitem__(self, bow, eps=None):
"""Get the topic distribution for the given document.
Wraps :meth:`~gensim.models.ldamodel.LdaModel.get_document_topics` to support an operator style call.
Uses the model's current state (set using constructor arguments) to fill in the additional arguments of the
wrapper method.
Parameters
---------
bow : list of (int, float)
The document in BOW format.
eps : float, optional
Topics with an assigned probability lower than this threshold will be discarded.
Returns
-------
list of (int, float)
Topic distribution for the given document. Each topic is represented as a pair of its ID and the probability
assigned to it.
"""
return self.get_document_topics(bow, eps, self.minimum_phi_value, self.per_word_topics)
def save(self, fname, ignore=('state', 'dispatcher'), separately=None, *args, **kwargs):
"""Save the model to a file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
Notes
-----
If you intend to use models across Python 2/3 versions there are a few things to
keep in mind:
1. The pickled Python dictionaries will not work across Python versions
2. The `save` method does not automatically save all numpy arrays separately, only
those ones that exceed `sep_limit` set in :meth:`~gensim.utils.SaveLoad.save`. The main
concern here is the `alpha` array if for instance using `alpha='auto'`.
Please refer to the `wiki recipes section
<https://github.com/RaRe-Technologies/gensim/wiki/
Recipes-&-FAQ#q9-how-do-i-load-a-model-in-python-3-that-was-trained-and-saved-using-python-2>`_
for an example on how to work around these issues.
See Also
--------
:meth:`~gensim.models.ldamodel.LdaModel.load`
Load model.
Parameters
----------
fname : str
Path to the system file where the model will be persisted.
ignore : tuple of str, optional
The named attributes in the tuple will be left out of the pickled model. The reason why
the internal `state` is ignored by default is that it uses its own serialisation rather than the one
provided by this method.
separately : {list of str, None}, optional
If None - automatically detect large numpy/scipy.sparse arrays in the object being stored, and store
them into separate files. This avoids pickle memory errors and allows `mmap`'ing large arrays
back on load efficiently. If list of str - this attributes will be stored in separate files,
the automatic check is not performed in this case.
*args
Positional arguments propagated to :meth:`~gensim.utils.SaveLoad.save`.
**kwargs
Key word arguments propagated to :meth:`~gensim.utils.SaveLoad.save`.
"""
if self.state is not None:
self.state.save(utils.smart_extension(fname, '.state'), *args, **kwargs)
# Save the dictionary separately if not in 'ignore'.
if 'id2word' not in ignore:
utils.pickle(self.id2word, utils.smart_extension(fname, '.id2word'))
# make sure 'state', 'id2word' and 'dispatcher' are ignored from the pickled object, even if
# someone sets the ignore list themselves
if ignore is not None and ignore:
if isinstance(ignore, str):
ignore = [ignore]
ignore = [e for e in ignore if e] # make sure None and '' are not in the list
ignore = list({'state', 'dispatcher', 'id2word'} | set(ignore))
else:
ignore = ['state', 'dispatcher', 'id2word']
# make sure 'expElogbeta' and 'sstats' are ignored from the pickled object, even if
# someone sets the separately list themselves.
separately_explicit = ['expElogbeta', 'sstats']
# Also add 'alpha' and 'eta' to separately list if they are set 'auto' or some
# array manually.
if (isinstance(self.alpha, str) and self.alpha == 'auto') or \
(isinstance(self.alpha, np.ndarray) and len(self.alpha.shape) != 1):
separately_explicit.append('alpha')
if (isinstance(self.eta, str) and self.eta == 'auto') or \
(isinstance(self.eta, np.ndarray) and len(self.eta.shape) != 1):
separately_explicit.append('eta')
# Merge separately_explicit with separately.
if separately:
if isinstance(separately, str):
separately = [separately]
separately = [e for e in separately if e] # make sure None and '' are not in the list
separately = list(set(separately_explicit) | set(separately))
else:
separately = separately_explicit
super(LdaModel, self).save(fname, ignore=ignore, separately=separately, *args, **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""Load a previously saved :class:`gensim.models.ldamodel.LdaModel` from file.
See Also
--------
:meth:`~gensim.models.ldamodel.LdaModel.save`
Save model.
Parameters
----------
fname : str
Path to the file where the model is stored.
*args
Positional arguments propagated to :meth:`~gensim.utils.SaveLoad.load`.
**kwargs
Key word arguments propagated to :meth:`~gensim.utils.SaveLoad.load`.
Examples
--------
Large arrays can be memmap'ed back as read-only (shared memory) by setting `mmap='r'`:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> fname = datapath("lda_3_0_1_model")
>>> lda = LdaModel.load(fname, mmap='r')
"""
kwargs['mmap'] = kwargs.get('mmap', None)
result = super(LdaModel, cls).load(fname, *args, **kwargs)
# check if `random_state` attribute has been set after main pickle load
# if set -> the model to be loaded was saved using a >= 0.13.2 version of Gensim
# if not set -> the model to be loaded was saved using a < 0.13.2 version of Gensim,
# so set `random_state` as the default value
if not hasattr(result, 'random_state'):
result.random_state = utils.get_random_state(None) # using default value `get_random_state(None)`
logging.warning("random_state not set so using default value")
# dtype could be absent in old models
if not hasattr(result, 'dtype'):
result.dtype = np.float64 # float64 was implicitly used before (cause it's default in numpy)
logging.info("dtype was not set in saved %s file %s, assuming np.float64", result.__class__.__name__, fname)
state_fname = utils.smart_extension(fname, '.state')
try:
result.state = LdaState.load(state_fname, *args, **kwargs)
except Exception as e:
logging.warning("failed to load state from %s: %s", state_fname, e)
id2word_fname = utils.smart_extension(fname, '.id2word')
# check if `id2word_fname` file is present on disk
# if present -> the model to be loaded was saved using a >= 0.13.2 version of Gensim,
# so set `result.id2word` using the `id2word_fname` file
# if not present -> the model to be loaded was saved using a < 0.13.2 version of Gensim,
# so `result.id2word` already set after the main pickle load
if os.path.isfile(id2word_fname):
try:
result.id2word = utils.unpickle(id2word_fname)
except Exception as e:
logging.warning("failed to load id2word dictionary from %s: %s", id2word_fname, e)
return result
| LdaModel |
python | jackfrued__Python-100-Days | 公开课/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example09.py | {
"start": 14,
"end": 239
} | class ____(type):
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
cls.clone = lambda self, is_deep=True: \
copy.deepcopy(self) if is_deep else copy.copy(self)
| PrototypeMeta |
python | pytorch__pytorch | test/inductor/extension_backends/triton/extension_codegen_backend.py | {
"start": 314,
"end": 991
} | class ____(BaseScheduling):
def __init__(self, scheduler):
super().__init__(scheduler)
self._triton_scheduling = triton.TritonScheduling(scheduler)
def can_fuse_vertical(self, node1, node2):
return True
def can_fuse_horizontal(self, node1, node2):
return True
def group_fn(self, sizes):
return self._triton_scheduling.group_fn(sizes)
def codegen_template(self, template_node, epilogue_nodes):
pass
def codegen_node(self, node):
self._triton_scheduling.codegen_node(node)
def codegen_sync(self):
pass
def flush(self):
self._triton_scheduling.flush()
| ExtensionScheduling |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 75493,
"end": 76968
} | class ____(ExtensionType):
oid = ExtensionOID.ADMISSIONS
def __init__(
self,
authority: GeneralName | None,
admissions: Iterable[Admission],
) -> None:
if authority is not None and not isinstance(authority, GeneralName):
raise TypeError("authority must be a GeneralName")
admissions = list(admissions)
if not all(
isinstance(admission, Admission) for admission in admissions
):
raise TypeError(
"Every item in the contents_of_admissions list must be an "
"Admission"
)
self._authority = authority
self._admissions = admissions
__len__, __iter__, __getitem__ = _make_sequence_methods("_admissions")
@property
def authority(self) -> GeneralName | None:
return self._authority
def __repr__(self) -> str:
return (
f"<Admissions(authority={self._authority}, "
f"admissions={self._admissions})>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Admissions):
return NotImplemented
return (
self.authority == other.authority
and self._admissions == other._admissions
)
def __hash__(self) -> int:
return hash((self.authority, tuple(self._admissions)))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
| Admissions |
python | walkccc__LeetCode | solutions/693. Binary Number with Alternating Bits/693.py | {
"start": 0,
"end": 262
} | class ____:
def hasAlternatingBits(self, n: int) -> bool:
# n = 0b010101
# n >> 2 = 0b000101
# n ^ (n >> 2) = 0b010000 = a
# a - 1 = 0b001111
# a & (a - 1) = 0
a = n ^ (n >> 2)
return (a & (a - 1)) == 0
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/lookup_ops_test.py | {
"start": 165947,
"end": 166233
} | class ____(MutableHashTableBenchmark):
def _create_table(self):
return lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.float32,
default_value=0.0,
empty_key=-1,
deleted_key=-2)
if __name__ == "__main__":
test.main()
| DenseHashTableBenchmark |
python | huggingface__transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | {
"start": 36008,
"end": 36414
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.lm_head = DebertaV2LMPredictionHead(config)
# note that the input embeddings must be passed as an argument
def forward(self, sequence_output, word_embeddings):
prediction_scores = self.lm_head(sequence_output, word_embeddings)
return prediction_scores
@auto_docstring
| DebertaV2OnlyMLMHead |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 92950,
"end": 94424
} | class ____(BaseModel):
c: float
"""
)
# All validation
keys_map, schema = models_json_schema(
[(module.ModelOne, 'validation'), (module.ModelTwo, 'validation'), (module.NestedModel, 'validation')]
)
model_names = set(schema['$defs'].keys())
expected_model_names = {
'ModelOne',
'ModelTwo',
f'{module.__name__}__ModelOne__NestedModel',
f'{module.__name__}__ModelTwo__NestedModel',
f'{module.__name__}__NestedModel',
}
assert model_names == expected_model_names
# Validation + serialization
keys_map, schema = models_json_schema(
[
(module.ModelOne, 'validation'),
(module.ModelTwo, 'validation'),
(module.NestedModel, 'validation'),
(module.ModelOne, 'serialization'),
(module.ModelTwo, 'serialization'),
(module.NestedModel, 'serialization'),
]
)
model_names = set(schema['$defs'].keys())
expected_model_names = {
'ModelOne',
'ModelTwo',
f'{module.__name__}__ModelOne__NestedModel',
f'{module.__name__}__ModelTwo__NestedModel',
f'{module.__name__}__NestedModel',
}
assert model_names == expected_model_names
def test_multiple_models_with_same_name_different_input_output(create_module):
module = create_module(
# language=Python
"""
from decimal import Decimal
from pydantic import BaseModel
| NestedModel |
python | huggingface__transformers | src/transformers/models/ernie/modular_ernie.py | {
"start": 4874,
"end": 4932
} | class ____(BertCrossAttention):
pass
| ErnieCrossAttention |
python | tiangolo__fastapi | scripts/notify_translations.py | {
"start": 3086,
"end": 3166
} | class ____(BaseModel):
repository: AllDiscussionsRepository
| AllDiscussionsData |
python | Textualize__textual | docs/examples/widgets/link.py | {
"start": 78,
"end": 435
} | class ____(App):
AUTO_FOCUS = None
CSS = """
Screen {
align: center middle;
}
"""
def compose(self) -> ComposeResult:
yield Link(
"Go to textualize.io",
url="https://textualize.io",
tooltip="Click me",
)
if __name__ == "__main__":
app = LabelApp()
app.run()
| LabelApp |
python | ray-project__ray | python/ray/dag/tests/experimental/test_execution_schedule.py | {
"start": 27117,
"end": 54232
} | class ____:
"""
Test whether `_generate_actor_to_execution_schedule` function generates the
correct execution schedule for each actor.
"""
def add_edge_between_read_compute_write(
self, operations: Dict[_DAGNodeOperationType, _DAGOperationGraphNode]
):
"""
Add edges between READ and COMPUTE, and between COMPUTE and WRITE operations
on the same actor.
Args:
operations: A dictionary where the key is the operation type and the value
is the operation node.
"""
assert len(operations) == 3
_add_edge(
operations[_DAGNodeOperationType.READ],
operations[_DAGNodeOperationType.COMPUTE],
)
_add_edge(
operations[_DAGNodeOperationType.COMPUTE],
operations[_DAGNodeOperationType.WRITE],
)
def add_data_dependeny(
self,
writer_operations: Dict[_DAGNodeOperationType, _DAGOperationGraphNode],
reader_operations: Dict[_DAGNodeOperationType, _DAGOperationGraphNode],
):
"""
Add a data dependency between the WRITE operation of the writer and the READ
operation of the reader.
Args:
writer_operations: A dictionary where the key is the operation type and the
value is the operation node of the writer.
reader_operations: A dictionary where the key is the operation type and the
value is the operation node of the reader.
"""
_add_edge(
writer_operations[_DAGNodeOperationType.WRITE],
reader_operations[_DAGNodeOperationType.READ],
)
def add_control_dependency(
self,
operations_1: Dict[_DAGNodeOperationType, _DAGOperationGraphNode],
operations_2: Dict[_DAGNodeOperationType, _DAGOperationGraphNode],
):
"""
Add a control dependency between the COMPUTE operation of the task with
bind_index i and the COMPUTE operation of the task with bind_index i+1
on the same actor.
Args:
operations_1: A dictionary where the key is the operation type and the value
is the operation node of the task with bind_index i.
operations_2: A dictionary where the key is the operation type and the value
is the operation node of the task with bind_index i+1.
"""
_add_edge(
operations_1[_DAGNodeOperationType.COMPUTE],
operations_2[_DAGNodeOperationType.COMPUTE],
)
def test_single_actor_1(self, monkeypatch):
"""
driver -> fake_actor.op (task_idx_1) -> fake_actor.op (task_idx_2) -> driver
Test the case where there is only one actor and no NCCL operations.
Because there is no NCCL operation, all operations with smaller
`bind_index` should be executed before the operations with larger
`bind_index` on the same actor.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor = ActorHandle("fake_actor")
task_idx_1, exec_task_idx_1 = 1, 0
task_idx_2, exec_task_idx_2 = 2, 1
graph = {
task_idx_1: generate_dag_graph_nodes(
exec_task_idx_1, task_idx_1, fake_actor
),
task_idx_2: generate_dag_graph_nodes(
exec_task_idx_2, task_idx_2, fake_actor
),
}
self.add_edge_between_read_compute_write(graph[task_idx_1])
self.add_edge_between_read_compute_write(graph[task_idx_2])
self.add_data_dependeny(graph[task_idx_1], graph[task_idx_2])
self.add_control_dependency(graph[task_idx_1], graph[task_idx_2])
actor_to_execution_schedule = _generate_and_extract_execution_schedule(graph)
assert len(actor_to_execution_schedule) == 1
assert len(actor_to_execution_schedule[fake_actor]) == 6
assert actor_to_execution_schedule[fake_actor] == [
graph[task_idx_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2][_DAGNodeOperationType.WRITE].operation,
]
def test_single_actor_2(self, monkeypatch):
"""
driver -> fake_actor.op (task_idx_1) -> fake_actor.op (task_idx_2) -> driver
| |
-> fake_actor.op (task_idx_3) -
When the `dad_idx_1.WRITE` operation is picked, both `task_idx_2.READ` and
`task_idx_3.READ` operations should be zero in-degree. In this case, the one
with the smaller `bind_index` should be selected first. That is,
`task_idx_2.READ` should be selected first.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor = ActorHandle("fake_actor")
task_idx_1, exec_task_idx_1 = 1, 0
task_idx_2, exec_task_idx_2 = 2, 1
task_idx_3, exec_task_idx_3 = 3, 2
graph = {
task_idx_1: generate_dag_graph_nodes(
exec_task_idx_1, task_idx_1, fake_actor
),
task_idx_2: generate_dag_graph_nodes(
exec_task_idx_2, task_idx_2, fake_actor
),
task_idx_3: generate_dag_graph_nodes(
exec_task_idx_3, task_idx_3, fake_actor
),
}
self.add_edge_between_read_compute_write(graph[task_idx_1])
self.add_edge_between_read_compute_write(graph[task_idx_2])
self.add_edge_between_read_compute_write(graph[task_idx_3])
self.add_data_dependeny(graph[task_idx_1], graph[task_idx_2])
self.add_data_dependeny(graph[task_idx_1], graph[task_idx_3])
self.add_control_dependency(graph[task_idx_1], graph[task_idx_2])
self.add_control_dependency(graph[task_idx_2], graph[task_idx_3])
actor_to_execution_schedule = _generate_and_extract_execution_schedule(graph)
assert len(actor_to_execution_schedule) == 1
assert len(actor_to_execution_schedule[fake_actor]) == 9
assert actor_to_execution_schedule[fake_actor] == [
graph[task_idx_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_3][_DAGNodeOperationType.READ].operation,
graph[task_idx_3][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_3][_DAGNodeOperationType.WRITE].operation,
]
def test_two_actors_no_nccl(self, monkeypatch):
"""
driver -> actor_1.op (task_idx_1_1) -> actor_2.op (task_idx_2_2) -> driver
| |
-> actor_2.op (task_idx_2_1) -> actor_1.op (task_idx_1_2) -
Test the case where there are two actors and no NCCL operations.
Because there is no NCCL operation, all operations with smaller
`bind_index` should be executed before the operations with larger
`bind_index` on the same actor.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor_1 = ActorHandle("fake_actor_1")
task_idx_1_1, exec_task_idx_1_1 = 1, 0
task_idx_1_2, exec_task_idx_1_2 = 4, 1
fake_actor_2 = ActorHandle("fake_actor_2")
task_idx_2_1, exec_task_idx_2_1 = 2, 0
task_idx_2_2, exec_task_idx_2_2 = 3, 1
graph = {
task_idx_1_1: generate_dag_graph_nodes(
exec_task_idx_1_1, task_idx_1_1, fake_actor_1
),
task_idx_2_1: generate_dag_graph_nodes(
exec_task_idx_2_1, task_idx_2_1, fake_actor_2
),
task_idx_2_2: generate_dag_graph_nodes(
exec_task_idx_2_2, task_idx_2_2, fake_actor_2
),
task_idx_1_2: generate_dag_graph_nodes(
exec_task_idx_1_2, task_idx_1_2, fake_actor_1
),
}
self.add_edge_between_read_compute_write(graph[task_idx_1_1])
self.add_edge_between_read_compute_write(graph[task_idx_1_2])
self.add_edge_between_read_compute_write(graph[task_idx_2_1])
self.add_edge_between_read_compute_write(graph[task_idx_2_2])
self.add_data_dependeny(graph[task_idx_1_1], graph[task_idx_2_2])
self.add_data_dependeny(graph[task_idx_2_1], graph[task_idx_1_2])
self.add_control_dependency(graph[task_idx_1_1], graph[task_idx_1_2])
self.add_control_dependency(graph[task_idx_2_1], graph[task_idx_2_2])
actor_to_execution_schedule = _generate_and_extract_execution_schedule(graph)
assert len(actor_to_execution_schedule) == 2
assert len(actor_to_execution_schedule[fake_actor_1]) == 6
assert len(actor_to_execution_schedule[fake_actor_2]) == 6
assert actor_to_execution_schedule[fake_actor_1] == [
graph[task_idx_1_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.WRITE].operation,
]
assert actor_to_execution_schedule[fake_actor_2] == [
graph[task_idx_2_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_2][_DAGNodeOperationType.WRITE].operation,
]
def test_two_actors_with_nccl(self, monkeypatch):
"""
driver -> actor_1.op (task_idx_1_1) -> actor_2.op (task_idx_2_2) -> driver
| |
-> actor_2.op (task_idx_2_1) -> actor_1.op (task_idx_1_2) -
In this test, the communication between fake_actor_1 and fake_actor_2 is done
using NCCL. When the task_idx_1.WRITE operation is picked, the task_idx_2.READ
operation is also added to the execution schedule because of the NCCL operation.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
fake_actor_1 = ActorHandle("fake_actor_1")
task_idx_1_1, exec_task_idx_1_1 = 1, 0
task_idx_1_2, exec_task_idx_1_2 = 4, 1
fake_actor_2 = ActorHandle("fake_actor_2")
task_idx_2_1, exec_task_idx_2_1 = 2, 0
task_idx_2_2, exec_task_idx_2_2 = 3, 1
graph = {
task_idx_1_1: generate_dag_graph_nodes(
exec_task_idx_1_1,
task_idx_1_1,
fake_actor_1,
requires_nccl_write=True,
),
task_idx_2_1: generate_dag_graph_nodes(
exec_task_idx_2_1,
task_idx_2_1,
fake_actor_2,
requires_nccl_write=True,
),
task_idx_2_2: generate_dag_graph_nodes(
exec_task_idx_2_2,
task_idx_2_2,
fake_actor_2,
requires_nccl_read=True,
),
task_idx_1_2: generate_dag_graph_nodes(
exec_task_idx_1_2,
task_idx_1_2,
fake_actor_1,
requires_nccl_read=True,
),
}
set_sync_idxs_p2p(graph, task_idx_1_1, task_idx_2_2)
set_sync_idxs_p2p(graph, task_idx_2_1, task_idx_1_2)
self.add_edge_between_read_compute_write(graph[task_idx_1_1])
self.add_edge_between_read_compute_write(graph[task_idx_1_2])
self.add_edge_between_read_compute_write(graph[task_idx_2_1])
self.add_edge_between_read_compute_write(graph[task_idx_2_2])
self.add_data_dependeny(graph[task_idx_1_1], graph[task_idx_2_2])
self.add_data_dependeny(graph[task_idx_2_1], graph[task_idx_1_2])
self.add_control_dependency(graph[task_idx_1_1], graph[task_idx_1_2])
self.add_control_dependency(graph[task_idx_2_1], graph[task_idx_2_2])
actor_to_execution_schedule = _generate_and_extract_execution_schedule(graph)
assert len(actor_to_execution_schedule) == 2
assert len(actor_to_execution_schedule[fake_actor_1]) == 6
assert len(actor_to_execution_schedule[fake_actor_2]) == 6
assert actor_to_execution_schedule[fake_actor_1] == [
graph[task_idx_1_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.WRITE].operation,
]
assert actor_to_execution_schedule[fake_actor_2] == [
# `actor_2.task_idx_2_2.READ` (P2P recv) is scheduled together with
# `actor_1.task_idx_1_1.WRITE` (P2P send).
graph[task_idx_2_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_2][_DAGNodeOperationType.WRITE].operation,
]
def test_simulate_pp_2workers_2batches_1f1b_with_nccl(self, monkeypatch):
"""
This test simulates a simple 1F1B pipeline parallelism for training with
2 workers and 2 batches.
w1: fwd_b1 fwd_b2 bwd_b1 bwd_b2
w2: fwd_b1 bwd_b1 fwd_b2 bwd_b2
The communication between workers is done using NCCL. The communication
within the worker actor is done using IntraProcessChannel.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
worker_1 = ActorHandle("worker_1")
task_idx_1_1, exec_task_idx_1_1 = 1, 0
task_idx_1_2, exec_task_idx_1_2 = 2, 1
task_idx_1_3, exec_task_idx_1_3 = 3, 2
task_idx_1_4, exec_task_idx_1_4 = 4, 3
worker_2 = ActorHandle("worker_2")
task_idx_2_1, exec_task_idx_2_1 = 5, 0
task_idx_2_2, exec_task_idx_2_2 = 6, 1
task_idx_2_3, exec_task_idx_2_3 = 7, 2
task_idx_2_4, exec_task_idx_2_4 = 8, 3
graph = {
task_idx_1_1: generate_dag_graph_nodes(
exec_task_idx_1_1,
task_idx_1_1,
worker_1,
requires_nccl_write=True,
),
task_idx_1_2: generate_dag_graph_nodes(
exec_task_idx_1_2,
task_idx_1_2,
worker_1,
requires_nccl_write=True,
),
task_idx_1_3: generate_dag_graph_nodes(
exec_task_idx_1_3,
task_idx_1_3,
worker_1,
requires_nccl_read=True,
),
task_idx_1_4: generate_dag_graph_nodes(
exec_task_idx_1_4,
task_idx_1_4,
worker_1,
requires_nccl_read=True,
),
task_idx_2_1: generate_dag_graph_nodes(
exec_task_idx_2_1,
task_idx_2_1,
worker_2,
requires_nccl_read=True,
),
task_idx_2_2: generate_dag_graph_nodes(
exec_task_idx_2_2,
task_idx_2_2,
worker_2,
requires_nccl_write=True,
),
task_idx_2_3: generate_dag_graph_nodes(
exec_task_idx_2_3,
task_idx_2_3,
worker_2,
requires_nccl_read=True,
),
task_idx_2_4: generate_dag_graph_nodes(
exec_task_idx_2_4,
task_idx_2_4,
worker_2,
requires_nccl_write=True,
),
}
set_sync_idxs_p2p(graph, task_idx_1_1, task_idx_2_1)
set_sync_idxs_p2p(graph, task_idx_1_2, task_idx_2_3)
set_sync_idxs_p2p(graph, task_idx_2_2, task_idx_1_3)
set_sync_idxs_p2p(graph, task_idx_2_4, task_idx_1_4)
self.add_edge_between_read_compute_write(graph[task_idx_1_1])
self.add_edge_between_read_compute_write(graph[task_idx_1_2])
self.add_edge_between_read_compute_write(graph[task_idx_1_3])
self.add_edge_between_read_compute_write(graph[task_idx_1_4])
self.add_edge_between_read_compute_write(graph[task_idx_2_1])
self.add_edge_between_read_compute_write(graph[task_idx_2_2])
self.add_edge_between_read_compute_write(graph[task_idx_2_3])
self.add_edge_between_read_compute_write(graph[task_idx_2_4])
self.add_data_dependeny(graph[task_idx_1_1], graph[task_idx_2_1])
self.add_data_dependeny(graph[task_idx_2_1], graph[task_idx_2_2])
self.add_data_dependeny(graph[task_idx_2_2], graph[task_idx_1_3])
self.add_data_dependeny(graph[task_idx_1_2], graph[task_idx_2_3])
self.add_data_dependeny(graph[task_idx_2_3], graph[task_idx_2_4])
self.add_data_dependeny(graph[task_idx_2_4], graph[task_idx_1_4])
self.add_control_dependency(graph[task_idx_1_1], graph[task_idx_1_2])
self.add_control_dependency(graph[task_idx_1_2], graph[task_idx_1_3])
self.add_control_dependency(graph[task_idx_1_3], graph[task_idx_1_4])
self.add_control_dependency(graph[task_idx_2_1], graph[task_idx_2_2])
self.add_control_dependency(graph[task_idx_2_2], graph[task_idx_2_3])
self.add_control_dependency(graph[task_idx_2_3], graph[task_idx_2_4])
actor_to_execution_schedule = _generate_and_extract_execution_schedule(graph)
assert len(actor_to_execution_schedule) == 2
assert len(actor_to_execution_schedule[worker_1]) == 12
assert len(actor_to_execution_schedule[worker_2]) == 12
assert actor_to_execution_schedule[worker_1] == [
graph[task_idx_1_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_3][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_3][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_3][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_4][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_4][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_4][_DAGNodeOperationType.WRITE].operation,
]
assert actor_to_execution_schedule[worker_2] == [
graph[task_idx_2_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_1][_DAGNodeOperationType.WRITE].operation,
# `actor_2.task_idx_2_3.READ` (P2P recv) is scheduled together with
# `actor_1.task_idx_1_2.WRITE` (P2P send).
graph[task_idx_2_3][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_2][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2_3][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_3][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2_4][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_4][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_4][_DAGNodeOperationType.WRITE].operation,
]
def test_simulate_pp_2workers_2batches_1f1b_no_nccl(self, monkeypatch):
"""
This test simulates a simple 1F1B pipeline parallelism for training with
2 workers and 2 batches.
w1: fwd_b1 fwd_b2 bwd_b1 bwd_b2
w2: fwd_b1 bwd_b1 fwd_b2 bwd_b2
Because there is no NCCL operation, all operations with smaller
`bind_index` should be executed before the operations with larger
`bind_index` on the same actor.
"""
monkeypatch.setattr(ActorHandle, "__init__", mock_actor_handle_init)
worker_1 = ActorHandle("worker_1")
task_idx_1_1, exec_task_idx_1_1 = 1, 0
task_idx_1_2, exec_task_idx_1_2 = 2, 1
task_idx_1_3, exec_task_idx_1_3 = 3, 2
task_idx_1_4, exec_task_idx_1_4 = 4, 3
worker_2 = ActorHandle("worker_2")
task_idx_2_1, exec_task_idx_2_1 = 5, 0
task_idx_2_2, exec_task_idx_2_2 = 6, 1
task_idx_2_3, exec_task_idx_2_3 = 7, 2
task_idx_2_4, exec_task_idx_2_4 = 8, 3
# No NCCL operation.
graph = {
task_idx_1_1: generate_dag_graph_nodes(
exec_task_idx_1_1, task_idx_1_1, worker_1
),
task_idx_1_2: generate_dag_graph_nodes(
exec_task_idx_1_2, task_idx_1_2, worker_1
),
task_idx_1_3: generate_dag_graph_nodes(
exec_task_idx_1_3, task_idx_1_3, worker_1
),
task_idx_1_4: generate_dag_graph_nodes(
exec_task_idx_1_4, task_idx_1_4, worker_1
),
task_idx_2_1: generate_dag_graph_nodes(
exec_task_idx_2_1, task_idx_2_1, worker_2
),
task_idx_2_2: generate_dag_graph_nodes(
exec_task_idx_2_2, task_idx_2_2, worker_2
),
task_idx_2_3: generate_dag_graph_nodes(
exec_task_idx_2_3, task_idx_2_3, worker_2
),
task_idx_2_4: generate_dag_graph_nodes(
exec_task_idx_2_4, task_idx_2_4, worker_2
),
}
self.add_edge_between_read_compute_write(graph[task_idx_1_1])
self.add_edge_between_read_compute_write(graph[task_idx_1_2])
self.add_edge_between_read_compute_write(graph[task_idx_1_3])
self.add_edge_between_read_compute_write(graph[task_idx_1_4])
self.add_edge_between_read_compute_write(graph[task_idx_2_1])
self.add_edge_between_read_compute_write(graph[task_idx_2_2])
self.add_edge_between_read_compute_write(graph[task_idx_2_3])
self.add_edge_between_read_compute_write(graph[task_idx_2_4])
self.add_data_dependeny(graph[task_idx_1_1], graph[task_idx_2_1])
self.add_data_dependeny(graph[task_idx_2_1], graph[task_idx_2_2])
self.add_data_dependeny(graph[task_idx_2_2], graph[task_idx_1_3])
self.add_data_dependeny(graph[task_idx_1_2], graph[task_idx_2_3])
self.add_data_dependeny(graph[task_idx_2_3], graph[task_idx_2_4])
self.add_data_dependeny(graph[task_idx_2_4], graph[task_idx_1_4])
self.add_control_dependency(graph[task_idx_1_1], graph[task_idx_1_2])
self.add_control_dependency(graph[task_idx_1_2], graph[task_idx_1_3])
self.add_control_dependency(graph[task_idx_1_3], graph[task_idx_1_4])
self.add_control_dependency(graph[task_idx_2_1], graph[task_idx_2_2])
self.add_control_dependency(graph[task_idx_2_2], graph[task_idx_2_3])
self.add_control_dependency(graph[task_idx_2_3], graph[task_idx_2_4])
actor_to_execution_schedule = _generate_and_extract_execution_schedule(graph)
assert len(actor_to_execution_schedule) == 2
assert len(actor_to_execution_schedule[worker_1]) == 12
assert len(actor_to_execution_schedule[worker_2]) == 12
assert actor_to_execution_schedule[worker_1] == [
graph[task_idx_1_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_2][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_2][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_3][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_3][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_3][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_1_4][_DAGNodeOperationType.READ].operation,
graph[task_idx_1_4][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_1_4][_DAGNodeOperationType.WRITE].operation,
]
assert actor_to_execution_schedule[worker_2] == [
graph[task_idx_2_1][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_1][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_1][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2_2][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_2][_DAGNodeOperationType.COMPUTE].operation,
# The order of `task_idx_2_3.READ` and `task_idx_2_2.WRITE` is important.
# It is different from the case where there is an NCCL operation.
graph[task_idx_2_2][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2_3][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_3][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_3][_DAGNodeOperationType.WRITE].operation,
graph[task_idx_2_4][_DAGNodeOperationType.READ].operation,
graph[task_idx_2_4][_DAGNodeOperationType.COMPUTE].operation,
graph[task_idx_2_4][_DAGNodeOperationType.WRITE].operation,
]
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
| TestGenerateActorToExecutionSchedule |
python | tensorflow__tensorflow | tensorflow/python/ops/array_ops_test.py | {
"start": 1234,
"end": 5845
} | class ____(test.TestCase):
def testGatherGradHasPartialStaticShape(self):
# Create a tensor with an unknown dim 1.
x = random_ops.random_normal([4, 10, 10])
x = array_ops.gather(
x, array_ops.reshape(array_ops.where_v2(x[0, :, 0] > 0.5), [-1]), axis=1
)
x.shape.assert_is_compatible_with([4, None, 10])
with backprop.GradientTape() as tape:
tape.watch(x)
a = array_ops.gather(array_ops.gather(x, [0, 1]), [0, 1])
grad_a = tape.gradient(a, x)
with backprop.GradientTape() as tape:
tape.watch(x)
b = array_ops.gather(array_ops.gather(x, [2, 3], axis=2), [0, 1])
grad_b = tape.gradient(b, x)
# We make sure that the representation of the shapes are correct; the shape
# equality check will always eval to false due to the shapes being partial.
grad_a.shape.assert_is_compatible_with([None, None, 10])
grad_b.shape.assert_is_compatible_with([4, None, 10])
def testReshapeShapeInference(self):
# Create a tensor with an unknown dim 1.
x = random_ops.random_normal([4, 10, 10])
x = array_ops.gather(
x, array_ops.reshape(array_ops.where_v2(x[0, :, 0] > 0.5), [-1]), axis=1
)
x.shape.assert_is_compatible_with([4, None, 10])
a = array_ops.reshape(x, array_ops.shape(x))
a.shape.assert_is_compatible_with([4, None, 10])
b = array_ops.reshape(x, math_ops.cast(array_ops.shape(x), dtypes.int64))
b.shape.assert_is_compatible_with([4, None, 10])
# We do not shape-infer across a tf.cast into anything that's not tf.int32
# or tf.int64, since they might end up mangling the shape.
c = array_ops.reshape(
x,
math_ops.cast(
math_ops.cast(array_ops.shape(x), dtypes.float32), dtypes.int32
),
)
c.shape.assert_is_compatible_with([None, None, None])
def testEmptyMeshgrid(self):
self.assertEqual(array_ops.meshgrid(), [])
def testSlicedPartialShapeInference(self):
@def_function.function(autograph=False)
def g(x):
return array_ops.zeros([array_ops.shape(x)[0]])
conc = g.get_concrete_function(tensor_spec.TensorSpec([10, None]))
self.assertAllEqual(conc.output_shapes.as_list(), [10])
def testIdentityOnSlicedPartialShapeInference(self):
@def_function.function(autograph=False)
def g(x):
return array_ops.zeros([array_ops.identity(array_ops.shape(x)[0])])
conc = g.get_concrete_function(tensor_spec.TensorSpec([10, None]))
self.assertAllEqual(conc.output_shapes.as_list(), [10])
@test_util.run_in_graph_and_eager_modes
def testParallelConcatFailsWithRankZeroShape(self):
op = array_ops.ParallelConcat
para = {"shape": 0, "values": [1]}
def func():
y = op(**para)
return y
with self.assertRaisesRegex(
Exception, "(rank|dimension) of .* must be greater than .* 0"
):
func()
@test_util.run_in_graph_and_eager_modes
def testUpperBoundValuesWrongRank(self):
# Used to cause a segfault, b/266336058
arg0 = array_ops.zeros([2, 3], dtype=dtypes.float32)
arg1 = array_ops.zeros([2, 1, 0], dtype=dtypes.float32)
with self.assertRaisesRegex(
Exception, "Shape must be rank 2 but is rank 3"
):
gen_array_ops.upper_bound(arg0, arg1)
def testLowerBoundValuesWrongRank(self):
# Used to cause a segfault, b/266336058
arg0 = array_ops.zeros([2, 3], dtype=dtypes.float32)
arg1 = array_ops.zeros([2, 1, 0], dtype=dtypes.float32)
with self.assertRaisesRegex(
Exception, "Shape must be rank 2 but is rank 3"
):
gen_array_ops.lower_bound(arg0, arg1)
def testUpperBoundInputsWrongRank(self):
# Used to cause a segfault, b/266336058
arg0 = array_ops.zeros([2, 1, 0], dtype=dtypes.float32)
arg1 = array_ops.zeros([2, 3], dtype=dtypes.float32)
with self.assertRaisesRegex(
Exception, "Shape must be rank 2 but is rank 3"
):
gen_array_ops.upper_bound(arg0, arg1)
def testLowerBoundInputsWrongRank(self):
# Used to cause a segfault, b/266336058
arg0 = array_ops.zeros([2, 1, 0], dtype=dtypes.float32)
arg1 = array_ops.zeros([2, 3], dtype=dtypes.float32)
with self.assertRaisesRegex(
Exception, "Shape must be rank 2 but is rank 3"
):
gen_array_ops.lower_bound(arg0, arg1)
def testShapeDefaultIn32(self):
# The tf_shape_default_int64 flag should NOT be set when this test runs
self.assertFalse(flags.config().tf_shape_default_int64.value())
s1 = array_ops.shape_v2(array_ops.zeros([1, 2]))
self.assertEqual(s1.dtype, dtypes.int32)
if __name__ == "__main__":
test.main()
| ArrayOpTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/job_snapshot.py | {
"start": 1767,
"end": 3333
} | class ____(RecordSerializer["JobSnap"]):
# v0
# v1:
# - lineage added
# v2:
# - graph_def_name
# v3:
# - metadata added
# v4:
# - add kwargs so that if future versions add new args, this version of deserialization will
# be able to ignore them. previously, new args would be passed to old versions and cause
# deserialization errors.
# v5:
# - run_tags added
def before_unpack(
self,
context,
unpacked_dict: Any,
) -> dict[str, Any]:
if unpacked_dict.get("graph_def_name") is None:
unpacked_dict["graph_def_name"] = unpacked_dict["name"]
if unpacked_dict.get("metadata") is None:
unpacked_dict["metadata"] = []
if unpacked_dict.get("lineage_snapshot") is None:
unpacked_dict["lineage_snapshot"] = None
if unpacked_dict.get("run_tags") is None:
unpacked_dict["run_tags"] = None
return unpacked_dict
# Note that unlike other serdes-whitelisted objects that hold metadata, the field here has always
# been called `metadata` instead of `metadata_entries`, so we don't need to rename the field for
# serialization.
@whitelist_for_serdes(
storage_name="PipelineSnapshot",
serializer=JobSnapSerializer,
skip_when_empty_fields={"metadata"},
skip_when_none_fields={"run_tags", "owners"},
field_serializers={"metadata": MetadataFieldSerializer},
storage_field_names={"node_defs_snapshot": "solid_definitions_snapshot"},
)
@record_custom
| JobSnapSerializer |
python | django__django | django/middleware/locale.py | {
"start": 344,
"end": 3442
} | class ____(MiddlewareMixin):
"""
Parse a request and decide what translation object to install in the
current thread context. This allows pages to be dynamically translated to
the language the user desires (if the language is available).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
(
i18n_patterns_used,
prefixed_default_language,
) = is_language_prefix_patterns_used(urlconf)
language = translation.get_language_from_request(
request, check_path=i18n_patterns_used
)
language_from_path = translation.get_language_from_path(request.path_info)
if (
not language_from_path
and i18n_patterns_used
and not prefixed_default_language
):
language = settings.LANGUAGE_CODE
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
(
i18n_patterns_used,
prefixed_default_language,
) = is_language_prefix_patterns_used(urlconf)
if (
response.status_code == 404
and not language_from_path
and i18n_patterns_used
and prefixed_default_language
):
# Maybe the language code is missing in the URL? Try adding the
# language prefix and redirecting to that URL.
language_path = "/%s%s" % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
path_needs_slash = not path_valid and (
settings.APPEND_SLASH
and not language_path.endswith("/")
and is_valid_path("%s/" % language_path, urlconf)
)
if path_valid or path_needs_slash:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path(
force_append_slash=path_needs_slash
).replace(script_prefix, "%s%s/" % (script_prefix, language), 1)
# Redirect to the language-specific URL as detected by
# get_language_from_request(). HTTP caches may cache this
# redirect, so add the Vary header.
redirect = self.response_redirect_class(language_url)
patch_vary_headers(redirect, ("Accept-Language", "Cookie"))
return redirect
if not (i18n_patterns_used and language_from_path):
patch_vary_headers(response, ("Accept-Language",))
response.headers.setdefault("Content-Language", language)
return response
| LocaleMiddleware |
python | great-expectations__great_expectations | great_expectations/expectations/sql_tokens_and_types.py | {
"start": 2651,
"end": 5948
} | class ____(str, Enum):
SELECT = "SELECT"
CLUSTER = "CLUSTER"
ALTER = "ALTER"
DATABASE = "DATABASE"
TABLE = "TABLE"
VIEW = "VIEW"
FUNCTION = "FUNCTION"
DROP = "DROP"
REPAIR = "REPAIR"
TRUNCATE = "TRUNCATE"
USE = "USE"
INSERT = "INSERT"
LOAD = "LOAD"
OVERWRITE = "OVERWRITE"
DIRECTORY = "DIRECTORY"
BY = "BY"
DISTRIBUTE = "DISTRIBUTE"
HAVING = "HAVING"
GROUP = "GROUP"
JOIN = "JOIN"
LIKE = "LIKE"
ORDER = "ORDER"
SORT = "SORT"
TABLESAMPLE = "TABLESAMPLE"
WHERE = "WHERE"
CASE = "CASE"
PIVOT = "PIVOT"
LATERAL = "LATERAL"
TRANSFORM = "TRANSFORM"
EXPLAIN = "EXPLAIN"
ADD = "ADD"
FILE = "FILE"
JAR = "JAR"
CACHE = "CACHE"
CLEAR = "CLEAR"
ANALYZE = "ANALYZE"
DESCRIBE = "DESCRIBE"
QUERY = "QUERY"
LIST = "LIST"
REFRESH = "REFRESH"
RESET = "RESET"
SET = "SET"
SHOW = "SHOW"
COLUMNS = "COLUMNS"
DATABASES = "DATABASES"
FUNCTIONS = "FUNCTIONS"
PARTITIONS = "PARTITIONS"
EXTENDED = "EXTENDED"
TABLES = "TABLES"
TBLPROPERTIES = "TBLPROPERTIES"
VIEWS = "VIEWS"
UNCACHE = "UNCACHE"
EXCEPT = "EXCEPT"
MINUS = "MINUS"
INTERSECT = "INTERSECT"
UNION = "UNION"
ALL = "ALL"
FROM = "FROM"
DISTINCT = "DISTINCT"
WITH = "WITH"
AS = "AS"
MAX = "MAX"
VALUES = "VALUES"
GROUPING = "GROUPING"
SETS = "SETS"
ROLLUP = "ROLLUP"
CUBE = "CUBE"
MIN = "MIN"
COUNT = "COUNT"
SUM = "SUM"
AVG = "AVG"
FILTER = "FILTER"
FIRST = "FIRST"
IGNORE = "IGNORE"
LAST = "LAST"
NATURAL = "NATURAL"
INNER = "INNER"
CROSS = "CROSS"
LEFT = "LEFT"
OUTER = "OUTER"
SEMI = "SEMI"
RIGHT = "RIGHT"
FULL = "FULL"
ANTI = "ANTI"
ON = "ON"
USING = "USING"
NOT = "NOT"
ESCAPE = "ESCAPE"
LIMIT = "LIMIT"
LENGTH = "LENGTH"
ASTERISK = "*"
ASC = "ASC"
DESC = "DESC"
NULLS = "NULLS"
ROWS = "ROWS"
OUT = "OUT"
OF = "OF"
RANGE = "RANGE"
EXPLODE = "EXPLODE"
EXPLODE_OUTER = "EXPLODE_OUTER"
INLINE = "INLINE"
INLINE_OUTER = "INLINE_OUTER"
POSEXPLODE = "POSEXPLODE"
POSEXPLODE_OUTER = "POSEXPLODE_OUTER"
STACK = "STACK"
JSON_TUPE = "JSON_TUPE"
PARSE_URL = "PARSE_URL"
RANK = "RANK"
DENSE_RANK = "DENSE_RANK"
PERCENT_RANK = "PERCENT_RANK"
NTILE = "NTILE"
ROW_NUMBER = "ROW_NUMBER"
RESPECT = "RESPECT"
CUME_DIST = "CUME_DIST"
LAG = "LAG"
LEAD = "LEAD"
NTH_VALUE = "NTH_VALUE"
FIRST_VALUE = "FIRST_VALUE"
LAST_VALUE = "LAST_VALUE"
PRECEDING = "PRECEDING"
BETWEEN = "BETWEEN"
AND = "AND"
CURRENT = "CURRENT"
FOLLOWING = "FOLLOWING"
UNBOUNDED = "UNBOUNDED"
WHEN = "WHEN"
THEN = "THEN"
ELSE = "ELSE"
FOR = "FOR"
valid_sql_tokens_and_types: Set[str] = set(
chain.from_iterable(
[
list(map(lambda i: i.upper(), ValidSqlTokens.__members__.keys())),
list(map(lambda i: i.upper(), ValidSqlAlchemyTypes.__members__.keys())),
list(map(lambda i: i.upper(), ValidSparkSqlTokens.__members__.keys())),
list(map(lambda i: i.upper(), ValidSparkSqlTypes.__members__.keys())),
]
)
)
| ValidSparkSqlTokens |
python | pytorch__pytorch | torchgen/_autoheuristic/pad_mm/train_regression_pad_mm.py | {
"start": 237,
"end": 714
} | class ____(AHTrainRegressionTree):
def __init__(self):
super().__init__()
def add_new_features(self, results):
ops = pad_mm_operations()
for op in ops:
results[op.name] = results.apply(op.func, axis=1)
added_categorical_features = [op.name for op in ops if op.is_categorical]
return (results, added_categorical_features)
if __name__ == "__main__":
train = AHTrainPadMM()
train.generate_heuristic()
| AHTrainPadMM |
python | PrefectHQ__prefect | tests/utilities/test_collections.py | {
"start": 518,
"end": 596
} | class ____(AutoEnum):
RED = AutoEnum.auto()
BLUE = AutoEnum.auto()
| Color |
python | doocs__leetcode | solution/2000-2099/2061.Number of Spaces Cleaning Robot Cleaned/Solution.py | {
"start": 0,
"end": 605
} | class ____:
def numberOfCleanRooms(self, room: List[List[int]]) -> int:
def dfs(i, j, k):
if (i, j, k) in vis:
return
nonlocal ans
ans += room[i][j] == 0
room[i][j] = -1
vis.add((i, j, k))
x, y = i + dirs[k], j + dirs[k + 1]
if 0 <= x < len(room) and 0 <= y < len(room[0]) and room[x][y] != 1:
dfs(x, y, k)
else:
dfs(i, j, (k + 1) % 4)
vis = set()
dirs = (0, 1, 0, -1, 0)
ans = 0
dfs(0, 0, 0)
return ans
| Solution |
python | sympy__sympy | sympy/printing/tensorflow.py | {
"start": 472,
"end": 8161
} | class ____(ArrayPrinter, AbstractPythonCodePrinter):
"""
Tensorflow printer which handles vectorized piecewise functions,
logical operators, max/min, and relational operators.
"""
printmethod = "_tensorflowcode"
mapping = {
sympy.Abs: "tensorflow.math.abs",
sympy.sign: "tensorflow.math.sign",
# XXX May raise error for ints.
sympy.ceiling: "tensorflow.math.ceil",
sympy.floor: "tensorflow.math.floor",
sympy.log: "tensorflow.math.log",
sympy.exp: "tensorflow.math.exp",
Sqrt: "tensorflow.math.sqrt",
sympy.cos: "tensorflow.math.cos",
sympy.acos: "tensorflow.math.acos",
sympy.sin: "tensorflow.math.sin",
sympy.asin: "tensorflow.math.asin",
sympy.tan: "tensorflow.math.tan",
sympy.atan: "tensorflow.math.atan",
sympy.atan2: "tensorflow.math.atan2",
# XXX Also may give NaN for complex results.
sympy.cosh: "tensorflow.math.cosh",
sympy.acosh: "tensorflow.math.acosh",
sympy.sinh: "tensorflow.math.sinh",
sympy.asinh: "tensorflow.math.asinh",
sympy.tanh: "tensorflow.math.tanh",
sympy.atanh: "tensorflow.math.atanh",
sympy.re: "tensorflow.math.real",
sympy.im: "tensorflow.math.imag",
sympy.arg: "tensorflow.math.angle",
# XXX May raise error for ints and complexes
sympy.erf: "tensorflow.math.erf",
sympy.loggamma: "tensorflow.math.lgamma",
sympy.Eq: "tensorflow.math.equal",
sympy.Ne: "tensorflow.math.not_equal",
sympy.StrictGreaterThan: "tensorflow.math.greater",
sympy.StrictLessThan: "tensorflow.math.less",
sympy.LessThan: "tensorflow.math.less_equal",
sympy.GreaterThan: "tensorflow.math.greater_equal",
sympy.And: "tensorflow.math.logical_and",
sympy.Or: "tensorflow.math.logical_or",
sympy.Not: "tensorflow.math.logical_not",
sympy.Max: "tensorflow.math.maximum",
sympy.Min: "tensorflow.math.minimum",
# Matrices
sympy.MatAdd: "tensorflow.math.add",
sympy.HadamardProduct: "tensorflow.math.multiply",
sympy.Trace: "tensorflow.linalg.trace",
# XXX May raise error for integer matrices.
sympy.Determinant : "tensorflow.linalg.det",
}
_default_settings = dict(
AbstractPythonCodePrinter._default_settings,
tensorflow_version=None
)
def __init__(self, settings=None):
super().__init__(settings)
version = self._settings['tensorflow_version']
if version is None and tensorflow:
version = tensorflow.__version__
self.tensorflow_version = version
def _print_Function(self, expr):
op = self.mapping.get(type(expr), None)
if op is None:
return super()._print_Basic(expr)
children = [self._print(arg) for arg in expr.args]
if len(children) == 1:
return "%s(%s)" % (
self._module_format(op),
children[0]
)
else:
return self._expand_fold_binary_op(op, children)
_print_Expr = _print_Function
_print_Application = _print_Function
_print_MatrixExpr = _print_Function
# TODO: a better class structure would avoid this mess:
_print_Relational = _print_Function
_print_Not = _print_Function
_print_And = _print_Function
_print_Or = _print_Function
_print_HadamardProduct = _print_Function
_print_Trace = _print_Function
_print_Determinant = _print_Function
def _print_Inverse(self, expr):
op = self._module_format('tensorflow.linalg.inv')
return "{}({})".format(op, self._print(expr.arg))
def _print_Transpose(self, expr):
version = self.tensorflow_version
if version and version_tuple(version) < version_tuple('1.14'):
op = self._module_format('tensorflow.matrix_transpose')
else:
op = self._module_format('tensorflow.linalg.matrix_transpose')
return "{}({})".format(op, self._print(expr.arg))
def _print_Derivative(self, expr):
variables = expr.variables
if any(isinstance(i, Iterable) for i in variables):
raise NotImplementedError("derivation by multiple variables is not supported")
def unfold(expr, args):
if not args:
return self._print(expr)
return "%s(%s, %s)[0]" % (
self._module_format("tensorflow.gradients"),
unfold(expr, args[:-1]),
self._print(args[-1]),
)
return unfold(expr.expr, variables)
def _print_Piecewise(self, expr):
version = self.tensorflow_version
if version and version_tuple(version) < version_tuple('1.0'):
tensorflow_piecewise = "tensorflow.select"
else:
tensorflow_piecewise = "tensorflow.where"
from sympy.functions.elementary.piecewise import Piecewise
e, cond = expr.args[0].args
if len(expr.args) == 1:
return '{}({}, {}, {})'.format(
self._module_format(tensorflow_piecewise),
self._print(cond),
self._print(e),
0)
return '{}({}, {}, {})'.format(
self._module_format(tensorflow_piecewise),
self._print(cond),
self._print(e),
self._print(Piecewise(*expr.args[1:])))
def _print_Pow(self, expr):
# XXX May raise error for
# int**float or int**complex or float**complex
base, exp = expr.args
if expr.exp == S.Half:
return "{}({})".format(
self._module_format("tensorflow.math.sqrt"), self._print(base))
return "{}({}, {})".format(
self._module_format("tensorflow.math.pow"),
self._print(base), self._print(exp))
def _print_MatrixBase(self, expr):
tensorflow_f = "tensorflow.Variable" if expr.free_symbols else "tensorflow.constant"
data = "["+", ".join(["["+", ".join([self._print(j) for j in i])+"]" for i in expr.tolist()])+"]"
return "%s(%s)" % (
self._module_format(tensorflow_f),
data,
)
def _print_MatMul(self, expr):
from sympy.matrices.expressions import MatrixExpr
mat_args = [arg for arg in expr.args if isinstance(arg, MatrixExpr)]
args = [arg for arg in expr.args if arg not in mat_args]
if args:
return "%s*%s" % (
self.parenthesize(Mul.fromiter(args), PRECEDENCE["Mul"]),
self._expand_fold_binary_op(
"tensorflow.linalg.matmul", mat_args)
)
else:
return self._expand_fold_binary_op(
"tensorflow.linalg.matmul", mat_args)
def _print_MatPow(self, expr):
return self._expand_fold_binary_op(
"tensorflow.linalg.matmul", [expr.base]*expr.exp)
def _print_CodeBlock(self, expr):
# TODO: is this necessary?
ret = []
for subexpr in expr.args:
ret.append(self._print(subexpr))
return "\n".join(ret)
def _print_isnan(self, exp):
return f'tensorflow.math.is_nan({self._print(*exp.args)})'
def _print_isinf(self, exp):
return f'tensorflow.math.is_inf({self._print(*exp.args)})'
_module = "tensorflow"
_einsum = "linalg.einsum"
_add = "math.add"
_transpose = "transpose"
_ones = "ones"
_zeros = "zeros"
def tensorflow_code(expr, **settings):
printer = TensorflowPrinter(settings)
return printer.doprint(expr)
| TensorflowPrinter |
python | facebook__pyre-check | client/json_rpc.py | {
"start": 1683,
"end": 1852
} | class ____(JSONRPCException):
"""
The method does not exist / is not available.
"""
def error_code(self) -> int:
return -32601
| MethodNotFoundError |
python | falconry__falcon | falcon/errors.py | {
"start": 105818,
"end": 107801
} | class ____(MediaMalformedError):
"""Represents a multipart form parsing error.
This error may refer to a malformed or truncated form, usage of deprecated
or unsupported features, or form parameters exceeding limits configured in
:class:`~.media.multipart.MultipartParseOptions`.
:class:`MultipartParseError` instances raised in this module always include
a short human-readable description of the error.
The cause of this exception, if any, is stored in the ``__cause__`` attribute
using the "raise ... from" form when raising.
Args:
source_error (Exception): The source exception that was the cause of this one.
"""
# NOTE(caselit): remove the description @property in MediaMalformedError
description = None
def __init__(
self,
*,
description: str | None = None,
**kwargs: HeaderArg | HTTPErrorKeywordArguments,
) -> None:
HTTPBadRequest.__init__(
self,
title='Malformed multipart/form-data request media',
description=description,
**kwargs, # type: ignore[arg-type]
)
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def _load_headers(headers: HeaderArg | None) -> Headers:
"""Transform the headers to dict."""
if headers is None:
return {}
if isinstance(headers, dict):
return headers
return dict(headers)
def _parse_retry_after(
headers: HeaderArg | None,
retry_after: RetryAfter,
) -> HeaderArg | None:
"""Set the Retry-After to the headers when required."""
if retry_after is None:
return headers
headers = _load_headers(headers)
if isinstance(retry_after, datetime):
headers['Retry-After'] = dt_to_http(retry_after)
else:
headers['Retry-After'] = str(retry_after)
return headers
| MultipartParseError |
python | plotly__plotly.py | plotly/io/_orca.py | {
"start": 21648,
"end": 50705
} | class ____(object):
"""
Class to store information about the current status of the orca server.
"""
_props = {
"state": "unvalidated", # or 'validated' or 'running'
"executable_list": None,
"version": None,
"pid": None,
"port": None,
"command": None,
}
@property
def state(self):
"""
A string representing the state of the orca server process
One of:
- unvalidated: The orca executable has not yet been searched for or
tested to make sure its valid.
- validated: The orca executable has been located and tested for
validity, but it is not running.
- running: The orca server process is currently running.
"""
return self._props["state"]
@property
def executable(self):
"""
If the `state` property is 'validated' or 'running', this property
contains the full path to the orca executable.
This path can be specified explicitly by setting the `executable`
property of the `plotly.io.orca.config` object.
This property will be None if the `state` is 'unvalidated'.
"""
executable_list = self._props["executable_list"]
if executable_list is None:
return None
else:
return " ".join(executable_list)
@property
def version(self):
"""
If the `state` property is 'validated' or 'running', this property
contains the version of the validated orca executable.
This property will be None if the `state` is 'unvalidated'.
"""
return self._props["version"]
@property
def pid(self):
"""
The process id of the orca server process, if any. This property
will be None if the `state` is not 'running'.
"""
return self._props["pid"]
@property
def port(self):
"""
The port number that the orca server process is listening to, if any.
This property will be None if the `state` is not 'running'.
This port can be specified explicitly by setting the `port`
property of the `plotly.io.orca.config` object.
"""
return self._props["port"]
@property
def command(self):
"""
The command arguments used to launch the running orca server, if any.
This property will be None if the `state` is not 'running'.
"""
return self._props["command"]
def __repr__(self):
"""
Display a nice representation of the current orca server status.
"""
return """\
orca status
-----------
state: {state}
executable: {executable}
version: {version}
port: {port}
pid: {pid}
command: {command}
""".format(
executable=self.executable,
version=self.version,
port=self.port,
pid=self.pid,
state=self.state,
command=self.command,
)
# Make status a singleton object
# ------------------------------
status = OrcaStatus()
del OrcaStatus
@contextmanager
def orca_env():
"""
Context manager to clear and restore environment variables that are
problematic for orca to function properly
NODE_OPTIONS: When this variable is set, orca <v1.2 will have a
segmentation fault due to an electron bug.
See: https://github.com/electron/electron/issues/12695
ELECTRON_RUN_AS_NODE: When this environment variable is set the call
to orca is transformed into a call to nodejs.
See https://github.com/plotly/orca/issues/149#issuecomment-443506732
"""
clear_env_vars = ["NODE_OPTIONS", "ELECTRON_RUN_AS_NODE", "LD_PRELOAD"]
orig_env_vars = {}
try:
# Clear and save
orig_env_vars.update(
{var: os.environ.pop(var) for var in clear_env_vars if var in os.environ}
)
yield
finally:
# Restore
for var, val in orig_env_vars.items():
os.environ[var] = val
# Public orca server interaction functions
# ----------------------------------------
def validate_executable():
"""
Attempt to find and validate the orca executable specified by the
`plotly.io.orca.config.executable` property.
If the `plotly.io.orca.status.state` property is 'validated' or 'running'
then this function does nothing.
How it works:
- First, it searches the system PATH for an executable that matches the
name or path specified in the `plotly.io.orca.config.executable`
property.
- Then it runs the executable with the `--help` flag to make sure
it's the plotly orca executable
- Then it runs the executable with the `--version` flag to check the
orca version.
If all of these steps are successful then the `status.state` property
is set to 'validated' and the `status.executable` and `status.version`
properties are populated
Returns
-------
None
"""
# Check state
# -----------
if status.state != "unvalidated":
# Nothing more to do
return
# Initialize error messages
# -------------------------
install_location_instructions = """\
If you haven't installed orca yet, you can do so using conda as follows:
$ conda install -c plotly plotly-orca
Alternatively, see other installation methods in the orca project README at
https://github.com/plotly/orca
After installation is complete, no further configuration should be needed.
If you have installed orca, then for some reason plotly.py was unable to
locate it. In this case, set the `plotly.io.orca.config.executable`
property to the full path of your orca executable. For example:
>>> plotly.io.orca.config.executable = '/path/to/orca'
After updating this executable property, try the export operation again.
If it is successful then you may want to save this configuration so that it
will be applied automatically in future sessions. You can do this as follows:
>>> plotly.io.orca.config.save()
If you're still having trouble, feel free to ask for help on the forums at
https://community.plot.ly/c/api/python
"""
# Try to find an executable
# -------------------------
# Search for executable name or path in config.executable
executable = which(config.executable)
path = os.environ.get("PATH", os.defpath)
formatted_path = path.replace(os.pathsep, "\n ")
if executable is None:
raise ValueError(
"""
The orca executable is required to export figures as static images,
but it could not be found on the system path.
Searched for executable '{executable}' on the following path:
{formatted_path}
{instructions}""".format(
executable=config.executable,
formatted_path=formatted_path,
instructions=install_location_instructions,
)
)
# Check if we should run with Xvfb
# --------------------------------
xvfb_args = [
"--auto-servernum",
"--server-args",
"-screen 0 640x480x24 +extension RANDR +extension GLX",
executable,
]
if config.use_xvfb:
# Use xvfb
xvfb_run_executable = which("xvfb-run")
if not xvfb_run_executable:
raise ValueError(
"""
The plotly.io.orca.config.use_xvfb property is set to True, but the
xvfb-run executable could not be found on the system path.
Searched for the executable 'xvfb-run' on the following path:
{formatted_path}""".format(formatted_path=formatted_path)
)
executable_list = [xvfb_run_executable] + xvfb_args
elif (
config.use_xvfb == "auto"
and sys.platform.startswith("linux")
and not os.environ.get("DISPLAY")
and which("xvfb-run")
):
# use_xvfb is 'auto', we're on linux without a display server,
# and xvfb-run is available. Use it.
xvfb_run_executable = which("xvfb-run")
executable_list = [xvfb_run_executable] + xvfb_args
else:
# Do not use xvfb
executable_list = [executable]
# Run executable with --help and see if it's our orca
# ---------------------------------------------------
invalid_executable_msg = """
The orca executable is required in order to export figures as static images,
but the executable that was found at '{executable}'
does not seem to be a valid plotly orca executable. Please refer to the end of
this message for details on what went wrong.
{instructions}""".format(
executable=executable, instructions=install_location_instructions
)
# ### Run with Popen so we get access to stdout and stderr
with orca_env():
p = subprocess.Popen(
executable_list + ["--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
help_result, help_error = p.communicate()
if p.returncode != 0:
err_msg = (
invalid_executable_msg
+ """
Here is the error that was returned by the command
$ {executable} --help
[Return code: {returncode}]
{err_msg}
""".format(
executable=" ".join(executable_list),
err_msg=help_error.decode("utf-8"),
returncode=p.returncode,
)
)
# Check for Linux without X installed.
if sys.platform.startswith("linux") and not os.environ.get("DISPLAY"):
err_msg += """\
Note: When used on Linux, orca requires an X11 display server, but none was
detected. Please install Xvfb and configure plotly.py to run orca using Xvfb
as follows:
>>> import plotly.io as pio
>>> pio.orca.config.use_xvfb = True
You can save this configuration for use in future sessions as follows:
>>> pio.orca.config.save()
See https://www.x.org/releases/X11R7.6/doc/man/man1/Xvfb.1.xhtml
for more info on Xvfb
"""
raise ValueError(err_msg)
if not help_result:
raise ValueError(
invalid_executable_msg
+ """
The error encountered is that no output was returned by the command
$ {executable} --help
""".format(executable=" ".join(executable_list))
)
if "Plotly's image-exporting utilities" not in help_result.decode("utf-8"):
raise ValueError(
invalid_executable_msg
+ """
The error encountered is that unexpected output was returned by the command
$ {executable} --help
{help_result}
""".format(executable=" ".join(executable_list), help_result=help_result)
)
# Get orca version
# ----------------
# ### Run with Popen so we get access to stdout and stderr
with orca_env():
p = subprocess.Popen(
executable_list + ["--version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
version_result, version_error = p.communicate()
if p.returncode != 0:
raise ValueError(
invalid_executable_msg
+ """
An error occurred while trying to get the version of the orca executable.
Here is the command that plotly.py ran to request the version
$ {executable} --version
This command returned the following error:
[Return code: {returncode}]
{err_msg}
""".format(
executable=" ".join(executable_list),
err_msg=version_error.decode("utf-8"),
returncode=p.returncode,
)
)
if not version_result:
raise ValueError(
invalid_executable_msg
+ """
The error encountered is that no version was reported by the orca executable.
Here is the command that plotly.py ran to request the version:
$ {executable} --version
""".format(executable=" ".join(executable_list))
)
else:
version_result = version_result.decode()
status._props["executable_list"] = executable_list
status._props["version"] = version_result.strip()
status._props["state"] = "validated"
def reset_status():
"""
Shutdown the running orca server, if any, and reset the orca status
to unvalidated.
This command is only needed if the desired orca executable is changed
during an interactive session.
Returns
-------
None
"""
shutdown_server()
status._props["executable_list"] = None
status._props["version"] = None
status._props["state"] = "unvalidated"
# Initialze process control variables
# -----------------------------------
orca_lock = threading.Lock()
orca_state = {"proc": None, "shutdown_timer": None}
# Shutdown
# --------
# The @atexit.register annotation ensures that the shutdown function is
# is run when the Python process is terminated
@atexit.register
def cleanup():
shutdown_server()
def shutdown_server():
"""
Shutdown the running orca server process, if any
Returns
-------
None
"""
# Use double-check locking to make sure the properties of orca_state
# are updated consistently across threads.
if orca_state["proc"] is not None:
with orca_lock:
if orca_state["proc"] is not None:
# We use psutil to kill all child processes of the main orca
# process. This prevents any zombie processes from being
# left over, and it saves us from needing to write
# OS-specific process management code here.
parent = psutil.Process(orca_state["proc"].pid)
for child in parent.children(recursive=True):
try:
child.terminate()
except Exception:
# We tried, move on
pass
try:
# Kill parent process
orca_state["proc"].terminate()
# Wait for the process to shutdown
orca_state["proc"].wait()
except Exception:
# We tried, move on
pass
# Update our internal process management state
orca_state["proc"] = None
if orca_state["shutdown_timer"] is not None:
orca_state["shutdown_timer"].cancel()
orca_state["shutdown_timer"] = None
orca_state["port"] = None
# Update orca.status so the user has an accurate view
# of the state of the orca server
status._props["state"] = "validated"
status._props["pid"] = None
status._props["port"] = None
status._props["command"] = None
# Launch or get server
def ensure_server():
"""
Start an orca server if none is running. If a server is already running,
then reset the timeout countdown
Returns
-------
None
"""
# Validate psutil
if psutil is None:
raise ValueError(
"""\
Image generation requires the psutil package.
Install using pip:
$ pip install psutil
Install using conda:
$ conda install psutil
"""
)
# Validate requests
if not get_module("requests"):
raise ValueError(
"""\
Image generation requires the requests package.
Install using pip:
$ pip install requests
Install using conda:
$ conda install requests
"""
)
if not config.server_url:
# Validate orca executable only if server_url is not provided
if status.state == "unvalidated":
validate_executable()
# Acquire lock to make sure that we keep the properties of orca_state
# consistent across threads
with orca_lock:
# Cancel the current shutdown timer, if any
if orca_state["shutdown_timer"] is not None:
orca_state["shutdown_timer"].cancel()
# Start a new server process if none is active
if orca_state["proc"] is None:
# Determine server port
if config.port is None:
orca_state["port"] = find_open_port()
else:
orca_state["port"] = config.port
# Build orca command list
cmd_list = status._props["executable_list"] + [
"serve",
"-p",
str(orca_state["port"]),
"--plotly",
config.plotlyjs,
"--graph-only",
]
if config.topojson:
cmd_list.extend(["--topojson", config.topojson])
if config.mathjax:
cmd_list.extend(["--mathjax", config.mathjax])
if config.mapbox_access_token:
cmd_list.extend(
["--mapbox-access-token", config.mapbox_access_token]
)
# Create subprocess that launches the orca server on the
# specified port.
DEVNULL = open(os.devnull, "wb")
with orca_env():
stderr = DEVNULL if "CI" in os.environ else None # fix for CI
orca_state["proc"] = subprocess.Popen(
cmd_list, stdout=DEVNULL, stderr=stderr
)
# Update orca.status so the user has an accurate view
# of the state of the orca server
status._props["state"] = "running"
status._props["pid"] = orca_state["proc"].pid
status._props["port"] = orca_state["port"]
status._props["command"] = cmd_list
# Create new shutdown timer if a timeout was specified
if config.timeout is not None:
t = threading.Timer(config.timeout, shutdown_server)
# Make it a daemon thread so that exit won't wait for timer to
# complete
t.daemon = True
t.start()
orca_state["shutdown_timer"] = t
@retry(min_wait=5, max_wait=10, max_delay=60000)
def request_image_with_retrying(**kwargs):
"""
Helper method to perform an image request to a running orca server process
with retrying logic.
"""
from requests import post
from plotly.io.json import to_json_plotly
if config.server_url:
server_url = config.server_url
else:
server_url = "http://{hostname}:{port}".format(
hostname="localhost", port=orca_state["port"]
)
request_params = {k: v for k, v in kwargs.items() if v is not None}
json_str = to_json_plotly(request_params)
response = post(server_url + "/", data=json_str)
if response.status_code == 522:
# On "522: client socket timeout", return server and keep trying
shutdown_server()
ensure_server()
raise OSError("522: client socket timeout")
return response
def to_image(fig, format=None, width=None, height=None, scale=None, validate=True):
"""
Convert a figure to a static image bytes string
Parameters
----------
fig:
Figure object or dict representing a figure
format: str or None
The desired image format. One of
- 'png'
- 'jpg' or 'jpeg'
- 'webp'
- 'svg'
- 'pdf'
- 'eps' (Requires the poppler library to be installed)
If not specified, will default to `plotly.io.config.default_format`
width: int or None
The width of the exported image in layout pixels. If the `scale`
property is 1.0, this will also be the width of the exported image
in physical pixels.
If not specified, will default to `plotly.io.config.default_width`
height: int or None
The height of the exported image in layout pixels. If the `scale`
property is 1.0, this will also be the height of the exported image
in physical pixels.
If not specified, will default to `plotly.io.config.default_height`
scale: int or float or None
The scale factor to use when exporting the figure. A scale factor
larger than 1.0 will increase the image resolution with respect
to the figure's layout pixel dimensions. Whereas as scale factor of
less than 1.0 will decrease the image resolution.
If not specified, will default to `plotly.io.config.default_scale`
validate: bool
True if the figure should be validated before being converted to
an image, False otherwise.
Returns
-------
bytes
The image data
"""
# Make sure orca sever is running
# -------------------------------
ensure_server()
# Handle defaults
# ---------------
# Apply configuration defaults to unspecified arguments
if format is None:
format = config.default_format
format = validate_coerce_format(format)
if scale is None:
scale = config.default_scale
if width is None:
width = config.default_width
if height is None:
height = config.default_height
# Validate figure
# ---------------
fig_dict = validate_coerce_fig_to_dict(fig, validate)
# Request image from server
# -------------------------
try:
response = request_image_with_retrying(
figure=fig_dict, format=format, scale=scale, width=width, height=height
)
except OSError:
# Get current status string
status_str = repr(status)
if config.server_url:
raise ValueError(
"""
Plotly.py was unable to communicate with the orca server at {server_url}
Please check that the server is running and accessible.
""".format(server_url=config.server_url)
)
else:
# Check if the orca server process exists
pid_exists = psutil.pid_exists(status.pid)
# Raise error message based on whether the server process existed
if pid_exists:
raise ValueError(
"""
For some reason plotly.py was unable to communicate with the
local orca server process, even though the server process seems to be running.
Please review the process and connection information below:
{info}
""".format(info=status_str)
)
else:
# Reset the status so that if the user tries again, we'll try to
# start the server again
reset_status()
raise ValueError(
"""
For some reason the orca server process is no longer running.
Please review the process and connection information below:
{info}
plotly.py will attempt to start the local server process again the next time
an image export operation is performed.
""".format(info=status_str)
)
# Check response
# --------------
if response.status_code == 200:
# All good
return response.content
else:
# ### Something went wrong ###
err_message = """
The image request was rejected by the orca conversion utility
with the following error:
{status}: {msg}
""".format(status=response.status_code, msg=response.content.decode("utf-8"))
# ### Try to be helpful ###
# Status codes from /src/component/plotly-graph/constants.js in the
# orca code base.
# statusMsg: {
# 400: 'invalid or malformed request syntax',
# 522: client socket timeout
# 525: 'plotly.js error',
# 526: 'plotly.js version 1.11.0 or up required',
# 530: 'image conversion error'
# }
if response.status_code == 400 and isinstance(fig, dict) and not validate:
err_message += """
Try setting the `validate` argument to True to check for errors in the
figure specification"""
elif response.status_code == 525:
any_mapbox = any(
[
trace.get("type", None) == "scattermapbox"
for trace in fig_dict.get("data", [])
]
)
if any_mapbox and config.mapbox_access_token is None:
err_message += """
Exporting scattermapbox traces requires a mapbox access token.
Create a token in your mapbox account and then set it using:
>>> plotly.io.orca.config.mapbox_access_token = 'pk.abc...'
If you would like this token to be applied automatically in
future sessions, then save your orca configuration as follows:
>>> plotly.io.orca.config.save()
"""
elif response.status_code == 530 and format == "eps":
err_message += """
Exporting to EPS format requires the poppler library. You can install
poppler on MacOS or Linux with:
$ conda install poppler
Or, you can install it on MacOS using homebrew with:
$ brew install poppler
Or, you can install it on Linux using your distribution's package manager to
install the 'poppler-utils' package.
Unfortunately, we don't yet know of an easy way to install poppler on Windows.
"""
raise ValueError(err_message)
def write_image(
fig, file, format=None, scale=None, width=None, height=None, validate=True
):
"""
Convert a figure to a static image and write it to a file or writeable
object
Parameters
----------
fig:
Figure object or dict representing a figure
file: str or writeable
A string representing a local file path or a writeable object
(e.g. a pathlib.Path object or an open file descriptor)
format: str or None
The desired image format. One of
- 'png'
- 'jpg' or 'jpeg'
- 'webp'
- 'svg'
- 'pdf'
- 'eps' (Requires the poppler library to be installed)
If not specified and `file` is a string then this will default to the
file extension. If not specified and `file` is not a string then this
will default to `plotly.io.config.default_format`
width: int or None
The width of the exported image in layout pixels. If the `scale`
property is 1.0, this will also be the width of the exported image
in physical pixels.
If not specified, will default to `plotly.io.config.default_width`
height: int or None
The height of the exported image in layout pixels. If the `scale`
property is 1.0, this will also be the height of the exported image
in physical pixels.
If not specified, will default to `plotly.io.config.default_height`
scale: int or float or None
The scale factor to use when exporting the figure. A scale factor
larger than 1.0 will increase the image resolution with respect
to the figure's layout pixel dimensions. Whereas as scale factor of
less than 1.0 will decrease the image resolution.
If not specified, will default to `plotly.io.config.default_scale`
validate: bool
True if the figure should be validated before being converted to
an image, False otherwise.
Returns
-------
None
"""
# Try to cast `file` as a pathlib object `path`.
# ----------------------------------------------
if isinstance(file, str):
# Use the standard Path constructor to make a pathlib object.
path = Path(file)
elif isinstance(file, Path):
# `file` is already a Path object.
path = file
else:
# We could not make a Path object out of file. Either `file` is an open file
# descriptor with a `write()` method or it's an invalid object.
path = None
# Infer format if not specified
# -----------------------------
if path is not None and format is None:
ext = path.suffix
if ext:
format = ext.lstrip(".")
else:
raise ValueError(
"""
Cannot infer image type from output path '{file}'.
Please add a file extension or specify the type using the format parameter.
For example:
>>> import plotly.io as pio
>>> pio.write_image(fig, file_path, format='png')
""".format(file=file)
)
# Request image
# -------------
# Do this first so we don't create a file if image conversion fails
img_data = to_image(
fig, format=format, scale=scale, width=width, height=height, validate=validate
)
# Open file
# ---------
if path is None:
# We previously failed to make sense of `file` as a pathlib object.
# Attempt to write to `file` as an open file descriptor.
try:
file.write(img_data)
return
except AttributeError:
pass
raise ValueError(
"""
The 'file' argument '{file}' is not a string, pathlib.Path object, or file descriptor.
""".format(file=file)
)
else:
# We previously succeeded in interpreting `file` as a pathlib object.
# Now we can use `write_bytes()`.
path.write_bytes(img_data)
| OrcaStatus |
python | redis__redis-py | redis/multidb/client.py | {
"start": 949,
"end": 11254
} | class ____(RedisModuleCommands, CoreCommands):
"""
Client that operates on multiple logical Redis databases.
Should be used in Active-Active database setups.
"""
def __init__(self, config: MultiDbConfig):
self._databases = config.databases()
self._health_checks = (
config.default_health_checks()
if not config.health_checks
else config.health_checks
)
self._health_check_interval = config.health_check_interval
self._health_check_policy: HealthCheckPolicy = config.health_check_policy.value(
config.health_check_probes, config.health_check_probes_delay
)
self._failure_detectors = (
config.default_failure_detectors()
if not config.failure_detectors
else config.failure_detectors
)
self._failover_strategy = (
config.default_failover_strategy()
if config.failover_strategy is None
else config.failover_strategy
)
self._failover_strategy.set_databases(self._databases)
self._auto_fallback_interval = config.auto_fallback_interval
self._event_dispatcher = config.event_dispatcher
self._command_retry = config.command_retry
self._command_retry.update_supported_errors((ConnectionRefusedError,))
self.command_executor = DefaultCommandExecutor(
failure_detectors=self._failure_detectors,
databases=self._databases,
command_retry=self._command_retry,
failover_strategy=self._failover_strategy,
failover_attempts=config.failover_attempts,
failover_delay=config.failover_delay,
event_dispatcher=self._event_dispatcher,
auto_fallback_interval=self._auto_fallback_interval,
)
self.initialized = False
self._hc_lock = threading.RLock()
self._bg_scheduler = BackgroundScheduler()
self._config = config
def initialize(self):
"""
Perform initialization of databases to define their initial state.
"""
def raise_exception_on_failed_hc(error):
raise error
# Initial databases check to define initial state
self._check_databases_health(on_error=raise_exception_on_failed_hc)
# Starts recurring health checks on the background.
self._bg_scheduler.run_recurring(
self._health_check_interval,
self._check_databases_health,
)
is_active_db_found = False
for database, weight in self._databases:
# Set on state changed callback for each circuit.
database.circuit.on_state_changed(self._on_circuit_state_change_callback)
# Set states according to a weights and circuit state
if database.circuit.state == CBState.CLOSED and not is_active_db_found:
self.command_executor.active_database = database
is_active_db_found = True
if not is_active_db_found:
raise NoValidDatabaseException(
"Initial connection failed - no active database found"
)
self.initialized = True
def get_databases(self) -> Databases:
"""
Returns a sorted (by weight) list of all databases.
"""
return self._databases
def set_active_database(self, database: SyncDatabase) -> None:
"""
Promote one of the existing databases to become an active.
"""
exists = None
for existing_db, _ in self._databases:
if existing_db == database:
exists = True
break
if not exists:
raise ValueError("Given database is not a member of database list")
self._check_db_health(database)
if database.circuit.state == CBState.CLOSED:
highest_weighted_db, _ = self._databases.get_top_n(1)[0]
self.command_executor.active_database = database
return
raise NoValidDatabaseException(
"Cannot set active database, database is unhealthy"
)
def add_database(self, database: SyncDatabase):
"""
Adds a new database to the database list.
"""
for existing_db, _ in self._databases:
if existing_db == database:
raise ValueError("Given database already exists")
self._check_db_health(database)
highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
self._databases.add(database, database.weight)
self._change_active_database(database, highest_weighted_db)
def _change_active_database(
self, new_database: SyncDatabase, highest_weight_database: SyncDatabase
):
if (
new_database.weight > highest_weight_database.weight
and new_database.circuit.state == CBState.CLOSED
):
self.command_executor.active_database = new_database
def remove_database(self, database: Database):
"""
Removes a database from the database list.
"""
weight = self._databases.remove(database)
highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
if (
highest_weight <= weight
and highest_weighted_db.circuit.state == CBState.CLOSED
):
self.command_executor.active_database = highest_weighted_db
def update_database_weight(self, database: SyncDatabase, weight: float):
"""
Updates a database from the database list.
"""
exists = None
for existing_db, _ in self._databases:
if existing_db == database:
exists = True
break
if not exists:
raise ValueError("Given database is not a member of database list")
highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
self._databases.update_weight(database, weight)
database.weight = weight
self._change_active_database(database, highest_weighted_db)
def add_failure_detector(self, failure_detector: FailureDetector):
"""
Adds a new failure detector to the database.
"""
self._failure_detectors.append(failure_detector)
def add_health_check(self, healthcheck: HealthCheck):
"""
Adds a new health check to the database.
"""
with self._hc_lock:
self._health_checks.append(healthcheck)
def execute_command(self, *args, **options):
"""
Executes a single command and return its result.
"""
if not self.initialized:
self.initialize()
return self.command_executor.execute_command(*args, **options)
def pipeline(self):
"""
Enters into pipeline mode of the client.
"""
return Pipeline(self)
def transaction(self, func: Callable[["Pipeline"], None], *watches, **options):
"""
Executes callable as transaction.
"""
if not self.initialized:
self.initialize()
return self.command_executor.execute_transaction(func, *watches, *options)
def pubsub(self, **kwargs):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
if not self.initialized:
self.initialize()
return PubSub(self, **kwargs)
def _check_db_health(self, database: SyncDatabase) -> bool:
"""
Runs health checks on the given database until first failure.
"""
# Health check will setup circuit state
is_healthy = self._health_check_policy.execute(self._health_checks, database)
if not is_healthy:
if database.circuit.state != CBState.OPEN:
database.circuit.state = CBState.OPEN
return is_healthy
elif is_healthy and database.circuit.state != CBState.CLOSED:
database.circuit.state = CBState.CLOSED
return is_healthy
def _check_databases_health(self, on_error: Callable[[Exception], None] = None):
"""
Runs health checks as a recurring task.
Runs health checks against all databases.
"""
with ThreadPoolExecutor(max_workers=len(self._databases)) as executor:
# Submit all health checks
futures = {
executor.submit(self._check_db_health, database)
for database, _ in self._databases
}
try:
for future in as_completed(
futures, timeout=self._health_check_interval
):
try:
future.result()
except UnhealthyDatabaseException as e:
unhealthy_db = e.database
unhealthy_db.circuit.state = CBState.OPEN
logger.exception(
"Health check failed, due to exception",
exc_info=e.original_exception,
)
if on_error:
on_error(e.original_exception)
except TimeoutError:
raise TimeoutError(
"Health check execution exceeds health_check_interval"
)
def _on_circuit_state_change_callback(
self, circuit: CircuitBreaker, old_state: CBState, new_state: CBState
):
if new_state == CBState.HALF_OPEN:
self._check_db_health(circuit.database)
return
if old_state == CBState.CLOSED and new_state == CBState.OPEN:
self._bg_scheduler.run_once(
DEFAULT_GRACE_PERIOD, _half_open_circuit, circuit
)
def close(self):
"""
Closes the client and all its resources.
"""
if self._bg_scheduler:
self._bg_scheduler.stop()
if self.command_executor.active_database:
self.command_executor.active_database.client.close()
def _half_open_circuit(circuit: CircuitBreaker):
circuit.state = CBState.HALF_OPEN
| MultiDBClient |
python | ray-project__ray | python/ray/dashboard/utils.py | {
"start": 1408,
"end": 2364
} | class ____(abc.ABC):
def __init__(self, dashboard_agent):
"""
Initialize current module when DashboardAgent loading modules.
:param dashboard_agent: The DashboardAgent instance.
"""
self._dashboard_agent = dashboard_agent
self.session_name = dashboard_agent.session_name
@abc.abstractmethod
async def run(self, server):
"""
Run the module in an asyncio loop. An agent module can provide
servicers to the server.
:param server: Asyncio GRPC server, or None if ray is minimal.
"""
@staticmethod
@abc.abstractclassmethod
def is_minimal_module():
"""
Return True if the module is minimal, meaning it
should work with `pip install ray` that doesn't requires additional
dependencies.
"""
@property
def gcs_address(self):
return self._dashboard_agent.gcs_address
@dataclass
| DashboardAgentModule |
python | pypa__pipenv | pipenv/patched/pip/_internal/exceptions.py | {
"start": 11057,
"end": 11363
} | class ____(InstallationError):
"""Metadata is invalid."""
def __init__(self, ireq: "InstallRequirement", error: str) -> None:
self.ireq = ireq
self.error = error
def __str__(self) -> str:
return f"Requested {self.ireq} has invalid metadata: {self.error}"
| MetadataInvalid |
python | numpy__numpy | numpy/_core/tests/test_conversion_utils.py | {
"start": 5084,
"end": 5696
} | class ____(StringConverterTestCase):
""" Tests of PyArray_CastingConverter """
conv = mt.run_casting_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check("no", "NPY_NO_CASTING")
self._check("equiv", "NPY_EQUIV_CASTING")
self._check("safe", "NPY_SAFE_CASTING")
self._check("unsafe", "NPY_UNSAFE_CASTING")
self._check("same_kind", "NPY_SAME_KIND_CASTING")
def test_invalid(self):
# Currently, 'same_value' is supported only in ndarray.astype
self._check_value_error("same_value")
| TestCastingConverter |
python | sympy__sympy | sympy/functions/special/polynomials.py | {
"start": 22351,
"end": 23541
} | class ____(DefinedFunction):
r"""
``chebyshev_root(n, k)`` returns the $k$th root (indexed from zero) of
the $n$th Chebyshev polynomial of the first kind; that is, if
$0 \le k < n$, ``chebyshevt(n, chebyshevt_root(n, k)) == 0``.
Examples
========
>>> from sympy import chebyshevt, chebyshevt_root
>>> chebyshevt_root(3, 2)
-sqrt(3)/2
>>> chebyshevt(3, chebyshevt_root(3, 2))
0
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite, hermite_prob,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.hermite_prob_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
"""
@classmethod
def eval(cls, n, k):
if not ((0 <= k) and (k < n)):
raise ValueError("must have 0 <= k < n, "
"got k = %s and n = %s" % (k, n))
return cos(S.Pi*(2*k + 1)/(2*n))
| chebyshevt_root |
python | kamyu104__LeetCode-Solutions | Python/design-circular-deque.py | {
"start": 29,
"end": 2374
} | class ____(object):
def __init__(self, k):
"""
Initialize your data structure here. Set the size of the deque to be k.
:type k: int
"""
self.__start = 0
self.__size = 0
self.__buffer = [0] * k
def insertFront(self, value):
"""
Adds an item at the front of Deque. Return true if the operation is successful.
:type value: int
:rtype: bool
"""
if self.isFull():
return False
self.__start = (self.__start-1) % len(self.__buffer)
self.__buffer[self.__start] = value
self.__size += 1
return True
def insertLast(self, value):
"""
Adds an item at the rear of Deque. Return true if the operation is successful.
:type value: int
:rtype: bool
"""
if self.isFull():
return False
self.__buffer[(self.__start+self.__size) % len(self.__buffer)] = value
self.__size += 1
return True
def deleteFront(self):
"""
Deletes an item from the front of Deque. Return true if the operation is successful.
:rtype: bool
"""
if self.isEmpty():
return False
self.__start = (self.__start+1) % len(self.__buffer)
self.__size -= 1
return True
def deleteLast(self):
"""
Deletes an item from the rear of Deque. Return true if the operation is successful.
:rtype: bool
"""
if self.isEmpty():
return False
self.__size -= 1
return True
def getFront(self):
"""
Get the front item from the deque.
:rtype: int
"""
return -1 if self.isEmpty() else self.__buffer[self.__start]
def getRear(self):
"""
Get the last item from the deque.
:rtype: int
"""
return -1 if self.isEmpty() else self.__buffer[(self.__start+self.__size-1) % len(self.__buffer)]
def isEmpty(self):
"""
Checks whether the circular deque is empty or not.
:rtype: bool
"""
return self.__size == 0
def isFull(self):
"""
Checks whether the circular deque is full or not.
:rtype: bool
"""
return self.__size == len(self.__buffer)
| MyCircularDeque |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass3.py | {
"start": 1059,
"end": 1135
} | class ____(Protocol):
__match_args__ = ("x",)
x: int
@dataclass
| ProtoE |
python | huggingface__transformers | src/transformers/models/ovis2/processing_ovis2.py | {
"start": 1145,
"end": 7833
} | class ____(ProcessorMixin):
r"""
Constructs a Ovis2 processor which wraps Ovis2 image processor and a Qwen2 tokenizer into a single processor.
[`Ovis2Processor`] offers all the functionalities of [`Ovis2VideoProcessor`], [`Ovis2ImageProcessor`] and [`Qwen2TokenizerFast`]. See the
[`~Ovis2Processor.__call__`] and [`~Ovis2Processor.decode`] for more information.
Args:
image_processor ([`Ovis2ImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
image_seq_length (`int`, *optional*, defaults to 256):
The number of image tokens to be used for each image in the input.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
chat_template=None,
image_token="<image>",
image_seq_length=256,
**kwargs,
):
self.image_seq_length = image_seq_length
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
**kwargs: Unpack[Ovis2ProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
Ovis2ImageProcessor's [`~Ovis2ImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Ovis2ProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
image_inputs = {}
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
image_grids = image_inputs.pop("grids").tolist()
text = self._expand_image_tokens(text, image_grids)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
return BatchFeature(data={**text_inputs, **image_inputs})
def _expand_image_tokens(
self,
text: list[TextInput],
grids: list[list[int]],
):
processed_text = []
grid_index = 0
for sample in text:
while "<image>" in sample:
grid = grids[grid_index]
row, col = grid[0], grid[1]
placeholder = f"<IMG_START>{'<IMG_ATOM>' * self.image_seq_length}<IMG_GRID>"
if row * col > 1:
for r in range(row):
for c in range(col):
placeholder += f"{'<IMG_ATOM>' * self.image_seq_length}"
if c < col - 1:
placeholder += "<IMG_COL>"
if r < row - 1:
placeholder += "<IMG_ROW>"
placeholder += "<IMG_END>"
sample = sample.replace("<image>", placeholder, 1)
grid_index += 1
processed_text.append(sample)
return processed_text
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(tokenizer_input_names) + list(image_processor_input_names)
__all__ = ["Ovis2Processor"]
| Ovis2Processor |
python | huggingface__transformers | src/transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py | {
"start": 4936,
"end": 13351
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 512, "width": 512}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
crop_to_patches = True
min_patches = 1
max_patches = 12
valid_kwargs = Cohere2VisionFastImageProcessorKwargs
patch_size = 16
def __init__(self, **kwargs: Unpack[Cohere2VisionFastImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Cohere2VisionFastImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def crop_image_to_patches(
self,
images: "torch.Tensor",
min_patches: int,
max_patches: int,
use_thumbnail: bool = True,
patch_size: Optional[Union[tuple, int, dict]] = None,
interpolation: Optional["F.InterpolationMode"] = None,
):
"""
Crop the images to patches and return a list of cropped images.
The number of patches and their grid arrangement are determined by the original image size,
the target patch size and the minimum and maximum number of patches.
The aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.
Args:
images (`torch.Tensor`):
The images to be cropped.
min_patches (`int`):
The minimum number of patches to be extracted from the image.
max_patches (`int`):
The maximum number of patches to be extracted from the image.
use_thumbnail (`bool`, *optional*, defaults to `True`):
Whether to add a thumbnail image to the list of cropped patches.
patch_size (`int`, `tuple[int, int]`, `dict`, *optional*):
The size of the output patches.
The format of the image data. If `None`, the format is inferred from the input image.
Returns:
list[`PIL.Image.Image`] or list[np.ndarray]: The list of cropped images.
"""
patch_size_height, patch_size_width = patch_size.height, patch_size.width
original_height, original_width = images.shape[-2:]
# find the closest aspect ratio to the target
num_columns, num_rows = get_optimal_tiled_canvas(
(original_height, original_width), (patch_size_height, patch_size_width), min_patches, max_patches
)
# calculate the target width and height
target_width = patch_size_width * num_columns
target_height = patch_size_height * num_rows
num_blocks = num_columns * num_rows
# resize the image so that each patch is of patch_size
resized_image = self.resize(
images, SizeDict(height=target_height, width=target_width), interpolation=interpolation
)
# split the image into patches
processed_images = []
for i in range(num_blocks):
column = i % num_columns
row = i // num_columns
box = (
column * patch_size_width,
row * patch_size_height,
(column + 1) * patch_size_width,
(row + 1) * patch_size_height,
)
# split the image
patch_image = resized_image[..., box[1] : box[3], box[0] : box[2]]
processed_images.append(patch_image)
if use_thumbnail and len(processed_images) != 1:
thumbnail_img = self.resize(images, patch_size, interpolation=interpolation)
processed_images.append(thumbnail_img)
processed_images = torch.stack(processed_images, dim=0).transpose(0, 1).contiguous()
return processed_images
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
crop_to_patches: bool,
min_patches: int,
max_patches: int,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
if crop_to_patches:
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_images_grouped = {}
num_patches = {}
for shape, stacked_images in grouped_images.items():
stacked_images = self.crop_image_to_patches(
stacked_images,
min_patches,
max_patches,
patch_size=size,
interpolation=interpolation,
)
processed_images_grouped[shape] = stacked_images
num_patches[shape] = [stacked_images.shape[1]] * stacked_images.shape[0]
images = reorder_images(processed_images_grouped, grouped_images_index)
images = [image for images_list in images for image in images_list]
num_patches = reorder_images(num_patches, grouped_images_index)
else:
num_patches = [1] * len(images)
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(
data={"pixel_values": processed_images, "num_patches": num_patches}, tensor_type=return_tensors
)
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
"""
min_patches = images_kwargs.get("min_patches", self.min_patches)
max_patches = images_kwargs.get("max_patches", self.max_patches)
patch_size = images_kwargs.get("patch_size", self.size)
crop_to_patches = images_kwargs.get("crop_to_patches", self.crop_to_patches)
num_patches = 1
if crop_to_patches and max_patches > 1:
num_columns, num_rows = get_optimal_tiled_canvas(
(height, width), (patch_size["height"], patch_size["width"]), min_patches, max_patches
)
if num_columns * num_rows > 1:
num_patches += num_columns * num_rows
return num_patches
__all__ = ["Cohere2VisionImageProcessorFast"]
| Cohere2VisionImageProcessorFast |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/api_endpoints/test_role_and_permission_endpoint.py | {
"start": 7094,
"end": 8375
} | class ____(TestRoleEndpoint):
def test_should_response_200(self):
response = self.client.get("/fab/v1/permissions", environ_overrides={"REMOTE_USER": "test"})
actions = {i[0] for i in self.app.appbuilder.sm.get_all_permissions() if i}
assert response.status_code == 200
assert response.json["total_entries"] == len(actions)
returned_actions = {perm["name"] for perm in response.json["actions"]}
assert actions == returned_actions
def test_should_raises_401_unauthenticated(self):
response = self.client.get("/fab/v1/permissions")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"/fab/v1/permissions", environ_overrides={"REMOTE_USER": "test_no_permissions"}
)
assert response.status_code == 403
@pytest.mark.parametrize(
("set_auth_role_public", "expected_status_code"),
(("Public", 403), ("Admin", 200)),
indirect=["set_auth_role_public"],
)
def test_with_auth_role_public_set(self, set_auth_role_public, expected_status_code):
response = self.client.get("/fab/v1/permissions")
assert response.status_code == expected_status_code, response.json
| TestGetPermissionsEndpoint |
python | huggingface__transformers | tests/models/wav2vec2/test_tokenization_wav2vec2.py | {
"start": 1567,
"end": 14834
} | class ____(unittest.TestCase):
tokenizer_class = Wav2Vec2Tokenizer
@classmethod
def setUpClass(cls):
super().setUpClass()
vocab = "<pad> <s> </s> <unk> | E T A O N I H S R D L U M W C F G Y P B V K ' X J Q Z".split(" ")
vocab_tokens = dict(zip(vocab, range(len(vocab))))
cls.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
cls.tmpdirname = tempfile.mkdtemp()
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs):
kwargs.update(cls.special_tokens_map)
pretrained_name = pretrained_name or cls.tmpdirname
return Wav2Vec2Tokenizer.from_pretrained(pretrained_name, **kwargs)
def test_tokenizer_decode(self):
# TODO(PVP) - change to facebook
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77],
]
tokens = tokenizer.decode(sample_ids[0])
batch_tokens = tokenizer.batch_decode(sample_ids)
self.assertEqual(tokens, batch_tokens[0])
self.assertEqual(batch_tokens, ["HELLO<unk>", "BYE BYE<unk>"])
def test_tokenizer_decode_special(self):
# TODO(PVP) - change to facebook
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77],
]
sample_ids_2 = [
[11, 5, 5, 5, 5, 5, 15, 15, 15, tokenizer.pad_token_id, 15, 8, 98],
[
24,
22,
5,
tokenizer.pad_token_id,
tokenizer.pad_token_id,
tokenizer.pad_token_id,
tokenizer.word_delimiter_token_id,
24,
22,
5,
77,
tokenizer.word_delimiter_token_id,
],
]
batch_tokens = tokenizer.batch_decode(sample_ids)
batch_tokens_2 = tokenizer.batch_decode(sample_ids_2)
self.assertEqual(batch_tokens, batch_tokens_2)
self.assertEqual(batch_tokens, ["HELLO<unk>", "BYE BYE<unk>"])
def test_tokenizer_decode_added_tokens(self):
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
tokenizer.add_tokens(["!", "?"])
tokenizer.add_special_tokens({"cls_token": "$$$"})
sample_ids = [
[
11,
5,
15,
tokenizer.pad_token_id,
15,
8,
98,
32,
32,
33,
tokenizer.word_delimiter_token_id,
32,
32,
33,
34,
34,
],
[24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77, tokenizer.pad_token_id, 34, 34],
]
batch_tokens = tokenizer.batch_decode(sample_ids)
batch_tokens_2 = tokenizer.batch_decode(sample_ids, skip_special_tokens=True)
self.assertEqual(batch_tokens, ["HELLO<unk>!? !?$$$", "BYE BYE<unk>$$$"])
self.assertEqual(batch_tokens_2, ["HELO!? !?", "BYE BYE"])
def test_call(self):
# Tests that all call wrap to encode_plus and batch_encode_plus
tokenizer = self.get_tokenizer()
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test not batched input
encoded_sequences_1 = tokenizer(speech_inputs[0], return_tensors="np").input_values
encoded_sequences_2 = tokenizer(np_speech_inputs[0], return_tensors="np").input_values
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = tokenizer(speech_inputs, return_tensors="np").input_values
encoded_sequences_2 = tokenizer(np_speech_inputs, return_tensors="np").input_values
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
encoded_sequences_1 = tokenizer(speech_inputs, return_tensors="np").input_values
encoded_sequences_2 = tokenizer(np_speech_inputs, return_tensors="np").input_values
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def test_padding(self, max_length=50):
def _input_values_have_equal_length(input_values):
length = len(input_values[0])
for input_values_slice in input_values[1:]:
if len(input_values_slice) != length:
return False
return True
def _input_values_are_equal(input_values_1, input_values_2):
if len(input_values_1) != len(input_values_2):
return False
for input_values_slice_1, input_values_slice_2 in zip(input_values_1, input_values_2):
if not np.allclose(np.asarray(input_values_slice_1), np.asarray(input_values_slice_2), atol=1e-3):
return False
return True
tokenizer = self.get_tokenizer()
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
input_values_1 = tokenizer(speech_inputs).input_values
input_values_2 = tokenizer(speech_inputs, padding="longest").input_values
input_values_3 = tokenizer(speech_inputs, padding="longest", max_length=1600).input_values
self.assertFalse(_input_values_have_equal_length(input_values_1))
self.assertTrue(_input_values_have_equal_length(input_values_2))
self.assertTrue(_input_values_have_equal_length(input_values_3))
self.assertTrue(_input_values_are_equal(input_values_2, input_values_3))
self.assertTrue(len(input_values_1[0]) == 800)
self.assertTrue(len(input_values_2[0]) == 1200)
# padding should be 0.0
self.assertTrue(abs(sum(np.asarray(input_values_2[0])[800:])) < 1e-3)
self.assertTrue(abs(sum(np.asarray(input_values_2[1])[1000:])) < 1e-3)
input_values_4 = tokenizer(speech_inputs, padding="max_length").input_values
input_values_5 = tokenizer(speech_inputs, padding="max_length", max_length=1600).input_values
self.assertTrue(_input_values_are_equal(input_values_1, input_values_4))
self.assertEqual(input_values_5.shape, (3, 1600))
# padding should be 0.0
self.assertTrue(abs(sum(np.asarray(input_values_5[0])[800:1200])) < 1e-3)
input_values_6 = tokenizer(speech_inputs, pad_to_multiple_of=500).input_values
input_values_7 = tokenizer(speech_inputs, padding="longest", pad_to_multiple_of=500).input_values
input_values_8 = tokenizer(
speech_inputs, padding="max_length", pad_to_multiple_of=500, max_length=2400
).input_values
self.assertTrue(_input_values_are_equal(input_values_1, input_values_6))
self.assertEqual(input_values_7.shape, (3, 1500))
self.assertEqual(input_values_8.shape, (3, 2500))
# padding should be 0.0
self.assertTrue(abs(sum(np.asarray(input_values_7[0])[800:])) < 1e-3)
self.assertTrue(abs(sum(np.asarray(input_values_7[1])[1000:])) < 1e-3)
self.assertTrue(abs(sum(np.asarray(input_values_7[2])[1200:])) < 1e-3)
self.assertTrue(abs(sum(np.asarray(input_values_8[0])[800:])) < 1e-3)
self.assertTrue(abs(sum(np.asarray(input_values_8[1])[1000:])) < 1e-3)
self.assertTrue(abs(sum(np.asarray(input_values_8[2])[1200:])) < 1e-3)
def test_get_vocab(self):
tokenizer = self.get_tokenizer()
vocab_dict = tokenizer.get_vocab()
self.assertIsInstance(vocab_dict, dict)
self.assertGreaterEqual(len(tokenizer), len(vocab_dict))
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
tokenizer.add_tokens(["asdfasdfasdfasdf"])
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
def test_save_and_load_tokenizer(self):
tokenizer = self.get_tokenizer()
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_ids = [0, 1, 4, 8, 9, 0, 12]
before_tokens = tokenizer.decode(sample_ids)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.decode(sample_ids)
after_vocab = after_tokenizer.get_vocab()
self.assertEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
tokenizer = self.get_tokenizer()
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
before_len = len(tokenizer)
sample_ids = [0, 1, 4, 8, 9, 0, 12, before_len, before_len + 1, before_len + 2]
tokenizer.add_tokens(["?", "!"])
extra_special_tokens = tokenizer.extra_special_tokens
extra_special_tokens.append("&")
tokenizer.add_special_tokens(
{"extra_special_tokens": extra_special_tokens}, replace_extra_special_tokens=False
)
before_tokens = tokenizer.decode(sample_ids)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.decode(sample_ids)
after_vocab = after_tokenizer.get_vocab()
self.assertEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
self.assertTrue(len(tokenizer), before_len + 3)
self.assertTrue(len(tokenizer), len(after_tokenizer))
shutil.rmtree(tmpdirname)
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_zero_mean_unit_variance_normalization(self):
tokenizer = self.get_tokenizer(do_normalize=True)
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
processed = tokenizer(speech_inputs, padding="longest")
input_values = processed.input_values
def _check_zero_mean_unit_variance(input_vector):
self.assertTrue(np.abs(np.mean(input_vector)) < 1e-3)
self.assertTrue(np.abs(np.var(input_vector) - 1) < 1e-3)
_check_zero_mean_unit_variance(input_values[0, :800])
_check_zero_mean_unit_variance(input_values[1, :1000])
_check_zero_mean_unit_variance(input_values[2])
def test_return_attention_mask(self):
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
# default case -> no attention_mask is returned
tokenizer = self.get_tokenizer()
processed = tokenizer(speech_inputs)
self.assertNotIn("attention_mask", processed)
# wav2vec2-lv60 -> return attention_mask
tokenizer = self.get_tokenizer(return_attention_mask=True)
processed = tokenizer(speech_inputs, padding="longest")
self.assertIn("attention_mask", processed)
self.assertListEqual(list(processed.attention_mask.shape), list(processed.input_values.shape))
self.assertListEqual(processed.attention_mask.sum(-1).tolist(), [800, 1000, 1200])
@slow
@require_torch
def test_pretrained_checkpoints_are_set_correctly(self):
# this test makes sure that models that are using
# group norm don't have their tokenizer return the
# attention_mask
model_id = "facebook/wav2vec2-base-960h"
config = Wav2Vec2Config.from_pretrained(model_id)
tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_id)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(tokenizer.return_attention_mask, config.feat_extract_norm == "layer")
| Wav2Vec2TokenizerTest |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 643,
"end": 942
} | class ____(CookiecutterException):
"""
Exception for ambiguous project template directory.
Raised when Cookiecutter cannot determine which directory is the project
template, e.g. more than one dir appears to be a template dir.
"""
# unused locally
| UnknownTemplateDirException |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 17047,
"end": 17515
} | class ____(Benchmark):
params = ([10, 100, 1000, 5000, 10000], [False, True])
param_names = ['num_points', 'furthest_site']
def setup(self, num_points, furthest_site):
rng = np.random.default_rng(123)
self.points = rng.random((num_points, 3))
def time_voronoi_calculation(self, num_points, furthest_site):
"""Time conventional Voronoi diagram calculation."""
Voronoi(self.points, furthest_site=furthest_site)
| VoronoiBench |
python | bottlepy__bottle | bottle.py | {
"start": 139428,
"end": 140154
} | class ____(ServerAdapter):
""" Untested. Options:
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
| GeventServer |
python | plotly__plotly.py | plotly/graph_objs/barpolar/_unselected.py | {
"start": 233,
"end": 3367
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "barpolar"
_path_str = "barpolar.unselected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.barpolar.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.barpolar.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.barpolar.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.barpolar.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.barpolar.unselected.Marker
` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.barpolar.unselected.Textfo
nt` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.Unselected`
marker
:class:`plotly.graph_objects.barpolar.unselected.Marker
` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.barpolar.unselected.Textfo
nt` instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.barpolar.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Unselected |
python | astropy__astropy | astropy/units/core.py | {
"start": 60125,
"end": 66287
} | class ____(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(
self,
st: str | list[str] | tuple[list[str], list[str]],
doc: str | None = None,
format: Mapping[str, str] | None = None,
namespace: MutableMapping[str, object] | None = None,
) -> None:
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError("st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
self._format = {} if format is None else format
self.__doc__ = (
self._generate_doc() if doc is None else textwrap.fill(textwrap.dedent(doc))
)
self._inject(namespace)
def _generate_doc(self) -> str:
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return f"{names[1]} ({names[0]})"
else:
return names[0]
@deprecated(since="7.0", alternative="to_string()")
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._get_format_name(format)
def _get_format_name(self, format: str) -> str:
return self._format.get(format, self.name)
@property
def names(self) -> list[str]:
"""All the names associated with the unit."""
return self._names
@property
def name(self) -> str:
"""The canonical (short) name associated with the unit."""
return self._names[0]
@property
def aliases(self) -> list[str]:
"""The aliases (long) names for the unit."""
return self._names[1:]
@property
def short_names(self) -> list[str]:
"""All the short names associated with the unit."""
return self._short_names
@property
def long_names(self) -> list[str]:
"""All the long names associated with the unit."""
return self._long_names
def _inject(self, namespace: MutableMapping[str, object] | None = None) -> None:
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
nfkc_name = unicodedata.normalize("NFKC", name)
if nfkc_name in namespace and self != (obj := namespace[nfkc_name]):
msg = (
f"the namespace already uses the name {name!r} for {obj!r}"
if name == nfkc_name
else (
"the namespace already uses the NFKC normalized name "
f"{nfkc_name!r} for {obj!r}\n\nSee "
"https://docs.python.org/3/reference/lexical_analysis.html#identifiers "
"for more information."
)
)
raise ValueError(msg)
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
| NamedUnit |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_select.py | {
"start": 57176,
"end": 57754
} | class ____(AssertsCompiledSQL, fixtures.TablesTest):
__sparse_driver_backend__ = True
@testing.fails_if(testing.requires.supports_distinct_on)
def test_distinct_on(self):
with testing.expect_deprecated(
"Passing expression to ``distinct`` to generate "
):
stm = select("*").distinct(column("q")).select_from(table("foo"))
with testing.expect_deprecated(
"DISTINCT ON is currently supported only by the PostgreSQL "
):
self.assert_compile(stm, "SELECT DISTINCT * FROM foo")
| DistinctOnTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vision.py | {
"start": 9490,
"end": 10284
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.delete_product.return_value = {}
op = CloudVisionDeleteProductOperator(
location=LOCATION_TEST, product_id=PRODUCT_ID_TEST, task_id="id"
)
op.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_product.assert_called_once_with(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudVisionProductDelete |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 13676,
"end": 14625
} | class ____(fixtures.TestBase):
@combinations(util.immutabledict({1: 2, 3: 4}), util.FacadeDict({2: 3}))
def test_immutable(self, d):
calls = (
lambda: d.__delitem__(1),
lambda: d.__setitem__(2, 3),
lambda: d.__setattr__(2, 3),
d.clear,
lambda: d.setdefault(1, 3),
lambda: d.update({2: 4}),
)
if hasattr(d, "pop"):
calls += (lambda: d.pop(2), d.popitem)
for m in calls:
with expect_raises_message(TypeError, "object is immutable"):
m()
def test_readonly_properties(self):
d = util.ReadOnlyProperties({3: 4})
calls = (
lambda: d.__delitem__(1),
lambda: d.__setitem__(2, 3),
lambda: d.__setattr__(2, 3),
)
for m in calls:
with expect_raises_message(TypeError, "object is immutable"):
m()
| ImmutableTest |
python | django__django | tests/admin_views/admin.py | {
"start": 24290,
"end": 24493
} | class ____(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs["extra_context"] = {"show_delete": False}
return super().change_view(*args, **kwargs)
| UndeletableObjectAdmin |
python | huggingface__transformers | tests/models/edgetam/test_modeling_edgetam.py | {
"start": 1488,
"end": 2546
} | class ____:
def __init__(
self,
hidden_size=32,
input_image_size=128,
patch_size=16,
mask_input_channels=8,
num_point_embeddings=4,
hidden_act="gelu",
):
self.hidden_size = hidden_size
self.input_image_size = input_image_size
self.patch_size = patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
def get_config(self):
return EdgeTamPromptEncoderConfig(
image_size=self.input_image_size,
patch_size=self.patch_size,
mask_input_channels=self.mask_input_channels,
hidden_size=self.hidden_size,
num_point_embeddings=self.num_point_embeddings,
hidden_act=self.hidden_act,
)
def prepare_config_and_inputs(self):
dummy_points = floats_tensor([self.batch_size, 3, 2])
config = self.get_config()
return config, dummy_points
| EdgeTamPromptEncoderTester |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/no_self_use.py | {
"start": 1055,
"end": 1097
} | class ____:
def foo(self):
...
| A |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/__init__.py | {
"start": 812,
"end": 2250
} | class ____:
"""Class to document."""
def meth(self):
"""Function."""
def undocmeth(self):
pass
def skipmeth(self):
"""Method that should be skipped."""
def excludemeth(self):
"""Method that should be excluded."""
# should not be documented
skipattr = 'foo'
#: should be documented -- süß
attr = 'bar'
docattr = 'baz'
"""should likewise be documented -- süß"""
udocattr = 'quux'
"""should be documented as well - süß"""
# initialized to any class imported from another module
mdocattr = StringIO()
"""should be documented as well - süß"""
roger = _funky_classmethod('roger', 2, 3, 4)
moore = _funky_classmethod(
'moore', 9, 8, 7, docstring='moore(a, e, f) -> happiness'
)
@staticmethod
def b_staticmeth():
pass
@staticmethod
def a_staticmeth():
pass
def __init__(self, arg):
self.inst_attr_inline = None #: an inline documented instance attr
#: a documented instance attribute
self.inst_attr_comment = None
self.inst_attr_string = None
"""a documented instance attribute"""
self._private_inst_attr = None #: a private instance attribute
def __special1__(self): # NoQA: PLW3201
"""documented special method"""
def __special2__(self): # NoQA: PLW3201
# undocumented special method
pass
| Class |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/padding.py | {
"start": 1520,
"end": 2087
} | class ____(AsymmetricPadding):
name = "EME-OAEP"
def __init__(
self,
mgf: MGF,
algorithm: hashes.HashAlgorithm,
label: bytes | None,
):
if not isinstance(algorithm, hashes.HashAlgorithm):
raise TypeError("Expected instance of hashes.HashAlgorithm.")
self._mgf = mgf
self._algorithm = algorithm
self._label = label
@property
def algorithm(self) -> hashes.HashAlgorithm:
return self._algorithm
@property
def mgf(self) -> MGF:
return self._mgf
| OAEP |
python | getsentry__sentry | src/sentry/sentry_apps/utils/errors.py | {
"start": 332,
"end": 1771
} | class ____(Exception):
error_type: SentryAppErrorType
status_code: int
def __init__(
self,
message: str,
status_code: int | None = None,
public_context: dict[str, Any] | None = None,
webhook_context: dict[str, Any] | None = None,
) -> None:
self.status_code = status_code or self.status_code
# Info that gets sent only to the integrator via webhook
self.public_context = public_context or {}
# Info that gets sent to the end user via endpoint Response AND sent to integrator
self.webhook_context = webhook_context or {}
self.message = message
def to_public_dict(self) -> SentryAppPublicErrorBody:
error_body: SentryAppPublicErrorBody = {"detail": self.message}
if public_context := self.public_context:
error_body.update({"context": public_context})
return error_body
def response_from_exception(self) -> Response:
response: dict[str, Any] = {"detail": self.message}
if public_context := self.public_context:
response.update({"context": public_context})
return Response(response, status=self.status_code)
def __repr__(self) -> str:
return f"{type(self).__name__}: message={self.message} status_code={self.status_code} error_type={self.error_type}"
# Represents a user/client error that occured during a Sentry App process
| SentryAppBaseError |
python | coleifer__peewee | tests/keys.py | {
"start": 14402,
"end": 15989
} | class ____(ModelTestCase):
requires = [FK_A, FK_B]
def test_fk_to_non_pk_field(self):
a1 = FK_A.create(key='a1')
a2 = FK_A.create(key='a2')
b1 = FK_B.create(fk_a=a1)
b2 = FK_B.create(fk_a=a2)
args = (b1.fk_a, b1.fk_a_id, a1, a1.key)
for arg in args:
query = FK_B.select().where(FK_B.fk_a == arg)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."fk_a_id" FROM "fk_b" AS "t1" '
'WHERE ("t1"."fk_a_id" = ?)'), ['a1'])
b1_db = query.get()
self.assertEqual(b1_db.id, b1.id)
def test_fk_to_non_pk_insert_update(self):
a1 = FK_A.create(key='a1')
b1 = FK_B.create(fk_a=a1)
self.assertEqual(FK_B.select().where(FK_B.fk_a == a1).count(), 1)
exprs = (
{FK_B.fk_a: a1},
{'fk_a': a1},
{FK_B.fk_a: a1.key},
{'fk_a': a1.key})
for n, expr in enumerate(exprs, 2):
self.assertTrue(FK_B.insert(expr).execute())
self.assertEqual(FK_B.select().where(FK_B.fk_a == a1).count(), n)
a2 = FK_A.create(key='a2')
exprs = (
{FK_B.fk_a: a2},
{'fk_a': a2},
{FK_B.fk_a: a2.key},
{'fk_a': a2.key})
b_list = list(FK_B.select().where(FK_B.fk_a == a1))
for i, (b, expr) in enumerate(zip(b_list[1:], exprs), 1):
self.assertTrue(FK_B.update(expr).where(FK_B.id == b.id).execute())
self.assertEqual(FK_B.select().where(FK_B.fk_a == a2).count(), i)
| TestFKtoNonPKField |
python | getsentry__sentry | src/sentry/search/events/builder/errors.py | {
"start": 4078,
"end": 5386
} | class ____(ErrorsQueryBuilderMixin, DiscoverQueryBuilder):
def get_snql_query(self) -> Request:
self.validate_having_clause()
return Request(
dataset=self.dataset.value,
app_id="errors",
query=Query(
match=self.match,
select=self.columns,
array_join=self.array_join,
where=self.where,
having=self.having,
groupby=self.groupby,
orderby=self.orderby,
limit=self.limit,
offset=self.offset,
limitby=self.limitby,
),
flags=Flags(turbo=self.turbo),
tenant_ids=self.tenant_ids,
)
def add_conditions(self, conditions: list[Condition]) -> None:
"""
Override the base implementation to add entity data
"""
entity_key = get_entity_key_from_query_builder(self)
time_col = ENTITY_TIME_COLUMNS[entity_key]
entity = Entity(entity_key.value, alias="events")
new_conditions = []
for condition in conditions:
column = Column(time_col, entity=entity)
new_conditions.append(Condition(column, condition.op, condition.rhs))
self.where += new_conditions
| ErrorsQueryBuilder |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-points-from-grid-queries.py | {
"start": 110,
"end": 1248
} | class ____(object):
def maxPoints(self, grid, queries):
"""
:type grid: List[List[int]]
:type queries: List[int]
:rtype: List[int]
"""
directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]
min_heap = [(grid[0][0], 0, 0)]
lookup = [[False]*len(grid[0]) for _ in xrange(len(grid))]
lookup[0][0] = True
mx = 0
cnt = collections.Counter()
while min_heap:
curr, i, j = heapq.heappop(min_heap)
mx = max(mx, curr)
cnt[mx] += 1
for di, dj in directions:
ni, nj = i+di, j+dj
if not (0 <= ni < len(grid) and
0 <= nj < len(grid[0]) and
not lookup[ni][nj]):
continue
lookup[ni][nj] = True
heapq.heappush(min_heap, (grid[ni][nj], ni, nj))
vals = sorted(cnt.iterkeys())
prefix = [0]*(len(vals)+1)
for i in xrange(len(vals)):
prefix[i+1] += prefix[i]+cnt[vals[i]]
return map(lambda x: prefix[bisect.bisect_left(vals, x)], queries)
| Solution |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 17862,
"end": 19076
} | class ____(FitError):
"""
Raised when a solver fails to converge while fitting a distribution.
"""
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
| FitSolverError |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_oauth_tasks.py | {
"start": 1370,
"end": 7849
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.project = get(Project, users=[self.user])
self.version = get(Version, project=self.project)
self.socialaccount_gh = get(
SocialAccount,
user=self.user,
provider=GitHubOAuth2Adapter.provider_id,
)
self.socialaccount_ghapp = get(
SocialAccount,
user=self.user,
provider=GitHubAppProvider.id,
)
self.socialaccount_gl = get(
SocialAccount,
user=self.user,
provider=GitLabOAuth2Adapter.provider_id,
)
self.socialaccount_bb = get(
SocialAccount,
user=self.user,
provider=BitbucketOAuth2Adapter.provider_id,
)
@patch("readthedocs.oauth.services.githubapp.GitHubAppService.sync_user_access")
@patch("readthedocs.oauth.services.github.GitHubService.sync")
@patch("readthedocs.oauth.services.gitlab.GitLabService.sync")
@patch("readthedocs.oauth.services.bitbucket.BitbucketService.sync")
def test_sync_repository(self, sync_bb, sync_gl, sync_gh, sync_ghapp):
r = sync_remote_repositories(self.user.pk)
self.assertNotIn("error", r)
sync_bb.assert_called_once()
sync_gl.assert_called_once()
sync_gh.assert_called_once()
sync_ghapp.assert_called_once()
@patch("readthedocs.oauth.services.githubapp.GitHubAppService.sync_user_access")
@patch("readthedocs.oauth.services.github.GitHubService.sync")
@patch("readthedocs.oauth.services.gitlab.GitLabService.sync")
@patch("readthedocs.oauth.services.bitbucket.BitbucketService.sync")
def test_sync_repository_failsync(self, sync_bb, sync_gl, sync_gh, sync_ghapp):
sync_gh.side_effect = SyncServiceError
r = sync_remote_repositories(self.user.pk)
self.assertIn("GitHub", r["error"])
self.assertNotIn("GitLab", r["error"])
self.assertNotIn("Bitbucket", r["error"])
sync_bb.assert_called_once()
sync_gl.assert_called_once()
sync_gh.assert_called_once()
sync_ghapp.assert_called_once()
@patch("readthedocs.oauth.services.githubapp.GitHubAppService.sync_user_access")
@patch("readthedocs.oauth.services.github.GitHubService.sync")
@patch("readthedocs.oauth.services.gitlab.GitLabService.sync")
@patch("readthedocs.oauth.services.bitbucket.BitbucketService.sync")
def test_sync_repository_failsync_more_than_one(
self, sync_bb, sync_gl, sync_gh, sync_ghapp
):
sync_gh.side_effect = SyncServiceError
sync_bb.side_effect = SyncServiceError
r = sync_remote_repositories(self.user.pk)
self.assertIn("GitHub", r["error"])
self.assertIn("Bitbucket", r["error"])
self.assertNotIn("GitLab", r["error"])
sync_bb.assert_called_once()
sync_gl.assert_called_once()
sync_gh.assert_called_once()
sync_ghapp.assert_called_once()
@patch.object(GitHubService, "update_repository")
@patch.object(GitHubAppService, "update_repository")
@patch.object(GitLabService, "update_repository")
@patch.object(BitbucketService, "update_repository")
def test_sync_remote_repository_organizations_without_slugs(
self, update_repository_bb, update_repository_gl, update_repository_ghapp, update_repository_gh
):
organization = get(Organization)
get(
SSOIntegration,
provider=SSOIntegration.PROVIDER_ALLAUTH,
organization=organization,
)
get(
OrganizationOwner,
owner=self.user,
organization=organization,
)
gh_repository = get(
RemoteRepository,
full_name="org/repo",
vcs_provider=GITHUB,
)
gh_repository.get_remote_repository_relation(self.user, self.socialaccount_gh)
ghapp_installation = get(
GitHubAppInstallation,
installation_id=1111,
target_id=1111,
target_type=GitHubAccountType.USER,
)
ghapp_repository = get(
RemoteRepository,
full_name="org/repo",
vcs_provider=GITHUB_APP,
github_app_installation=ghapp_installation,
)
ghapp_repository.get_remote_repository_relation(self.user, self.socialaccount_ghapp)
gl_repository = get(
RemoteRepository,
full_name="org/repo",
vcs_provider=GITLAB,
)
gl_repository.get_remote_repository_relation(self.user, self.socialaccount_gl)
bb_repository = get(
RemoteRepository,
full_name="org/repo",
vcs_provider=BITBUCKET,
)
bb_repository.get_remote_repository_relation(self.user, self.socialaccount_bb)
remote_repositories = [
gh_repository,
ghapp_repository,
gl_repository,
bb_repository,
]
for remote_repository in remote_repositories:
project = get(Project, remote_repository=remote_repository)
organization.projects.add(project)
sync_remote_repositories_from_sso_organizations()
update_repository_gh.assert_called_once_with(gh_repository)
update_repository_gl.assert_called_once_with(gl_repository)
update_repository_bb.assert_called_once_with(bb_repository)
update_repository_ghapp.assert_not_called()
@patch("readthedocs.oauth.services.githubapp.GitHubAppService.sync_user_access")
@patch("readthedocs.oauth.services.github.GitHubService.sync")
@patch("readthedocs.oauth.services.gitlab.GitLabService.sync")
@patch("readthedocs.oauth.services.bitbucket.BitbucketService.sync")
def test_sync_dont_stop_if_one_service_account_of_same_type_fails(
self, sync_bb, sync_gl, sync_gh, sync_ghapp
):
get(
SocialAccount,
user=self.user,
provider=GitHubOAuth2Adapter.provider_id,
)
sync_gh.side_effect = SyncServiceError
r = sync_remote_repositories(self.user.pk)
assert "GitHub" in r["error"]
assert "Bitbucket" not in r["error"]
assert "GitLab" not in r["error"]
sync_bb.assert_called_once()
sync_gl.assert_called_once()
sync_ghapp.assert_called_once()
assert sync_gh.call_count == 2
| SyncRemoteRepositoriesTests |
python | numpy__numpy | numpy/matrixlib/tests/test_masked_matrix.py | {
"start": 346,
"end": 813
} | class ____(MaskedArray, np.matrix,):
def __new__(cls, data, mask=nomask):
mat = np.matrix(data)
_data = MaskedArray.__new__(cls, data=mat, mask=mask)
return _data
def __array_finalize__(self, obj):
np.matrix.__array_finalize__(self, obj)
MaskedArray.__array_finalize__(self, obj)
@property
def _series(self):
_view = self.view(MaskedArray)
_view._sharedmask = False
return _view
| MMatrix |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 42653,
"end": 43795
} | class ____(ASTExpression):
def __init__(self, typ: ASTType, expr: ASTExpression) -> None:
self.typ = typ
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTCastExpr):
return NotImplemented
return self.typ == other.typ and self.expr == other.expr
def __hash__(self) -> int:
return hash((self.typ, self.expr))
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.extend((
transform(self.typ),
')',
transform(self.expr),
))
return ''.join(res)
def get_id(self, version: int) -> str:
return 'cv' + self.typ.get_id(version) + self.expr.get_id(version)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
self.expr.describe_signature(signode, mode, env, symbol)
| ASTCastExpr |
python | pypa__hatch | tests/backend/builders/plugin/test_interface.py | {
"start": 5201,
"end": 14906
} | class ____:
@pytest.mark.requires_unix
def test_infinite_loop_prevention(self, temp_dir):
project_dir = temp_dir / "project"
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {"tool": {"hatch": {"build": {"include": ["foo", "README.md"]}}}}
builder = MockBuilder(str(project_dir), config=config)
(project_dir / "README.md").touch()
foo = project_dir / "foo"
foo.ensure_dir_exists()
(foo / "bar.txt").touch()
(foo / "baz").symlink_to(project_dir)
assert [f.path for f in builder.recurse_included_files()] == [
str(project_dir / "README.md"),
str(project_dir / "foo" / "bar.txt"),
]
def test_only_include(self, temp_dir):
project_dir = temp_dir / "project"
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {"tool": {"hatch": {"build": {"only-include": ["foo"], "artifacts": ["README.md"]}}}}
builder = MockBuilder(str(project_dir), config=config)
(project_dir / "README.md").touch()
foo = project_dir / "foo"
foo.ensure_dir_exists()
(foo / "bar.txt").touch()
assert [f.path for f in builder.recurse_included_files()] == [str(project_dir / "foo" / "bar.txt")]
def test_no_duplication_force_include_only(self, temp_dir):
project_dir = temp_dir / "project"
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {
"tool": {
"hatch": {
"build": {
"force-include": {
"../external.txt": "new/target2.txt",
"old": "new",
},
}
}
}
}
builder = MockBuilder(str(project_dir), config=config)
(project_dir / "foo.txt").touch()
old = project_dir / "old"
old.ensure_dir_exists()
(old / "target1.txt").touch()
(old / "target2.txt").touch()
(temp_dir / "external.txt").touch()
build_data = builder.get_default_build_data()
builder.set_build_data_defaults(build_data)
with builder.config.set_build_data(build_data):
assert [(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [
(str(project_dir / "foo.txt"), "foo.txt"),
(str(project_dir / "old" / "target1.txt"), f"new{path_sep}target1.txt"),
(str(temp_dir / "external.txt"), f"new{path_sep}target2.txt"),
]
def test_no_duplication_force_include_and_selection(self, temp_dir):
project_dir = temp_dir / "project"
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {
"tool": {
"hatch": {
"build": {
"include": ["foo.txt", "bar.txt", "baz.txt"],
"force-include": {"../external.txt": "new/file.txt"},
}
}
}
}
builder = MockBuilder(str(project_dir), config=config)
(project_dir / "foo.txt").touch()
(project_dir / "bar.txt").touch()
(project_dir / "baz.txt").touch()
(temp_dir / "external.txt").touch()
build_data = builder.get_default_build_data()
builder.set_build_data_defaults(build_data)
build_data["force_include"]["bar.txt"] = "bar.txt"
with builder.config.set_build_data(build_data):
assert [(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [
(str(project_dir / "baz.txt"), "baz.txt"),
(str(project_dir / "foo.txt"), "foo.txt"),
(str(temp_dir / "external.txt"), f"new{path_sep}file.txt"),
(str(project_dir / "bar.txt"), "bar.txt"),
]
def test_no_duplication_force_include_with_sources(self, temp_dir):
project_dir = temp_dir / "project"
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {
"tool": {
"hatch": {
"build": {
"include": ["src"],
"sources": ["src"],
"force-include": {"../external.txt": "new/file.txt"},
}
}
}
}
builder = MockBuilder(str(project_dir), config=config)
src_dir = project_dir / "src"
src_dir.mkdir()
(src_dir / "foo.txt").touch()
(src_dir / "bar.txt").touch()
(src_dir / "baz.txt").touch()
(temp_dir / "external.txt").touch()
build_data = builder.get_default_build_data()
builder.set_build_data_defaults(build_data)
build_data["force_include"]["src/bar.txt"] = "bar.txt"
with builder.config.set_build_data(build_data):
assert [(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [
(str(src_dir / "baz.txt"), "baz.txt"),
(str(src_dir / "foo.txt"), "foo.txt"),
(str(temp_dir / "external.txt"), f"new{path_sep}file.txt"),
(str(src_dir / "bar.txt"), "bar.txt"),
]
def test_exists(self, temp_dir):
project_dir = temp_dir / "project"
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {
"tool": {
"hatch": {
"build": {
"force-include": {
"../notfound": "target.txt",
},
}
}
}
}
builder = MockBuilder(str(project_dir), config=config)
build_data = builder.get_default_build_data()
builder.set_build_data_defaults(build_data)
with (
builder.config.set_build_data(build_data),
pytest.raises(FileNotFoundError, match="Forced include not found"),
):
list(builder.recurse_included_files())
def test_order(self, temp_dir):
project_dir = temp_dir / "project"
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {
"tool": {
"hatch": {
"build": {
"sources": ["src"],
"include": ["src/foo", "bar", "README.md", "tox.ini"],
"exclude": ["**/foo/baz.txt"],
"force-include": {
"../external1.txt": "nested/target2.txt",
"../external2.txt": "nested/target1.txt",
"../external": "nested",
},
}
}
}
}
builder = MockBuilder(str(project_dir), config=config)
foo = project_dir / "src" / "foo"
foo.ensure_dir_exists()
(foo / "bar.txt").touch()
(foo / "baz.txt").touch()
bar = project_dir / "bar"
bar.ensure_dir_exists()
(bar / "foo.txt").touch()
# Excluded
for name in EXCLUDED_DIRECTORIES:
excluded_dir = bar / name
excluded_dir.ensure_dir_exists()
(excluded_dir / "file.ext").touch()
for name in EXCLUDED_FILES:
excluded_file = bar / name
excluded_file.touch()
(project_dir / "README.md").touch()
(project_dir / "tox.ini").touch()
(temp_dir / "external1.txt").touch()
(temp_dir / "external2.txt").touch()
external = temp_dir / "external"
external.ensure_dir_exists()
(external / "external1.txt").touch()
(external / "external2.txt").touch()
# Excluded
for name in EXCLUDED_DIRECTORIES:
excluded_dir = external / name
excluded_dir.ensure_dir_exists()
(excluded_dir / "file.ext").touch()
for name in EXCLUDED_FILES:
excluded_file = external / name
excluded_file.touch()
assert [(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [
(str(project_dir / "README.md"), "README.md"),
(str(project_dir / "tox.ini"), "tox.ini"),
(
str(project_dir / "bar" / "foo.txt"),
f"bar{path_sep}foo.txt",
),
(str(project_dir / "src" / "foo" / "bar.txt"), f"foo{path_sep}bar.txt"),
(str(temp_dir / "external" / "external1.txt"), f"nested{path_sep}external1.txt"),
(str(temp_dir / "external" / "external2.txt"), f"nested{path_sep}external2.txt"),
(str(temp_dir / "external2.txt"), f"nested{path_sep}target1.txt"),
(str(temp_dir / "external1.txt"), f"nested{path_sep}target2.txt"),
]
| TestDirectoryRecursion |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 70692,
"end": 72400
} | class ____(Request):
"""
Delete all task events. *This cannot be undone!*
:param task: Task ID
:type task: str
:param allow_locked: Allow deleting events even if the task is locked
:type allow_locked: bool
"""
_service = "events"
_action = "delete_for_task"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"allow_locked": {
"default": False,
"description": "Allow deleting events even if the task is locked",
"type": "boolean",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, allow_locked: Optional[bool] = False, **kwargs: Any) -> None:
super(DeleteForTaskRequest, self).__init__(**kwargs)
self.task = task
self.allow_locked = allow_locked
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("allow_locked")
def allow_locked(self) -> Optional[bool]:
return self._property_allow_locked
@allow_locked.setter
def allow_locked(self, value: Optional[bool]) -> None:
if value is None:
self._property_allow_locked = None
return
self.assert_isinstance(value, "allow_locked", (bool,))
self._property_allow_locked = value
| DeleteForTaskRequest |
python | huggingface__transformers | src/transformers/models/ibert/modeling_ibert.py | {
"start": 14050,
"end": 15074
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.self = IBertSelfAttention(config)
self.output = IBertSelfOutput(config)
def forward(
self,
hidden_states,
hidden_states_scaling_factor,
attention_mask=None,
output_attentions=False,
):
self_outputs, self_outputs_scaling_factor = self.self(
hidden_states,
hidden_states_scaling_factor,
attention_mask,
output_attentions,
)
attention_output, attention_output_scaling_factor = self.output(
self_outputs[0], self_outputs_scaling_factor[0], hidden_states, hidden_states_scaling_factor
)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
outputs_scaling_factor = (attention_output_scaling_factor,) + self_outputs_scaling_factor[1:]
return outputs, outputs_scaling_factor
| IBertAttention |
python | pytorch__pytorch | test/inductor/test_mix_order_reduction.py | {
"start": 2124,
"end": 16516
} | class ____(TestBase):
@parametrize(
"name",
[
"sum",
"prod",
"mean",
],
)
@parametrize("swap", (False, True))
@parametrize("split_reductions", (False, True))
@parametrize("shape", ((32768, 768), (32769, 768), (32, 1024, 768)))
def test_mix_order_reduction(self, name, swap, split_reductions, shape):
# torch.prod does not accept tuple for dim argument
if name == "prod" and len(shape) == 3:
self.skipTest("Invalid combination")
def f(x):
def outer_red():
if len(shape) == 3:
return reduction_fn(x, dim=(0, 1))
else:
assert len(shape) == 2
return reduction_fn(x, dim=0)
if swap:
return outer_red(), reduction_fn(x, dim=-1)
else:
return reduction_fn(x, dim=-1), outer_red()
reduction_fn = getattr(torch, name)
dtype = torch.float
x = torch.randn(shape, dtype=dtype, device=GPU_TYPE)
opt_f = torch.compile(
f,
options={
"split_reductions": split_reductions,
},
)
ref = f(x)
act = opt_f(x)
self.assertTrue(same(ref, act, tol=1e-3), f"ref:\n{ref}\nact:\n{act}")
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
def test_xmask(self):
"""
Make sure xmask is setup properly
"""
if not inductor_config.triton.mix_order_reduction:
self.skipTest("Mix order reduction not enabled")
def f(x):
return x.sum(dim=0), x.sum(dim=1)
M, N = 32768 + 1023, 768
EXTRA_ROW = 1
buf = torch.randn(M + EXTRA_ROW, N, device=GPU_TYPE)
x = buf[:M, :]
# make sure wrong xmask error loud if read excess elements
buf[M:, :] = 1000000
opt_f = torch.compile(
f,
options={
"triton.mix_order_reduction_initial_xblock": 2,
},
)
ref = f(x)
act = opt_f(x)
self.assertTrue(same(ref, act, tol=1e-3), f"ref:\n{ref}\nact:\n{act}")
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
def test_avoid_non_coalesced_access(self):
if not inductor_config.triton.mix_order_reduction:
self.skipTest("Mix order reduction not enabled")
def f(x, y):
return (x + y).sum(dim=-1), x.sum(dim=(0, 1))
x = torch.randn(128, 256, 768, device=GPU_TYPE)
y = torch.randn(128, 768, 256, device=GPU_TYPE).transpose(1, 2)
self.check_numeric(f, (x, y))
# we skip mix order reduction for such kernel since
# we force XBLOCK to be 1, the access to tensor y would be
# very inefficient.
# TODO: support XBLOCK larger than 1. But in that case, we
# would have bigger restriction on rnumel to avoid exploding
# shared memory.
self.assertEqual(metrics.codegen_mix_order_reduction, 0)
@inductor_config.patch(coordinate_descent_tuning=True)
def test_XBLOCK_coordest_tuning(self):
"""
We should skip XBLOCK coordinate descent tuning for
mix order reduction.
"""
if not inductor_config.triton.mix_order_reduction:
self.skipTest("Mix order reduction not enabled")
def f(x):
return x.sum(dim=-1), x.sum(dim=0)
x = torch.randn(32768, 256, dtype=torch.float, device=GPU_TYPE)
self.check_numeric(f, (x,))
self.assertEqual(metrics.codegen_mix_order_reduction, 1)
@inductor_config.patch(unroll_reductions_threshold=1)
def test_3layer_split_reduction(self):
"""
Use a larger M and smaller N to trigger a 3 layer split reduction.
"""
if not inductor_config.triton.mix_order_reduction:
self.skipTest("Mix order reduction not enabled")
def f(x):
return x.sum(dim=-1), x.sum(dim=0)
x = torch.randn(32768 * 256, 2, dtype=torch.float, device=GPU_TYPE)
self.check_numeric(f, (x,))
# We don't do mix order reduction for split redutions
# with more than 2 layers
self.assertEqual(metrics.codegen_mix_order_reduction, 0)
def test_independent_split_size(self):
"""
Make sure mix order reduction can pick the split size it wants
"""
if not inductor_config.triton.mix_order_reduction:
self.skipTest("Mix order reduction not enabled")
def f(x):
return x.sum(dim=-1), x.sum(dim=0)
def check_one_split_size(split_size):
torch._dynamo.reset()
with inductor_config.patch(
"triton.mix_order_reduction_split_size", split_size
):
self.check_numeric(f, (x,))
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
_, (code,) = utils.run_and_get_code(torch.compile(f), x)
self.assertTrue(f"'RSPLIT_SIZE': {split_size}" in code)
x = torch.randn(32768, 768, dtype=torch.float, device=GPU_TYPE)
check_one_split_size(8)
check_one_split_size(16)
@inductor_config.patch(split_reductions=False)
def test_non_contiguous_input(self):
def f(x):
return x.sum(dim=-1), x.sum(dim=[0, 1])
x = torch.randn(1024, 32, 768, dtype=torch.float, device=GPU_TYPE).permute(
1, 0, 2
)
self.check_numeric(f, (x,))
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
@inductor_config.patch(split_reductions=False)
def test_multi_workspace_allocation(self):
def f(x, y):
return x.sum(dim=0), x.sum(dim=1), y.sum(dim=0), y.sum(dim=1)
x = torch.randn(4096 * 64, 32, device=GPU_TYPE)
y = torch.randn(4098 * 64, 34, device=GPU_TYPE)
self.check_numeric(f, (x, y))
expected_mix_order_reduction = (
0 if not inductor_config.triton.mix_order_reduction else 2
)
self.assertEqual(
expected_mix_order_reduction, metrics.codegen_mix_order_reduction
)
@parametrize(
"wdtype",
[
torch.bfloat16, # extra down cast for dw is needed
torch.float,
],
)
@parametrize("split_reductions", (False, True))
@parametrize("shape", ((32768, 2048), (32768, 768), (32768 + 1023, 768)))
@parametrize("max_autotune", (False, True))
@parametrize("initial_xblock", (1, 2))
def test_rms_norm_bwd(
self, wdtype, split_reductions, shape, max_autotune, initial_xblock
):
# max_autotune can be slow and cost resource, trim down the tests
# for max autotune
if max_autotune and not (
wdtype == torch.bfloat16
and not split_reductions
and shape in ((32768, 768), (32769, 768))
and initial_xblock == 1
and inductor_config.triton.mix_order_reduction
):
self.skipTest("Skip non-critical tests to save resources.")
def f(x, w, eps):
orig_dtype = x.dtype
x = x.float()
rsqrt = torch.rsqrt((x * x).sum(dim=-1) / x.shape[-1] + eps)
y = (x * rsqrt[:, None] * w).to(dtype=orig_dtype)
return y
def fwd_bwd(f):
x.grad = None
w.grad = None
out = f(x, w, eps)
out.backward(dy)
return x.grad, w.grad
torch.manual_seed(1337)
# M, N = 1152 * 500, 384
M, N = shape
x = torch.randn(M, N, dtype=torch.bfloat16, device=GPU_TYPE, requires_grad=True)
w = torch.randn(N, dtype=wdtype, device=GPU_TYPE, requires_grad=True)
dy = torch.randn_like(x)
eps = 1e-5
opt_f = torch.compile(
f,
options={
"split_reductions": split_reductions,
"triton.mix_order_reduction_initial_xblock": initial_xblock,
**(
{
"max_autotune": True,
"coordinate_descent_tuning": True,
}
if max_autotune
else {}
),
},
)
ref = fwd_bwd(f)
act, (_, bwd_wrapper) = utils.run_and_get_code(fwd_bwd, opt_f)
self.assertTrue(same(ref, act, tol=1e-2), f"ref:\n{ref}\nact:\n{act}")
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
@parametrize(
"wbdtype",
[
torch.bfloat16, # extra down cast for dw/db is needed
torch.float,
],
)
@parametrize("split_reductions", (False, True))
@parametrize("shape", ((32768, 768), (32769, 768)))
def test_layer_norm_bwd_with_bias(self, wbdtype, split_reductions, shape):
def f(x, w, b, eps):
return F.layer_norm(x, x.shape[-1:], w.float(), b.float(), eps)
def fwd_bwd(f):
x.grad = None
w.grad = None
b.grad = None
out = f(x, w, b, eps)
out.backward(dy)
return x.grad, w.grad, b.grad
# M, N = 1152 * 500, 384
M, N = shape
xdtype = torch.float
x = torch.randn(M, N, dtype=xdtype, device=GPU_TYPE, requires_grad=True)
w = torch.randn(N, dtype=wbdtype, device=GPU_TYPE, requires_grad=True)
b = torch.randn(N, dtype=wbdtype, device=GPU_TYPE, requires_grad=True)
dy = torch.randn_like(x)
eps = 1e-5
opt_f = torch.compile(
f,
options={
"split_reductions": split_reductions,
},
)
ref = fwd_bwd(f)
act, (_, bwd_wrapper) = utils.run_and_get_code(fwd_bwd, opt_f)
self.assertTrue(same(ref, act, tol=1e-2), f"ref:\n{ref}\nact:\n{act}")
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
@parametrize("dynamic_dims", ([0], [1], [0, 1]))
def test_rms_norm_bwd_with_dynamic_shape(self, dynamic_dims):
if not inductor_config.triton.mix_order_reduction:
self.skipTest("Mix order reduction not enabled")
def f(x, w, eps):
return F.rms_norm(x, x.shape[-1:], weight=w, eps=eps)
def fwd_bwd(f):
x.grad = None
w.grad = None
out = f(x, w, eps)
out.backward(dy)
return x.grad, w.grad
M0, M1, N = 251, 223, 128
wbdtype = torch.float
xdtype = torch.float
x = torch.randn(M0, M1, N, dtype=xdtype, device=GPU_TYPE, requires_grad=True)
torch._dynamo.mark_dynamic(x, (0, 1))
w = torch.randn(N, dtype=wbdtype, device=GPU_TYPE, requires_grad=True)
dy = torch.randn_like(x)
eps = 1e-5
opt_f = torch.compile(
f,
options={
"split_reductions": False,
},
)
ref = fwd_bwd(f)
act, (_, bwd_wrapper) = utils.run_and_get_code(fwd_bwd, opt_f)
self.assertTrue(same(ref, act, tol=1e-2), f"ref:\n{ref}\nact:\n{act}")
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
@parametrize("dynamic_dims", ([0], [1], [0, 1]))
def test_layer_norm_bwd_with_dynamic_shape(self, dynamic_dims):
if not inductor_config.triton.mix_order_reduction:
self.skipTest("Mix order reduction not enabled")
def f(x, w, eps):
return F.layer_norm(x, x.shape[-1:], weight=w, bias=None, eps=eps)
def fwd_bwd(f):
x.grad = None
w.grad = None
out = f(x, w, eps)
out.backward(dy)
return x.grad, w.grad
M0, M1, N = 251, 223, 128
wbdtype = torch.float
xdtype = torch.float
x = torch.randn(M0, M1, N, dtype=xdtype, device=GPU_TYPE, requires_grad=True)
torch._dynamo.mark_dynamic(x, dynamic_dims)
w = torch.randn(N, dtype=wbdtype, device=GPU_TYPE, requires_grad=True)
dy = torch.randn_like(x)
eps = 1e-5
opt_f = torch.compile(f)
ref = fwd_bwd(f)
act, (_, bwd_wrapper) = utils.run_and_get_code(fwd_bwd, opt_f)
self.assertTrue(same(ref, act, tol=1e-2), f"ref:\n{ref}\nact:\n{act}")
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
@parametrize("split_reductions", (False, True))
@parametrize("shape", ((32768, 768), (32769, 768)))
def test_layer_norm_bwd_no_bias(self, split_reductions, shape):
def f(x, w, eps):
return F.layer_norm(x, x.shape[-1:], w, bias=None, eps=eps)
def fwd_bwd(f):
x.grad = None
w.grad = None
out = f(x, w, eps)
out.backward(dy)
return x.grad, w.grad
# M, N = 1152 * 500, 384
M, N = shape
xdtype = torch.float
wbdtype = torch.float
x = torch.randn(M, N, dtype=xdtype, device=GPU_TYPE, requires_grad=True)
w = torch.randn(N, dtype=wbdtype, device=GPU_TYPE, requires_grad=True)
dy = torch.randn_like(x)
eps = 1e-5
opt_f = torch.compile(
f,
options={
"split_reductions": split_reductions,
},
)
ref = fwd_bwd(f)
act, (_, bwd_wrapper) = utils.run_and_get_code(fwd_bwd, opt_f)
self.assertTrue(same(ref, act, tol=1e-2), f"ref:\n{ref}\nact:\n{act}")
self.assertEqual(
inductor_config.triton.mix_order_reduction,
metrics.codegen_mix_order_reduction,
)
@inductor_config.patch(
"triton.mix_order_reduction", not inductor_config.triton.mix_order_reduction
)
| MixOrderReductionTest |
python | gevent__gevent | src/greentest/3.12/test_signal.py | {
"start": 2647,
"end": 7102
} | class ____(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def create_handler_with_partial(self, argument):
return functools.partial(self.trivial_signal_handler, argument)
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
self.assertRaises(ValueError, signal.strsignal, 4242)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertIsInstance(hup, signal.Handlers)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
def test_no_repr_is_called_on_signal_handler(self):
# See https://github.com/python/cpython/issues/112559.
class MyArgument:
def __init__(self):
self.repr_count = 0
def __repr__(self):
self.repr_count += 1
return super().__repr__()
argument = MyArgument()
self.assertEqual(0, argument.repr_count)
handler = self.create_handler_with_partial(argument)
hup = signal.signal(signal.SIGHUP, handler)
self.assertIsInstance(hup, signal.Handlers)
self.assertEqual(signal.getsignal(signal.SIGHUP), handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
self.assertEqual(0, argument.repr_count)
@unittest.skipIf(sys.platform.startswith("netbsd"),
"gh-124083: strsignal is not supported on NetBSD")
def test_strsignal(self):
self.assertIn("Interrupt", signal.strsignal(signal.SIGINT))
self.assertIn("Terminated", signal.strsignal(signal.SIGTERM))
self.assertIn("Hangup", signal.strsignal(signal.SIGHUP))
# Issue 3864, unknown if this affects earlier versions of freebsd also
def test_interprocess_signal(self):
dirname = os.path.dirname(__file__)
script = os.path.join(dirname, 'signalinterproctester.py')
assert_python_ok(script)
@unittest.skipUnless(
hasattr(signal, "valid_signals"),
"requires signal.valid_signals"
)
def test_valid_signals(self):
s = signal.valid_signals()
self.assertIsInstance(s, set)
self.assertIn(signal.Signals.SIGINT, s)
self.assertIn(signal.Signals.SIGALRM, s)
self.assertNotIn(0, s)
self.assertNotIn(signal.NSIG, s)
self.assertLess(len(s), signal.NSIG)
# gh-91145: Make sure that all SIGxxx constants exposed by the Python
# signal module have a number in the [0; signal.NSIG-1] range.
for name in dir(signal):
if not name.startswith("SIG"):
continue
if name in {"SIG_IGN", "SIG_DFL"}:
# SIG_IGN and SIG_DFL are pointers
continue
with self.subTest(name=name):
signum = getattr(signal, name)
self.assertGreaterEqual(signum, 0)
self.assertLess(signum, signal.NSIG)
@unittest.skipUnless(sys.executable, "sys.executable required.")
@support.requires_subprocess()
def test_keyboard_interrupt_exit_code(self):
"""KeyboardInterrupt triggers exit via SIGINT."""
process = subprocess.run(
[sys.executable, "-c",
"import os, signal, time\n"
"os.kill(os.getpid(), signal.SIGINT)\n"
"for _ in range(999): time.sleep(0.01)"],
stderr=subprocess.PIPE)
self.assertIn(b"KeyboardInterrupt", process.stderr)
self.assertEqual(process.returncode, -signal.SIGINT)
# Caveat: The exit code is insufficient to guarantee we actually died
# via a signal. POSIX shells do more than look at the 8 bit value.
# Writing an automation friendly test of an interactive shell
# to confirm that our process died via a SIGINT proved too complex.
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
| PosixTests |
python | keon__algorithms | tests/test_backtrack.py | {
"start": 7497,
"end": 7993
} | class ____(unittest.TestCase):
def test_generate_parenthesis(self):
self.assertEqual(generate_parenthesis_v1(2), ['()()', '(())'])
self.assertEqual(generate_parenthesis_v1(3), ['()()()', '()(())',
'(())()', '(()())', '((()))'])
self.assertEqual(generate_parenthesis_v2(2), ['(())', '()()'])
self.assertEqual(generate_parenthesis_v2(3), ['((()))', '(()())',
'(())()', '()(())', '()()()'])
| TestGenerateParenthesis |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_workers.py | {
"start": 15315,
"end": 16440
} | class ____:
async def test_delete_work_pool(self, client, work_pool, session):
work_pool_id = work_pool.id
response = await client.delete(f"/work_pools/{work_pool.name}")
assert response.status_code == status.HTTP_204_NO_CONTENT, response.text
assert not await models.workers.read_work_pool(
session=session, work_pool_id=work_pool_id
)
async def test_nonexistent_work_pool(self, client):
response = await client.delete("/work_pools/does-not-exist")
assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
@pytest.mark.parametrize("name", RESERVED_POOL_NAMES)
async def test_delete_reserved_pool_fails(self, session, client, name):
assert await models.workers.create_work_pool(
session=session, work_pool=WorkPoolCreate(name=name)
)
await session.commit()
response = await client.delete(f"/work_pools/{name}")
assert response.status_code == status.HTTP_403_FORBIDDEN, response.text
assert "reserved for internal use" in response.json()["detail"]
| TestDeleteWorkPool |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_date02.py | {
"start": 342,
"end": 2113
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_date02.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [55112064, 55115136]
worksheet.set_column("A:A", 12)
dates = [
date(2013, 1, 1),
date(2013, 1, 2),
date(2013, 1, 3),
date(2013, 1, 4),
date(2013, 1, 5),
date(2013, 1, 6),
date(2013, 1, 7),
date(2013, 1, 8),
date(2013, 1, 9),
date(2013, 1, 10),
]
values = [10, 30, 20, 40, 20, 60, 50, 40, 30, 30]
worksheet.write_column("A1", dates, date_format)
worksheet.write_column("B1", values)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$10",
"values": "=Sheet1!$B$1:$B$10",
}
)
chart.set_x_axis(
{
"date_axis": True,
"min": int(worksheet._convert_date_time(date(2013, 1, 2))),
"max": int(worksheet._convert_date_time(date(2013, 1, 9))),
"num_format": "dd/mm/yyyy",
"num_format_linked": True,
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_mutating_admission_policy.py | {
"start": 383,
"end": 6971
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1alpha1MutatingAdmissionPolicySpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1MutatingAdmissionPolicy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1MutatingAdmissionPolicy.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1MutatingAdmissionPolicy.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:return: The metadata of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1MutatingAdmissionPolicy.
:param metadata: The metadata of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:return: The spec of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:rtype: V1alpha1MutatingAdmissionPolicySpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1alpha1MutatingAdmissionPolicy.
:param spec: The spec of this V1alpha1MutatingAdmissionPolicy. # noqa: E501
:type: V1alpha1MutatingAdmissionPolicySpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1MutatingAdmissionPolicy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1MutatingAdmissionPolicy):
return True
return self.to_dict() != other.to_dict()
| V1alpha1MutatingAdmissionPolicy |
python | celery__celery | t/smoke/workers/other.py | {
"start": 251,
"end": 1813
} | class ____(SmokeWorkerContainer):
"""Alternative worker with different name and queue, but same configurations for the rest."""
@classmethod
def worker_name(cls) -> str:
return "other_smoke_tests_worker"
@classmethod
def worker_queue(cls) -> str:
return "other_smoke_tests_queue"
# Build the image like the dev worker
celery_other_dev_worker_image = build(
path=".",
dockerfile="t/smoke/workers/docker/dev",
tag="t/smoke/worker:other",
buildargs=OtherSmokeWorkerContainer.buildargs(),
)
# Define container settings like the dev worker
other_dev_worker_container = container(
image="{celery_other_dev_worker_image.id}",
environment=fxtr("default_worker_env"),
network="{default_pytest_celery_network.name}",
volumes={
# Volume: Worker /app
"{default_worker_volume.name}": defaults.DEFAULT_WORKER_VOLUME,
# Mount: Celery source
os.path.abspath(os.getcwd()): {
"bind": "/celery",
"mode": "rw",
},
},
wrapper_class=OtherSmokeWorkerContainer,
timeout=defaults.DEFAULT_WORKER_CONTAINER_TIMEOUT,
command=OtherSmokeWorkerContainer.command(),
)
@pytest.fixture
def celery_other_dev_worker(
other_dev_worker_container: OtherSmokeWorkerContainer,
celery_setup_app: Celery,
) -> CeleryTestWorker:
"""Creates a pytest-celery worker node from the worker container."""
worker = CeleryTestWorker(other_dev_worker_container, app=celery_setup_app)
yield worker
worker.teardown()
| OtherSmokeWorkerContainer |
python | mlflow__mlflow | mlflow/pytorch/_lightning_autolog.py | {
"start": 15321,
"end": 29798
} | class ____(pl.Callback, MlflowModelCheckpointCallbackBase):
"""Callback for auto-logging pytorch-lightning model checkpoints to MLflow.
This callback implementation only supports pytorch-lightning >= 1.6.0.
Args:
monitor: In automatic model checkpointing, the metric name to monitor if
you set `model_checkpoint_save_best_only` to True.
save_best_only: If True, automatic model checkpointing only saves when
the model is considered the "best" model according to the quantity
monitored and previous checkpoint model is overwritten.
mode: one of {"min", "max"}. In automatic model checkpointing,
if save_best_only=True, the decision to overwrite the current save file is made
based on either the maximization or the minimization of the monitored quantity.
save_weights_only: In automatic model checkpointing, if True, then
only the model's weights will be saved. Otherwise, the optimizer states,
lr-scheduler states, etc are added in the checkpoint too.
save_freq: `"epoch"` or integer. When using `"epoch"`, the callback
saves the model after each epoch. When using integer, the callback
saves the model at end of this many batches. Note that if the saving isn't
aligned to epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset
every epoch). Defaults to `"epoch"`.
.. code-block:: python
:caption: Example
import mlflow
from mlflow.pytorch import MlflowModelCheckpointCallback
from pytorch_lightning import Trainer
mlflow.pytorch.autolog(checkpoint=True)
model = MyLightningModuleNet() # A custom-pytorch lightning model
train_loader = create_train_dataset_loader()
mlflow_checkpoint_callback = MlflowModelCheckpointCallback()
trainer = Trainer(callbacks=[mlflow_checkpoint_callback])
with mlflow.start_run() as run:
trainer.fit(model, train_loader)
"""
def __init__(
self,
monitor="val_loss",
mode="min",
save_best_only=True,
save_weights_only=False,
save_freq="epoch",
):
super().__init__(
checkpoint_file_suffix=".pth",
monitor=monitor,
mode=mode,
save_best_only=save_best_only,
save_weights_only=save_weights_only,
save_freq=save_freq,
)
self.trainer = None
def save_checkpoint(self, filepath: str):
# Note: `trainer.save_checkpoint` implementation contains invocation of
# `self.strategy.barrier("Trainer.save_checkpoint")`,
# in DDP training, this callback is only invoked in rank 0 process,
# the `barrier` invocation causes deadlock,
# so I implement `save_checkpoint` instead of
# calling `trainer.save_checkpoint`.
checkpoint = self.trainer._checkpoint_connector.dump_checkpoint(self.save_weights_only)
self.trainer.strategy.save_checkpoint(checkpoint, filepath)
@rank_zero_only
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self.trainer = trainer
@rank_zero_only
def on_train_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs,
batch,
batch_idx,
) -> None:
if isinstance(self.save_freq, int) and (
trainer.global_step > 0 and trainer.global_step % self.save_freq == 0
):
self.check_and_save_checkpoint_if_needed(
current_epoch=trainer.current_epoch,
global_step=trainer.global_step,
metric_dict={k: float(v) for k, v in trainer.callback_metrics.items()},
)
@rank_zero_only
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self.save_freq == "epoch":
self.check_and_save_checkpoint_if_needed(
current_epoch=trainer.current_epoch,
global_step=trainer.global_step,
metric_dict={k: float(v) for k, v in trainer.callback_metrics.items()},
)
# PyTorch-Lightning refactored the LoggerConnector class in version 1.4.0 and made metrics
# update on demand. Prior to this, the metrics from the current step were not available to
# callbacks immediately, so the view of metrics was off by one step.
# To avoid this problem, we access the metrics via the logger_connector for older versions.
if _pl_version >= Version("1.4.0"):
def _get_step_metrics(trainer):
return trainer.callback_metrics
else:
def _get_step_metrics(trainer):
return trainer.logger_connector.cached_results.get_latest_batch_log_metrics()
def _log_early_stop_params(early_stop_callback, client, run_id):
"""
Logs early stopping configuration parameters to MLflow.
Args:
early_stop_callback: The early stopping callback instance used during training.
client: An `MlflowAutologgingQueueingClient` instance used for MLflow logging.
run_id: The ID of the MLflow Run to which to log configuration parameters.
"""
client.log_params(
run_id,
{
p: getattr(early_stop_callback, p)
for p in ["monitor", "mode", "patience", "min_delta", "stopped_epoch"]
if hasattr(early_stop_callback, p)
},
)
def _log_early_stop_metrics(early_stop_callback, client, run_id, model_id=None):
"""
Logs early stopping behavior results (e.g. stopped epoch) as metrics to MLflow.
Args:
early_stop_callback: The early stopping callback instance used during training.
client: An `MlflowAutologgingQueueingClient` instance used for MLflow logging.
run_id: The ID of the MLflow Run to which to log configuration parameters.
model_id: The ID of the LoggedModel to which the metrics are associated.
"""
if early_stop_callback.stopped_epoch == 0:
return
metrics = {
"stopped_epoch": early_stop_callback.stopped_epoch,
"restored_epoch": early_stop_callback.stopped_epoch - max(1, early_stop_callback.patience),
}
if hasattr(early_stop_callback, "best_score"):
metrics["best_score"] = float(early_stop_callback.best_score)
if hasattr(early_stop_callback, "wait_count"):
metrics["wait_count"] = early_stop_callback.wait_count
client.log_metrics(run_id, metrics, model_id=model_id)
def patched_fit(original, self, *args, **kwargs):
"""
A patched implementation of `pytorch_lightning.Trainer.fit` which enables logging the
following parameters, metrics and artifacts:
- Training epochs
- Optimizer parameters
- `EarlyStoppingCallback`_ parameters
- Metrics stored in `trainer.callback_metrics`
- Model checkpoints
- Trained model
.. _EarlyStoppingCallback:
https://pytorch-lightning.readthedocs.io/en/latest/early_stopping.html
"""
from mlflow.pytorch import _is_forecasting_model
if not MIN_REQ_VERSION <= _pl_version <= MAX_REQ_VERSION:
warnings.warn(
"Autologging is known to be compatible with pytorch-lightning versions between "
f"{MIN_REQ_VERSION} and {MAX_REQ_VERSION} and may not succeed with packages "
"outside this range."
)
model = args[0] if len(args) > 0 else kwargs["model"]
if _is_forecasting_model(model):
# The forecasting model predict method calls tensor board writer's add_hparams
# method, which triggers pytorch autologging. The patch is for disabling it.
original_predict = model.predict
@functools.wraps(original_predict)
def patched_predict(*args, **kwargs):
with disable_autologging():
return original_predict(*args, **kwargs)
model.predict = patched_predict
with disable_autologging():
run_id = mlflow.active_run().info.run_id
tracking_uri = mlflow.get_tracking_uri()
client = MlflowAutologgingQueueingClient(tracking_uri)
log_model_signatures = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "log_model_signatures", True
)
log_models = get_autologging_config(mlflow.pytorch.FLAVOR_NAME, "log_models", True)
model_id = None
if log_models:
model_id = _initialize_logged_model(
name="model", flavor=mlflow.pytorch.FLAVOR_NAME
).model_id
metrics_logger = BatchMetricsLogger(run_id, tracking_uri, model_id=model_id)
log_every_n_epoch = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "log_every_n_epoch", 1
)
log_every_n_step = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "log_every_n_step", None
)
early_stop_callback = None
for callback in self.callbacks:
if isinstance(callback, pl.callbacks.early_stopping.EarlyStopping):
early_stop_callback = callback
_log_early_stop_params(early_stop_callback, client, run_id)
if not any(isinstance(callbacks, __MlflowPLCallback) for callbacks in self.callbacks):
self.callbacks += [
__MlflowPLCallback(
client,
metrics_logger,
run_id,
log_models,
log_every_n_epoch,
log_every_n_step,
log_model_signatures,
)
]
model_checkpoint = get_autologging_config(mlflow.pytorch.FLAVOR_NAME, "checkpoint", True)
if model_checkpoint:
# __MLflowModelCheckpoint only supports pytorch-lightning >= 1.6.0
if _pl_version >= Version("1.6.0"):
checkpoint_monitor = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "checkpoint_monitor", "val_loss"
)
checkpoint_mode = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "checkpoint_mode", "min"
)
checkpoint_save_best_only = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "checkpoint_save_best_only", True
)
checkpoint_save_weights_only = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "checkpoint_save_weights_only", False
)
checkpoint_save_freq = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "checkpoint_save_freq", "epoch"
)
if not any(
isinstance(callbacks, MlflowModelCheckpointCallback)
for callbacks in self.callbacks
):
self.callbacks += [
MlflowModelCheckpointCallback(
monitor=checkpoint_monitor,
mode=checkpoint_mode,
save_best_only=checkpoint_save_best_only,
save_weights_only=checkpoint_save_weights_only,
save_freq=checkpoint_save_freq,
)
]
else:
warnings.warn(
"Automatic model checkpointing is disabled because this feature only "
"supports pytorch-lightning >= 1.6.0."
)
client.flush(synchronous=False)
with tempfile.TemporaryDirectory() as tempdir:
os.environ[_MLFLOW_LIGHTNING_AUTOLOGGING_TMP_DIR_ENV] = tempdir
try:
result = original(self, *args, **kwargs)
finally:
for callback in self.callbacks:
if isinstance(callback, __MlflowPLCallback) and callback._model_forward_patch:
gorilla.revert(callback._model_forward_patch)
model_signature = None
input_output_tensors_file = os.path.join(tempdir, _INPUT_OUTPUT_TENSORS_FILENAME)
if os.path.exists(input_output_tensors_file):
input_tensor, output_tensor = torch.load(input_output_tensors_file)
try:
input_example = input_tensor.cpu().numpy()
with torch.no_grad():
output_example = output_tensor.cpu().numpy()
model_signature = infer_signature(
input_example,
output_example,
)
except Exception as e:
_logger.warning(
"Inferring model signature failed, skip logging signature. "
"You need to manually log the model with a provided signature after "
f"training. root cause: {e!r}.",
exc_info=True,
)
if early_stop_callback is not None:
_log_early_stop_metrics(early_stop_callback, client, run_id, model_id=model_id)
if Version(pl.__version__) < Version("1.4.0"):
summary = str(ModelSummary(self.model, mode="full"))
else:
summary = str(ModelSummary(self.model, max_depth=-1))
summary_file = os.path.join(tempdir, "model_summary.txt")
with open(summary_file, "w") as f:
f.write(summary)
mlflow.log_artifact(local_path=summary_file)
if log_models:
registered_model_name = get_autologging_config(
mlflow.pytorch.FLAVOR_NAME, "registered_model_name", None
)
mlflow.pytorch.log_model(
self.model,
name="model",
registered_model_name=registered_model_name,
model_id=model_id,
signature=model_signature,
)
if early_stop_callback is not None and self.checkpoint_callback.best_model_path:
mlflow.log_artifact(
local_path=self.checkpoint_callback.best_model_path,
artifact_path="restored_model_checkpoint",
)
client.flush(synchronous=True)
return result
| MlflowModelCheckpointCallback |
python | huggingface__transformers | src/transformers/models/owlvit/processing_owlvit.py | {
"start": 1258,
"end": 1529
} | class ____(ProcessingKwargs, total=False):
images_kwargs: OwlViTImagesKwargs
_defaults = {
"text_kwargs": {
"padding": "max_length",
},
"common_kwargs": {
"return_tensors": "pt",
},
}
| OwlViTProcessorKwargs |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 959,
"end": 1630
} | class ____(core.BaseReader):
r"""Character-delimited table with a single header line at the top.
Lines beginning with a comment character (default='#') as the first
non-whitespace character are comments.
Example table::
# Column definition is the first uncommented line
# Default delimiter is the space character.
apples oranges pears
# Data starts after the header column definition, blank lines ignored
1 2 3
4 5 6
"""
_format_name = "basic"
_description = "Basic table with custom delimiters"
_io_registry_format_aliases = ["ascii"]
header_class = BasicHeader
data_class = BasicData
| Basic |
python | python__mypy | mypy/report.py | {
"start": 22654,
"end": 26673
} | class ____(AbstractReporter):
"""Reporter for generating Cobertura compliant XML."""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.root = etree.Element("coverage", timestamp=str(int(time.time())), version=__version__)
self.doc = etree.ElementTree(self.root)
self.root_package = CoberturaPackage(".")
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
path = os.path.relpath(tree.path)
visitor = stats.StatisticsVisitor(
inferred=True,
filename=tree.fullname,
modules=modules,
typemap=type_map,
all_nodes=True,
)
tree.accept(visitor)
class_name = os.path.basename(path)
file_info = FileInfo(path, tree._fullname)
class_element = etree.Element("class", complexity="1.0", filename=path, name=class_name)
etree.SubElement(class_element, "methods")
lines_element = etree.SubElement(class_element, "lines")
class_lines_covered = 0
class_total_lines = 0
for lineno, _ in iterate_python_lines(path):
status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
hits = 0
branch = False
if status == stats.TYPE_EMPTY:
continue
class_total_lines += 1
if status != stats.TYPE_ANY:
class_lines_covered += 1
hits = 1
if status == stats.TYPE_IMPRECISE:
branch = True
file_info.counts[status] += 1
line_element = etree.SubElement(
lines_element,
"line",
branch=str(branch).lower(),
hits=str(hits),
number=str(lineno),
precision=stats.precision_names[status],
)
if branch:
line_element.attrib["condition-coverage"] = "50% (1/2)"
class_element.attrib["branch-rate"] = "0"
class_element.attrib["line-rate"] = get_line_rate(class_lines_covered, class_total_lines)
# parent_module is set to whichever module contains this file. For most files, we want
# to simply strip the last element off of the module. But for __init__.py files,
# the module == the parent module.
parent_module = file_info.module.rsplit(".", 1)[0]
if file_info.name.endswith("__init__.py"):
parent_module = file_info.module
if parent_module not in self.root_package.packages:
self.root_package.packages[parent_module] = CoberturaPackage(parent_module)
current_package = self.root_package.packages[parent_module]
packages_to_update = [self.root_package, current_package]
for package in packages_to_update:
package.total_lines += class_total_lines
package.covered_lines += class_lines_covered
current_package.classes[class_name] = class_element
def on_finish(self) -> None:
self.root.attrib["line-rate"] = get_line_rate(
self.root_package.covered_lines, self.root_package.total_lines
)
self.root.attrib["branch-rate"] = "0"
self.root.attrib["lines-covered"] = str(self.root_package.covered_lines)
self.root.attrib["lines-valid"] = str(self.root_package.total_lines)
sources = etree.SubElement(self.root, "sources")
source_element = etree.SubElement(sources, "source")
source_element.text = os.getcwd()
self.root_package.add_packages(self.root)
out_path = os.path.join(self.output_dir, "cobertura.xml")
self.doc.write(out_path, encoding="utf-8", pretty_print=True)
print("Generated Cobertura report:", os.path.abspath(out_path))
register_reporter("cobertura-xml", CoberturaXmlReporter, needs_lxml=True)
| CoberturaXmlReporter |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF049.py | {
"start": 191,
"end": 225
} | class ____(Flag): ...
@dataclass()
| E |
python | django__django | tests/conditional_processing/tests.py | {
"start": 653,
"end": 12863
} | class ____(SimpleTestCase):
def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, FULL_RESPONSE.encode())
if response.request["REQUEST_METHOD"] in ("GET", "HEAD"):
if check_last_modified:
self.assertEqual(response.headers["Last-Modified"], LAST_MODIFIED_STR)
if check_etag:
self.assertEqual(response.headers["ETag"], ETAG)
else:
self.assertNotIn("Last-Modified", response.headers)
self.assertNotIn("ETag", response.headers)
def assertNotModified(self, response):
self.assertEqual(response.status_code, 304)
self.assertEqual(response.content, b"")
def test_without_conditions(self):
response = self.client.get("/condition/")
self.assertFullResponse(response)
def test_if_modified_since(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_NEWER_STR
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_INVALID_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
def test_if_unmodified_since(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_NEWER_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_INVALID_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
def test_if_none_match(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
# Several etags in If-None-Match is a bit exotic but why not?
self.client.defaults["HTTP_IF_NONE_MATCH"] = "%s, %s" % (ETAG, EXPIRED_ETAG)
response = self.client.get("/condition/")
self.assertNotModified(response)
def test_weak_if_none_match(self):
"""
If-None-Match comparisons use weak matching, so weak and strong ETags
with the same value result in a 304 response.
"""
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/weak_etag/")
self.assertNotModified(response)
response = self.client.put("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_NONE_MATCH"] = WEAK_ETAG
response = self.client.get("/condition/weak_etag/")
self.assertNotModified(response)
response = self.client.put("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
def test_all_if_none_match(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = "*"
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/no_etag/")
self.assertFullResponse(response, check_last_modified=False, check_etag=False)
def test_if_match(self):
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.put("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MATCH"] = EXPIRED_ETAG
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
def test_weak_if_match(self):
"""
If-Match comparisons use strong matching, so any comparison involving
a weak ETag return a 412 response.
"""
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.get("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_MATCH"] = WEAK_ETAG
response = self.client.get("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
def test_all_if_match(self):
self.client.defaults["HTTP_IF_MATCH"] = "*"
response = self.client.get("/condition/")
self.assertFullResponse(response)
response = self.client.get("/condition/no_etag/")
self.assertEqual(response.status_code, 412)
def test_both_headers(self):
# See RFC 9110 Section 13.2.2.
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertNotModified(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertNotModified(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
def test_both_headers_2(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
def test_single_condition_1(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertNotModified(response)
response = self.client.get("/condition/etag/")
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_2(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/etag/")
self.assertNotModified(response)
response = self.client.get("/condition/last_modified/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_3(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_4(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/etag/")
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_5(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified2/")
self.assertNotModified(response)
response = self.client.get("/condition/etag2/")
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_6(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/etag2/")
self.assertNotModified(response)
response = self.client.get("/condition/last_modified2/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_7(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/etag/")
self.assertEqual(response.status_code, 412)
def test_single_condition_8(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_9(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified2/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/etag2/")
self.assertEqual(response.status_code, 412)
def test_single_condition_head(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.head("/condition/")
self.assertNotModified(response)
def test_unquoted(self):
"""
The same quoted ETag should be set on the header regardless of whether
etag_func() in condition() returns a quoted or an unquoted ETag.
"""
response_quoted = self.client.get("/condition/etag/")
response_unquoted = self.client.get("/condition/unquoted_etag/")
self.assertEqual(response_quoted["ETag"], response_unquoted["ETag"])
# It's possible that the matching algorithm could use the wrong value even
# if the ETag header is set correctly correctly (as tested by
# test_unquoted()), so check that the unquoted value is matched.
def test_unquoted_if_none_match(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/unquoted_etag/")
self.assertNotModified(response)
response = self.client.put("/condition/unquoted_etag/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/unquoted_etag/")
self.assertFullResponse(response, check_last_modified=False)
def test_invalid_etag(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = '"""'
response = self.client.get("/condition/etag/")
self.assertFullResponse(response, check_last_modified=False)
| ConditionalGet |
python | lepture__mistune | src/mistune/core.py | {
"start": 3657,
"end": 6047
} | class ____(Generic[ST]):
sc_flag: "re._FlagsType" = re.M
state_cls: Type[ST]
SPECIFICATION: ClassVar[Dict[str, str]] = {}
DEFAULT_RULES: ClassVar[Iterable[str]] = []
def __init__(self) -> None:
self.specification = self.SPECIFICATION.copy()
self.rules = list(self.DEFAULT_RULES)
self._methods: Dict[
str,
Callable[[Match[str], ST], Optional[int]],
] = {}
self.__sc: Dict[str, Pattern[str]] = {}
def compile_sc(self, rules: Optional[List[str]] = None) -> Pattern[str]:
if rules is None:
key = "$"
rules = self.rules
else:
key = "|".join(rules)
sc = self.__sc.get(key)
if sc:
return sc
regex = "|".join(r"(?P<%s>%s)" % (k, self.specification[k]) for k in rules)
sc = re.compile(regex, self.sc_flag)
self.__sc[key] = sc
return sc
def register(
self,
name: str,
pattern: Union[str, None],
func: Callable[[Self, Match[str], ST], Optional[int]],
before: Optional[str] = None,
) -> None:
"""Register a new rule to parse the token. This method is usually used to
create a new plugin.
:param name: name of the new grammar
:param pattern: regex pattern in string
:param func: the parsing function
:param before: insert this rule before a built-in rule
"""
self._methods[name] = lambda m, state: func(self, m, state)
if pattern:
self.specification[name] = pattern
if name not in self.rules:
self.insert_rule(self.rules, name, before=before)
def register_rule(self, name: str, pattern: str, func: Any) -> None:
raise DeprecationWarning("This plugin is not compatible with mistune v3.")
@staticmethod
def insert_rule(rules: List[str], name: str, before: Optional[str] = None) -> None:
if before:
try:
index = rules.index(before)
rules.insert(index, name)
except ValueError:
rules.append(name)
else:
rules.append(name)
def parse_method(self, m: Match[str], state: ST) -> Optional[int]:
lastgroup = m.lastgroup
assert lastgroup
func = self._methods[lastgroup]
return func(m, state)
| Parser |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 11226,
"end": 11345
} | class ____(IntervalProperty):
"""Thickness of lines that define point glyphs."""
_default_range = .25, 2.5
| Stroke |
python | zarr-developers__zarr-python | src/zarr/core/common.py | {
"start": 1860,
"end": 7602
} | class ____(TypedDict, Generic[TName, TConfig]):
"""
A typed dictionary representing an object with a name and configuration, where the configuration
is a mapping of string keys to values, e.g. another typed dictionary or a JSON object.
This class is generic with two type parameters: the type of the name (``TName``) and the type of
the configuration (``TConfig``).
"""
name: ReadOnly[TName]
"""The name of the object."""
configuration: ReadOnly[TConfig]
"""The configuration of the object."""
def product(tup: tuple[int, ...]) -> int:
return functools.reduce(operator.mul, tup, 1)
def ceildiv(a: float, b: float) -> int:
if a == 0:
return 0
return math.ceil(a / b)
T = TypeVar("T", bound=tuple[Any, ...])
V = TypeVar("V")
async def concurrent_map(
items: Iterable[T],
func: Callable[..., Awaitable[V]],
limit: int | None = None,
) -> list[V]:
if limit is None:
return await asyncio.gather(*list(starmap(func, items)))
else:
sem = asyncio.Semaphore(limit)
async def run(item: tuple[Any]) -> V:
async with sem:
return await func(*item)
return await asyncio.gather(*[asyncio.ensure_future(run(item)) for item in items])
E = TypeVar("E", bound=Enum)
def enum_names(enum: type[E]) -> Iterator[str]:
for item in enum:
yield item.name
def parse_enum(data: object, cls: type[E]) -> E:
if isinstance(data, cls):
return data
if not isinstance(data, str):
raise TypeError(f"Expected str, got {type(data)}")
if data in enum_names(cls):
return cls(data)
raise ValueError(f"Value must be one of {list(enum_names(cls))!r}. Got {data} instead.")
def parse_name(data: JSON, expected: str | None = None) -> str:
if isinstance(data, str):
if expected is None or data == expected:
return data
raise ValueError(f"Expected '{expected}'. Got {data} instead.")
else:
raise TypeError(f"Expected a string, got an instance of {type(data)}.")
def parse_configuration(data: JSON) -> JSON:
if not isinstance(data, dict):
raise TypeError(f"Expected dict, got {type(data)}")
return data
@overload
def parse_named_configuration(
data: JSON | NamedConfig[str, Any], expected_name: str | None = None
) -> tuple[str, dict[str, JSON]]: ...
@overload
def parse_named_configuration(
data: JSON | NamedConfig[str, Any],
expected_name: str | None = None,
*,
require_configuration: bool = True,
) -> tuple[str, dict[str, JSON] | None]: ...
def parse_named_configuration(
data: JSON | NamedConfig[str, Any],
expected_name: str | None = None,
*,
require_configuration: bool = True,
) -> tuple[str, JSON | None]:
if not isinstance(data, dict):
raise TypeError(f"Expected dict, got {type(data)}")
if "name" not in data:
raise ValueError(f"Named configuration does not have a 'name' key. Got {data}.")
name_parsed = parse_name(data["name"], expected_name)
if "configuration" in data:
configuration_parsed = parse_configuration(data["configuration"])
elif require_configuration:
raise ValueError(f"Named configuration does not have a 'configuration' key. Got {data}.")
else:
configuration_parsed = None
return name_parsed, configuration_parsed
def parse_shapelike(data: ShapeLike) -> tuple[int, ...]:
if isinstance(data, int):
if data < 0:
raise ValueError(f"Expected a non-negative integer. Got {data} instead")
return (data,)
try:
data_tuple = tuple(data)
except TypeError as e:
msg = f"Expected an integer or an iterable of integers. Got {data} instead."
raise TypeError(msg) from e
if not all(isinstance(v, int) for v in data_tuple):
msg = f"Expected an iterable of integers. Got {data} instead."
raise TypeError(msg)
if not all(v > -1 for v in data_tuple):
msg = f"Expected all values to be non-negative. Got {data} instead."
raise ValueError(msg)
return data_tuple
def parse_fill_value(data: Any) -> Any:
# todo: real validation
return data
def parse_order(data: Any) -> Literal["C", "F"]:
if data in ("C", "F"):
return cast("Literal['C', 'F']", data)
raise ValueError(f"Expected one of ('C', 'F'), got {data} instead.")
def parse_bool(data: Any) -> bool:
if isinstance(data, bool):
return data
raise ValueError(f"Expected bool, got {data} instead.")
def _warn_write_empty_chunks_kwarg() -> None:
# TODO: link to docs page on array configuration in this message
msg = (
"The `write_empty_chunks` keyword argument is deprecated and will be removed in future versions. "
"To control whether empty chunks are written to storage, either use the `config` keyword "
"argument, as in `config={'write_empty_chunks': True}`,"
"or change the global 'array.write_empty_chunks' configuration variable."
)
warnings.warn(msg, ZarrRuntimeWarning, stacklevel=2)
def _warn_order_kwarg() -> None:
# TODO: link to docs page on array configuration in this message
msg = (
"The `order` keyword argument has no effect for Zarr format 3 arrays. "
"To control the memory layout of the array, either use the `config` keyword "
"argument, as in `config={'order': 'C'}`,"
"or change the global 'array.order' configuration variable."
)
warnings.warn(msg, ZarrRuntimeWarning, stacklevel=2)
def _default_zarr_format() -> ZarrFormat:
"""Return the default zarr_version"""
return cast("ZarrFormat", int(zarr_config.get("default_zarr_format", 3)))
| NamedRequiredConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.