language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | bokeh__bokeh | src/bokeh/models/widgets/pickers.py | {
"start": 1952,
"end": 2468
} | class ____(InputWidget):
""" Base class for various kinds of picker widgets. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
position = Enum(CalendarPosition, default="auto", help="""
Where the calendar is rendered relative to the input when ``inline`` is False.
""")
inline = Bool(default=False, help="""
Whether the calendar sholud be displayed inline.
""")
@abstract
| PickerBase |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 18523,
"end": 18591
} | class ____(Type):
value: int | str | bool | TypeU | Constant
| Literal |
python | imageio__imageio | imageio/plugins/_dicom.py | {
"start": 3868,
"end": 22347
} | class ____(object):
"""
This class provides reading of pixel data from DICOM files. It is
focussed on getting the pixel data, not the meta info.
To use, first create an instance of this class (giving it
a file object or filename). Next use the info attribute to
get a dict of the meta data. The loading of pixel data is
deferred until get_numpy_array() is called.
Comparison with Pydicom
-----------------------
This code focusses on getting the pixel data out, which allows some
shortcuts, resulting in the code being much smaller.
Since the processing of data elements is much cheaper (it skips a lot
of tags), this code is about 3x faster than pydicom (except for the
deflated DICOM files).
This class does borrow some code (and ideas) from the pydicom
project, and (to the best of our knowledge) has the same limitations
as pydicom with regard to the type of files that it can handle.
Limitations
-----------
For more advanced DICOM processing, please check out pydicom.
* Only a predefined subset of data elements (meta information) is read.
* This is a reader; it can not write DICOM files.
* (just like pydicom) it can handle none of the compressed DICOM
formats except for "Deflated Explicit VR Little Endian"
(1.2.840.10008.1.2.1.99).
"""
def __init__(self, file):
# Open file if filename given
if isinstance(file, str):
self._filename = file
self._file = open(file, "rb")
else:
self._filename = "<unknown file>"
self._file = file
# Init variable to store position and size of pixel data
self._pixel_data_loc = None
# The meta header is always explicit and little endian
self.is_implicit_VR = False
self.is_little_endian = True
self._unpackPrefix = "<"
# Dict to store data elements of interest in
self._info = {}
# VR Conversion
self._converters = {
# Numbers
"US": lambda x: self._unpack("H", x),
"UL": lambda x: self._unpack("L", x),
# Numbers encoded as strings
"DS": lambda x: self._splitValues(x, float, "\\"),
"IS": lambda x: self._splitValues(x, int, "\\"),
# strings
"AS": lambda x: x.decode("ascii", "ignore").strip("\x00"),
"DA": lambda x: x.decode("ascii", "ignore").strip("\x00"),
"TM": lambda x: x.decode("ascii", "ignore").strip("\x00"),
"UI": lambda x: x.decode("ascii", "ignore").strip("\x00"),
"LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
"CS": lambda x: self._splitValues(x, float, "\\"),
"PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
}
# Initiate reading
self._read()
@property
def info(self):
return self._info
def _splitValues(self, x, type, splitter):
s = x.decode("ascii").strip("\x00")
try:
if splitter in s:
return tuple([type(v) for v in s.split(splitter) if v.strip()])
else:
return type(s)
except ValueError:
return s
def _unpack(self, fmt, value):
return struct.unpack(self._unpackPrefix + fmt, value)[0]
# Really only so we need minimal changes to _pixel_data_numpy
def __iter__(self):
return iter(self._info.keys())
def __getattr__(self, key):
info = object.__getattribute__(self, "_info")
if key in info:
return info[key]
return object.__getattribute__(self, key) # pragma: no cover
def _read(self):
f = self._file
# Check prefix after peamble
f.seek(128)
if f.read(4) != b"DICM":
raise NotADicomFile("Not a valid DICOM file.")
# Read
self._read_header()
self._read_data_elements()
self._get_shape_and_sampling()
# Close if done, reopen if necessary to read pixel data
if os.path.isfile(self._filename):
self._file.close()
self._file = None
def _readDataElement(self):
f = self._file
# Get group and element
group = self._unpack("H", f.read(2))
element = self._unpack("H", f.read(2))
# Get value length
if self.is_implicit_VR:
vl = self._unpack("I", f.read(4))
else:
vr = f.read(2)
if vr in (b"OB", b"OW", b"SQ", b"UN"):
reserved = f.read(2) # noqa
vl = self._unpack("I", f.read(4))
else:
vl = self._unpack("H", f.read(2))
# Get value
if group == 0x7FE0 and element == 0x0010:
here = f.tell()
self._pixel_data_loc = here, vl
f.seek(here + vl)
return group, element, b"Deferred loading of pixel data"
else:
if vl == 0xFFFFFFFF:
value = self._read_undefined_length_value()
else:
value = f.read(vl)
return group, element, value
def _read_undefined_length_value(self, read_size=128):
"""Copied (in compacted form) from PyDicom
Copyright Darcy Mason.
"""
fp = self._file
# data_start = fp.tell()
search_rewind = 3
bytes_to_find = struct.pack(
self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1]
)
found = False
value_chunks = []
while not found:
chunk_start = fp.tell()
bytes_read = fp.read(read_size)
if len(bytes_read) < read_size:
# try again,
# if still don't get required amount, this is last block
new_bytes = fp.read(read_size - len(bytes_read))
bytes_read += new_bytes
if len(bytes_read) < read_size:
raise EOFError(
"End of file reached before sequence " "delimiter found."
)
index = bytes_read.find(bytes_to_find)
if index != -1:
found = True
value_chunks.append(bytes_read[:index])
fp.seek(chunk_start + index + 4) # rewind to end of delimiter
length = fp.read(4)
if length != b"\0\0\0\0":
logger.warning(
"Expected 4 zero bytes after undefined length " "delimiter"
)
else:
fp.seek(fp.tell() - search_rewind) # rewind a bit
# accumulate the bytes read (not including the rewind)
value_chunks.append(bytes_read[:-search_rewind])
# if get here then have found the byte string
return b"".join(value_chunks)
def _read_header(self):
f = self._file
TransferSyntaxUID = None
# Read all elements, store transferSyntax when we encounter it
try:
while True:
fp_save = f.tell()
# Get element
group, element, value = self._readDataElement()
if group == 0x02:
if group == 0x02 and element == 0x10:
TransferSyntaxUID = value.decode("ascii").strip("\x00")
else:
# No more group 2: rewind and break
# (don't trust group length)
f.seek(fp_save)
break
except (EOFError, struct.error): # pragma: no cover
raise RuntimeError("End of file reached while still in header.")
# Handle transfer syntax
self._info["TransferSyntaxUID"] = TransferSyntaxUID
#
if TransferSyntaxUID is None:
# Assume ExplicitVRLittleEndian
is_implicit_VR, is_little_endian = False, True
elif TransferSyntaxUID == "1.2.840.10008.1.2.1":
# ExplicitVRLittleEndian
is_implicit_VR, is_little_endian = False, True
elif TransferSyntaxUID == "1.2.840.10008.1.2.2":
# ExplicitVRBigEndian
is_implicit_VR, is_little_endian = False, False
elif TransferSyntaxUID == "1.2.840.10008.1.2":
# implicit VR little endian
is_implicit_VR, is_little_endian = True, True
elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99":
# DeflatedExplicitVRLittleEndian:
is_implicit_VR, is_little_endian = False, True
self._inflate()
else:
# http://www.dicomlibrary.com/dicom/transfer-syntax/
t, extra_info = TransferSyntaxUID, ""
if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99":
extra_info = " (JPEG)"
if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99":
extra_info = " (JPEG 2000)"
if t == "1.2.840.10008.1.2.5":
extra_info = " (RLE)"
if t == "1.2.840.10008.1.2.6.1":
extra_info = " (RFC 2557)"
raise CompressedDicom(
"The dicom reader can only read files with "
"uncompressed image data - not %r%s. You "
"can try using dcmtk or gdcm to convert the "
"image." % (t, extra_info)
)
# From hereon, use implicit/explicit big/little endian
self.is_implicit_VR = is_implicit_VR
self.is_little_endian = is_little_endian
self._unpackPrefix = "><"[is_little_endian]
def _read_data_elements(self):
info = self._info
try:
while True:
# Get element
group, element, value = self._readDataElement()
# Is it a group we are interested in?
if group in GROUPS:
key = (group, element)
name, vr = MINIDICT.get(key, (None, None))
# Is it an element we are interested in?
if name:
# Store value
converter = self._converters.get(vr, lambda x: x)
info[name] = converter(value)
except (EOFError, struct.error):
pass # end of file ...
def get_numpy_array(self):
"""Get numpy arra for this DICOM file, with the correct shape,
and pixel values scaled appropriately.
"""
# Is there pixel data at all?
if "PixelData" not in self:
raise TypeError("No pixel data found in this dataset.")
# Load it now if it was not already loaded
if self._pixel_data_loc and len(self.PixelData) < 100:
# Reopen file?
close_file = False
if self._file is None:
close_file = True
self._file = open(self._filename, "rb")
# Read data
self._file.seek(self._pixel_data_loc[0])
if self._pixel_data_loc[1] == 0xFFFFFFFF:
value = self._read_undefined_length_value()
else:
value = self._file.read(self._pixel_data_loc[1])
# Close file
if close_file:
self._file.close()
self._file = None
# Overwrite
self._info["PixelData"] = value
# Get data
data = self._pixel_data_numpy()
data = self._apply_slope_and_offset(data)
# Remove data again to preserve memory
# Note that the data for the original file is loaded twice ...
self._info["PixelData"] = (
b"Data converted to numpy array, " + b"raw data removed to preserve memory"
)
return data
def _get_shape_and_sampling(self):
"""Get shape and sampling without actuall using the pixel data.
In this way, the user can get an idea what's inside without having
to load it.
"""
# Get shape (in the same way that pydicom does)
if "NumberOfFrames" in self and self.NumberOfFrames > 1:
if self.SamplesPerPixel > 1:
shape = (
self.SamplesPerPixel,
self.NumberOfFrames,
self.Rows,
self.Columns,
)
else:
shape = self.NumberOfFrames, self.Rows, self.Columns
elif "SamplesPerPixel" in self:
if self.SamplesPerPixel > 1:
if self.BitsAllocated == 8:
shape = self.SamplesPerPixel, self.Rows, self.Columns
else:
raise NotImplementedError(
"DICOM plugin only handles "
"SamplesPerPixel > 1 if Bits "
"Allocated = 8"
)
else:
shape = self.Rows, self.Columns
else:
raise RuntimeError(
"DICOM file has no SamplesPerPixel " "(perhaps this is a report?)"
)
# Try getting sampling between pixels
if "PixelSpacing" in self:
sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1])
else:
sampling = 1.0, 1.0
if "SliceSpacing" in self:
sampling = (abs(self.SliceSpacing),) + sampling
# Ensure that sampling has as many elements as shape
sampling = (1.0,) * (len(shape) - len(sampling)) + sampling[-len(shape) :]
# Set shape and sampling
self._info["shape"] = shape
self._info["sampling"] = sampling
def _pixel_data_numpy(self):
"""Return a NumPy array of the pixel data."""
# Taken from pydicom
# Copyright (c) 2008-2012 Darcy Mason
if "PixelData" not in self:
raise TypeError("No pixel data found in this dataset.")
# determine the type used for the array
need_byteswap = self.is_little_endian != sys_is_little_endian
# Make NumPy format code, e.g. "uint16", "int32" etc
# from two pieces of info:
# self.PixelRepresentation -- 0 for unsigned, 1 for signed;
# self.BitsAllocated -- 8, 16, or 32
format_str = "%sint%d" % (
("u", "")[self.PixelRepresentation],
self.BitsAllocated,
)
try:
numpy_format = np.dtype(format_str)
except TypeError: # pragma: no cover
raise TypeError(
"Data type not understood by NumPy: format='%s', "
" PixelRepresentation=%d, BitsAllocated=%d"
% (numpy_format, self.PixelRepresentation, self.BitsAllocated)
)
# Have correct Numpy format, so create the NumPy array
arr = np.frombuffer(self.PixelData, numpy_format).copy()
# XXX byte swap - may later handle this in read_file!!?
if need_byteswap:
arr.byteswap(True) # True means swap in-place, don't make new copy
# Note the following reshape operations return a new *view* onto arr,
# but don't copy the data
arr = arr.reshape(*self._info["shape"])
return arr
def _apply_slope_and_offset(self, data):
"""
If RescaleSlope and RescaleIntercept are present in the data,
apply them. The data type of the data is changed if necessary.
"""
# Obtain slope and offset
slope, offset = 1, 0
needFloats, needApplySlopeOffset = False, False
if "RescaleSlope" in self:
needApplySlopeOffset = True
slope = self.RescaleSlope
if "RescaleIntercept" in self:
needApplySlopeOffset = True
offset = self.RescaleIntercept
if int(slope) != slope or int(offset) != offset:
needFloats = True
if not needFloats:
slope, offset = int(slope), int(offset)
# Apply slope and offset
if needApplySlopeOffset:
# Maybe we need to change the datatype?
if data.dtype in [np.float32, np.float64]:
pass
elif needFloats:
data = data.astype(np.float32)
else:
# Determine required range
minReq, maxReq = data.min().item(), data.max().item()
minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset])
maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset])
# Determine required datatype from that
dtype = None
if minReq < 0:
# Signed integer type
maxReq = max([-minReq, maxReq])
if maxReq < 2**7:
dtype = np.int8
elif maxReq < 2**15:
dtype = np.int16
elif maxReq < 2**31:
dtype = np.int32
else:
dtype = np.float32
else:
# Unsigned integer type
if maxReq < 2**8:
dtype = np.int8
elif maxReq < 2**16:
dtype = np.int16
elif maxReq < 2**32:
dtype = np.int32
else:
dtype = np.float32
# Change datatype
if dtype != data.dtype:
data = data.astype(dtype)
# Apply slope and offset
data *= slope
data += offset
# Done
return data
def _inflate(self):
# Taken from pydicom
# Copyright (c) 2008-2012 Darcy Mason
import zlib
from io import BytesIO
# See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset
# following the file metadata was prepared the normal way,
# then "deflate" compression applied.
# All that is needed here is to decompress and then
# use as normal in a file-like object
zipped = self._file.read()
# -MAX_WBITS part is from comp.lang.python answer:
# groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
self._file = BytesIO(unzipped) # a file-like object
| SimpleDicomReader |
python | gevent__gevent | src/greentest/3.14/test_smtplib.py | {
"start": 4889,
"end": 6949
} | class ____(GeneralTests, unittest.TestCase):
client = smtplib.LMTP
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), "test requires Unix domain socket")
def testUnixDomainSocketTimeoutDefault(self):
local_host = '/some/local/lmtp/delivery/program'
mock_socket.reply_with(b"220 Hello world")
try:
client = self.client(local_host, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
super().testTimeoutZero()
local_host = '/some/local/lmtp/delivery/program'
with self.assertRaises(ValueError):
self.client(local_host, timeout=0)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except TimeoutError:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
| LMTPGeneralTests |
python | pydantic__pydantic | pydantic-core/tests/test_errors.py | {
"start": 33009,
"end": 37672
} | class ____:
def __repr__(self):
raise RuntimeError('bad repr')
def test_error_on_repr(pydantic_version):
s = SchemaValidator(core_schema.int_schema())
with pytest.raises(ValidationError) as exc_info:
s.validate_python(BadRepr())
assert str(exc_info.value) == (
'1 validation error for int\n'
' Input should be a valid integer '
'[type=int_type, input_value=<unprintable BadRepr object>, input_type=BadRepr]'
+ (
f'\n For further information visit https://errors.pydantic.dev/{pydantic_version}/v/int_type'
if os.environ.get('PYDANTIC_ERRORS_INCLUDE_URL', '1') != 'false'
else ''
)
)
assert exc_info.value.errors(include_url=False) == [
{'type': 'int_type', 'loc': (), 'msg': 'Input should be a valid integer', 'input': IsInstance(BadRepr)}
]
assert exc_info.value.json(include_url=True) == IsJson(
[
{
'type': 'int_type',
'loc': [],
'msg': 'Input should be a valid integer',
'input': '<Unserializable BadRepr object>',
'url': f'https://errors.pydantic.dev/{pydantic_version}/v/int_type',
}
]
)
def test_error_json(pydantic_version):
s = SchemaValidator(core_schema.str_schema(min_length=3))
with pytest.raises(ValidationError) as exc_info:
s.validate_python('12')
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'string_too_short',
'loc': (),
'msg': 'String should have at least 3 characters',
'input': '12',
'ctx': {'min_length': 3},
}
]
assert exc_info.value.json() == IsJson(
[
{
'type': 'string_too_short',
'loc': [],
'msg': 'String should have at least 3 characters',
'input': '12',
'ctx': {'min_length': 3},
'url': f'https://errors.pydantic.dev/{pydantic_version}/v/string_too_short',
}
]
)
assert exc_info.value.json(include_url=False, include_context=False) == IsJson(
[{'type': 'string_too_short', 'loc': [], 'msg': 'String should have at least 3 characters', 'input': '12'}]
)
assert exc_info.value.json().startswith('[{"type":"string_too_short",')
assert exc_info.value.json(indent=2).startswith('[\n {\n "type": "string_too_short",')
def test_error_json_python_error(pydantic_version: str):
def raise_py_error(v: Any) -> Any:
try:
assert False
except AssertionError as e:
raise ValueError('Oh no!') from e
s = SchemaValidator(core_schema.no_info_plain_validator_function(raise_py_error))
with pytest.raises(ValidationError) as exc_info:
s.validate_python('anything')
exc = exc_info.value.errors()[0]['ctx']['error']
assert isinstance(exc, ValueError)
assert isinstance(exc.__context__, AssertionError)
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'value_error',
'loc': (),
'msg': 'Value error, Oh no!',
'input': 'anything',
'ctx': {'error': HasRepr(repr(ValueError('Oh no!')))},
}
]
assert exc_info.value.json() == IsJson(
[
{
'type': 'value_error',
'loc': [],
'msg': 'Value error, Oh no!',
'input': 'anything',
'ctx': {'error': 'Oh no!'},
'url': f'https://errors.pydantic.dev/{pydantic_version}/v/value_error',
}
]
)
assert exc_info.value.json(include_url=False, include_context=False) == IsJson(
[{'type': 'value_error', 'loc': [], 'msg': 'Value error, Oh no!', 'input': 'anything'}]
)
assert exc_info.value.json().startswith('[{"type":"value_error",')
assert exc_info.value.json(indent=2).startswith('[\n {\n "type": "value_error",')
def test_error_json_cycle():
s = SchemaValidator(core_schema.str_schema(min_length=3))
cycle = []
cycle.append(cycle)
msg = '[type=string_type, input_value=[[...]], input_type=list]'
with pytest.raises(ValidationError, match=re.escape(msg)) as exc_info:
s.validate_python(cycle)
assert exc_info.value.json(include_url=False) == IsJson(
[{'type': 'string_type', 'loc': [], 'msg': 'Input should be a valid string', 'input': ['...']}]
)
| BadRepr |
python | FactoryBoy__factory_boy | factory/fuzzy.py | {
"start": 3281,
"end": 3777
} | class ____(BaseFuzzyAttribute):
"""Random decimal within a given range."""
def __init__(self, low, high=None, precision=2):
if high is None:
high = low
low = 0.0
self.low = low
self.high = high
self.precision = precision
super().__init__()
def fuzz(self):
base = decimal.Decimal(str(random.randgen.uniform(self.low, self.high)))
return base.quantize(decimal.Decimal(10) ** -self.precision)
| FuzzyDecimal |
python | tox-dev__tox | src/tox/execute/local_sub_process/__init__.py | {
"start": 6808,
"end": 14584
} | class ____(ExecuteInstance):
def __init__(
self,
request: ExecuteRequest,
options: ExecuteOptions,
out: SyncWrite,
err: SyncWrite,
on_exit_drain: bool = True, # noqa: FBT001, FBT002
) -> None:
super().__init__(request, options, out, err)
self.process: Popen[bytes] | None = None
self._cmd: list[str] | None = None
self._read_stderr: ReadViaThread | None = None
self._read_stdout: ReadViaThread | None = None
self._on_exit_drain = on_exit_drain
@property
def cmd(self) -> Sequence[str]:
if self._cmd is None:
base = self.request.cmd[0]
executable = shutil.which(base, path=self.request.env["PATH"])
if executable is None:
cmd = self.request.cmd # if failed to find leave as it is
else:
if self.request.allow is not None:
for allow in self.request.allow:
# 1. allow matches just the original name of the executable
# 2. allow matches the entire resolved path
if fnmatch.fnmatch(self.request.cmd[0], allow) or fnmatch.fnmatch(executable, allow):
break
else:
msg = f"{base} (resolves to {executable})" if base == executable else base
msg = f"{msg} is not allowed, use allowlist_externals to allow it"
raise Fail(msg)
cmd = [executable]
if sys.platform != "win32" and self.request.env.get("TOX_LIMITED_SHEBANG", "").strip():
shebang_line = shebang(executable)
if shebang_line:
cmd = [*shebang_line, executable]
cmd.extend(self.request.cmd[1:])
self._cmd = cmd
return self._cmd
def __enter__(self) -> ExecuteStatus:
# adjust sub-process terminal size
columns, lines = shutil.get_terminal_size(fallback=(-1, -1))
if columns != -1: # pragma: no branch
self.request.env.setdefault("COLUMNS", str(columns))
if lines != -1: # pragma: no branch
self.request.env.setdefault("LINES", str(lines))
stdout, stderr = self.get_stream_file_no("stdout"), self.get_stream_file_no("stderr")
try:
self.process = process = Popen(
self.cmd,
stdout=next(stdout),
stderr=next(stderr),
stdin={StdinSource.USER: None, StdinSource.OFF: DEVNULL, StdinSource.API: PIPE}[self.request.stdin],
cwd=str(self.request.cwd),
env=self.request.env,
)
except OSError as exception:
# We log a nice error message to avout returning opaque error codes,
# like exit code 2 (filenotfound).
logging.error("Exception running subprocess %s", exception) # noqa: TRY400
return LocalSubprocessExecuteFailedStatus(self.options, self._out, self._err, exception.errno)
status = LocalSubprocessExecuteStatus(self.options, self._out, self._err, process)
drain, pid = self._on_exit_drain, self.process.pid
self._read_stderr = ReadViaThread(stderr.send(process), self.err_handler, name=f"err-{pid}", drain=drain)
self._read_stderr.__enter__()
self._read_stdout = ReadViaThread(stdout.send(process), self.out_handler, name=f"out-{pid}", drain=drain)
self._read_stdout.__enter__()
if sys.platform == "win32": # explicit check for mypy: # pragma: win32 cover
process.stderr.read = self._read_stderr._drain_stream # type: ignore[assignment,union-attr] # noqa: SLF001
process.stdout.read = self._read_stdout._drain_stream # type: ignore[assignment,union-attr] # noqa: SLF001
return status
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self._read_stderr is not None:
self._read_stderr.__exit__(exc_type, exc_val, exc_tb)
if self._read_stdout is not None:
self._read_stdout.__exit__(exc_type, exc_val, exc_tb)
if self.process is not None: # cleanup the file handlers
for stream in (self.process.stdout, self.process.stderr, self.process.stdin):
if stream is not None and not getattr(stream, "closed", False):
try:
stream.close()
except OSError as exc: # pragma: no cover
logging.warning("error while trying to close %r with %r", stream, exc) # pragma: no cover
@staticmethod
def get_stream_file_no(key: str) -> Generator[int, Popen[bytes], None]:
allocated_pty = _pty(key)
if allocated_pty is not None:
main_fd, child_fd = allocated_pty
yield child_fd
os.close(child_fd) # close the child process pipe
yield main_fd
else:
process = yield PIPE
stream = getattr(process, key)
if sys.platform == "win32": # explicit check for mypy # pragma: win32 cover
yield stream.handle
else:
yield stream.name
def set_out_err(self, out: SyncWrite, err: SyncWrite) -> tuple[SyncWrite, SyncWrite]:
prev = self._out, self._err
if self._read_stdout is not None: # pragma: no branch
self._read_stdout.handler = out.handler
if self._read_stderr is not None: # pragma: no branch
self._read_stderr.handler = err.handler
return prev
def _pty(key: str) -> tuple[int, int] | None:
"""
Allocate a virtual terminal (pty) for a subprocess.
A virtual terminal allows a process to perform syscalls that fetch attributes related to the tty,
for example to determine whether to use colored output or enter interactive mode.
The termios attributes of the controlling terminal stream will be copied to the allocated pty.
:param key: The stream to copy attributes from. Either "stdout" or "stderr".
:return: (main_fd, child_fd) of an allocated pty; or None on error or if unsupported (win32).
"""
if sys.platform == "win32": # explicit check for mypy # pragma: win32 cover
return None
stream: io.TextIOWrapper = getattr(sys, key)
# when our current stream is a tty, emulate pty for the child
# to allow host streams traits to be inherited
if not stream.isatty():
return None
try:
import fcntl # noqa: PLC0415
import pty # noqa: PLC0415
import struct # noqa: PLC0415
import termios # noqa: PLC0415
except ImportError: # pragma: no cover
return None # cannot proceed on platforms without pty support
try:
main, child = pty.openpty()
except OSError: # could not open a tty
return None # pragma: no cover
try:
mode = termios.tcgetattr(stream)
termios.tcsetattr(child, termios.TCSANOW, mode)
except (termios.error, OSError): # could not inherit traits
return None # pragma: no cover
# adjust sub-process terminal size
columns, lines = shutil.get_terminal_size(fallback=(-1, -1))
if columns != -1 and lines != -1:
size = struct.pack("HHHH", lines, columns, 0, 0)
fcntl.ioctl(child, termios.TIOCSWINSZ, size)
return main, child
__all__ = (
"SIG_INTERRUPT",
"LocalSubProcessExecuteInstance",
"LocalSubProcessExecutor",
"LocalSubprocessExecuteFailedStatus",
"LocalSubprocessExecuteStatus",
)
| LocalSubProcessExecuteInstance |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/oidc.py | {
"start": 4784,
"end": 5930
} | class ____(OIDCOnlyMixin, View):
"""
View used to show oidc json web key set document
"""
def get(self, request, *args, **kwargs):
keys = []
if oauth2_settings.OIDC_RSA_PRIVATE_KEY:
for pem in [
oauth2_settings.OIDC_RSA_PRIVATE_KEY,
*oauth2_settings.OIDC_RSA_PRIVATE_KEYS_INACTIVE,
]:
key = jwk_from_pem(pem)
data = {"alg": "RS256", "use": "sig", "kid": key.thumbprint()}
data.update(json.loads(key.export_public()))
keys.append(data)
response = JsonResponse({"keys": keys})
response["Access-Control-Allow-Origin"] = "*"
response["Cache-Control"] = (
"Cache-Control: public, "
+ f"max-age={oauth2_settings.OIDC_JWKS_MAX_AGE_SECONDS}, "
+ f"stale-while-revalidate={oauth2_settings.OIDC_JWKS_MAX_AGE_SECONDS}, "
+ f"stale-if-error={oauth2_settings.OIDC_JWKS_MAX_AGE_SECONDS}"
)
return response
@method_decorator(csrf_exempt, name="dispatch")
@method_decorator(login_not_required, name="dispatch")
| JwksInfoView |
python | apache__airflow | airflow-core/tests/unit/dags/test_on_kill.py | {
"start": 1106,
"end": 2133
} | class ____(EmptyOperator):
def execute(self, context: Context):
import os
self.log.info("Signalling that I am running")
# signal to the test that we've started
with open("/tmp/airflow_on_kill_running", "w") as f:
f.write("ON_KILL_RUNNING")
self.log.info("Signalled")
# This runs extra processes, so that we can be sure that we correctly
# tidy up all processes launched by a task when killing
if not os.fork():
os.system("sleep 10")
time.sleep(10)
def on_kill(self):
self.log.info("Executing on_kill")
with open("/tmp/airflow_on_kill_killed", "w") as f:
f.write("ON_KILL_TEST")
self.log.info("Executed on_kill")
# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
dag1 = DAG(dag_id="test_on_kill", start_date=datetime(2015, 1, 1), schedule="@daily")
dag1_task1 = DummyWithOnKill(task_id="task1", dag=dag1, owner="airflow")
| DummyWithOnKill |
python | tensorflow__tensorflow | tensorflow/python/framework/python_tensor_converter_test.py | {
"start": 1330,
"end": 9127
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def setUp(self):
context.ensure_initialized()
super(PythonTensorConverterTest, self).setUp()
def makePythonTensorConverter(self):
return _pywrap_python_tensor_converter.PythonTensorConverter(
context.context())
#=============================================================================
# Convert int to tensor.
def testConvertIntWithInferredDType(self):
converter = self.makePythonTensorConverter()
result, dtype, used_fallback = converter.Convert(12, types_pb2.DT_INVALID)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, 12)
self.assertEqual(dtype, types_pb2.DT_INT32)
self.assertEqual(used_fallback, not context.executing_eagerly())
def testConvertIntWithExplicitDtype(self):
converter = self.makePythonTensorConverter()
result, dtype, used_fallback = converter.Convert(12, types_pb2.DT_INT64)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, 12)
self.assertEqual(dtype, types_pb2.DT_INT64)
self.assertEqual(used_fallback, not context.executing_eagerly())
def testConvertIntWithIncompatibleDtype(self):
converter = self.makePythonTensorConverter()
with self.assertRaisesRegex(
TypeError, "Expected string, but got 3 of type 'int'"
"|Cannot convert 3 to EagerTensor of dtype string"):
converter.Convert(3, types_pb2.DT_STRING)
#=============================================================================
# Convert tensor to tensor.
def testConvertTensorWithInferredDType(self):
converter = self.makePythonTensorConverter()
result, dtype, used_fallback = converter.Convert(
constant_op.constant([1, 2, 3]), types_pb2.DT_INVALID)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [1, 2, 3])
self.assertEqual(dtype, types_pb2.DT_INT32)
self.assertFalse(used_fallback)
def testConvertTensorWithExplicitDtype(self):
converter = self.makePythonTensorConverter()
result, dtype, used_fallback = converter.Convert(
constant_op.constant([1, 2, 3], dtypes.int64), types_pb2.DT_INT64)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [1, 2, 3])
self.assertEqual(dtype, types_pb2.DT_INT64)
self.assertFalse(used_fallback)
def testConvertTensorWithIncorrectDtype(self):
converter = self.makePythonTensorConverter()
with self.assertRaises((TypeError, ValueError)):
converter.Convert(
constant_op.constant([1, 2, 3], dtypes.int32), types_pb2.DT_INT64)
#=============================================================================
# Convert list to tensor.
def testConvertListWithInferredDType(self):
converter = self.makePythonTensorConverter()
result, dtype, used_fallback = converter.Convert([[1, 2, 3], [4, 5, 6]],
types_pb2.DT_INVALID)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(dtype, types_pb2.DT_INT32)
self.assertEqual(used_fallback, not context.executing_eagerly())
def testConvertListWithExplicitDtype(self):
converter = self.makePythonTensorConverter()
result, dtype, used_fallback = converter.Convert([[1, 2, 3], [4, 5, 6]],
types_pb2.DT_INT64)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(dtype, types_pb2.DT_INT64)
self.assertEqual(used_fallback, not context.executing_eagerly())
def testConvertListWithIncompatibleDtype(self):
converter = self.makePythonTensorConverter()
with self.assertRaisesRegex(
TypeError, "Expected string, but got .* of type 'int'"
"|Cannot convert .* to EagerTensor of dtype string"):
converter.Convert([[1, 2, 3], [4, 5, 6]], types_pb2.DT_STRING)
def testConvertListWithInconsistentDtype(self):
converter = self.makePythonTensorConverter()
with self.assertRaisesRegex(
(TypeError, ValueError),
"Can't convert Python sequence with mixed types to Tensor."
"|Failed to convert"):
converter.Convert([[1, 2], ["a", "b"]], types_pb2.DT_INVALID)
#=============================================================================
# Convert np.array to tensor.
def testConvertNumpyArrayWithInferredDType(self):
converter = self.makePythonTensorConverter()
x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INVALID)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(dtype, types_pb2.DT_INT32)
self.assertEqual(used_fallback, not context.executing_eagerly())
def testConvertNumpyArrayWithExplicitDtype(self):
converter = self.makePythonTensorConverter()
x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INT64)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(dtype, types_pb2.DT_INT64)
self.assertEqual(used_fallback, not context.executing_eagerly())
def testConvertNumpyArrayWithIncompatibleDtype(self):
converter = self.makePythonTensorConverter()
x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
with self.assertRaises((ValueError, TypeError)):
converter.Convert(x, types_pb2.DT_STRING)
def testConvertNumpyArrayWithUnsupportedDtype(self):
converter = self.makePythonTensorConverter()
x = np.array([[1, 2], ["a", "b"]], np.object_)
with self.assertRaises((ValueError, TypeError)):
converter.Convert(x, types_pb2.DT_INVALID)
#=============================================================================
# Convert IndexedSlices to tensor.
def testConvertIndexedSlicesWithInferredDType(self):
converter = self.makePythonTensorConverter()
x = indexed_slices.IndexedSlices(
constant_op.constant([[1, 2, 3]], dtypes.int32, name="x_values"),
constant_op.constant([1], dtypes.int64, name="x_indices"),
constant_op.constant([3, 3], dtypes.int64, name="x_shape"))
result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INVALID)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [[0, 0, 0], [1, 2, 3], [0, 0, 0]])
self.assertEqual(dtype, types_pb2.DT_INT32)
self.assertTrue(used_fallback)
def testConvertIndexedSlicesWithExplicitDtype(self):
converter = self.makePythonTensorConverter()
x = indexed_slices.IndexedSlices(
constant_op.constant([[1, 2, 3]], dtypes.int32, name="x_values"),
constant_op.constant([1], dtypes.int64, name="x_indices"),
constant_op.constant([3, 3], dtypes.int64, name="x_shape"))
result, dtype, used_fallback = converter.Convert(x, types_pb2.DT_INT32)
self.assertIsInstance(result, tensor.Tensor)
self.assertAllEqual(result, [[0, 0, 0], [1, 2, 3], [0, 0, 0]])
self.assertEqual(dtype, types_pb2.DT_INT32)
self.assertTrue(used_fallback)
def testConvertIndexedSlicesWithIncorrectDtype(self):
converter = self.makePythonTensorConverter()
x = indexed_slices.IndexedSlices(
constant_op.constant([[1, 2, 3]], dtypes.int32, name="x_values"),
constant_op.constant([1], dtypes.int64, name="x_indices"),
constant_op.constant([3, 3], dtypes.int64, name="x_shape"))
with self.assertRaises((ValueError, TypeError)):
converter.Convert(x, types_pb2.DT_FLOAT)
# Run the test suite when this file is executed directly as a script.
if __name__ == "__main__":
  googletest.main()
| PythonTensorConverterTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_unused_arguments/ignore_variadic_names.py | {
"start": 101,
"end": 236
} | class ____:
def f(self, a, b):
print("Hello, world!")
def f(self, a, b, *args, **kwargs):
print("Hello, world!")
| C |
python | numba__numba | numba/np/ufunc/parallel.py | {
"start": 6191,
"end": 7337
} | class ____(ufuncbuilder.UFuncBuilder):
def build(self, cres, sig):
_launch_threads()
# Builder wrapper for ufunc entry point
ctx = cres.target_context
signature = cres.signature
library = cres.library
fname = cres.fndesc.llvm_func_name
info = build_ufunc_wrapper(library, ctx, fname, signature, cres)
ptr = info.library.get_pointer_to_function(info.name)
# Get dtypes
dtypenums = [np.dtype(a.name).num for a in signature.args]
dtypenums.append(np.dtype(signature.return_type.name).num)
keepalive = ()
return dtypenums, ptr, keepalive
def build_ufunc_wrapper(library, ctx, fname, signature, cres):
innerfunc = ufuncbuilder.build_ufunc_wrapper(library, ctx, fname,
signature, objmode=False,
cres=cres)
info = build_gufunc_kernel(library, ctx, innerfunc, signature,
len(signature.args))
return info
# ---------------------------------------------------------------------------
| ParallelUFuncBuilder |
python | django__django | tests/messages_tests/tests.py | {
"start": 3711,
"end": 7779
} | class ____(MessagesTestMixin, SimpleTestCase):
def test_assertion(self):
response = FakeResponse()
add_message(response.wsgi_request, constants.DEBUG, "DEBUG message.")
add_message(response.wsgi_request, constants.INFO, "INFO message.")
add_message(response.wsgi_request, constants.SUCCESS, "SUCCESS message.")
add_message(response.wsgi_request, constants.WARNING, "WARNING message.")
add_message(response.wsgi_request, constants.ERROR, "ERROR message.")
self.assertMessages(
response,
[
Message(constants.DEBUG, "DEBUG message."),
Message(constants.INFO, "INFO message."),
Message(constants.SUCCESS, "SUCCESS message."),
Message(constants.WARNING, "WARNING message."),
Message(constants.ERROR, "ERROR message."),
],
)
def test_with_tags(self):
response = FakeResponse()
add_message(
response.wsgi_request,
constants.INFO,
"INFO message.",
extra_tags="extra-info",
)
add_message(
response.wsgi_request,
constants.SUCCESS,
"SUCCESS message.",
extra_tags="extra-success",
)
add_message(
response.wsgi_request,
constants.WARNING,
"WARNING message.",
extra_tags="extra-warning",
)
add_message(
response.wsgi_request,
constants.ERROR,
"ERROR message.",
extra_tags="extra-error",
)
self.assertMessages(
response,
[
Message(constants.INFO, "INFO message.", "extra-info"),
Message(constants.SUCCESS, "SUCCESS message.", "extra-success"),
Message(constants.WARNING, "WARNING message.", "extra-warning"),
Message(constants.ERROR, "ERROR message.", "extra-error"),
],
)
@override_settings(MESSAGE_TAGS={42: "CUSTOM"})
def test_custom_levelname(self):
response = FakeResponse()
add_message(response.wsgi_request, 42, "CUSTOM message.")
self.assertMessages(response, [Message(42, "CUSTOM message.")])
def test_ordered(self):
response = FakeResponse()
add_message(response.wsgi_request, constants.INFO, "First message.")
add_message(response.wsgi_request, constants.WARNING, "Second message.")
expected_messages = [
Message(constants.WARNING, "Second message."),
Message(constants.INFO, "First message."),
]
self.assertMessages(response, expected_messages, ordered=False)
with self.assertRaisesMessage(AssertionError, "Lists differ: "):
self.assertMessages(response, expected_messages)
def test_mismatching_length(self):
response = FakeResponse()
add_message(response.wsgi_request, constants.INFO, "INFO message.")
msg = (
"Lists differ: [Message(level=20, message='INFO message.')] != []\n\n"
"First list contains 1 additional elements.\n"
"First extra element 0:\n"
"Message(level=20, message='INFO message.')\n\n"
"- [Message(level=20, message='INFO message.')]\n"
"+ []"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertMessages(response, [])
def test_method_frames_ignored_by_unittest(self):
response = FakeResponse()
try:
self.assertMessages(response, [object()])
except AssertionError:
exc_type, exc, tb = sys.exc_info()
result = unittest.TestResult()
result.addFailure(self, (exc_type, exc, tb))
stack = traceback.extract_tb(exc.__traceback__)
self.assertEqual(len(stack), 1)
# Top element in the stack is this method, not assertMessages.
self.assertEqual(stack[-1].name, "test_method_frames_ignored_by_unittest")
| AssertMessagesTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/pipeline.py | {
"start": 547,
"end": 765
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneGraph,
GrapheneGraphNotFoundError,
GraphenePythonError,
)
name = "GraphOrError"
| GrapheneGraphOrError |
python | jazzband__django-polymorphic | example/orders/models.py | {
"start": 1652,
"end": 1914
} | class ____(Payment):
"""
Payment by SEPA (EU)
"""
iban = models.CharField(max_length=34)
bic = models.CharField(max_length=11)
class Meta:
verbose_name = _("SEPA Payment")
verbose_name_plural = _("SEPA Payments")
| SepaPayment |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 61267,
"end": 70061
} | class ____(RepoDescriptor):
def __init__(
self,
*,
name: Optional[str],
repository: str,
branch: Optional[str],
commit: Optional[str],
tag: Optional[str],
destination: str,
relative_paths: Optional[List[str]],
lock: spack.util.lock.Lock,
) -> None:
super().__init__(name)
self.repository = repository
self.branch = branch
self.commit = commit
self.tag = tag
self.destination = destination
self.relative_paths = relative_paths
self.error: Optional[str] = None
self.write_transaction = spack.util.lock.WriteTransaction(lock)
self.read_transaction = spack.util.lock.ReadTransaction(lock)
def _fetched(self) -> bool:
"""Check if the repository has been fetched by looking for the .git
directory or file (when a submodule)."""
return os.path.exists(os.path.join(self.destination, ".git"))
def fetched(self) -> bool:
with self.read_transaction:
return self._fetched()
return False
def get_commit(self, git: MaybeExecutable = None):
git = git or spack.util.git.git(required=True)
with self.read_transaction:
if not self._fetched():
return None
with fs.working_dir(self.destination):
return git("rev-parse", "HEAD", output=str).strip()
def _clone_or_pull(
self,
git: spack.util.executable.Executable,
update: bool = False,
remote: str = "origin",
depth: Optional[int] = None,
) -> None:
with self.write_transaction:
try:
with fs.working_dir(self.destination, create=True):
# do not fetch if the package repository was fetched by another
# process while we were waiting for the lock
fetched = self._fetched()
if fetched and not update:
self.read_index_file()
return
# If depth is not provided, default to:
# 1. The first time the repo is loaded, download a partial clone.
# This speeds up CI/CD and other cases where the user never
# updates the repository.
# 2. When *updating* an already cloned copy of the repository,
# perform a full fetch (unshallowing the repo if necessary) to
# optimize for full history.
if depth is None and not fetched:
depth = 2
# setup the repository if it does not exist
if not fetched:
spack.util.git.init_git_repo(self.repository, remote=remote, git_exe=git)
# determine the default branch from ls-remote
# (if no branch, tag, or commit is specified)
if not (self.commit or self.tag or self.branch):
# Get HEAD and all branches. On more recent versions of git, this can
# be done with a single call to `git ls-remote --symref remote HEAD`.
refs = git("ls-remote", remote, "HEAD", "refs/heads/*", output=str)
head_match = re.search(r"^([0-9a-f]+)\s+HEAD$", refs, re.MULTILINE)
if not head_match:
self.error = f"Unable to locate HEAD for {self.repository}"
return
head_sha = head_match.group(1)
# Find the first branch that matches this SHA
branch_match = re.search(
rf"^{re.escape(head_sha)}\s+refs/heads/(\S+)$", refs, re.MULTILINE
)
if not branch_match:
self.error = (
f"Unable to locate a default branch for {self.repository}"
)
return
self.branch = branch_match.group(1)
# determine the branch and remote if no config values exist
elif not (self.commit or self.tag or self.branch):
self.branch = git("rev-parse", "--abbrev-ref", "HEAD", output=str).strip()
remote = git("config", f"branch.{self.branch}.remote", output=str).strip()
if self.commit:
spack.util.git.pull_checkout_commit(self.commit, git_exe=git)
elif self.tag:
spack.util.git.pull_checkout_tag(
self.tag, remote, depth=depth, git_exe=git
)
elif self.branch:
# if the branch already exists we should use the
# previously configured remote
try:
output = git("config", f"branch.{self.branch}.remote", output=str)
remote = output.strip()
except spack.util.executable.ProcessError:
pass
spack.util.git.pull_checkout_branch(
self.branch, remote=remote, depth=depth, git_exe=git
)
except spack.util.executable.ProcessError:
self.error = f"Failed to {'update' if update else 'clone'} repository {self.name}"
return
self.read_index_file()
def update(self, git: MaybeExecutable = None, remote: str = "origin") -> None:
if git is None:
raise RepoError("Git executable not found")
self._clone_or_pull(git, update=True, remote=remote)
if self.error:
raise RepoError(self.error)
def initialize(self, fetch: bool = True, git: MaybeExecutable = None) -> None:
"""Clone the remote repository if it has not been fetched yet and read the index file
if necessary."""
if self.fetched():
self.read_index_file()
return
if not fetch:
return
if not git:
self.error = "Git executable not found"
return
self._clone_or_pull(git)
def read_index_file(self) -> None:
if self.relative_paths is not None:
return
repo_index_file = os.path.join(self.destination, SPACK_REPO_INDEX_FILE_NAME)
try:
with open(repo_index_file, encoding="utf-8") as f:
index_data = syaml.load(f)
assert "repo_index" in index_data, "missing 'repo_index' key"
repo_index = index_data["repo_index"]
assert isinstance(repo_index, dict), "'repo_index' must be a dictionary"
assert "paths" in repo_index, "missing 'paths' key in 'repo_index'"
sub_paths = repo_index["paths"]
assert isinstance(sub_paths, list), "'paths' under 'repo_index' must be a list"
except (OSError, syaml.SpackYAMLError, AssertionError) as e:
self.error = f"failed to read {repo_index_file}: {e}"
return
# validate that this is a list of relative paths.
if not isinstance(sub_paths, list) or not all(isinstance(p, str) for p in sub_paths):
self.error = "invalid repo index file format: expected a list of relative paths."
return
self.relative_paths = sub_paths
def __repr__(self):
return (
f"RemoteRepoDescriptor(name={self.name!r}, "
f"repository={self.repository!r}, "
f"destination={self.destination!r}, "
f"relative_paths={self.relative_paths!r})"
)
def construct(
self, cache: spack.util.file_cache.FileCache, overrides: Optional[Dict[str, Any]] = None
) -> Dict[str, Union[Repo, Exception]]:
if self.error:
return {self.destination: Exception(self.error)}
repos: Dict[str, Union[Repo, Exception]] = {}
for subpath in self.relative_paths or []:
if os.path.isabs(subpath):
repos[self.destination] = Exception(
f"Repository subpath '{subpath}' must be relative"
)
continue
path = os.path.join(self.destination, subpath)
try:
repos[path] = Repo(path, cache=cache, overrides=overrides)
except RepoError as e:
repos[path] = e
return repos
| RemoteRepoDescriptor |
python | scipy__scipy | scipy/signal/tests/test_ltisys.py | {
"start": 33225,
"end": 39020
} | class ____:
A = [[1.0, 2.0], [3.0, 4.0]]
B = [[-1.0], [5.0]]
C = [[4.0, 5.0]]
D = [[2.5]]
def test_no_matrix_fails(self):
assert_raises(ValueError, abcd_normalize)
def test_A_nosquare_fails(self, xp):
assert_raises(ValueError, abcd_normalize, xp.asarray([1, -1]),
xp.asarray(self.B), xp.asarray(self.C), xp.asarray(self.D))
def test_AB_mismatch_fails(self, xp):
assert_raises(ValueError, abcd_normalize, xp.asarray(self.A),
xp.asarray([-1, 5]), xp.asarray(self.C), xp.asarray(self.D))
def test_AC_mismatch_fails(self, xp):
assert_raises(ValueError, abcd_normalize, xp.asarray(self.A),
xp.asarray(self.B), xp.asarray([[4.0], [5.0]]),
xp.asarray(self.D))
def test_CD_mismatch_fails(self, xp):
assert_raises(ValueError, abcd_normalize, xp.asarray(self.A),
xp.asarray(self.B), xp.asarray(self.C), xp.asarray([2.5, 0]))
def test_BD_mismatch_fails(self, xp):
assert_raises(ValueError, abcd_normalize, xp.asarray(self.A),
xp.asarray([-1, 5]), xp.asarray(self.C), xp.asarray(self.D))
def test_normalized_matrices_unchanged(self, xp):
A_, B_, C_, D_ = map(xp.asarray, (self.A, self.B, self.C, self.D))
A, B, C, D = abcd_normalize(A=A_, B=B_, C=C_, D=D_)
xp_assert_equal(A, A_)
xp_assert_equal(B, B_)
xp_assert_equal(C, C_)
xp_assert_equal(D, D_)
def test_shapes(self, xp):
A, B, C, D = abcd_normalize(xp.asarray(self.A), xp.asarray(self.B),
xp.asarray([1, 0]), xp.asarray(0))
assert A.shape[0] == A.shape[1]
assert A.shape[0] == B.shape[0]
assert A.shape[0] == C.shape[1]
assert C.shape[0] == D.shape[0]
assert B.shape[1] == D.shape[1]
def test_zero_dimension_is_not_none1(self, xp):
A_ = xp.asarray(self.A)
B_ = xp.zeros((2, 0))
D_ = xp.zeros((0, 0))
A, B, C, D = abcd_normalize(A=A_, B=B_, D=D_)
xp_assert_equal(A, A_)
xp_assert_equal(B, B_)
xp_assert_equal(D, D_)
assert C.shape[0] == D_.shape[0]
assert C.shape[1] == A_.shape[0]
def test_zero_dimension_is_not_none2(self, xp):
A_ = xp.asarray(self.A)
B_ = xp.zeros((2, 0))
C_ = xp.zeros((0, 2))
A, B, C, D = abcd_normalize(A=A_, B=B_, C=C_)
xp_assert_equal(A, A_)
xp_assert_equal(B, B_)
xp_assert_equal(C, C_)
assert D.shape[0] == C_.shape[0]
assert D.shape[1] == B_.shape[1]
def test_missing_A(self, xp):
B_, C_, D_ = map(xp.asarray, (self.B, self.C, self.D))
A, B, C, D = abcd_normalize(B=B_, C=C_, D=D_)
assert A.shape[0] == A.shape[1]
assert A.shape[0] == B.shape[0]
assert A.shape == (B_.shape[0], B_.shape[0])
def test_missing_B(self, xp):
A_, C_, D_ = map(xp.asarray, (self.A, self.C, self.D))
A, B, C, D = abcd_normalize(A=A_, C=C_, D=D_)
assert B.shape[0] == A.shape[0]
assert B.shape[1] == D.shape[1]
assert B.shape == (A_.shape[0], D_.shape[1])
def test_missing_C(self, xp):
A_, B_, D_ = map(xp.asarray, (self.A, self.B, self.D))
A, B, C, D = abcd_normalize(A=A_, B=B_, D=D_)
assert C.shape[0] == D.shape[0]
assert C.shape[1] == A.shape[0]
assert C.shape == (D_.shape[0], A_.shape[0])
def test_missing_D(self, xp):
A_, B_, C_ = map(xp.asarray, (self.A, self.B, self.C))
A, B, C, D = abcd_normalize(A=A_, B=B_, C=C_)
assert D.shape[0] == C.shape[0]
assert D.shape[1] == B.shape[1]
assert D.shape == (C_.shape[0], B_.shape[1])
def test_missing_AB(self, xp):
C_, D_ = map(xp.asarray, (self.C, self.D))
A, B, C, D = abcd_normalize(C=C_, D=D_)
assert A.shape[0] == A.shape[1]
assert A.shape[0] == B.shape[0]
assert B.shape[1] == D.shape[1]
assert A.shape == (C_.shape[1], C_.shape[1])
assert B.shape == (C_.shape[1], D_.shape[1])
def test_missing_AC(self, xp):
B_, D_ = map(xp.asarray, (self.B, self.D))
A, B, C, D = abcd_normalize(B=B_, D=D_)
assert A.shape[0] == A.shape[1]
assert A.shape[0] == B.shape[0]
assert C.shape[0] == D.shape[0]
assert C.shape[1] == A.shape[0]
assert A.shape == (B_.shape[0], B_.shape[0])
assert C.shape == (D_.shape[0], B_.shape[0])
def test_missing_AD(self, xp):
B_, C_ = map(xp.asarray, (self.B, self.C))
A, B, C, D = abcd_normalize(B=B_, C=C_)
assert A.shape[0] == A.shape[1]
assert A.shape[0] == B.shape[0]
assert D.shape[0] == C.shape[0]
assert D.shape[1] == B.shape[1]
assert A.shape == (B_.shape[0], B_.shape[0])
assert D.shape == (C_.shape[0], B_.shape[1])
def test_missing_BC(self, xp):
A_, D_ = map(xp.asarray, (self.A, self.D))
A, B, C, D = abcd_normalize(A=A_, D=D_)
assert B.shape[0] == A.shape[0]
assert B.shape[1] == D.shape[1]
assert C.shape[0] == D.shape[0]
assert C.shape[1] == A.shape[0]
assert B.shape == (A_.shape[0], D_.shape[1])
assert C.shape == (D_.shape[0], A_.shape[0])
def test_missing_ABC_fails(self, xp):
assert_raises(ValueError, abcd_normalize, D=xp.asarray(self.D))
def test_missing_BD_fails(self, xp):
assert_raises(ValueError, abcd_normalize, A=xp.asarray(self.A),
C=xp.asarray(self.C))
def test_missing_CD_fails(self, xp):
assert_raises(ValueError, abcd_normalize, A=xp.asarray(self.A),
B=xp.asarray(self.B))
| Test_abcd_normalize |
python | python-openxml__python-docx | src/docx/shared.py | {
"start": 2885,
"end": 3170
} | class ____(Length):
"""Convenience constructor for length in twips, e.g. ``width = Twips(42)``.
A twip is a twentieth of a point, 635 EMU.
"""
def __new__(cls, twips: float):
emu = int(twips * Length._EMUS_PER_TWIP)
return Length.__new__(cls, emu)
| Twips |
python | milvus-io__pymilvus | pymilvus/exceptions.py | {
"start": 2691,
"end": 2796
} | class ____(MilvusException):
"""Raise when insert data isn't match with schema"""
| DataNotMatchException |
python | ethereum__web3.py | web3/utils/subscriptions.py | {
"start": 7145,
"end": 7876
} | class ____(EthSubscription[BlockData]):
def __init__(
self,
label: str | None = None,
handler: NewHeadsSubscriptionHandler | None = None,
handler_context: dict[str, Any] | None = None,
parallelize: bool | None = None,
) -> None:
super().__init__(
subscription_params=("newHeads",),
handler=handler,
handler_context=handler_context,
label=label,
parallelize=parallelize,
)
PendingTxSubscriptionContext = EthSubscriptionContext[
"PendingTxSubscription", Union[HexBytes, TxData]
]
PendingTxSubscriptionHandler = Callable[
[PendingTxSubscriptionContext], Coroutine[Any, Any, None]
]
| NewHeadsSubscription |
python | ray-project__ray | rllib/algorithms/dqn/dqn.py | {
"start": 2332,
"end": 24984
} | class ____(AlgorithmConfig):
r"""Defines a configuration class from which a DQN Algorithm can be built.
.. testcode::
from ray.rllib.algorithms.dqn.dqn import DQNConfig
config = (
DQNConfig()
.environment("CartPole-v1")
.training(replay_buffer_config={
"type": "PrioritizedEpisodeReplayBuffer",
"capacity": 60000,
"alpha": 0.5,
"beta": 0.5,
})
.env_runners(num_env_runners=1)
)
algo = config.build()
algo.train()
algo.stop()
.. testcode::
from ray.rllib.algorithms.dqn.dqn import DQNConfig
from ray import tune
config = (
DQNConfig()
.environment("CartPole-v1")
.training(
num_atoms=tune.grid_search([1,])
)
)
tune.Tuner(
"DQN",
run_config=tune.RunConfig(stop={"training_iteration":1}),
param_space=config,
).fit()
.. testoutput::
:hide:
...
"""
def __init__(self, algo_class=None):
"""Initializes a DQNConfig instance."""
self.exploration_config = {
"type": "EpsilonGreedy",
"initial_epsilon": 1.0,
"final_epsilon": 0.02,
"epsilon_timesteps": 10000,
}
super().__init__(algo_class=algo_class or DQN)
# Overrides of AlgorithmConfig defaults
# `env_runners()`
# Set to `self.n_step`, if 'auto'.
self.rollout_fragment_length: Union[int, str] = "auto"
# New stack uses `epsilon` as either a constant value or a scheduler
# defined like this.
# TODO (simon): Ensure that users can understand how to provide epsilon.
# (sven): Should we add this to `self.env_runners(epsilon=..)`?
self.epsilon = [(0, 1.0), (10000, 0.05)]
# `training()`
self.grad_clip = 40.0
# Note: Only when using enable_rl_module_and_learner=True can the clipping mode
# be configured by the user. On the old API stack, RLlib will always clip by
# global_norm, no matter the value of `grad_clip_by`.
self.grad_clip_by = "global_norm"
self.lr = 5e-4
self.train_batch_size = 32
# `evaluation()`
self.evaluation(evaluation_config=AlgorithmConfig.overrides(explore=False))
# `reporting()`
self.min_time_s_per_iteration = None
self.min_sample_timesteps_per_iteration = 1000
# DQN specific config settings.
# fmt: off
# __sphinx_doc_begin__
self.target_network_update_freq = 500
self.num_steps_sampled_before_learning_starts = 1000
self.store_buffer_in_checkpoints = False
self.adam_epsilon = 1e-8
self.tau = 1.0
self.num_atoms = 1
self.v_min = -10.0
self.v_max = 10.0
self.noisy = False
self.sigma0 = 0.5
self.dueling = True
self.hiddens = [256]
self.double_q = True
self.n_step = 1
self.before_learn_on_batch = None
self.training_intensity = None
self.td_error_loss_fn = "huber"
self.categorical_distribution_temperature = 1.0
# The burn-in for stateful `RLModule`s.
self.burn_in_len = 0
# Replay buffer configuration.
self.replay_buffer_config = {
"type": "PrioritizedEpisodeReplayBuffer",
# Size of the replay buffer. Note that if async_updates is set,
# then each worker will have a replay buffer of this size.
"capacity": 50000,
"alpha": 0.6,
# Beta parameter for sampling from prioritized replay buffer.
"beta": 0.4,
}
# fmt: on
# __sphinx_doc_end__
self.lr_schedule = None # @OldAPIStack
# Deprecated
self.buffer_size = DEPRECATED_VALUE
self.prioritized_replay = DEPRECATED_VALUE
self.learning_starts = DEPRECATED_VALUE
self.replay_batch_size = DEPRECATED_VALUE
# Can not use DEPRECATED_VALUE here because -1 is a common config value
self.replay_sequence_length = None
self.prioritized_replay_alpha = DEPRECATED_VALUE
self.prioritized_replay_beta = DEPRECATED_VALUE
self.prioritized_replay_eps = DEPRECATED_VALUE
@override(AlgorithmConfig)
def training(
self,
*,
target_network_update_freq: Optional[int] = NotProvided,
replay_buffer_config: Optional[dict] = NotProvided,
store_buffer_in_checkpoints: Optional[bool] = NotProvided,
lr_schedule: Optional[List[List[Union[int, float]]]] = NotProvided,
epsilon: Optional[LearningRateOrSchedule] = NotProvided,
adam_epsilon: Optional[float] = NotProvided,
grad_clip: Optional[int] = NotProvided,
num_steps_sampled_before_learning_starts: Optional[int] = NotProvided,
tau: Optional[float] = NotProvided,
num_atoms: Optional[int] = NotProvided,
v_min: Optional[float] = NotProvided,
v_max: Optional[float] = NotProvided,
noisy: Optional[bool] = NotProvided,
sigma0: Optional[float] = NotProvided,
dueling: Optional[bool] = NotProvided,
hiddens: Optional[int] = NotProvided,
double_q: Optional[bool] = NotProvided,
n_step: Optional[Union[int, Tuple[int, int]]] = NotProvided,
before_learn_on_batch: Callable[
[Type[MultiAgentBatch], List[Type[Policy]], Type[int]],
Type[MultiAgentBatch],
] = NotProvided,
training_intensity: Optional[float] = NotProvided,
td_error_loss_fn: Optional[str] = NotProvided,
categorical_distribution_temperature: Optional[float] = NotProvided,
burn_in_len: Optional[int] = NotProvided,
**kwargs,
) -> Self:
"""Sets the training related configuration.
Args:
target_network_update_freq: Update the target network every
`target_network_update_freq` sample steps.
replay_buffer_config: Replay buffer config.
Examples:
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentReplayBuffer",
"capacity": 50000,
"replay_sequence_length": 1,
}
- OR -
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentPrioritizedReplayBuffer",
"capacity": 50000,
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4,
"prioritized_replay_eps": 1e-6,
"replay_sequence_length": 1,
}
- Where -
prioritized_replay_alpha: Alpha parameter controls the degree of
prioritization in the buffer. In other words, when a buffer sample has
a higher temporal-difference error, with how much more probability
should it drawn to use to update the parametrized Q-network. 0.0
corresponds to uniform probability. Setting much above 1.0 may quickly
result as the sampling distribution could become heavily “pointy” with
low entropy.
prioritized_replay_beta: Beta parameter controls the degree of
importance sampling which suppresses the influence of gradient updates
from samples that have higher probability of being sampled via alpha
parameter and the temporal-difference error.
prioritized_replay_eps: Epsilon parameter sets the baseline probability
for sampling so that when the temporal-difference error of a sample is
zero, there is still a chance of drawing the sample.
store_buffer_in_checkpoints: Set this to True, if you want the contents of
your buffer(s) to be stored in any saved checkpoints as well.
Warnings will be created if:
- This is True AND restoring from a checkpoint that contains no buffer
data.
- This is False AND restoring from a checkpoint that does contain
buffer data.
epsilon: Epsilon exploration schedule. In the format of [[timestep, value],
[timestep, value], ...]. A schedule must start from
timestep 0.
adam_epsilon: Adam optimizer's epsilon hyper parameter.
grad_clip: If not None, clip gradients during optimization at this value.
num_steps_sampled_before_learning_starts: Number of timesteps to collect
from rollout workers before we start sampling from replay buffers for
learning. Whether we count this in agent steps or environment steps
depends on config.multi_agent(count_steps_by=..).
tau: Update the target by \tau * policy + (1-\tau) * target_policy.
num_atoms: Number of atoms for representing the distribution of return.
When this is greater than 1, distributional Q-learning is used.
v_min: Minimum value estimation
v_max: Maximum value estimation
noisy: Whether to use noisy network to aid exploration. This adds parametric
noise to the model weights.
sigma0: Control the initial parameter noise for noisy nets.
dueling: Whether to use dueling DQN.
hiddens: Dense-layer setup for each the advantage branch and the value
branch
double_q: Whether to use double DQN.
n_step: N-step target updates. If >1, sars' tuples in trajectories will be
postprocessed to become sa[discounted sum of R][s t+n] tuples. An
integer will be interpreted as a fixed n-step value. If a tuple of 2
ints is provided here, the n-step value will be drawn for each sample(!)
in the train batch from a uniform distribution over the closed interval
defined by `[n_step[0], n_step[1]]`.
before_learn_on_batch: Callback to run before learning on a multi-agent
batch of experiences.
training_intensity: The intensity with which to update the model (vs
collecting samples from the env).
If None, uses "natural" values of:
`train_batch_size` / (`rollout_fragment_length` x `num_env_runners` x
`num_envs_per_env_runner`).
If not None, will make sure that the ratio between timesteps inserted
into and sampled from the buffer matches the given values.
Example:
training_intensity=1000.0
train_batch_size=250
rollout_fragment_length=1
num_env_runners=1 (or 0)
num_envs_per_env_runner=1
-> natural value = 250 / 1 = 250.0
-> will make sure that replay+train op will be executed 4x asoften as
rollout+insert op (4 * 250 = 1000).
See: rllib/algorithms/dqn/dqn.py::calculate_rr_weights for further
details.
td_error_loss_fn: "huber" or "mse". loss function for calculating TD error
when num_atoms is 1. Note that if num_atoms is > 1, this parameter
is simply ignored, and softmax cross entropy loss will be used.
categorical_distribution_temperature: Set the temperature parameter used
by Categorical action distribution. A valid temperature is in the range
of [0, 1]. Note that this mostly affects evaluation since TD error uses
argmax for return calculation.
burn_in_len: The burn-in period for a stateful RLModule. It allows the
Learner to utilize the initial `burn_in_len` steps in a replay sequence
solely for unrolling the network and establishing a typical starting
state. The network is then updated on the remaining steps of the
sequence. This process helps mitigate issues stemming from a poor
initial state - zero or an outdated recorded state. Consider setting
this parameter to a positive integer if your stateful RLModule faces
convergence challenges or exhibits signs of catastrophic forgetting.
Returns:
This updated AlgorithmConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if target_network_update_freq is not NotProvided:
self.target_network_update_freq = target_network_update_freq
if replay_buffer_config is not NotProvided:
# Override entire `replay_buffer_config` if `type` key changes.
# Update, if `type` key remains the same or is not specified.
new_replay_buffer_config = deep_update(
{"replay_buffer_config": self.replay_buffer_config},
{"replay_buffer_config": replay_buffer_config},
False,
["replay_buffer_config"],
["replay_buffer_config"],
)
self.replay_buffer_config = new_replay_buffer_config["replay_buffer_config"]
if store_buffer_in_checkpoints is not NotProvided:
self.store_buffer_in_checkpoints = store_buffer_in_checkpoints
if lr_schedule is not NotProvided:
self.lr_schedule = lr_schedule
if epsilon is not NotProvided:
self.epsilon = epsilon
if adam_epsilon is not NotProvided:
self.adam_epsilon = adam_epsilon
if grad_clip is not NotProvided:
self.grad_clip = grad_clip
if num_steps_sampled_before_learning_starts is not NotProvided:
self.num_steps_sampled_before_learning_starts = (
num_steps_sampled_before_learning_starts
)
if tau is not NotProvided:
self.tau = tau
if num_atoms is not NotProvided:
self.num_atoms = num_atoms
if v_min is not NotProvided:
self.v_min = v_min
if v_max is not NotProvided:
self.v_max = v_max
if noisy is not NotProvided:
self.noisy = noisy
if sigma0 is not NotProvided:
self.sigma0 = sigma0
if dueling is not NotProvided:
self.dueling = dueling
if hiddens is not NotProvided:
self.hiddens = hiddens
if double_q is not NotProvided:
self.double_q = double_q
if n_step is not NotProvided:
self.n_step = n_step
if before_learn_on_batch is not NotProvided:
self.before_learn_on_batch = before_learn_on_batch
if training_intensity is not NotProvided:
self.training_intensity = training_intensity
if td_error_loss_fn is not NotProvided:
self.td_error_loss_fn = td_error_loss_fn
if categorical_distribution_temperature is not NotProvided:
self.categorical_distribution_temperature = (
categorical_distribution_temperature
)
if burn_in_len is not NotProvided:
self.burn_in_len = burn_in_len
return self
@override(AlgorithmConfig)
def validate(self) -> None:
# Call super's validation method.
super().validate()
if self.enable_rl_module_and_learner:
# `lr_schedule` checking.
if self.lr_schedule is not None:
self._value_error(
"`lr_schedule` is deprecated and must be None! Use the "
"`lr` setting to setup a schedule."
)
else:
if not self.in_evaluation:
validate_buffer_config(self)
# TODO (simon): Find a clean solution to deal with configuration configs
# when using the new API stack.
if self.exploration_config["type"] == "ParameterNoise":
if self.batch_mode != "complete_episodes":
self._value_error(
"ParameterNoise Exploration requires `batch_mode` to be "
"'complete_episodes'. Try setting `config.env_runners("
"batch_mode='complete_episodes')`."
)
if self.noisy:
self._value_error(
"ParameterNoise Exploration and `noisy` network cannot be"
" used at the same time!"
)
if self.td_error_loss_fn not in ["huber", "mse"]:
self._value_error("`td_error_loss_fn` must be 'huber' or 'mse'!")
# Check rollout_fragment_length to be compatible with n_step.
if (
not self.in_evaluation
and self.rollout_fragment_length != "auto"
and self.rollout_fragment_length < self.n_step
):
self._value_error(
f"Your `rollout_fragment_length` ({self.rollout_fragment_length}) is "
f"smaller than `n_step` ({self.n_step})! "
"Try setting config.env_runners(rollout_fragment_length="
f"{self.n_step})."
)
# Check, if the `max_seq_len` is longer then the burn-in.
if (
"max_seq_len" in self.model_config
and 0 < self.model_config["max_seq_len"] <= self.burn_in_len
):
raise ValueError(
f"Your defined `burn_in_len`={self.burn_in_len} is larger or equal "
f"`max_seq_len`={self.model_config['max_seq_len']}! Either decrease "
"the `burn_in_len` or increase your `max_seq_len`."
)
# Validate that we use the corresponding `EpisodeReplayBuffer` when using
# episodes.
# TODO (sven, simon): Implement the multi-agent case for replay buffers.
from ray.rllib.utils.replay_buffers.episode_replay_buffer import (
EpisodeReplayBuffer,
)
if (
self.enable_env_runner_and_connector_v2
and not isinstance(self.replay_buffer_config["type"], str)
and not issubclass(self.replay_buffer_config["type"], EpisodeReplayBuffer)
):
self._value_error(
"When using the new `EnvRunner API` the replay buffer must be of type "
"`EpisodeReplayBuffer`."
)
elif not self.enable_env_runner_and_connector_v2 and (
(
isinstance(self.replay_buffer_config["type"], str)
and "Episode" in self.replay_buffer_config["type"]
)
or issubclass(self.replay_buffer_config["type"], EpisodeReplayBuffer)
):
self._value_error(
"When using the old API stack the replay buffer must not be of type "
"`EpisodeReplayBuffer`! We suggest you use the following config to run "
"DQN on the old API stack: `config.training(replay_buffer_config={"
"'type': 'MultiAgentPrioritizedReplayBuffer', "
"'prioritized_replay_alpha': [alpha], "
"'prioritized_replay_beta': [beta], "
"'prioritized_replay_eps': [eps], "
"})`."
)
@override(AlgorithmConfig)
def get_rollout_fragment_length(self, worker_index: int = 0) -> int:
if self.rollout_fragment_length == "auto":
return (
self.n_step[1]
if isinstance(self.n_step, (tuple, list))
else self.n_step
)
else:
return self.rollout_fragment_length
@override(AlgorithmConfig)
def get_default_rl_module_spec(self) -> RLModuleSpecType:
if self.framework_str == "torch":
from ray.rllib.algorithms.dqn.torch.default_dqn_torch_rl_module import (
DefaultDQNTorchRLModule,
)
return RLModuleSpec(
module_class=DefaultDQNTorchRLModule,
model_config=self.model_config,
)
else:
raise ValueError(
f"The framework {self.framework_str} is not supported! "
"Use `config.framework('torch')` instead."
)
@property
@override(AlgorithmConfig)
def _model_config_auto_includes(self) -> Dict[str, Any]:
return super()._model_config_auto_includes | {
"double_q": self.double_q,
"dueling": self.dueling,
"epsilon": self.epsilon,
"num_atoms": self.num_atoms,
"std_init": self.sigma0,
"v_max": self.v_max,
"v_min": self.v_min,
}
@override(AlgorithmConfig)
def get_default_learner_class(self) -> Union[Type["Learner"], str]:
if self.framework_str == "torch":
from ray.rllib.algorithms.dqn.torch.dqn_torch_learner import (
DQNTorchLearner,
)
return DQNTorchLearner
else:
raise ValueError(
f"The framework {self.framework_str} is not supported! "
"Use `config.framework('torch')` instead."
)
def calculate_rr_weights(config: AlgorithmConfig) -> List[float]:
"""Calculate the round robin weights for the rollout and train steps"""
if not config.training_intensity:
return [1, 1]
# Calculate the "native ratio" as:
# [train-batch-size] / [size of env-rolled-out sampled data]
# This is to set freshly rollout-collected data in relation to
# the data we pull from the replay buffer (which also contains old
# samples).
native_ratio = config.total_train_batch_size / (
config.get_rollout_fragment_length()
* config.num_envs_per_env_runner
# Add one to workers because the local
# worker usually collects experiences as well, and we avoid division by zero.
* max(config.num_env_runners + 1, 1)
)
# Training intensity is specified in terms of
# (steps_replayed / steps_sampled), so adjust for the native ratio.
sample_and_train_weight = config.training_intensity / native_ratio
if sample_and_train_weight < 1:
return [int(np.round(1 / sample_and_train_weight)), 1]
else:
return [1, int(np.round(sample_and_train_weight))]
| DQNConfig |
python | getsentry__sentry-python | sentry_sdk/_init_implementation.py | {
"start": 173,
"end": 2559
} | class ____:
_CONTEXT_MANAGER_DEPRECATION_WARNING_MESSAGE = (
"Using the return value of sentry_sdk.init as a context manager "
"and manually calling the __enter__ and __exit__ methods on the "
"return value are deprecated. We are no longer maintaining this "
"functionality, and we will remove it in the next major release."
)
def __init__(self, client):
# type: (sentry_sdk.Client) -> None
self._client = client
def __enter__(self):
# type: () -> _InitGuard
warnings.warn(
self._CONTEXT_MANAGER_DEPRECATION_WARNING_MESSAGE,
stacklevel=2,
category=DeprecationWarning,
)
return self
def __exit__(self, exc_type, exc_value, tb):
# type: (Any, Any, Any) -> None
warnings.warn(
self._CONTEXT_MANAGER_DEPRECATION_WARNING_MESSAGE,
stacklevel=2,
category=DeprecationWarning,
)
c = self._client
if c is not None:
c.close()
def _check_python_deprecations():
# type: () -> None
# Since we're likely to deprecate Python versions in the future, I'm keeping
# this handy function around. Use this to detect the Python version used and
# to output logger.warning()s if it's deprecated.
pass
def _init(*args, **kwargs):
# type: (*Optional[str], **Any) -> ContextManager[Any]
"""Initializes the SDK and optionally integrations.
This takes the same arguments as the client constructor.
"""
client = sentry_sdk.Client(*args, **kwargs)
sentry_sdk.get_global_scope().set_client(client)
_check_python_deprecations()
rv = _InitGuard(client)
return rv
if TYPE_CHECKING:
# Make mypy, PyCharm and other static analyzers think `init` is a type to
# have nicer autocompletion for params.
#
# Use `ClientConstructor` to define the argument types of `init` and
# `ContextManager[Any]` to tell static analyzers about the return type.
class init(sentry_sdk.consts.ClientConstructor, _InitGuard): # noqa: N801
pass
else:
# Alias `init` for actual usage. Go through the lambda indirection to throw
# PyCharm off of the weakly typed signature (it would otherwise discover
# both the weakly typed signature of `_init` and our faked `init` type).
init = (lambda: _init)()
| _InitGuard |
python | kamyu104__LeetCode-Solutions | Python/minimum-absolute-difference-in-sliding-submatrix.py | {
"start": 1353,
"end": 1915
} | class ____(object):
def minAbsDiff(self, grid, k):
"""
:type grid: List[List[int]]
:type k: int
:rtype: List[List[int]]
"""
result = [[-1]*(len(grid[0])-(k-1)) for _ in xrange(len(grid)-(k-1))]
for i in xrange(len(grid)-(k-1)):
for j in xrange(len(grid[0])-(k-1)):
vals = sorted({grid[i+di][j+dj] for di in xrange(k) for dj in xrange(k)})
result[i][j] = min(vals[x+1]-vals[x] for x in xrange(len(vals)-1)) if len(vals) != 1 else 0
return result
| Solution2 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass7.py | {
"start": 763,
"end": 885
} | class ____(type):
def __call__(cls, *args, **kwargs) -> Any:
return super().__call__(*args, **kwargs)
| MetaClass3 |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 29848,
"end": 32605
} | class ____:
def _create_array(self):
return np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self._create_array()
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self._create_array()
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self._create_array()
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self._create_array()
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self._create_array()
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10)
def test_invalid_newaxis(self):
a = self._create_array()
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,) * 70)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1, :] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
| TestScalarIndexing |
python | huggingface__transformers | src/transformers/models/unispeech_sat/modeling_unispeech_sat.py | {
"start": 65253,
"end": 66873
} | class ____(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
self.out_conv_dim = config.tdnn_dim[layer_id]
self.kernel_size = config.tdnn_kernel[layer_id]
self.dilation = config.tdnn_dilation[layer_id]
self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
self.activation = nn.ReLU()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if is_peft_available():
from peft.tuners.lora import LoraLayer
if is_peft_available():
if isinstance(self.kernel, LoraLayer):
warnings.warn(
"Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
"You should exclude TDNNLayer from LoRA's target modules.",
)
# for backward compatibility, we keep nn.Linear but call F.conv1d for speed up
hidden_states = hidden_states.transpose(1, 2)
weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.activation(hidden_states)
return hidden_states
@auto_docstring(
custom_intro="""
UniSpeechSat Model with an XVector feature extraction head on top for tasks like Speaker Verification.
"""
)
| TDNNLayer |
python | getsentry__sentry | src/sentry/notifications/api/serializers/notification_action_response.py | {
"start": 432,
"end": 2738
} | class ____(Serializer):
"""
Model serializer for outgoing NotificationAction API payloads
"""
def get_attrs(self, item_list: Sequence[NotificationAction], user, **kwargs):
action_ids = {i.id for i in item_list}
projects_by_action_id = manytoone_to_dict(
NotificationActionProject.objects.filter(action_id__in=action_ids),
"action_id",
)
valid_triggers: dict[int, str] = dict(NotificationAction.get_trigger_types())
return {
item: {
"trigger_type": valid_triggers[item.trigger_type],
"projects": [p.project_id for p in projects_by_action_id[item.id]],
}
for item in item_list
}
def serialize(self, obj: NotificationAction, attrs, user, **kwargs) -> dict[str, Any]:
return {
"id": obj.id,
"organizationId": obj.organization_id,
"integrationId": obj.integration_id,
"sentryAppId": obj.sentry_app_id,
"projects": attrs["projects"],
"serviceType": ActionService.get_name(obj.service_type),
"triggerType": attrs["trigger_type"],
"targetType": ActionTarget.get_name(obj.target_type),
"targetIdentifier": obj.target_identifier,
"targetDisplay": obj.target_display,
}
@classmethod
def get_example(cls, **action_kwargs):
"""
Create example serialized response for documentation.
Any kwargs will be applied to the NotificationAction.
"""
action = NotificationAction(
**{
"id": 27,
"organization_id": 721,
"integration_id": 916,
"type": ActionService.SLACK.value,
"trigger_type": ActionTrigger.AUDIT_LOG.value,
"target_type": ActionTarget.SPECIFIC.value,
"target_identifier": "C0123S456AL",
"target_display": "#sentry-audit-log",
**action_kwargs,
}
)
attrs = {
"projects": [503, 1209],
"trigger_type": ActionTrigger.get_name(action.trigger_type),
}
return cls().serialize(action, attrs=attrs, user=AnonymousUser())
| OutgoingNotificationActionSerializer |
python | gevent__gevent | src/gevent/_ident.py | {
"start": 541,
"end": 2249
} | class ____(object):
"""
Maintains a unique mapping of (small) non-negative integer identifiers
to objects that can be weakly referenced.
It is guaranteed that no two objects will have the the same
identifier at the same time, as long as those objects are
also uniquely hashable.
"""
def __init__(self):
# {obj -> (ident, wref(obj))}
self._registry = WeakKeyDictionary()
# A heap of numbers that have been used and returned
self._available_idents = []
def get_ident(self, obj):
"""
Retrieve the identifier for *obj*, creating one
if necessary.
"""
try:
return self._registry[obj][0]
except KeyError:
pass
if self._available_idents:
# Take the smallest free number
ident = heappop(self._available_idents)
else:
# Allocate a bigger one
ident = len(self._registry)
vref = ValuedWeakRef(obj, self._return_ident)
vref.value = ident # pylint:disable=assigning-non-slot,attribute-defined-outside-init
self._registry[obj] = (ident, vref)
return ident
def _return_ident(self, vref):
# By the time this is called, self._registry has been
# updated
if heappush is not None:
# Under some circumstances we can get called
# when the interpreter is shutting down, and globals
# aren't available any more.
heappush(self._available_idents, vref.value)
def __len__(self):
return len(self._registry)
from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__ident')
| IdentRegistry |
python | squidfunk__mkdocs-material | material/plugins/projects/structure/__init__.py | {
"start": 10260,
"end": 10548
} | class ____:
# Initialize project job
def __init__(self, project: Project, dependencies: list[Project]):
self.project = project
self.dependencies = dependencies
# -----------------------------------------------------------------------------
# Project link
| ProjectJob |
python | rapidsai__cudf | python/cudf/cudf/core/join/_join_helpers.py | {
"start": 773,
"end": 1140
} | class ____:
# Indexer into a column (either a data column or index level).
#
# >>> df
# a
# b
# 4 1
# 5 2
# 6 3
# >>> _Indexer("a", column=True).get(df) # returns column "a" of df
# >>> _Indexer("b", index=True).get(df) # returns index level "b" of df
def __init__(self, name: Any):
self.name = name
| _Indexer |
python | mlflow__mlflow | mlflow/gateway/schemas/chat.py | {
"start": 3805,
"end": 3957
} | class ____(ChatCompletionChunk, ResponseModel):
model_config = ConfigDict(json_schema_extra=_STREAM_RESPONSE_PAYLOAD_EXTRA_SCHEMA)
| StreamResponsePayload |
python | ethereum__web3.py | web3/geth.py | {
"start": 4215,
"end": 6465
} | class ____(Module):
"""
https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-admin
"""
is_async = True
_add_peer: Method[Callable[[EnodeURI], Awaitable[bool]]] = Method(
RPC.admin_addPeer,
mungers=[default_root_munger],
)
async def add_peer(self, node_url: EnodeURI) -> bool:
return await self._add_peer(node_url)
_datadir: Method[Callable[[], Awaitable[str]]] = Method(
RPC.admin_datadir,
is_property=True,
)
async def datadir(self) -> str:
return await self._datadir()
_node_info: Method[Callable[[], Awaitable[NodeInfo]]] = Method(
RPC.admin_nodeInfo,
is_property=True,
)
async def node_info(self) -> NodeInfo:
return await self._node_info()
_peers: Method[Callable[[], Awaitable[list[Peer]]]] = Method(
RPC.admin_peers,
is_property=True,
)
async def peers(self) -> list[Peer]:
return await self._peers()
# start_http and stop_http
_start_http: Method[Callable[[str, int, str, str], Awaitable[bool]]] = Method(
RPC.admin_startHTTP,
mungers=[admin_start_params_munger],
)
_stop_http: Method[Callable[[], Awaitable[bool]]] = Method(
RPC.admin_stopHTTP,
is_property=True,
)
async def start_http(
self,
host: str = "localhost",
port: int = 8546,
cors: str = "",
apis: str = "eth,net,web3",
) -> bool:
return await self._start_http(host, port, cors, apis)
async def stop_http(self) -> bool:
return await self._stop_http()
# start_ws and stop_ws
_start_ws: Method[Callable[[str, int, str, str], Awaitable[bool]]] = Method(
RPC.admin_startWS,
mungers=[admin_start_params_munger],
)
_stop_ws: Method[Callable[[], Awaitable[bool]]] = Method(
RPC.admin_stopWS,
is_property=True,
)
async def start_ws(
self,
host: str = "localhost",
port: int = 8546,
cors: str = "",
apis: str = "eth,net,web3",
) -> bool:
return await self._start_ws(host, port, cors, apis)
async def stop_ws(self) -> bool:
return await self._stop_ws()
| AsyncGethAdmin |
python | Lightning-AI__lightning | examples/pytorch/basics/autoencoder.py | {
"start": 4176,
"end": 5659
} | class ____(LightningModule):
"""
>>> LitAutoEncoder() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
LitAutoEncoder(
(encoder): ...
(decoder): ...
)
"""
def __init__(self, hidden_dim: int = 64, learning_rate=10e-3):
super().__init__()
self.save_hyperparameters()
self.encoder = nn.Sequential(nn.Linear(28 * 28, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, 3))
self.decoder = nn.Sequential(nn.Linear(3, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, 28 * 28))
def forward(self, x):
z = self.encoder(x)
return self.decoder(z)
def training_step(self, batch, batch_idx):
return self._common_step(batch, batch_idx, "train")
def validation_step(self, batch, batch_idx):
self._common_step(batch, batch_idx, "val")
def test_step(self, batch, batch_idx):
self._common_step(batch, batch_idx, "test")
def predict_step(self, batch, batch_idx, dataloader_idx=None):
x = self._prepare_batch(batch)
return self(x)
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
def _prepare_batch(self, batch):
x, _ = batch
return x.view(x.size(0), -1)
def _common_step(self, batch, batch_idx, stage: str):
x = self._prepare_batch(batch)
loss = F.mse_loss(x, self(x))
self.log(f"{stage}_loss", loss, on_step=True)
return loss
| LitAutoEncoder |
python | PyCQA__pylint | tests/functional/n/no/no_warning_docstring.py | {
"start": 321,
"end": 521
} | class ____(AAAA):
''' class BBBB '''
def __init__(self):
AAAA.__init__(self)
# should ignore docstring calling from class AAAA
def method1(self):
AAAA.method1(self)
| BBBB |
python | kamyu104__LeetCode-Solutions | Python/check-if-numbers-are-ascending-in-a-sentence.py | {
"start": 29,
"end": 563
} | class ____(object):
def areNumbersAscending(self, s):
"""
:type s: str
:rtype: bool
"""
prev = curr = -1
for i, c in enumerate(s):
if c.isdigit():
curr = max(curr, 0)*10+int(c)
continue
if prev != -1 and curr != -1 and prev >= curr:
return False
if curr != -1:
prev = curr
curr = -1
return curr == -1 or prev < curr
# Time: O(n)
# Space: O(n)
| Solution |
python | ApeWorX__ape | tests/functional/test_dependencies.py | {
"start": 28413,
"end": 31911
} | class ____:
@pytest.fixture
def api(self):
return LocalDependency(local=Path.cwd(), name="ooga", version="1.0.0")
@pytest.fixture
def dependency(self, api, project):
return Dependency(api, project)
def test_repr(self, dependency):
actual = repr(dependency)
path = str(Path.cwd()).replace(str(Path.home()), "$HOME")
expected = f"<LocalDependency local={path}, version=1.0.0>"
assert actual == expected
def test_project_path(self, dependency, data_folder):
actual = dependency.project_path
name = dependency.api.package_id.replace("/", "_")
expected = data_folder / "packages" / "projects" / name / "1_0_0"
assert actual == expected
def test_api_path(self, dependency, data_folder):
actual = dependency.api_path
name = dependency.api.package_id.replace("/", "_")
expected = data_folder / "packages" / "api" / name / "1_0_0.json"
assert actual == expected
def test_manifest_path(self, dependency, data_folder):
actual = dependency.manifest_path
name = dependency.api.package_id.replace("/", "_")
expected = data_folder / "packages" / "manifests" / name / "1_0_0.json"
assert actual == expected
def test_installed(self, dependency):
dependency.uninstall()
assert not dependency.installed
dependency.install()
assert dependency.installed
def test_installed_version_id_fails(self, project):
api = PythonDependency(
site_package="apethisdependencyisnotinstalled",
name="apethisdependencyisnotinstalled",
)
dependency = Dependency(api, project)
assert not dependency.installed
def test_compile(self, project):
with create_tempdir() as path:
api = LocalDependency(local=path, name="ooga", version="1.0.0")
dependency = Dependency(api, project)
contract_path = dependency.project.contracts_folder / "CCC.json"
contract_path.parent.mkdir(exist_ok=True, parents=True)
contract_path.write_text(
'[{"name":"foo","type":"fallback", "stateMutability":"nonpayable"}]',
encoding="utf8",
)
# Since we are adding a file mid-session, we have to refresh so
# it's picked up. Users typically don't have to do this.
dependency.project.refresh_sources()
result = dependency.compile()
assert len(result) == 1
assert result["CCC"].name == "CCC"
@skip_if_plugin_installed("vyper", "solidity")
def test_compile_missing_compilers(self, project, ape_caplog):
with create_tempdir() as path:
api = LocalDependency(local=path, name="ooga2", version="1.1.0")
dependency = Dependency(api, project)
sol_path = dependency.project.contracts_folder / "Sol.sol"
sol_path.parent.mkdir(exist_ok=True, parents=True)
sol_path.write_text("// Sol", encoding="utf8")
vy_path = dependency.project.contracts_folder / "Vy.vy"
vy_path.write_text("# Vy", encoding="utf8")
expected = (
"Compiling dependency produced no contract types. "
"Try installing 'ape-solidity' or 'ape-vyper'."
)
result = dependency.compile()
assert len(result) == 0
assert expected in ape_caplog.head
| TestDependency |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 71873,
"end": 75579
} | class ____(StatNode):
# name string or None
# cname string or None
# scoped boolean Is a C++ scoped enum
# underlying_type CSimpleBaseTypeNode The underlying value type (int or C++ type)
# items [CEnumDefItemNode]
# typedef_flag boolean
# visibility "public" or "private" or "extern"
# api boolean
# in_pxd boolean
# create_wrapper boolean
# entry Entry
# doc EncodedString or None Doc string
child_attrs = ["items", "underlying_type"]
doc = None
def declare(self, env):
doc = None
if Options.docstrings:
doc = embed_position(self.pos, self.doc)
self.entry = env.declare_enum(
self.name, self.pos,
cname=self.cname,
scoped=self.scoped,
typedef_flag=self.typedef_flag,
visibility=self.visibility, api=self.api,
create_wrapper=self.create_wrapper, doc=doc)
def analyse_declarations(self, env):
scope = None
underlying_type = self.underlying_type.analyse(env)
if not underlying_type.is_int:
error(self.underlying_type.pos, "underlying type is not an integral type")
self.entry.type.underlying_type = underlying_type
if self.scoped and self.items is not None:
scope = CppScopedEnumScope(self.name, env)
scope.type = self.entry.type
scope.directives = env.directives
else:
scope = env
if self.items is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
# For extern enums, we can't reason about their equivalent int values because
# we don't know if their definition is complete.
is_declared_enum = self.visibility != 'extern'
next_int_enum_value = 0 if is_declared_enum else None
for item in self.items:
item.analyse_enum_declarations(scope, self.entry, next_int_enum_value)
if is_declared_enum:
next_int_enum_value = 1 + (
item.entry.enum_int_value if item.entry.enum_int_value is not None else next_int_enum_value)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.scoped:
# Nothing to do here for C++ enums.
return
if not self.api and not (self.name or self.visibility == 'public'):
# API enums need to be globally importable and we (currently) do that through global item names.
# Named enums are namespaced and need no additional global setup.
return
# Copy the values of anonymous cpdef/api enums into the global Python module namespace.
code.mark_pos(self.pos)
temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
for item in self.entry.enum_values:
code.putln("%s = PyLong_FromLong(%s); %s" % (
temp,
item.cname,
code.error_goto_if_null(temp, item.pos)))
code.put_gotref(temp, PyrexTypes.py_object_type)
code.putln('if (PyDict_SetItemString(%s, %s, %s) < 0) %s' % (
code.name_in_module_state(Naming.moddict_cname),
item.name.as_c_string_literal(),
temp,
code.error_goto(item.pos)))
code.put_decref_clear(temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(temp)
| CEnumDefNode |
python | PyCQA__pylint | tests/functional/s/super/super_init_not_called.py | {
"start": 2071,
"end": 2277
} | class ____(AbstractBase):
def __init__(self, param: int) -> None: # [super-init-not-called]
print("Called")
def abstract_method(self) -> str:
return "Implemented"
| DerivedFromAbstract |
python | plotly__plotly.py | plotly/graph_objs/scattermapbox/_legendgrouptitle.py | {
"start": 233,
"end": 2982
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermapbox"
_path_str = "scattermapbox.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermapbox.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scattermapbox.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattermapbox.
Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermapbox.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | modin-project__modin | modin/config/envvars.py | {
"start": 33440,
"end": 33875
} | class ____(EnvironmentVariable, type=bool):
"""Whether serialization should be persistent."""
varname = "MODIN_PERSISTENT_PICKLE"
# When set to off, it allows faster serialization which is only
# valid in current run (i.e. useless for saving to disk).
# When set to on, Modin objects could be saved to disk and loaded
# but serialization/deserialization could take more time.
default = False
| PersistentPickle |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/bedrock/_beta_messages.py | {
"start": 1376,
"end": 2287
} | class ____(AsyncAPIResource):
create = FirstPartyAsyncMessagesAPI.create
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return the
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return AsyncMessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return AsyncMessagesWithStreamingResponse(self)
| AsyncMessages |
python | mlflow__mlflow | docs/api_reference/source/testcode_block.py | {
"start": 1818,
"end": 3667
} | class ____(CodeBlock):
"""
Overrides the `code-block` directive to dump code blocks marked with the `:test:` option
to files for testing.
```
.. code-block:: python
:test:
print("Hello, world!")
```
"""
option_spec = {**CodeBlock.option_spec, "test": directives.flag}
def _dump_code_block(self):
docs_dir = Path.cwd()
repo_root = docs_dir.parent.parent
directory = docs_dir.joinpath(".examples")
directory.mkdir(exist_ok=True)
source, lineno_in_docstring = self.get_source_info()
obj_path = source.split(":docstring of ")[1]
code_block_location = get_code_block_location(obj_path, lineno_in_docstring, repo_root)
name = re.sub(r"[\._]+", "_", obj_path).strip("")
filename = f"test_{name}_{lineno_in_docstring}.py"
content = textwrap.indent("\n".join(self.content), " " * 4)
code = "\n".join(
[
f"# Location: {code_block_location}",
"import pytest",
"",
"",
# Show the code block location in the test report.
f"@pytest.mark.parametrize('_', [' {code_block_location} '])",
"def test(_):",
content,
"",
"",
'if __name__ == "__main__":',
" test()",
"",
]
)
directory.joinpath(filename).write_text(code)
def run(self):
if "test" in self.options:
self._dump_code_block()
return super().run()
def setup(app):
app.add_directive("code-block", TestCodeBlockDirective, override=True)
return {
"version": "builtin",
"parallel_read_safe": False,
"parallel_write_safe": False,
}
| TestCodeBlockDirective |
python | getsentry__sentry | tests/sentry/integrations/gitlab/tasks/test_pr_comment.py | {
"start": 7586,
"end": 11578
} | class ____(SnubaTestCase, GitlabCommentTestCase):
def test_simple(self) -> None:
group1 = [
self.store_event(
{"fingerprint": ["group-1"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(3)
][0].group.id
group2 = [
self.store_event(
{"fingerprint": ["group-2"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(6)
][0].group.id
group3 = [
self.store_event(
{"fingerprint": ["group-3"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(4)
][0].group.id
res = self.pr_comment_workflow.get_top_5_issues_by_count(
[group1, group2, group3], self.project
)
assert [issue["group_id"] for issue in res] == [group2, group3, group1]
def test_over_5_issues(self) -> None:
issue_ids = [
self.store_event(
{"fingerprint": [f"group-{idx}"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
).group.id
for idx in range(6)
]
res = self.pr_comment_workflow.get_top_5_issues_by_count(issue_ids, self.project)
assert len(res) == 5
def test_ignore_info_level_issues(self) -> None:
group1 = [
self.store_event(
{
"fingerprint": ["group-1"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.INFO,
},
project_id=self.project.id,
)
for _ in range(3)
][0].group.id
group2 = [
self.store_event(
{"fingerprint": ["group-2"], "timestamp": before_now(days=1).isoformat()},
project_id=self.project.id,
)
for _ in range(6)
][0].group.id
group3 = [
self.store_event(
{
"fingerprint": ["group-3"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.INFO,
},
project_id=self.project.id,
)
for _ in range(4)
][0].group.id
res = self.pr_comment_workflow.get_top_5_issues_by_count(
[group1, group2, group3], self.project
)
assert [issue["group_id"] for issue in res] == [group2]
def test_do_not_ignore_other_issues(self) -> None:
group1 = [
self.store_event(
{
"fingerprint": ["group-1"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.ERROR,
},
project_id=self.project.id,
)
for _ in range(3)
][0].group.id
group2 = [
self.store_event(
{
"fingerprint": ["group-2"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.INFO,
},
project_id=self.project.id,
)
for _ in range(6)
][0].group.id
group3 = [
self.store_event(
{
"fingerprint": ["group-3"],
"timestamp": before_now(days=1).isoformat(),
"level": logging.DEBUG,
},
project_id=self.project.id,
)
for _ in range(4)
][0].group.id
res = self.pr_comment_workflow.get_top_5_issues_by_count(
[group1, group2, group3], self.project
)
assert [issue["group_id"] for issue in res] == [group3, group1]
| TestTop5IssuesByCount |
python | python-openxml__python-docx | tests/dml/test_color.py | {
"start": 379,
"end": 4897
} | class ____:
"""Unit-test suite for `docx.dml.color.ColorFormat` objects."""
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:color{w:val=auto}", MSO_COLOR_TYPE.AUTO),
("w:r/w:rPr/w:color{w:val=4224FF}", MSO_COLOR_TYPE.RGB),
("w:r/w:rPr/w:color{w:themeColor=dark1}", MSO_COLOR_TYPE.THEME),
(
"w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=accent1}",
MSO_COLOR_TYPE.THEME,
),
],
)
def it_knows_its_color_type(self, r_cxml: str, expected_value: MSO_COLOR_TYPE | None):
assert ColorFormat(cast(CT_R, element(r_cxml))).type == expected_value
@pytest.mark.parametrize(
("r_cxml", "rgb"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:color{w:val=auto}", None),
("w:r/w:rPr/w:color{w:val=4224FF}", "4224ff"),
("w:r/w:rPr/w:color{w:val=auto,w:themeColor=accent1}", None),
("w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=accent1}", "f00ba9"),
],
)
def it_knows_its_RGB_value(self, r_cxml: str, rgb: str | None):
expected_value = RGBColor.from_string(rgb) if rgb else None
assert ColorFormat(cast(CT_R, element(r_cxml))).rgb == expected_value
@pytest.mark.parametrize(
("r_cxml", "new_value", "expected_cxml"),
[
("w:r", RGBColor(10, 20, 30), "w:r/w:rPr/w:color{w:val=0A141E}"),
("w:r/w:rPr", RGBColor(1, 2, 3), "w:r/w:rPr/w:color{w:val=010203}"),
(
"w:r/w:rPr/w:color{w:val=123abc}",
RGBColor(42, 24, 99),
"w:r/w:rPr/w:color{w:val=2A1863}",
),
(
"w:r/w:rPr/w:color{w:val=auto}",
RGBColor(16, 17, 18),
"w:r/w:rPr/w:color{w:val=101112}",
),
(
"w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}",
RGBColor(24, 42, 99),
"w:r/w:rPr/w:color{w:val=182A63}",
),
("w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}", None, "w:r/w:rPr"),
("w:r", None, "w:r"),
],
)
def it_can_change_its_RGB_value(
self, r_cxml: str, new_value: RGBColor | None, expected_cxml: str
):
color_format = ColorFormat(cast(CT_R, element(r_cxml)))
color_format.rgb = new_value
assert color_format._element.xml == xml(expected_cxml)
@pytest.mark.parametrize(
("r_cxml", "expected_value"),
[
("w:r", None),
("w:r/w:rPr", None),
("w:r/w:rPr/w:color{w:val=auto}", None),
("w:r/w:rPr/w:color{w:val=4224FF}", None),
("w:r/w:rPr/w:color{w:themeColor=accent1}", MSO_THEME_COLOR.ACCENT_1),
("w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=dark1}", MSO_THEME_COLOR.DARK_1),
],
)
def it_knows_its_theme_color(self, r_cxml: str, expected_value: MSO_THEME_COLOR | None):
color_format = ColorFormat(cast(CT_R, element(r_cxml)))
assert color_format.theme_color == expected_value
@pytest.mark.parametrize(
("r_cxml", "new_value", "expected_cxml"),
[
(
"w:r",
MSO_THEME_COLOR.ACCENT_1,
"w:r/w:rPr/w:color{w:val=000000,w:themeColor=accent1}",
),
(
"w:r/w:rPr",
MSO_THEME_COLOR.ACCENT_2,
"w:r/w:rPr/w:color{w:val=000000,w:themeColor=accent2}",
),
(
"w:r/w:rPr/w:color{w:val=101112}",
MSO_THEME_COLOR.ACCENT_3,
"w:r/w:rPr/w:color{w:val=101112,w:themeColor=accent3}",
),
(
"w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}",
MSO_THEME_COLOR.LIGHT_2,
"w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=light2}",
),
("w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}", None, "w:r/w:rPr"),
("w:r", None, "w:r"),
],
)
def it_can_change_its_theme_color(
self, r_cxml: str, new_value: MSO_THEME_COLOR | None, expected_cxml: str
):
color_format = ColorFormat(cast(CT_R, element(r_cxml)))
color_format.theme_color = new_value
assert color_format._element.xml == xml(expected_cxml)
| DescribeColorFormat |
python | bokeh__bokeh | src/bokeh/models/widgets/buttons.py | {
"start": 5328,
"end": 6459
} | class ____(AbstractButton):
''' A dropdown button.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
label = Override(default="Dropdown")
split = Bool(default=False, help="""
""")
menu = List(Nullable(Either(String, Tuple(String, Either(String, Instance(Callback))))), help="""
Button's dropdown menu consisting of entries containing item's text and
value name. Use ``None`` as a menu separator.
""")
def on_click(self, handler: EventCallback) -> None:
''' Set up a handler for button or menu item clicks.
Args:
handler (func) : handler function to call when button is activated.
Returns:
None
'''
self.on_event(ButtonClick, handler)
self.on_event(MenuItemClick, handler)
def js_on_click(self, handler: Callback) -> None:
''' Set up a JavaScript handler for button or menu item clicks. '''
self.js_on_event(ButtonClick, handler)
self.js_on_event(MenuItemClick, handler)
| Dropdown |
python | walkccc__LeetCode | solutions/1766. Tree of Coprimes/1766.py | {
"start": 0,
"end": 865
} | class ____:
def getCoprimes(self, nums: list[int], edges: list[list[int]]) -> list[int]:
MAX = 50
ans = [-1] * len(nums)
tree = [[] for _ in range(len(nums))]
# stacks[i] := (node, depth)s of nodes with value i
stacks = [[] for _ in range(MAX + 1)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
def getAncestor(u: int) -> int:
maxNode = -1
maxDepth = -1
for i, stack in enumerate(stacks):
if stack and stack[-1][1] > maxDepth and math.gcd(nums[u], i) == 1:
maxNode, maxDepth = stack[-1]
return maxNode
def dfs(u: int, prev: int, depth: int) -> int:
ans[u] = getAncestor(u)
stacks[nums[u]].append((u, depth))
for v in tree[u]:
if v != prev:
dfs(v, u, depth + 1)
stacks[nums[u]].pop()
dfs(0, -1, 0)
return ans
| Solution |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 89321,
"end": 90053
} | class ____(Structure):
_fields_ = [("vgpuVmCompatibility", _nvmlVgpuVmCompatibility_t),
("compatibilityLimitCode", _nvmlVgpuPgpuCompatibilityLimitCode_t)
]
## vGPU scheduler policy defines
NVML_VGPU_SCHEDULER_POLICY_UNKNOWN = 0
NVML_VGPU_SCHEDULER_POLICY_BEST_EFFORT = 1
NVML_VGPU_SCHEDULER_POLICY_EQUAL_SHARE = 2
NVML_VGPU_SCHEDULER_POLICY_FIXED_SHARE = 3
## Supported vGPU scheduler policy count
NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT = 3
NVML_SCHEDULER_SW_MAX_LOG_ENTRIES = 200
NVML_VGPU_SCHEDULER_ARR_DEFAULT = 0
NVML_VGPU_SCHEDULER_ARR_DISABLE = 1
NVML_VGPU_SCHEDULER_ARR_ENABLE = 2
NVML_VGPU_SCHEDULER_ENGINE_TYPE_GRAPHICS = 1
| c_nvmlVgpuPgpuCompatibility_t |
python | walkccc__LeetCode | solutions/1540. Can Convert String in K Moves/1540-2.py | {
"start": 0,
"end": 529
} | class ____:
def canConvertString(self, s: str, t: str, k: int) -> bool:
if len(s) != len(t):
return False
# e.g. s = "aab", t = "bbc", so shiftCount[1] = 3
# 1. a -> b, need 1 move.
# 2. a -> b, need 1 + 26 moves.
# 3. b -> c, need 1 + 26 * 2 moves.
shiftCount = [0] * 26
for a, b in zip(s, t):
shift = (ord(b) - ord(a) + 26) % 26
if shift == 0:
continue
if shift + 26 * shiftCount[shift] > k:
return False
shiftCount[shift] += 1
return True
| Solution |
python | fsspec__filesystem_spec | fsspec/implementations/http.py | {
"start": 19177,
"end": 25641
} | class ____(AbstractBufferedFile):
"""
A file-like object pointing to a remote HTTP(S) resource
Supports only reading, with read-ahead of a predetermined block-size.
In the case that the server does not supply the filesize, only reading of
the complete file in one go is supported.
Parameters
----------
url: str
Full URL of the remote resource, including the protocol
session: aiohttp.ClientSession or None
All calls will be made within this session, to avoid restarting
connections where the server allows this
block_size: int or None
The amount of read-ahead to do, in bytes. Default is 5MB, or the value
configured for the FileSystem creating this file
size: None or int
If given, this is the size of the file in bytes, and we don't attempt
to call the server to find the value.
kwargs: all other key-values are passed to requests calls.
"""
def __init__(
self,
fs,
url,
session=None,
block_size=None,
mode="rb",
cache_type="bytes",
cache_options=None,
size=None,
loop=None,
asynchronous=False,
**kwargs,
):
if mode != "rb":
raise NotImplementedError("File mode not supported")
self.asynchronous = asynchronous
self.loop = loop
self.url = url
self.session = session
self.details = {"name": url, "size": size, "type": "file"}
super().__init__(
fs=fs,
path=url,
mode=mode,
block_size=block_size,
cache_type=cache_type,
cache_options=cache_options,
**kwargs,
)
def read(self, length=-1):
"""Read bytes from file
Parameters
----------
length: int
Read up to this many bytes. If negative, read all content to end of
file. If the server has not supplied the filesize, attempting to
read only part of the data will raise a ValueError.
"""
if (
(length < 0 and self.loc == 0) # explicit read all
# but not when the size is known and fits into a block anyways
and not (self.size is not None and self.size <= self.blocksize)
):
self._fetch_all()
if self.size is None:
if length < 0:
self._fetch_all()
else:
length = min(self.size - self.loc, length)
return super().read(length)
async def async_fetch_all(self):
"""Read whole file in one shot, without caching
This is only called when position is still at zero,
and read() is called without a byte-count.
"""
logger.debug(f"Fetch all for {self}")
if not isinstance(self.cache, AllBytes):
r = await self.session.get(self.fs.encode_url(self.url), **self.kwargs)
async with r:
r.raise_for_status()
out = await r.read()
self.cache = AllBytes(
size=len(out), fetcher=None, blocksize=None, data=out
)
self.size = len(out)
_fetch_all = sync_wrapper(async_fetch_all)
def _parse_content_range(self, headers):
"""Parse the Content-Range header"""
s = headers.get("Content-Range", "")
m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s)
if not m:
return None, None, None
if m[1] == "*":
start = end = None
else:
start, end = [int(x) for x in m[1].split("-")]
total = None if m[2] == "*" else int(m[2])
return start, end, total
async def async_fetch_range(self, start, end):
"""Download a block of data
The expectation is that the server returns only the requested bytes,
with HTTP code 206. If this is not the case, we first check the headers,
and then stream the output - if the data size is bigger than we
requested, an exception is raised.
"""
logger.debug(f"Fetch range for {self}: {start}-{end}")
kwargs = self.kwargs.copy()
headers = kwargs.pop("headers", {}).copy()
headers["Range"] = f"bytes={start}-{end - 1}"
logger.debug(f"{self.url} : {headers['Range']}")
r = await self.session.get(
self.fs.encode_url(self.url), headers=headers, **kwargs
)
async with r:
if r.status == 416:
# range request outside file
return b""
r.raise_for_status()
# If the server has handled the range request, it should reply
# with status 206 (partial content). But we'll guess that a suitable
# Content-Range header or a Content-Length no more than the
# requested range also mean we have got the desired range.
response_is_range = (
r.status == 206
or self._parse_content_range(r.headers)[0] == start
or int(r.headers.get("Content-Length", end + 1)) <= end - start
)
if response_is_range:
# partial content, as expected
out = await r.read()
elif start > 0:
raise ValueError(
"The HTTP server doesn't appear to support range requests. "
"Only reading this file from the beginning is supported. "
"Open with block_size=0 for a streaming file interface."
)
else:
# Response is not a range, but we want the start of the file,
# so we can read the required amount anyway.
cl = 0
out = []
while True:
chunk = await r.content.read(2**20)
# data size unknown, let's read until we have enough
if chunk:
out.append(chunk)
cl += len(chunk)
if cl > end - start:
break
else:
break
out = b"".join(out)[: end - start]
return out
_fetch_range = sync_wrapper(async_fetch_range)
magic_check = re.compile("([*[])")
def has_magic(s):
match = magic_check.search(s)
return match is not None
| HTTPFile |
python | redis__redis-py | redis/connection.py | {
"start": 43693,
"end": 46376
} | class ____(AbstractConnection):
"Manages TCP communication to and from a Redis server"
def __init__(
self,
host="localhost",
port=6379,
socket_keepalive=False,
socket_keepalive_options=None,
socket_type=0,
**kwargs,
):
self._host = host
self.port = int(port)
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.socket_type = socket_type
super().__init__(**kwargs)
def repr_pieces(self):
pieces = [("host", self.host), ("port", self.port), ("db", self.db)]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
def _connect(self):
"Create a TCP socket connection"
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(
self.host, self.port, self.socket_type, socket.SOCK_STREAM
):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
# TCP_NODELAY
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in self.socket_keepalive_options.items():
sock.setsockopt(socket.IPPROTO_TCP, k, v)
# set the socket_connect_timeout before we connect
sock.settimeout(self.socket_connect_timeout)
# connect
sock.connect(socket_address)
# set the socket_timeout now that we're connected
sock.settimeout(self.socket_timeout)
return sock
except OSError as _:
err = _
if sock is not None:
try:
sock.shutdown(socket.SHUT_RDWR) # ensure a clean close
except OSError:
pass
sock.close()
if err is not None:
raise err
raise OSError("socket.getaddrinfo returned an empty list")
def _host_error(self):
return f"{self.host}:{self.port}"
@property
def host(self) -> str:
return self._host
@host.setter
def host(self, value: str):
self._host = value
| Connection |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 139306,
"end": 140610
} | class ____(BaseModel, extra="forbid"):
type: "TextIndexType" = Field(..., description="")
tokenizer: Optional["TokenizerType"] = Field(default=None, description="")
min_token_len: Optional[int] = Field(default=None, description="Minimum characters to be tokenized.")
max_token_len: Optional[int] = Field(default=None, description="Maximum characters to be tokenized.")
lowercase: Optional[bool] = Field(default=None, description="If true, lowercase all tokens. Default: true.")
ascii_folding: Optional[bool] = Field(
default=None,
description="If true, normalize tokens by folding accented characters to ASCII (e.g., 'ação' -> 'acao'). Default: false.",
)
phrase_matching: Optional[bool] = Field(
default=None, description="If true, support phrase matching. Default: false."
)
stopwords: Optional["StopwordsInterface"] = Field(
default=None,
description="Ignore this set of tokens. Can select from predefined languages and/or provide a custom set.",
)
on_disk: Optional[bool] = Field(default=None, description="If true, store the index on disk. Default: false.")
stemmer: Optional["StemmingAlgorithm"] = Field(
default=None, description="Algorithm for stemming. Default: disabled."
)
| TextIndexParams |
python | plotly__plotly.py | plotly/graph_objs/contour/colorbar/_title.py | {
"start": 233,
"end": 3971
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour.colorbar"
_path_str = "contour.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.contour.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.contour.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | encode__starlette | starlette/websockets.py | {
"start": 399,
"end": 576
} | class ____(Exception):
def __init__(self, code: int = 1000, reason: str | None = None) -> None:
self.code = code
self.reason = reason or ""
| WebSocketDisconnect |
python | huggingface__transformers | tests/models/umt5/test_modeling_umt5.py | {
"start": 8581,
"end": 14487
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(UMT5Model, UMT5ForConditionalGeneration, UMT5ForSequenceClassification, UMT5ForQuestionAnswering)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": UMT5Model,
"question-answering": UMT5ForQuestionAnswering,
"summarization": UMT5ForConditionalGeneration,
"text-classification": UMT5ForSequenceClassification,
"text2text-generation": UMT5ForConditionalGeneration,
"translation": UMT5ForConditionalGeneration,
"zero-shot": UMT5ForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = True
# The small UMT5 model needs higher percentages for CPU/MP tests
model_split_percents = [0.5, 0.8, 0.9]
def setUp(self):
self.model_tester = UMT5ModelTester(self)
# `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file
# `src/transformers/data/processors/squad.py` (where this test fails for this model)
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
return True
return False
# UMT5ForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
# overwrite because T5 doesn't accept position ids as input and expects `decoder_input_ids`
def test_custom_4d_attention_mask(self):
for model_class in self.all_generative_model_classes:
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config).to(device=torch_device, dtype=torch.float32)
(
input_ids,
_,
input_ids_shared_prefix,
mask_shared_prefix,
_,
) = self._get_custom_4d_mask_test_data()
logits = model.forward(
decoder_input_ids=input_ids,
input_ids=input_dict["input_ids"][:3],
).logits
# logits.shape == torch.Size([3, 4, ...])
logits_shared_prefix = model(
input_ids=input_dict["input_ids"][:1],
decoder_input_ids=input_ids_shared_prefix,
decoder_attention_mask=mask_shared_prefix,
)[0]
# logits_shared_prefix.shape == torch.Size([1, 6, ...])
out_last_tokens = logits[:, -1, :] # last tokens in each batch line
out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens
# comparing softmax-normalized logits:
normalized_0 = F.softmax(out_last_tokens)
normalized_1 = F.softmax(out_shared_prefix_last_tokens)
torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4)
def test_with_sequence_classification_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="UMT5 has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
# Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTester with T5->UMT5
| UMT5ModelTest |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 17846,
"end": 17958
} | class ____(ExampleViewSet):
permission_classes = [DenyAllUsingPermissionDenied]
| PermissionDeniedExampleViewSet |
python | spack__spack | lib/spack/spack/test/stage.py | {
"start": 11692,
"end": 13578
} | class ____(spack.fetch_strategy.FetchStrategy):
def fetch(self):
raise spack.fetch_strategy.FailedDownloadError(
"<non-existent URL>", "This implementation of FetchStrategy always fails"
)
@pytest.fixture
def search_fn():
"""Returns a search function that always succeeds."""
class _Mock:
performed_search = False
def __call__(self):
self.performed_search = True
return []
return _Mock()
def check_stage_dir_perms(prefix, path):
"""Check the stage directory perms to ensure match expectations."""
# Ensure the path's subdirectories -- to `$user` -- have their parent's
# perms while those from `$user` on are owned and restricted to the
# user.
assert path.startswith(prefix)
user = getpass.getuser()
prefix_status = os.stat(prefix)
uid = getuid()
# Obtain lists of ancestor and descendant paths of the $user node, if any.
#
# Skip processing prefix ancestors since no guarantee they will be in the
# required group (e.g. $TEMPDIR on HPC machines).
skip = prefix if prefix.endswith(os.sep) else prefix + os.sep
group_paths, user_node, user_paths = partition_path(path.replace(skip, ""), user)
for p in group_paths:
p_status = os.stat(os.path.join(prefix, p))
assert p_status.st_gid == prefix_status.st_gid
assert p_status.st_mode == prefix_status.st_mode
# Add the path ending with the $user node to the user paths to ensure paths
# from $user (on down) meet the ownership and permission requirements.
if user_node:
user_paths.insert(0, user_node)
for p in user_paths:
p_status = os.stat(os.path.join(prefix, p))
assert uid == p_status.st_uid
assert p_status.st_mode & stat.S_IRWXU == stat.S_IRWXU
@pytest.mark.usefixtures("mock_packages")
| FailingFetchStrategy |
python | getsentry__sentry | tests/sentry/lang/javascript/test_sourcemaps.py | {
"start": 13168,
"end": 16072
} | class ____(TestCase):
# Tests lookups that fall exactly on source map token boundaries
# https://github.com/mozilla/source-map/blob/master/test/test-source-map-consumer.js#138
def test_exact_mappings(self) -> None:
smap_view = SourceMapView.from_json_bytes(indexed_sourcemap_example)
# one.js
assert smap_view.lookup(0, 1) == SourceMapTokenMatch(
dst_line=0,
dst_col=1,
src="/the/root/one.js",
src_line=0,
src_col=1,
src_id=0,
name=None,
)
assert smap_view.lookup(0, 18) == SourceMapTokenMatch(
dst_line=0,
dst_col=18,
src="/the/root/one.js",
src_line=0,
src_col=21,
src_id=0,
name="bar",
)
assert smap_view.lookup(0, 28) == SourceMapTokenMatch(
dst_line=0,
dst_col=28,
src="/the/root/one.js",
src_line=1,
src_col=10,
src_id=0,
name="baz",
)
# two.js
assert smap_view.lookup(1, 18) == SourceMapTokenMatch(
dst_line=1,
dst_col=18,
src="/the/root/two.js",
src_line=0,
src_col=21,
src_id=1,
name="n",
)
assert smap_view.lookup(1, 21) == SourceMapTokenMatch(
dst_line=1,
dst_col=21,
src="/the/root/two.js",
src_line=1,
src_col=3,
src_id=1,
name=None,
)
assert smap_view.lookup(1, 21) == SourceMapTokenMatch(
dst_line=1,
dst_col=21,
src="/the/root/two.js",
src_line=1,
src_col=3,
src_id=1,
name=None,
)
# Tests lookups that fall inside source map token boundaries
# https://github.com/mozilla/source-map/blob/master/test/test-source-map-consumer.js#181
def test_fuzzy_mapping(self) -> None:
smap_view = SourceMapView.from_json_bytes(indexed_sourcemap_example)
# one.js
assert smap_view.lookup(0, 20) == SourceMapTokenMatch(
dst_line=0,
dst_col=18,
src="/the/root/one.js",
src_line=0,
src_col=21,
src_id=0,
name="bar",
)
assert smap_view.lookup(0, 30) == SourceMapTokenMatch(
dst_line=0,
dst_col=28,
src="/the/root/one.js",
src_line=1,
src_col=10,
src_id=0,
name="baz",
)
assert smap_view.lookup(1, 12) == SourceMapTokenMatch(
dst_line=1,
dst_col=9,
src="/the/root/two.js",
src_line=0,
src_col=11,
src_id=1,
name=None,
)
| ParseIndexedSourcemapTest |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/storage_tests/branching_io_manager_tests/utils.py | {
"start": 394,
"end": 2588
} | class ____:
"""Helper class for running asset-oriented tests. Handles threading
through the instance for you (this is easy to forget to do).
"""
def __init__(self, defs: Definitions, instance: DagsterInstance):
self.defs = defs
self.instance = instance
@staticmethod
@contextmanager
def ephemeral(defs: Definitions):
with DagsterInstance.ephemeral() as instance:
yield DefinitionsRunner(defs, instance)
def materialize_all_assets(
self, partition_key: Optional[str] = None
) -> dg.ExecuteInProcessResult:
all_keys = list(self.defs.get_repository_def().asset_graph.get_all_asset_keys())
job_def = self.defs.resolve_implicit_job_def_def_for_assets(all_keys)
assert job_def
return job_def.execute_in_process(instance=self.instance, partition_key=partition_key)
def materialize_assets(
self, asset_selection: Sequence[CoercibleToAssetKey], partition_key: Optional[str] = None
) -> dg.ExecuteInProcessResult:
asset_keys = [AssetKey.from_coercible(asset_key) for asset_key in asset_selection]
job_def = self.defs.resolve_implicit_job_def_def_for_assets(asset_keys)
assert job_def
return job_def.execute_in_process(
instance=self.instance,
asset_selection=asset_keys,
partition_key=partition_key,
)
def materialize_asset(
self, asset_key: CoercibleToAssetKey, partition_key: Optional[str] = None
) -> dg.ExecuteInProcessResult:
return self.materialize_assets([asset_key], partition_key)
def load_asset_value(
self, asset_key: CoercibleToAssetKey, partition_key: Optional[str] = None
) -> object:
return self.defs.load_asset_value(
asset_key=asset_key, instance=self.instance, partition_key=partition_key
)
def get_last_5000_asset_materialization_event_records(
self, asset_key: CoercibleToAssetKey
) -> list[dg.EventLogRecord]:
return [
*self.instance.fetch_materializations(
AssetKey.from_coercible(asset_key), limit=5000
).records
]
| DefinitionsRunner |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/vector.py | {
"start": 1603,
"end": 2072
} | class ____(Enum):
"""Enum representing the data format used to store vector components.
See :ref:`oracle_vector_datatype` for background.
.. versionadded:: 2.0.41
"""
INT8 = "INT8"
"""
8-bit integer format.
"""
BINARY = "BINARY"
"""
Binary format.
"""
FLOAT32 = "FLOAT32"
"""
32-bit floating-point format.
"""
FLOAT64 = "FLOAT64"
"""
64-bit floating-point format.
"""
| VectorStorageFormat |
python | getsentry__sentry | src/sentry/models/grouphistory.py | {
"start": 6509,
"end": 13364
} | class ____(Model):
"""
This model is used to track certain status changes for groups,
and is designed to power a few types of queries:
- `resolved_in:release` syntax - we can query for entries with status=REGRESSION and matching release
- Time to Resolution and Age of Unresolved Issues-style queries
- Issue Activity/Status over time breakdown (i.e. for each of the last 14 days, how many new, resolved, regressed, unignored, etc. issues were there?)
"""
__relocation_scope__ = RelocationScope.Excluded
objects: ClassVar[GroupHistoryManager] = GroupHistoryManager()
organization = FlexibleForeignKey("sentry.Organization", db_constraint=False)
group = FlexibleForeignKey("sentry.Group", db_constraint=False)
project = FlexibleForeignKey("sentry.Project", db_constraint=False)
release = FlexibleForeignKey("sentry.Release", null=True, db_constraint=False)
user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL")
team = FlexibleForeignKey("sentry.Team", null=True, on_delete=models.SET_NULL)
status = BoundedPositiveIntegerField(
default=0,
choices=(
(GroupHistoryStatus.ONGOING, _("Ongoing")),
(GroupHistoryStatus.RESOLVED, _("Resolved")),
(GroupHistoryStatus.AUTO_RESOLVED, _("Automatically Resolved")),
(GroupHistoryStatus.IGNORED, _("Ignored")),
(GroupHistoryStatus.UNIGNORED, _("Unignored")),
(GroupHistoryStatus.REGRESSED, _("Regressed")),
(GroupHistoryStatus.ASSIGNED, _("Assigned")),
(GroupHistoryStatus.UNASSIGNED, _("Unassigned")),
(GroupHistoryStatus.DELETED, _("Deleted")),
(GroupHistoryStatus.DELETED_AND_DISCARDED, _("Deleted and Discarded")),
(GroupHistoryStatus.REVIEWED, _("Reviewed")),
(GroupHistoryStatus.SET_RESOLVED_IN_RELEASE, _("Resolved in Release")),
(GroupHistoryStatus.SET_RESOLVED_IN_COMMIT, _("Resolved in Commit")),
(GroupHistoryStatus.SET_RESOLVED_IN_PULL_REQUEST, _("Resolved in Pull Request")),
(GroupHistoryStatus.ESCALATING, _("Escalating")),
),
)
prev_history_date = models.DateTimeField(
null=True
) # This field is used to simplify query calculations.
date_added = models.DateTimeField(default=timezone.now)
class Meta:
db_table = "sentry_grouphistory"
app_label = "sentry"
indexes = (
models.Index(fields=("project", "status", "release")),
models.Index(fields=("group", "status")),
models.Index(fields=("project", "date_added")),
)
__repr__ = sane_repr("group_id", "release_id")
@property
def owner(self) -> Actor | None:
"""Part of ActorOwned protocol"""
return Actor.from_id(user_id=self.user_id, team_id=self.team_id)
@owner.setter
def owner(self, actor: Actor | None) -> None:
"""Part of ActorOwned protocol"""
self.team_id = None
self.user_id = None
if actor and actor.is_user:
self.user_id = actor.id
if actor and actor.is_team:
self.team_id = actor.id
def get_prev_history(group: Group, status: int) -> GroupHistory | None:
"""
Finds the most recent row that is the inverse of this history row, if one exists.
"""
previous_statuses = PREVIOUS_STATUSES.get(status)
if not previous_statuses:
return None
prev_histories = GroupHistory.objects.filter(
group=group, status__in=previous_statuses
).order_by("-date_added")
return prev_histories.first()
def record_group_history_from_activity_type(
group: Group,
activity_type: int,
actor: RpcUser | User | Team | None = None,
release: Release | None = None,
) -> GroupHistory | None:
"""
Writes a `GroupHistory` row for an activity type if there's a relevant `GroupHistoryStatus` that
maps to it
"""
status = ACTIVITY_STATUS_TO_GROUP_HISTORY_STATUS.get(activity_type, None)
# Substatus-based GroupHistory should override activity-based GroupHistory since it's more specific.
if group.substatus:
status_str = GROUP_SUBSTATUS_TO_GROUP_HISTORY_STATUS.get(group.substatus, None)
if status_str is not None:
status = STRING_TO_STATUS_LOOKUP.get(status_str, status)
if status is not None:
return record_group_history(group, status, actor, release)
return None
def record_group_history(
group: Group,
status: int,
actor: User | RpcUser | Team | None = None,
release: Release | None = None,
) -> GroupHistory:
from sentry.models.team import Team
from sentry.users.models.user import User
from sentry.users.services.user import RpcUser
prev_history = get_prev_history(group, status)
user_id = None
team_id = None
if actor:
if isinstance(actor, RpcUser) or isinstance(actor, User):
user_id = actor.id
elif isinstance(actor, Team):
team_id = actor.id
else:
raise ValueError("record_group_history actor argument must be RPCUser or Team")
return GroupHistory.objects.create(
organization=group.project.organization,
group=group,
project=group.project,
release=release,
user_id=user_id,
team_id=team_id,
status=status,
prev_history_date=prev_history.date_added if prev_history else None,
)
def bulk_record_group_history(
groups: Sequence[Group],
status: int,
actor: User | RpcUser | Team | None = None,
release: Release | None = None,
) -> list[GroupHistory]:
from sentry.models.team import Team
from sentry.users.models.user import User
from sentry.users.services.user import RpcUser
def get_prev_history_date(group: Group, status: int) -> datetime.datetime | None:
prev_history = get_prev_history(group, status)
return prev_history.date_added if prev_history else None
user_id: int | None = None
team_id: int | None = None
if actor:
if isinstance(actor, RpcUser) or isinstance(actor, User):
user_id = actor.id
elif isinstance(actor, Team):
team_id = actor.id
else:
raise ValueError("record_group_history actor argument must be RPCUser or Team")
return GroupHistory.objects.bulk_create(
[
GroupHistory(
organization=group.project.organization,
group=group,
project=group.project,
release=release,
team_id=team_id,
user_id=user_id,
status=status,
prev_history_date=get_prev_history_date(group, status),
)
for group in groups
]
)
| GroupHistory |
python | django__django | tests/queries/models.py | {
"start": 10906,
"end": 11127
} | class ____(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory, models.CASCADE)
def __str__(self):
return "one2one " + self.new_name
| OneToOneCategory |
python | numpy__numpy | tools/swig/test/testVector.py | {
"start": 10633,
"end": 10898
} | class ____(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
######################################################################
| scharTestCase |
python | html5lib__html5lib-python | html5lib/treebuilders/base.py | {
"start": 4149,
"end": 4859
} | class ____(list):
def append(self, node):
"""Append node to the end of the list."""
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
| ActiveFormattingElements |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 37777,
"end": 42151
} | class ____(Request):
"""
Create a new project
:param name: Project name Unique within the company.
:type name: str
:param description: Project description.
:type description: str
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param default_output_destination: The default output destination URL for new
tasks under this project
:type default_output_destination: str
"""
_service = "projects"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": "string",
},
"description": {"description": "Project description.", "type": "string"},
"name": {
"description": "Project name Unique within the company.",
"type": "string",
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["name"],
"type": "object",
}
def __init__(
self,
name: str,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
default_output_destination: Optional[str] = None,
**kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.name = name
self.description = description
self.tags = tags
self.system_tags = system_tags
self.default_output_destination = default_output_destination
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("description")
def description(self) -> Optional[str]:
return self._property_description
@description.setter
def description(self, value: Optional[str]) -> None:
if value is None:
self._property_description = None
return
self.assert_isinstance(value, "description", six.string_types)
self._property_description = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("default_output_destination")
def default_output_destination(self) -> Optional[str]:
return self._property_default_output_destination
@default_output_destination.setter
def default_output_destination(self, value: Optional[str]) -> None:
if value is None:
self._property_default_output_destination = None
return
self.assert_isinstance(value, "default_output_destination", six.string_types)
self._property_default_output_destination = value
| CreateRequest |
python | getsentry__sentry | tests/sentry/integrations/msteams/test_notify_action.py | {
"start": 1000,
"end": 15172
} | class ____(RuleTestCase, PerformanceIssueTestCase):
rule_cls = MsTeamsNotifyServiceAction
def setUp(self) -> None:
event = self.get_event()
self.integration, _ = self.create_provider_integration_for(
event.project.organization,
self.user,
provider="msteams",
name="Galactic Empire",
external_id="D4r7h_Pl4gu315_th3_w153",
metadata={
"service_url": "https://smba.trafficmanager.net/amer",
"access_token": "d4rk51d3",
"expires_at": int(time.time()) + 86400,
},
)
def assert_form_valid(
self, form: Form, expected_channel_id: str, expected_channel: str
) -> None:
assert form.is_valid()
assert form.cleaned_data["channel_id"] == expected_channel_id
assert form.cleaned_data["channel"] == expected_channel
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch("sentry.analytics.record")
def test_applies_correctly(self, mock_record: MagicMock, mock_record_event: MagicMock) -> None:
event = self.get_event()
rule = self.get_rule(
data={"team": self.integration.id, "channel": "Naboo", "channel_id": "nb"}
)
notification_uuid = "123e4567-e89b-12d3-a456-426614174000"
results = list(rule.after(event=event, notification_uuid=notification_uuid))
assert len(results) == 1
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/nb/activities",
status=200,
json={},
)
results[0].callback(event, futures=[])
data = orjson.loads(responses.calls[0].request.body)
assert "attachments" in data
attachments = data["attachments"]
assert len(attachments) == 1
# Wish there was a better way to do this, but we
# can't pass the title and title link separately
# with MS Teams cards.
title_card = attachments[0]["content"]["body"][0]
title_pattern = r"\[%s\](.*)" % event.title
assert re.match(title_pattern, title_card["text"])
assert_last_analytics_event(
mock_record,
AlertSentEvent(
provider="msteams",
alert_id="",
alert_type="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
external_id="nb",
notification_uuid=notification_uuid,
),
)
assert_any_analytics_event(
mock_record,
MSTeamsIntegrationNotificationSent(
category="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
group_id=event.group_id,
notification_uuid=notification_uuid,
alert_id=None,
),
)
assert_slo_metric(mock_record_event)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch("sentry.analytics.record")
def test_client_error(self, mock_record: MagicMock, mock_record_event: MagicMock) -> None:
event = self.get_event()
rule = self.get_rule(
data={"team": self.integration.id, "channel": "Naboo", "channel_id": "nb"}
)
notification_uuid = "123e4567-e89b-12d3-a456-426614174000"
results = list(rule.after(event=event, notification_uuid=notification_uuid))
assert len(results) == 1
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/nb/activities",
status=500,
json={},
)
results[0].callback(event, futures=[])
data = orjson.loads(responses.calls[0].request.body)
assert "attachments" in data
attachments = data["attachments"]
assert len(attachments) == 1
# Wish there was a better way to do this, but we
# can't pass the title and title link separately
# with MS Teams cards.
title_card = attachments[0]["content"]["body"][0]
title_pattern = r"\[%s\](.*)" % event.title
assert re.match(title_pattern, title_card["text"])
assert_last_analytics_event(
mock_record,
AlertSentEvent(
provider="msteams",
alert_id="",
alert_type="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
external_id="nb",
notification_uuid=notification_uuid,
),
)
assert_any_analytics_event(
mock_record,
MSTeamsIntegrationNotificationSent(
category="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
group_id=event.group_id,
notification_uuid=notification_uuid,
alert_id=None,
),
)
assert_slo_metric(mock_record_event, EventLifecycleOutcome.FAILURE)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch("sentry.analytics.record")
def test_halt(self, mock_record: MagicMock, mock_record_event: MagicMock) -> None:
event = self.get_event()
rule = self.get_rule(
data={"team": self.integration.id, "channel": "Naboo", "channel_id": "nb"}
)
notification_uuid = "123e4567-e89b-12d3-a456-426614174000"
results = list(rule.after(event=event, notification_uuid=notification_uuid))
assert len(results) == 1
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/nb/activities",
status=403,
json={
"error": {
"code": "ConversationBlockedByUser",
"message": "User blocked the conversation with the bot.",
}
},
)
results[0].callback(event, futures=[])
data = orjson.loads(responses.calls[0].request.body)
assert "attachments" in data
attachments = data["attachments"]
assert len(attachments) == 1
# Wish there was a better way to do this, but we
# can't pass the title and title link separately
# with MS Teams cards.
title_card = attachments[0]["content"]["body"][0]
title_pattern = r"\[%s\](.*)" % event.title
assert re.match(title_pattern, title_card["text"])
assert_last_analytics_event(
mock_record,
AlertSentEvent(
provider="msteams",
alert_id="",
alert_type="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
external_id="nb",
notification_uuid=notification_uuid,
),
)
assert_any_analytics_event(
mock_record,
MSTeamsIntegrationNotificationSent(
category="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
group_id=event.group_id,
notification_uuid=notification_uuid,
alert_id=None,
),
)
assert_slo_metric(mock_record_event, EventLifecycleOutcome.HALTED)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_applies_correctly_generic_issue(
self, occurrence: MagicMock, mock_record_event: MagicMock
) -> None:
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
rule = self.get_rule(
data={"team": self.integration.id, "channel": "Hellboy", "channel_id": "nb"}
)
results = list(rule.after(event=group_event))
assert len(results) == 1
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/nb/activities",
status=200,
json={},
)
results[0].callback(event, futures=[])
data = orjson.loads(responses.calls[0].request.body)
assert "attachments" in data
attachments = data["attachments"]
assert len(attachments) == 1
title_card = attachments[0]["content"]["body"][0]
description = attachments[0]["content"]["body"][1]
assert (
title_card["text"]
== f"[{TEST_ISSUE_OCCURRENCE.issue_title}](http://testserver/organizations/{self.organization.slug}/issues/{event.group_id}/?referrer=msteams)"
)
assert description["text"] == TEST_ISSUE_OCCURRENCE.evidence_display[0].value
assert_slo_metric(mock_record_event, EventLifecycleOutcome.SUCCESS)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_PERF_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_applies_correctly_performance_issue(
self, occurrence: MagicMock, mock_record_event: MagicMock
) -> None:
event = self.create_performance_issue()
rule = self.get_rule(
data={"team": self.integration.id, "channel": "Naboo", "channel_id": "nb"}
)
results = list(rule.after(event=event))
assert len(results) == 1
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/nb/activities",
status=200,
json={},
)
results[0].callback(event, futures=[])
data = orjson.loads(responses.calls[0].request.body)
assert "attachments" in data
attachments = data["attachments"]
assert len(attachments) == 1
title_card = attachments[0]["content"]["body"][0]
description = attachments[0]["content"]["body"][1]
assert (
title_card["text"]
== f"[N+1 Query](http://testserver/organizations/{self.organization.slug}/issues/{event.group_id}/?referrer=msteams)"
)
assert (
description["text"]
== "db - SELECT `books\\_author`.`id`, `books\\_author`.`name` FROM `books\\_author` WHERE `books\\_author`.`id` = %s LIMIT 21"
)
assert_slo_metric(mock_record_event, EventLifecycleOutcome.SUCCESS)
def test_render_label(self) -> None:
rule = self.get_rule(data={"team": self.integration.id, "channel": "Tatooine"})
assert rule.render_label() == "Send a notification to the Galactic Empire Team to Tatooine"
def test_render_label_without_integration(self) -> None:
with assume_test_silo_mode_of(Integration):
self.integration.delete()
rule = self.get_rule(data={"team": self.integration.id, "channel": "Coruscant"})
assert rule.render_label() == "Send a notification to the [removed] Team to Coruscant"
@responses.activate
def test_valid_channel_selected(self) -> None:
rule = self.get_rule(data={"team": self.integration.id, "channel": "Death Star"})
channels = [{"id": "d_s", "name": "Death Star"}]
responses.add(
method=responses.GET,
url="https://smba.trafficmanager.net/amer/v3/teams/D4r7h_Pl4gu315_th3_w153/conversations",
json={"conversations": channels},
)
form = rule.get_form_instance()
self.assert_form_valid(form, "d_s", "Death Star")
@responses.activate
def test_valid_member_selected(self) -> None:
rule = self.get_rule(data={"team": self.integration.id, "channel": "Darth Vader"})
channels = [{"id": "i_s_d", "name": "Imperial Star Destroyer"}]
responses.add(
method=responses.GET,
url="https://smba.trafficmanager.net/amer/v3/teams/D4r7h_Pl4gu315_th3_w153/conversations",
json={"conversations": channels},
)
members = [{"name": "Darth Vader", "id": "d_v", "tenantId": "1428-5714-2857"}]
responses.add(
method=responses.GET,
url="https://smba.trafficmanager.net/amer/v3/conversations/D4r7h_Pl4gu315_th3_w153/pagedmembers?pageSize=500",
json={"members": members},
)
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations",
json={"id": "i_am_your_father"},
)
form = rule.get_form_instance()
self.assert_form_valid(form, "i_am_your_father", "Darth Vader")
@responses.activate
def test_invalid_channel_selected(self) -> None:
rule = self.get_rule(data={"team": self.integration.id, "channel": "Alderaan"})
channels = [{"name": "Hoth", "id": "hh"}]
responses.add(
method=responses.GET,
url="https://smba.trafficmanager.net/amer/v3/teams/D4r7h_Pl4gu315_th3_w153/conversations",
json={"conversations": channels},
)
members = [{"name": "Darth Sidious", "id": "d_s", "tenantId": "0102-0304-0506"}]
responses.add(
method=responses.GET,
url="https://smba.trafficmanager.net/amer/v3/conversations/D4r7h_Pl4gu315_th3_w153/pagedmembers?pageSize=500",
json={"members": members},
)
form = rule.get_form_instance()
assert not form.is_valid()
assert len(form.errors) == 1
| MsTeamsNotifyActionTest |
python | PyCQA__isort | scripts/build_config_option_docs.py | {
"start": 1387,
"end": 9895
} | class ____:
section_complete: str = ""
cfg: str = ""
pyproject_toml: str = ""
cli: str = ""
def __post_init__(self):
if self.cfg or self.pyproject_toml or self.cli:
if self.cfg:
cfg = dedent(self.cfg).lstrip()
self.cfg = (
dedent(
"""
### Example `.isort.cfg`
```
[settings]
{cfg}
```
"""
)
.format(cfg=cfg)
.lstrip()
)
if self.pyproject_toml:
pyproject_toml = dedent(self.pyproject_toml).lstrip()
self.pyproject_toml = (
dedent(
"""
### Example `pyproject.toml`
```
[tool.isort]
{pyproject_toml}
```
"""
)
.format(pyproject_toml=pyproject_toml)
.lstrip()
)
if self.cli:
cli = dedent(self.cli).lstrip()
self.cli = (
dedent(
"""
### Example cli usage
`{cli}`
"""
)
.format(cli=cli)
.lstrip()
)
sections = [s for s in [self.cfg, self.pyproject_toml, self.cli] if s]
sections_str = "\n".join(sections)
self.section_complete = f"""**Examples:**
{sections_str}"""
else:
self.section_complete = ""
def __str__(self):
return self.section_complete
description_mapping: dict[str, str]
description_mapping = {
"length_sort_sections": "Sort the given sections by length",
"forced_separate": "Force certain sub modules to show separately",
"sections": "What sections isort should display imports for and in what order",
"known_other": "known_OTHER is how imports of custom sections are defined. "
"OTHER is a placeholder for the custom section name.",
"comment_prefix": "Allows customizing how isort prefixes comments that it adds or modifies on import lines"
"Generally ` #` (two spaces before a pound symbol) is use, though one space is also common.",
"lines_before_imports": "The number of blank lines to place before imports. -1 for automatic determination",
"lines_after_imports": "The number of blank lines to place after imports. -1 for automatic determination",
"lines_between_sections": "The number of lines to place between sections",
"lines_between_types": "The number of lines to place between direct and from imports",
"lexicographical": "Lexicographical order is strictly alphabetical order. "
"For example by default isort will sort `1, 10, 2` into `1, 2, 10` - but with "
"lexicographical sorting enabled it will remain `1, 10, 2`.",
"ignore_comments": "If enabled, isort will strip comments that exist within import lines.",
"constants": "An override list of tokens to always recognize as a CONSTANT for order_by_type regardless of casing.",
"classes": "An override list of tokens to always recognize as a Class for order_by_type regardless of casing.",
"variables": "An override list of tokens to always recognize as a var for order_by_type regardless of casing.",
"auto_identify_namespace_packages": "Automatically determine local namespace packages, generally by lack of any src files before a src containing directory.",
"namespace_packages": "Manually specify one or more namespace packages.",
"follow_links": "If `True` isort will follow symbolic links when doing recursive sorting.",
"git_ignore": "If `True` isort will honor ignores within locally defined .git_ignore files.",
"formatting_function": "The fully qualified Python path of a function to apply to format code sorted by isort.",
"group_by_package": "If `True` isort will automatically create section groups by the top-level package they come from.",
"indented_import_headings": "If `True` isort will apply import headings to indented imports the same way it does unindented ones.",
"import_headings": "A mapping of import sections to import heading comments that should show above them.",
"import_footers": "A mapping of import sections to import footer comments that should show below them.",
}
example_mapping: dict[str, Example]
example_mapping = {
"skip": Example(
cfg="""
skip=.gitignore,.dockerignore""",
pyproject_toml="""
skip = [".gitignore", ".dockerignore"]
""",
),
"extend_skip": Example(
cfg="""
extend_skip=.md,.json""",
pyproject_toml="""
extend_skip = [".md", ".json"]
""",
),
"skip_glob": Example(
cfg="""
skip_glob=docs/*
""",
pyproject_toml="""
skip_glob = ["docs/*"]
""",
),
"extend_skip_glob": Example(
cfg="""
extend_skip_glob=my_*_module.py,test/*
""",
pyproject_toml="""
extend_skip_glob = ["my_*_module.py", "test/*"]
""",
),
"known_third_party": Example(
cfg="""
known_third_party=my_module1,my_module2
""",
pyproject_toml="""
known_third_party = ["my_module1", "my_module2"]
""",
),
"known_first_party": Example(
cfg="""
known_first_party=my_module1,my_module2
""",
pyproject_toml="""
known_first_party = ["my_module1", "my_module2"]
""",
),
"known_local_folder": Example(
cfg="""
known_local_folder=my_module1,my_module2
""",
pyproject_toml="""
known_local_folder = ["my_module1", "my_module2"]
""",
),
"known_standard_library": Example(
cfg="""
known_standard_library=my_module1,my_module2
""",
pyproject_toml="""
known_standard_library = ["my_module1", "my_module2"]
""",
),
"extra_standard_library": Example(
cfg="""
extra_standard_library=my_module1,my_module2
""",
pyproject_toml="""
extra_standard_library = ["my_module1", "my_module2"]
""",
),
"forced_separate": Example(
cfg="""
forced_separate=glob_exp1,glob_exp2
""",
pyproject_toml="""
forced_separate = ["glob_exp1", "glob_exp2"]
""",
),
"length_sort_sections": Example(
cfg="""
length_sort_sections=future,stdlib
""",
pyproject_toml="""
length_sort_sections = ["future", "stdlib"]
""",
),
"add_imports": Example(
cfg="""
add_imports=import os,import json
""",
pyproject_toml="""
add_imports = ["import os", "import json"]
""",
),
"remove_imports": Example(
cfg="""
remove_imports=os,json
""",
pyproject_toml="""
remove_imports = ["os", "json"]
""",
),
"single_line_exclusions": Example(
cfg="""
single_line_exclusions=os,json
""",
pyproject_toml="""
single_line_exclusions = ["os", "json"]
""",
),
"no_lines_before": Example(
cfg="""
no_lines_before=future,stdlib
""",
pyproject_toml="""
no_lines_before = ["future", "stdlib"]
""",
),
"src_paths": Example(
cfg="""
src_paths = src,tests
""",
pyproject_toml="""
src_paths = ["src", "tests"]
""",
),
"treat_comments_as_code": Example(
cfg="""
treat_comments_as_code = # my comment 1, # my other comment
""",
pyproject_toml="""
treat_comments_as_code = ["# my comment 1", "# my other comment"]
""",
),
"supported_extensions": Example(
cfg="""
supported_extensions=pyw,ext
""",
pyproject_toml="""
supported_extensions = ["pyw", "ext"]
""",
),
"blocked_extensions": Example(
cfg="""
blocked_extensions=pyw,pyc
""",
pyproject_toml="""
blocked_extensions = ["pyw", "pyc"]
""",
),
"known_other": Example(
cfg="""
sections=FUTURE,STDLIB,THIRDPARTY,AIRFLOW,FIRSTPARTY,LOCALFOLDER
known_airflow=airflow""",
pyproject_toml="""
sections = ['FUTURE', 'STDLIB', 'THIRDPARTY', 'AIRFLOW', 'FIRSTPARTY', 'LOCALFOLDER']
known_airflow = ['airflow']""",
),
"multi_line_output": Example(cfg="multi_line_output=3", pyproject_toml="multi_line_output = 3"),
"show_version": Example(cli="isort --version"),
"py_version": Example(
cli="isort --py 39",
pyproject_toml="""
py_version=39
""",
cfg="""
py_version=39
""",
),
}
@dataclasses.dataclass
| Example |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-webflow/source_webflow/auth.py | {
"start": 705,
"end": 893
} | class ____(WebflowAuthMixin, TokenAuthenticator):
"""
Authentication class information
https://docs.developers.webflow.com/reference/authorization
"""
| WebflowTokenAuthenticator |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 90152,
"end": 91961
} | class ____(_FilterInvalids):
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
def test_logaddexp_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
def test_reduce(self):
assert_equal(np.logaddexp.identity, -np.inf)
assert_equal(np.logaddexp.reduce([]), -np.inf)
| TestLogAddExp |
python | bokeh__bokeh | src/bokeh/document/events.py | {
"start": 9644,
"end": 12691
} | class ____(DocumentPatchedEvent):
''' A concrete event representing updating an attribute and value of a
specific Bokeh Model.
'''
kind = "ModelChanged"
def __init__(self, document: Document, model: Model, attr: str, new: Any,
setter: Setter | None = None, callback_invoker: Invoker | None = None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
A Model to update
attr (str) :
The name of the attribute to update on the model.
new (object) :
The new value of the attribute
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.model = model
self.attr = attr
self.new = new
def combine(self, event: DocumentChangedEvent) -> bool:
'''
'''
if not isinstance(event, ModelChangedEvent):
return False
# If these are not true something weird is going on, maybe updates from
# Python bokeh.client, don't try to combine
if self.setter != event.setter:
return False
if self.document != event.document:
return False
if (self.model == event.model) and (self.attr == event.attr):
self.new = event.new
self.callback_invoker = event.callback_invoker
return True
return False
def dispatch(self, receiver: Any) -> None:
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_model_changed`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_document_model_changed'):
cast(DocumentModelChangedMixin, receiver)._document_model_changed(self)
def to_serializable(self, serializer: Serializer) -> ModelChanged:
''' Create a JSON representation of this event suitable for sending
to clients.
Args:
serializer (Serializer):
'''
return ModelChanged(
kind = self.kind,
model = self.model.ref,
attr = self.attr,
new = serializer.encode(self.new),
)
@staticmethod
def _handle_event(doc: Document, event: ModelChangedEvent) -> None:
model = event.model
attr = event.attr
value = event.new
model.set_from_json(attr, value, setter=event.setter)
| ModelChangedEvent |
python | automl__auto-sklearn | autosklearn/pipeline/implementations/MinorityCoalescer.py | {
"start": 103,
"end": 2861
} | class ____(BaseEstimator, TransformerMixin):
"""Group together categories which occurence is less than a specified
minimum fraction. Coalesced categories get index of one.
"""
def __init__(self, minimum_fraction=None):
self.minimum_fraction = minimum_fraction
def check_X(self, X):
X_data = X.data if sparse.issparse(X) else X
if np.nanmin(X_data) < 2:
raise ValueError("X needs to contain only integers greater than two.")
def fit(self, X, y=None):
self.check_X(X)
if self.minimum_fraction is None:
return self
# Remember which values should not be coalesced
do_not_coalesce = list()
for column in range(X.shape[1]):
do_not_coalesce.append(set())
if sparse.issparse(X):
indptr_start = X.indptr[column]
indptr_end = X.indptr[column + 1]
unique, counts = np.unique(
X.data[indptr_start:indptr_end], return_counts=True
)
colsize = indptr_end - indptr_start
else:
unique, counts = np.unique(X[:, column], return_counts=True)
colsize = X.shape[0]
for unique_value, count in zip(unique, counts):
fraction = float(count) / colsize
if fraction >= self.minimum_fraction:
do_not_coalesce[-1].add(unique_value)
self.do_not_coalesce_ = do_not_coalesce
return self
def transform(self, X):
self.check_X(X)
if self.minimum_fraction is None:
return X
for column in range(X.shape[1]):
if sparse.issparse(X):
indptr_start = X.indptr[column]
indptr_end = X.indptr[column + 1]
unique = np.unique(X.data[indptr_start:indptr_end])
for unique_value in unique:
if unique_value not in self.do_not_coalesce_[column]:
indptr_start = X.indptr[column]
indptr_end = X.indptr[column + 1]
X.data[indptr_start:indptr_end][
X.data[indptr_start:indptr_end] == unique_value
] = 1
else:
unique = np.unique(X[:, column])
unique_values = [
unique_value
for unique_value in unique
if unique_value not in self.do_not_coalesce_[column]
]
mask = np.isin(X[:, column], unique_values)
X[mask, column] = 1
return X
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
| MinorityCoalescer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 555983,
"end": 556453
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteLinkedBranch"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "issue")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
issue = sgqlc.types.Field("Issue", graphql_name="issue")
"""The issue the linked branch was unlinked from."""
| DeleteLinkedBranchPayload |
python | eth-brownie__brownie | brownie/exceptions.py | {
"start": 5363,
"end": 5418
} | class ____(LookupError):
pass
@final
| EventLookupError |
python | getsentry__sentry | src/sentry/migrations/0989_add_release_date_added_idx.py | {
"start": 184,
"end": 1505
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0988_data_forwarding"),
]
operations = [
migrations.AlterField(
model_name="release",
name="date_added",
field=models.DateTimeField(db_index=True, default=django.utils.timezone.now),
),
]
| Migration |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 73630,
"end": 75927
} | class ____(torch.nn.Module):
def forward(self, s77: "Sym(s77)", L_x_: "f32[s77, 8]"):
l_x_ = L_x_
subgraph_0 = self.subgraph_0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', s77, l_x_); subgraph_0 = l_x_ = None
a: "f32[s77, 8]" = invoke_subgraph[0]; invoke_subgraph = None
floordiv: "Sym((s77//2))" = s77 // 2
b: "f32[(s77//2), 8]" = torch.narrow(a, 0, 0, floordiv); a = floordiv = None
subgraph_1 = self.subgraph_1
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_1', s77, b); subgraph_1 = s77 = b = None
getitem_3: "f32[(s77//2), 8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
return (getitem_3,)
class subgraph_0(torch.nn.Module):
def forward(self, s77: "Sym(s77)", l_x_: "f32[s77, 8]"):
sin: "f32[s77, 8]" = torch.sin(l_x_); l_x_ = None
return (sin,)
class subgraph_1(torch.nn.Module):
def forward(self, s77: "Sym(s77)", b: "f32[(s77//2), 8]"):
sin: "f32[(s77//2), 8]" = torch.sin(b); b = None
return (sin,)
""",
)
def test_autograd_function(self):
class CustomOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return torch.sin(x)
@staticmethod
def backward(ctx, grad_out):
(x,) = ctx.saved_tensors
return x * torch.cos(grad_out)
@nested_compile_region
def gn(x):
return CustomOp.apply(x)
def fn(x):
return gn(x) + gn(x)
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(8, 8, requires_grad=True)
x_clone = x.detach().clone().requires_grad_(True)
ref = fn(x)
res = opt_fn(x_clone)
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | django__django | django/contrib/gis/gdal/geometries.py | {
"start": 20429,
"end": 21614
} | class ____(OGRGeometry):
def _geos_ptr(self):
from django.contrib.gis import geos
return geos.Point._create_empty() if self.empty else super()._geos_ptr()
@classmethod
def _create_empty(cls):
return capi.create_geom(OGRGeomType("point").num)
@property
def x(self):
"Return the X coordinate for this Point."
return capi.getx(self.ptr, 0)
@property
def y(self):
"Return the Y coordinate for this Point."
return capi.gety(self.ptr, 0)
@property
def z(self):
"Return the Z coordinate for this Point."
if self.is_3d:
return capi.getz(self.ptr, 0)
@property
def m(self):
"""Return the M coordinate for this Point."""
if self.is_measured:
return capi.getm(self.ptr, 0)
@property
def tuple(self):
"Return the tuple of this point."
if self.is_3d and self.is_measured:
return self.x, self.y, self.z, self.m
if self.is_3d:
return self.x, self.y, self.z
if self.is_measured:
return self.x, self.y, self.m
return self.x, self.y
coords = tuple
| Point |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/latest_release_handler.py | {
"start": 1637,
"end": 4385
} | class ____(CacheAccess[Release | Literal[False]]):
"""
If we have a latest adopted release for a project in an environment, we cache it.
If we don't, we cache False.
"""
def __init__(self, event: GroupEvent, environment: Environment):
self._key = latest_adopted_release_cache_key(event.group.project_id, environment.id)
def key(self) -> str:
return self._key
def get_latest_adopted_release_for_env(
environment: Environment, event: GroupEvent
) -> Release | None:
"""
Get the latest adopted release for a project in an environment.
"""
return _get_latest_release_for_env_impl(
environment,
event,
only_adopted=True,
cache_access=_LatestAdoptedReleaseCacheAccess(event, environment),
)
def get_latest_release_for_env(
environment: Environment | None,
event: GroupEvent,
) -> Release | None:
"""
Get the latest release for a project in an environment.
NOTE: This is independent of whether it has been adopted or not.
"""
return _get_latest_release_for_env_impl(
environment,
event,
only_adopted=False,
cache_access=_LatestReleaseCacheAccess(event, environment),
)
def _get_latest_release_for_env_impl(
environment: Environment | None,
event: GroupEvent,
only_adopted: bool,
cache_access: CacheAccess[Release | Literal[False]],
) -> Release | None:
latest_release = cache_access.get()
if latest_release is not None:
if latest_release is False:
return None
return latest_release
organization_id = event.group.project.organization_id
environments = [environment] if environment else None
def record_get_latest_release_result(result: str) -> None:
metrics.incr(
"workflow_engine.latest_release.get_latest_release",
tags={
"has_environment": str(environment is not None),
"result": result,
"only_adopted": str(only_adopted),
},
)
try:
latest_release_version = get_latest_release(
[event.group.project],
environments,
organization_id,
adopted=only_adopted,
)[0]
record_get_latest_release_result("success")
except Release.DoesNotExist:
record_get_latest_release_result("does_not_exist")
cache_access.set(False, 600)
return None
latest_release = Release.objects.get(
version=latest_release_version, organization_id=organization_id
)
cache_access.set(latest_release or False, 600)
return latest_release
@condition_handler_registry.register(Condition.LATEST_RELEASE)
| _LatestAdoptedReleaseCacheAccess |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAliasType2.py | {
"start": 1387,
"end": 1705
} | class ____(Generic[T1]):
L = TypeAliasType("L", list[T1])
a1: A[int].L = [1, 2, 3]
a2: A[str].L = ["1", "2", "3"]
# This should generate an error because S is not in scope.
TA8 = TypeAliasType("TA8", list[S])
def identity[T](t: T) -> T:
return t
reveal_type(identity(TA1), expected_text="TypeAliasType")
| A |
python | django__django | tests/custom_managers/models.py | {
"start": 950,
"end": 1550
} | class ____(models.QuerySet):
def filter(self, *args, **kwargs):
queryset = super().filter(fun=True)
queryset._filter_CustomQuerySet = True
return queryset
def public_method(self, *args, **kwargs):
return self.all()
def _private_method(self, *args, **kwargs):
return self.all()
def optout_public_method(self, *args, **kwargs):
return self.all()
optout_public_method.queryset_only = True
def _optin_private_method(self, *args, **kwargs):
return self.all()
_optin_private_method.queryset_only = False
| CustomQuerySet |
python | google__flatbuffers | python/flatbuffers/flexbuffers.py | {
"start": 9559,
"end": 9825
} | class ____:
"""Base class for all non-trivial data accessors."""
__slots__ = '_buf', '_byte_width'
def __init__(self, buf, byte_width):
self._buf = buf
self._byte_width = byte_width
@property
def ByteWidth(self):
return self._byte_width
| Object |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_project_views.py | {
"start": 25824,
"end": 27371
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.project = get(Project, slug="test", users=[self.user])
self.version = get(Version, slug="1.0", project=self.project)
self.email_notification = get(EmailHook, project=self.project)
self.client.force_login(self.user)
def test_list(self):
resp = self.client.get(
reverse("projects_notifications", args=[self.project.slug]),
)
self.assertEqual(resp.status_code, 200)
queryset = resp.context["emails"]
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset.first(), self.email_notification)
def test_create(self):
self.assertEqual(self.project.emailhook_notifications.all().count(), 1)
resp = self.client.post(
reverse("projects_notifications_create", args=[self.project.slug]),
data={
"email": "test@example.com",
},
)
self.assertEqual(resp.status_code, 302)
self.assertEqual(self.project.emailhook_notifications.all().count(), 2)
def test_delete(self):
self.assertEqual(self.project.emailhook_notifications.all().count(), 1)
self.client.post(
reverse("projects_notification_delete", args=[self.project.slug]),
data={"email": self.email_notification.email},
)
self.assertEqual(self.project.emailhook_notifications.all().count(), 0)
@override_settings(RTD_ALLOW_ORGANIZATIONS=False)
| TestProjectEmailNotifications |
python | conda__conda | conda/plugins/types.py | {
"start": 9482,
"end": 9961
} | class ____(CondaPlugin):
"""
Return type to use when defining a conda post-solve plugin hook.
For details on how this is used, see
:meth:`~conda.plugins.hookspec.CondaSpecs.conda_post_solves`.
:param name: Post-solve name (e.g., ``custom_plugin_post_solve``).
:param action: Callable which contains the code to be run.
"""
name: str
action: Callable[[str, tuple[PackageRecord, ...], tuple[PackageRecord, ...]], None]
@dataclass
| CondaPostSolve |
python | astropy__astropy | astropy/cosmology/_src/tests/io/test_connect.py | {
"start": 9363,
"end": 10478
} | class ____(ToFromFormatTestMixin):
"""Test Cosmology[To/From]Format classes."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format_type", tofrom_formats)
def test_fromformat_class_mismatch(self, cosmo, format_type):
format, totype = format_type
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# class mismatch
with pytest.raises(TypeError):
w0wzCDM.from_format(obj, format=format)
with pytest.raises(TypeError):
Cosmology.from_format(obj, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.from_format(obj, format=format, cosmology="FlatLambdaCDM")
| TestCosmologyToFromFormat |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol6.py | {
"start": 284,
"end": 349
} | class ____(Mammal[_T3], Protocol):
type_of_hooves: _T3
| Ungulate |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/error.py | {
"start": 400,
"end": 1305
} | class ____:
__slots__ = 'name', 'index', 'line', 'column'
def __init__(self, name, index, line, column):
# type: (Any, int, int, int) -> None
self.name = name
self.index = index
self.line = line
self.column = column
def __str__(self):
# type: () -> Any
where = _F(
' in "{sname!s}", line {sline1:d}, column {scolumn1:d}',
sname=self.name,
sline1=self.line + 1,
scolumn1=self.column + 1,
)
return where
def __eq__(self, other):
# type: (Any) -> bool
if self.line != other.line or self.column != other.column:
return False
if self.name != other.name or self.index != other.index:
return False
return True
def __ne__(self, other):
# type: (Any) -> bool
return not self.__eq__(other)
| StreamMark |
python | pytorch__pytorch | torch/_subclasses/meta_utils.py | {
"start": 22234,
"end": 30348
} | class ____(Generic[_TensorT]):
id: MetaTensorId
ndim: int
dtype: torch.dtype
device: torch.device
# NB: Sometimes, size, stride and storage_offset contain SymInt, in which
# case this is NOT serializable. That only happens when you're
# re-fakeifying a fake tensor with an existing ShapeEnv... maybe we
# can get rid of this use case entirely. Notably, even if we are
# fakeifying a real tensor into a fake tensor with symbolic shapes, the
# size here is NOT dynamic
# NB: These also contain SymInt because wrap_meta_outputs_with_default_device_logic
# goes through this codepath. But it really should not LOL.
# NB: size could potentially be None as you can override it and make it
# throw an error, but we don't currently have any subclasses that do this
# except C++ nested tensor but we're going to have nested int to make this
# defined on NJT
size: tuple[int, ...]
dynamo_dynamic_indices: list[int]
dynamo_hint_overrides: dict[int, int]
layout: torch.layout = torch.strided
is_inference: bool = False
is_leaf: bool = False
requires_grad: bool = False
is_sparse: bool = False
is_mkldnn: bool = False
is_functorch_wrapped: bool = False
is_batchedtensor: bool = False
is_legacy_batchedtensor: bool = False
is_gradtrackingtensor: bool = False
is_view: bool = False
is_nested: bool = False
# We eagerly symbolicize the associated nested int for e.g. offsets / lengths
# metadata if that offsets is already associated with a nested int.
# See test_construct_from_jagged_with_input_offsets_mixed_case.
nested_int: Optional[int] = None
is_traceable_wrapper_subclass: bool = False
is_functional: bool = False
is_conj: bool = False
is_neg: bool = False
is_parameter: bool = False
stride: Optional[tuple[int, ...]] = None
storage_offset: int = 0
# NB: We have a choice whether or not to store the id or a direct pointer
# to the data structure. For ease of use, we store the data structure,
# but this means that when we serialize, we have to swizzle these pointers
# back into ids (so we have accurate aliasing relationships)
storage: Optional[MetaStorageDesc] = None
sparse_dim: Optional[int] = None # is_sparse, is_sparse_compressed
dense_dim: Optional[int] = None # is_sparse, is_sparse_compressed
is_coalesced: Optional[bool] = None # is_sparse
crow_indices: Optional[MetaTensorDesc] = None # is_sparse_compressed
col_indices: Optional[MetaTensorDesc] = None # is_sparse_compressed
ccol_indices: Optional[MetaTensorDesc] = None # is_sparse_compressed
row_indices: Optional[MetaTensorDesc] = None # is_sparse_compressed
values: Optional[MetaTensorDesc] = None # is_sparse_compressed
unwrapped: Optional[MetaTensorDesc] = None # is_functorch_wrapped
bdim: Optional[int] = None # is_functorch_wrapped
base: Optional[MetaTensorDesc] = None # is_view
attrs: Optional[dict[str, MetaTensorDesc]] = None # is_traceable_wrapper_subclass
creation_meta: Optional[CreationMeta] = None
grad: Optional[MetaTensorDesc] = None
# Everything below is NOT serializable, need some more work
_UNSERIALIZABLE: ClassVar[set[str]] = {
"ctx",
"type",
"fake_mode",
# view_func isn't serializable when it's a _CustomViewFunc
"view_func",
"level",
"current_level",
"functorch_stack",
"autograd_meta_from",
"data",
"nested_int",
}
ctx: Optional[object] = None # is_traceable_wrapper_subclass
type: Optional[type] = None # is_traceable_wrapper_subclass
fake_mode: Optional[FakeTensorMode] = None
view_func: Optional[ViewFunc] = None
# level looks serializable, but actually it is meaningless without
# the functorch_stack below
level: Optional[int] = None # is_functorch_wrapped
current_level: Optional[int] = None
functorch_stack: Optional[list[CInterpreter]] = None
autograd_meta_from: Optional[torch.Tensor] = None
# This is only populated on copy_data, and typically is not used at all,
# except for some of our meta-ification paths that don't properly use
# storage (pro-tip: you should use storage)
data: Optional[torch.Tensor] = None
# Faithfully serializing functorch tensors will not be too difficult.
# We only need to consider grad/vmap interpreters, and their internal
# state is only bools (mostly what the grad enabled/disabled state
# should be in the lower layer). Beyond that, tensors just need to
# precisely indicate which particular interpreter they correspond
# to (we then replace level with a pointer to the interpreter stack.)
# However, this use of functorch is very "non-lexical" so it's not
# entirely clear how to make it all lexical again, so we haven't done
# it for now.
# NB: This will reference numeric IDs, and it is assumed that you've
# already serialized everything this recursively references
def as_json(self, describer_id: _DescriberId) -> dict[str, object]:
def json(k: str, v: object) -> object:
# Some best-effort debugging serialization for unserializable
# fields (feel free to add other special cases as appropriate)
if k in ["data", "autograd_meta_from"]:
return None # never repr these
if k in MetaTensorDesc._UNSERIALIZABLE:
return repr(v)
if isinstance(v, (torch.device, torch.dtype, torch.layout)):
return repr(v)
if isinstance(v, torch.SymInt):
return repr(v)
if isinstance(v, (tuple, list)):
return [json(k, v1) for v1 in v]
if isinstance(v, (MetaStorageDesc, MetaTensorDesc)):
return v.id
if isinstance(v, CreationMeta):
return str(v)
if k == "attrs" and isinstance(v, dict):
return {k1: v1.id for k1, v1 in v.items()}
return v
r = {
field.name: json(field.name, getattr(self, field.name))
for field in dataclasses.fields(self)
if not (
getattr(self, field.name) is field.default
or (
field.name == "dynamo_dynamic_indices"
and not getattr(self, field.name)
)
)
}
r.update({"describer_id": describer_id})
return r
@property
def shape(self) -> tuple[int, ...]:
return self.size
# A more faithful reproduction would do a copy on the entire
# storage, but this needs to be done carefully because the
# underlying storage could have larger extent than is implied
# by size/stride. The real fix is to properly call
# meta_storage recursively here.
#
# These "safe" functions are intended to be used under no_dispatch() mode.
# The no_dispatch() here is intended to prevent ambient fake tensor mode from
# fakeifying the operation. But if we are given an honest to goodness
# FakeTensor as src, we MUST NOT run the copy/clone operation. A better way
# to do this would be to not use no_dispatch and instead just disable fake
# tensor mode only (allowing for subclass dispatch to occur)
def _safe_copy(dst: torch.Tensor, src: Optional[torch.Tensor]) -> None:
if type(src) is not torch.Tensor:
return
dst.copy_(src)
def _safe_clone(src: torch.Tensor) -> Optional[torch.Tensor]:
if type(src) is not torch.Tensor:
return None
return src.clone()
# This is a class for converting multiple tensors into meta tensors which
# share the same view/storage structure. The operation model is you allocate
# one of these, and then call it repeatedly on all the tensors you want to
# convert. It's important to use the same object for tensors you want to
# share storage because this is how we correlate shared storages to the same
# meta storages. This class will hold weak references to cached tenosrs
# and tensor storages.
| MetaTensorDesc |
python | run-llama__llama_index | llama-index-integrations/storage/index_store/llama-index-storage-index-store-tablestore/llama_index/storage/index_store/tablestore/base.py | {
"start": 187,
"end": 1672
} | class ____(KVIndexStore):
"""
Tablestore Index store.
Args:
tablestore_kvstore (TablestoreKVStore): Tablestore key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the table name
"""
def __init__(
self,
tablestore_kvstore: TablestoreKVStore,
namespace: str = "llama_index_index_store_",
collection_suffix: str = "data",
) -> None:
"""Init a TablestoreIndexStore."""
super().__init__(
kvstore=tablestore_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
self._tablestore_kvstore = tablestore_kvstore
@classmethod
def from_config(
cls,
endpoint: Optional[str] = None,
instance_name: Optional[str] = None,
access_key_id: Optional[str] = None,
access_key_secret: Optional[str] = None,
**kwargs: Any,
) -> "TablestoreIndexStore":
"""Load a TablestoreIndexStore from config."""
kv_store = TablestoreKVStore(
endpoint=endpoint,
instance_name=instance_name,
access_key_id=access_key_id,
access_key_secret=access_key_secret,
kwargs=kwargs,
)
return cls(tablestore_kvstore=kv_store)
def delete_all_index(self):
"""Delete all index."""
self._tablestore_kvstore.delete_all(self._collection)
| TablestoreIndexStore |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 42776,
"end": 43128
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("nl_NL")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in NlProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in NlProvider.MONTH_NAMES.values()
| TestNlNl |
python | django__django | django/contrib/admin/views/autocomplete.py | {
"start": 257,
"end": 4385
} | class ____(BaseListView):
"""Handle AutocompleteWidget's AJAX requests for data."""
paginate_by = 20
admin_site = None
def get(self, request, *args, **kwargs):
"""
Return a JsonResponse with search results as defined in
serialize_result(), by default:
{
results: [{id: "123" text: "foo"}],
pagination: {more: true}
}
"""
(
self.term,
self.model_admin,
self.source_field,
to_field_name,
) = self.process_request(request)
if not self.has_perm(request):
raise PermissionDenied
self.object_list = self.get_queryset()
context = self.get_context_data()
return JsonResponse(
{
"results": [
self.serialize_result(obj, to_field_name)
for obj in context["object_list"]
],
"pagination": {"more": context["page_obj"].has_next()},
}
)
def serialize_result(self, obj, to_field_name):
"""
Convert the provided model object to a dictionary that is added to the
results list.
"""
return {"id": str(getattr(obj, to_field_name)), "text": str(obj)}
def get_paginator(self, *args, **kwargs):
"""Use the ModelAdmin's paginator."""
return self.model_admin.get_paginator(self.request, *args, **kwargs)
def get_queryset(self):
"""Return queryset based on ModelAdmin.get_search_results()."""
qs = self.model_admin.get_queryset(self.request)
qs = qs.complex_filter(self.source_field.get_limit_choices_to())
qs, search_use_distinct = self.model_admin.get_search_results(
self.request, qs, self.term
)
if search_use_distinct:
qs = qs.distinct()
return qs
def process_request(self, request):
"""
Validate request integrity, extract and return request parameters.
Since the subsequent view permission check requires the target model
admin, which is determined here, raise PermissionDenied if the
requested app, model or field are malformed.
Raise Http404 if the target model admin is not configured properly with
search_fields.
"""
term = request.GET.get("term", "")
try:
app_label = request.GET["app_label"]
model_name = request.GET["model_name"]
field_name = request.GET["field_name"]
except KeyError as e:
raise PermissionDenied from e
# Retrieve objects from parameters.
try:
source_model = apps.get_model(app_label, model_name)
except LookupError as e:
raise PermissionDenied from e
try:
source_field = source_model._meta.get_field(field_name)
except FieldDoesNotExist as e:
raise PermissionDenied from e
try:
remote_model = source_field.remote_field.model
except AttributeError as e:
raise PermissionDenied from e
try:
model_admin = self.admin_site.get_model_admin(remote_model)
except NotRegistered as e:
raise PermissionDenied from e
# Validate suitability of objects.
if not model_admin.get_search_fields(request):
raise Http404(
"%s must have search_fields for the autocomplete_view."
% type(model_admin).__qualname__
)
to_field_name = getattr(
source_field.remote_field, "field_name", remote_model._meta.pk.attname
)
to_field_name = remote_model._meta.get_field(to_field_name).attname
if not model_admin.to_field_allowed(request, to_field_name):
raise PermissionDenied
return term, model_admin, source_field, to_field_name
def has_perm(self, request, obj=None):
"""Check if user has permission to access the related model."""
return self.model_admin.has_view_permission(request, obj=obj)
| AutocompleteJsonView |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 7600,
"end": 7931
} | class ____(SuccessMessageMixin, PrivateViewMixin):
"""Common pieces for model views of Project."""
model = Project
lookup_url_kwarg = "project_slug"
lookup_field = "slug"
context_object_name = "project"
def get_queryset(self):
return self.model.objects.for_admin_user(self.request.user)
| ProjectMixin |
python | kamyu104__LeetCode-Solutions | Python/single-row-keyboard.py | {
"start": 29,
"end": 393
} | class ____(object):
def calculateTime(self, keyboard, word):
"""
:type keyboard: str
:type word: str
:rtype: int
"""
lookup = {c:i for i, c in enumerate(keyboard)}
result, prev = 0, 0
for c in word:
result += abs(lookup[c]-prev)
prev = lookup[c]
return result
| Solution |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/event.py | {
"start": 136,
"end": 420
} | class ____(Enum):
IMAGE = "image"
DOCUMENT = "document"
TEXT = "text"
HTML = "html"
CSV = "csv"
MARKDOWN = "md"
SPREADSHEET = "spreadsheet"
PRESENTATION = "presentation"
PDF = "pdf"
UNKNOWN = "unknown"
# LlamaIndex instrumentation events
| FileType |
python | google__pytype | pytype/matcher.py | {
"start": 4659,
"end": 5577
} | class ____:
"""Collection of TypeParameter objects encountered during matching."""
def __init__(self):
self.seen = set()
self._mutually_exclusive = collections.defaultdict(set)
def add_mutually_exclusive_groups(self, groups):
"""Adds groups of mutually exclusive type parameters.
For example, [{"T1", "T2"}, {"T3", "T4"}] would mean that the following
pairs are mutually exclusive: (T1, T3), (T1, T4), (T2, T3), (T2, T4).
Args:
groups: The mutually exclusive groups.
"""
all_params = set.union(*groups)
for group in groups:
mutually_exclusive = all_params - group
for name in group:
self._mutually_exclusive[name].update(mutually_exclusive)
def has_mutually_exclusive(self, name, subst):
"""Whether 'subst' has a param that is mutually exclusive with 'name'."""
return bool(self._mutually_exclusive[name].intersection(subst))
| _TypeParams |
python | modin-project__modin | modin/core/execution/dispatching/factories/factories.py | {
"start": 23958,
"end": 24137
} | class ____(BaseFactory):
@classmethod
@doc(_doc_factory_prepare_method, io_module_name="`NativeIO`")
def prepare(cls):
cls.io_cls = NativeIO
| NativeOnNativeFactory |
python | doocs__leetcode | solution/3100-3199/3173.Bitwise OR of Adjacent Elements/Solution.py | {
"start": 0,
"end": 119
} | class ____:
def orArray(self, nums: List[int]) -> List[int]:
return [a | b for a, b in pairwise(nums)]
| Solution |
python | realpython__materials | django-markdown/dmd_app/migrations/0001_initial.py | {
"start": 92,
"end": 817
} | class ____(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="MarkdownContent",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=100)),
("content", models.TextField()),
],
options={
"verbose_name_plural": "Markdown content",
},
),
]
| Migration |
python | getsentry__sentry | tests/snuba/rules/conditions/test_event_frequency.py | {
"start": 52369,
"end": 52639
} | class ____(
PerfIssuePlatformEventMixin,
EventUniqueUserFrequencyConditionTestCase,
):
pass
@freeze_time(
(timezone.now() - timedelta(days=2)).replace(hour=12, minute=40, second=0, microsecond=0)
)
| PerfIssuePlatformIssueUniqueUserFrequencyConditionTestCase |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 135009,
"end": 137798
} | class ____(Request):
"""
Move dataviews to a project
:param ids: Dataviews to move
:type ids: Sequence[str]
:param project: Target project ID. If not provided, `project_name` must be
provided. Use null for the root project
:type project: str
:param project_name: Target project name. If provided and a project with this
name does not exist, a new project will be created. If not provided, `project`
must be provided.
:type project_name: str
"""
_service = "dataviews"
_action = "move"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Dataviews to move",
"items": {"type": "string"},
"type": "array",
},
"project": {
"description": (
"Target project ID. If not provided, `project_name` must be provided. Use null for the root project"
),
"type": "string",
},
"project_name": {
"description": (
"Target project name. If provided and a project with this name does not exist, a new project will"
" be created. If not provided, `project` must be provided."
),
"type": "string",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(self, ids, project=None, project_name=None, **kwargs):
super(MoveRequest, self).__init__(**kwargs)
self.ids = ids
self.project = project
self.project_name = project_name
@schema_property("ids")
def ids(self):
return self._property_ids
@ids.setter
def ids(self, value):
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("project_name")
def project_name(self):
return self._property_project_name
@project_name.setter
def project_name(self, value):
if value is None:
self._property_project_name = None
return
self.assert_isinstance(value, "project_name", six.string_types)
self._property_project_name = value
| MoveRequest |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/aot_autograd_result.py | {
"start": 7891,
"end": 8066
} | class ____(FxGraphCacheLoadable):
"""
Cacheable entry for a forward function
"""
def _is_backward(self) -> bool:
return False
@dataclass
| CompiledForward |
python | pennersr__django-allauth | allauth/idp/oidc/migrations/0001_initial.py | {
"start": 170,
"end": 6106
} | class ____(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Client",
fields=[
(
"id",
models.CharField(
default=allauth.idp.oidc.models.default_client_id,
max_length=100,
primary_key=True,
serialize=False,
verbose_name="Client ID",
),
),
("name", models.CharField(max_length=100)),
(
"secret",
models.CharField(
default=allauth.idp.oidc.models.default_client_secret,
max_length=200,
),
),
(
"scopes",
models.TextField(
default="openid",
help_text="The scope(s) the client is allowed to request. Provide one value per line, e.g.: openid(ENTER)profile(ENTER)email(ENTER)",
),
),
(
"type",
models.CharField(
choices=[
("confidential", "Confidential"),
("public", "Public"),
],
default="confidential",
max_length=20,
),
),
(
"grant_types",
models.TextField(
default="authorization_code",
help_text="A list of allowed grant types. Provide one value per line, e.g.: authorization_code(ENTER)client_credentials(ENTER)refresh_token(ENTER)",
),
),
(
"redirect_uris",
models.TextField(
blank=True,
default="",
help_text="A list of allowed redirect (callback) URLs, one per line.",
),
),
(
"cors_origins",
models.TextField(
blank=True,
default="",
help_text="A list of allowed origins for cross-origin requests, one per line.",
verbose_name="CORS allowed origins",
),
),
(
"response_types",
models.TextField(
default="code",
help_text="A list of allowed response types. Provide one value per line, e.g.: code(ENTER)id_token token(ENTER)",
),
),
(
"skip_consent",
models.BooleanField(
default=False,
help_text="Flag to allow skip the consent screen for this client",
),
),
("created_at", models.DateTimeField(default=django.utils.timezone.now)),
("data", models.JSONField(blank=True, default=None, null=True)),
(
"owner",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "client",
"verbose_name_plural": "clients",
},
),
migrations.CreateModel(
name="Token",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"type",
models.CharField(
choices=[
("ia", "Initial access token"),
("at", "Access token"),
("rt", "Refresh token"),
("ac", "Authorization code"),
],
max_length=2,
),
),
("hash", models.CharField(max_length=255)),
("data", models.JSONField(blank=True, default=None, null=True)),
("created_at", models.DateTimeField(default=django.utils.timezone.now)),
(
"expires_at",
models.DateTimeField(blank=True, db_index=True, null=True),
),
("scopes", models.TextField(default="")),
(
"client",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="allauth_idp_oidc.client",
),
),
(
"user",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"unique_together": {("type", "hash")},
},
),
]
| Migration |
python | doocs__leetcode | solution/2200-2299/2218.Maximum Value of K Coins From Piles/Solution.py | {
"start": 0,
"end": 477
} | class ____:
def maxValueOfCoins(self, piles: List[List[int]], k: int) -> int:
n = len(piles)
f = [[0] * (k + 1) for _ in range(n + 1)]
for i, nums in enumerate(piles, 1):
s = list(accumulate(nums, initial=0))
for j in range(k + 1):
for h, w in enumerate(s):
if j < h:
break
f[i][j] = max(f[i][j], f[i - 1][j - h] + w)
return f[n][k]
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.