language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PrefectHQ__prefect | src/prefect/settings/models/server/services.py | {
"start": 18013,
"end": 20251
} | class ____(PrefectBaseSettings):
"""
Settings for controlling server services
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "services")
)
cancellation_cleanup: ServerServicesCancellationCleanupSettings = Field(
default_factory=ServerServicesCancellationCleanupSettings,
description="Settings for controlling the cancellation cleanup service",
)
event_persister: ServerServicesEventPersisterSettings = Field(
default_factory=ServerServicesEventPersisterSettings,
description="Settings for controlling the event persister service",
)
event_logger: ServerServicesEventLoggerSettings = Field(
default_factory=ServerServicesEventLoggerSettings,
description="Settings for controlling the event logger service",
)
foreman: ServerServicesForemanSettings = Field(
default_factory=ServerServicesForemanSettings,
description="Settings for controlling the foreman service",
)
late_runs: ServerServicesLateRunsSettings = Field(
default_factory=ServerServicesLateRunsSettings,
description="Settings for controlling the late runs service",
)
scheduler: ServerServicesSchedulerSettings = Field(
default_factory=ServerServicesSchedulerSettings,
description="Settings for controlling the scheduler service",
)
pause_expirations: ServerServicesPauseExpirationsSettings = Field(
default_factory=ServerServicesPauseExpirationsSettings,
description="Settings for controlling the pause expiration service",
)
repossessor: ServerServicesRepossessorSettings = Field(
default_factory=ServerServicesRepossessorSettings,
description="Settings for controlling the repossessor service",
)
task_run_recorder: ServerServicesTaskRunRecorderSettings = Field(
default_factory=ServerServicesTaskRunRecorderSettings,
description="Settings for controlling the task run recorder service",
)
triggers: ServerServicesTriggersSettings = Field(
default_factory=ServerServicesTriggersSettings,
description="Settings for controlling the triggers service",
)
| ServerServicesSettings |
python | explosion__spaCy | spacy/lang/mk/__init__.py | {
"start": 852,
"end": 1511
} | class ____(Language):
lang = "mk"
Defaults = MacedonianDefaults
@Macedonian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "rule",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return MacedonianLemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
__all__ = ["Macedonian"]
| Macedonian |
python | ray-project__ray | release/ray_release/exception.py | {
"start": 1024,
"end": 1084
} | class ____(ReleaseTestError):
pass
| ReleaseTestPackageError |
python | PrefectHQ__prefect | tests/server/utilities/test_database.py | {
"start": 1060,
"end": 1120
} | class ____(enum.Enum):
RED = "RED"
BLUE = "BLUE"
| Color |
python | huggingface__transformers | src/transformers/models/phimoe/modular_phimoe.py | {
"start": 12631,
"end": 12689
} | class ____(MixtralDecoderLayer):
pass
| PhimoeDecoderLayer |
python | gevent__gevent | src/greentest/3.10/test_ssl.py | {
"start": 77753,
"end": 79791
} | class ____(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
| MemoryBIOTests |
python | getsentry__sentry | tests/sentry/models/test_projectsdk.py | {
"start": 333,
"end": 10050
} | class ____(TestCase):
event_type = EventType.PROFILE_CHUNK
def setUp(self) -> None:
# setup some mock data inside the sdk index cache
SDK_DATA: dict[str, dict[str, Any]] = {
"sentry.python": {},
}
cache.set(SDK_INDEX_CACHE_KEY, SDK_DATA, 60)
def assert_db_entry(
self,
project,
event_type,
sdk_name,
sdk_version,
):
project_sdk = ProjectSDK.objects.get(
project=project,
event_type=event_type.value,
sdk_name=sdk_name,
)
assert project_sdk.sdk_version == sdk_version
return project_sdk
def assert_cache_entry(
self,
project,
event_type,
sdk_name,
sdk_version,
):
cache_key = ProjectSDK.get_cache_key(project, event_type, sdk_name)
project_sdk = cache.get(cache_key)
assert project_sdk.project == project
assert project_sdk.event_type == event_type.value
assert project_sdk.sdk_name == sdk_name
assert project_sdk.sdk_version == sdk_version
return project_sdk
def test_first_sdk_version(self) -> None:
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
# check the db entry was created
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
# check the cache entry was created
cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert db_project_sdk.id == cache_project_sdk.id
def test_newer_sdk_version(self) -> None:
project_sdk = ProjectSDK.objects.create(
project=self.project,
event_type=self.event_type.value,
sdk_name="sentry.python",
sdk_version="2.21.0",
)
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
# check the db entry was updated
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == db_project_sdk.id
# check the cache entry was created
cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == cache_project_sdk.id
def test_older_sdk_version(self) -> None:
project_sdk = ProjectSDK.objects.create(
project=self.project,
event_type=self.event_type.value,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="2.21.0",
)
# check the db entry was unchanged
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == db_project_sdk.id
# check the cache entry was created
cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == cache_project_sdk.id
def test_same_sdk_version(self) -> None:
project_sdk = ProjectSDK.objects.create(
project=self.project,
event_type=self.event_type.value,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
# check the db entry was unchanged
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == db_project_sdk.id
# check the cache entry was created
cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == cache_project_sdk.id
def test_no_existing_version(self) -> None:
project_sdk = ProjectSDK.objects.create(
project=self.project,
event_type=self.event_type.value,
sdk_name="sentry.python",
)
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
# check the db entry was updated
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == db_project_sdk.id
# check the cache entry was created
cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == cache_project_sdk.id
def test_no_new_version(self) -> None:
project_sdk = ProjectSDK.objects.create(
project=self.project,
event_type=self.event_type.value,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="",
)
# check the db entry was unchanged
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert project_sdk.id == db_project_sdk.id
# check the cache entry does not exist
cache_key = ProjectSDK.get_cache_key(self.project, self.event_type, "sentry.python")
assert cache.get(cache_key) is None
def test_updated_cached_sdk_version(self) -> None:
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="2.21.0",
)
# check the db entry was created
before_db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.21.0",
)
# check the cache entry was created
before_cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.21.0",
)
assert before_db_project_sdk.id == before_cache_project_sdk.id
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python",
sdk_version="2.23.0",
)
# check the db entry was created
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert before_db_project_sdk.id == db_project_sdk.id
# check the cache entry was created
cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert before_cache_project_sdk.id == cache_project_sdk.id
def test_normalized_sdk_name(self) -> None:
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.python.django",
sdk_version="2.23.0",
)
# check the db entry was created
db_project_sdk = self.assert_db_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
# check the cache entry was created
cache_project_sdk = self.assert_cache_entry(
self.project,
self.event_type,
"sentry.python",
"2.23.0",
)
assert db_project_sdk.id == cache_project_sdk.id
def test_unknown_sdk_name(self) -> None:
ProjectSDK.update_with_newest_version_or_create(
project=self.project,
event_type=self.event_type,
sdk_name="sentry.unknown",
sdk_version="2.23.0",
)
# check the db entry was created
assert not ProjectSDK.objects.filter(
project=self.project,
event_type=self.event_type.value,
sdk_name="sentry.unknown",
).exists()
# check the cache entry does not exist
cache_key = ProjectSDK.get_cache_key(self.project, self.event_type, "sentry.unknown")
assert cache.get(cache_key) is None
| UpdateWithNewestVersionOrCreateTest |
python | kamyu104__LeetCode-Solutions | Python/h-index.py | {
"start": 940,
"end": 1165
} | class ____(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
return sum(x >= i + 1 for i, x in enumerate(sorted(citations, reverse=True)))
| Solution3 |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 50935,
"end": 63205
} | class ____(BaseTest):
def test_str(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(str(glob), "@glob = global i32 0")
def test_name(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(glob.name, "glob")
glob.name = "foobar"
self.assertEqual(glob.name, "foobar")
def test_linkage(self):
mod = self.module()
glob = mod.get_global_variable("glob")
linkage = glob.linkage
self.assertIsInstance(glob.linkage, llvm.Linkage)
glob.linkage = linkage
self.assertEqual(glob.linkage, linkage)
for linkage in ("internal", "external"):
glob.linkage = linkage
self.assertIsInstance(glob.linkage, llvm.Linkage)
self.assertEqual(glob.linkage.name, linkage)
def test_visibility(self):
mod = self.module()
glob = mod.get_global_variable("glob")
visibility = glob.visibility
self.assertIsInstance(glob.visibility, llvm.Visibility)
glob.visibility = visibility
self.assertEqual(glob.visibility, visibility)
for visibility in ("hidden", "protected", "default"):
glob.visibility = visibility
self.assertIsInstance(glob.visibility, llvm.Visibility)
self.assertEqual(glob.visibility.name, visibility)
def test_storage_class(self):
mod = self.module()
glob = mod.get_global_variable("glob")
storage_class = glob.storage_class
self.assertIsInstance(glob.storage_class, llvm.StorageClass)
glob.storage_class = storage_class
self.assertEqual(glob.storage_class, storage_class)
for storage_class in ("dllimport", "dllexport", "default"):
glob.storage_class = storage_class
self.assertIsInstance(glob.storage_class, llvm.StorageClass)
self.assertEqual(glob.storage_class.name, storage_class)
def test_add_function_attribute(self):
mod = self.module()
fn = mod.get_function("sum")
fn.add_function_attribute("nocapture")
with self.assertRaises(ValueError) as raises:
fn.add_function_attribute("zext")
self.assertEqual(str(raises.exception), "no such attribute 'zext'")
def test_module(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertIs(glob.module, mod)
def test_type(self):
mod = self.module()
glob = mod.get_global_variable("glob")
tp = glob.type
self.assertIsInstance(tp, llvm.TypeRef)
def test_type_name(self):
mod = self.module()
glob = mod.get_global_variable("glob")
tp = glob.type
self.assertEqual(tp.name, "")
st = mod.get_global_variable("glob_struct")
self.assertIsNotNone(re.match(r"struct\.glob_type(\.[\d]+)?",
st.global_value_type.name))
def test_type_printing_variable(self):
mod = self.module()
glob = mod.get_global_variable("glob")
tp = glob.global_value_type
self.assertEqual(str(tp), 'i32')
def test_type_printing_function(self):
mod = self.module()
fn = mod.get_function("sum")
self.assertEqual(str(fn.global_value_type), "i32 (i32, i32)")
def test_type_printing_struct(self):
mod = self.module()
st = mod.get_global_variable("glob_struct")
self.assertTrue(st.type.is_pointer)
self.assertIsNotNone(re.match(r'ptr', str(st.type)))
self.assertIsNotNone(re.match(
r"%struct\.glob_type(\.[\d]+)? = type { i64, \[2 x i64\] }",
str(st.global_value_type)))
def test_close(self):
glob = self.glob()
glob.close()
glob.close()
def test_is_declaration(self):
defined = self.module().get_function('sum')
declared = self.module(asm_sum_declare).get_function('sum')
self.assertFalse(defined.is_declaration)
self.assertTrue(declared.is_declaration)
def test_module_global_variables(self):
mod = self.module(asm_sum)
gvars = list(mod.global_variables)
self.assertEqual(len(gvars), 4)
for v in gvars:
self.assertTrue(v.is_global)
def test_module_functions(self):
mod = self.module()
funcs = list(mod.functions)
self.assertEqual(len(funcs), 1)
func = funcs[0]
self.assertTrue(func.is_function)
self.assertEqual(func.name, 'sum')
with self.assertRaises(ValueError):
func.instructions
with self.assertRaises(ValueError):
func.operands
with self.assertRaises(ValueError):
func.opcode
def test_function_arguments(self):
mod = self.module()
func = mod.get_function('sum')
self.assertTrue(func.is_function)
args = list(func.arguments)
self.assertEqual(len(args), 2)
self.assertTrue(args[0].is_argument)
self.assertTrue(args[1].is_argument)
self.assertEqual(args[0].name, '.1')
self.assertEqual(str(args[0].type), 'i32')
self.assertEqual(args[1].name, '.2')
self.assertEqual(str(args[1].type), 'i32')
with self.assertRaises(ValueError):
args[0].blocks
with self.assertRaises(ValueError):
args[0].arguments
def test_function_blocks(self):
func = self.module().get_function('sum')
blocks = list(func.blocks)
self.assertEqual(len(blocks), 1)
block = blocks[0]
self.assertTrue(block.is_block)
def test_block_instructions(self):
func = self.module().get_function('sum')
insts = list(list(func.blocks)[0].instructions)
self.assertEqual(len(insts), 3)
self.assertTrue(insts[0].is_instruction)
self.assertTrue(insts[1].is_instruction)
self.assertTrue(insts[2].is_instruction)
self.assertEqual(insts[0].opcode, 'add')
self.assertEqual(insts[1].opcode, 'add')
self.assertEqual(insts[2].opcode, 'ret')
def test_instruction_operands(self):
func = self.module().get_function('sum')
add = list(list(func.blocks)[0].instructions)[0]
self.assertEqual(add.opcode, 'add')
operands = list(add.operands)
self.assertEqual(len(operands), 2)
self.assertTrue(operands[0].is_operand)
self.assertTrue(operands[1].is_operand)
self.assertEqual(operands[0].name, '.1')
self.assertEqual(str(operands[0].type), 'i32')
self.assertEqual(operands[1].name, '.2')
self.assertEqual(str(operands[1].type), 'i32')
def test_function_attributes(self):
ver = llvm.llvm_version_info[0]
readonly_attrs = [b'memory(read)' if ver > 15 else b'readonly']
mod = self.module(asm_attributes)
for func in mod.functions:
attrs = list(func.attributes)
if func.name == 'a_readonly_func':
self.assertEqual(attrs, readonly_attrs)
elif func.name == 'a_arg0_return_func':
self.assertEqual(attrs, [])
args = list(func.arguments)
self.assertEqual(list(args[0].attributes), [b'returned'])
self.assertEqual(list(args[1].attributes), [])
def test_value_kind(self):
mod = self.module()
self.assertEqual(mod.get_global_variable('glob').value_kind,
llvm.ValueKind.global_variable)
func = mod.get_function('sum')
self.assertEqual(func.value_kind, llvm.ValueKind.function)
block = list(func.blocks)[0]
self.assertEqual(block.value_kind, llvm.ValueKind.basic_block)
inst = list(block.instructions)[1]
self.assertEqual(inst.value_kind, llvm.ValueKind.instruction)
self.assertEqual(list(inst.operands)[0].value_kind,
llvm.ValueKind.constant_int)
self.assertEqual(list(inst.operands)[1].value_kind,
llvm.ValueKind.instruction)
iasm_func = self.module(asm_inlineasm).get_function('foo')
iasm_inst = list(list(iasm_func.blocks)[0].instructions)[0]
self.assertEqual(list(iasm_inst.operands)[0].value_kind,
llvm.ValueKind.inline_asm)
def test_is_constant(self):
mod = self.module()
self.assertTrue(mod.get_global_variable('glob').is_constant)
constant_operands = 0
for func in mod.functions:
self.assertTrue(func.is_constant)
for block in func.blocks:
self.assertFalse(block.is_constant)
for inst in block.instructions:
self.assertFalse(inst.is_constant)
for op in inst.operands:
if op.is_constant:
constant_operands += 1
self.assertEqual(constant_operands, 1)
def test_constant_int(self):
mod = self.module()
func = mod.get_function('sum')
insts = list(list(func.blocks)[0].instructions)
self.assertEqual(insts[1].opcode, 'add')
operands = list(insts[1].operands)
self.assertTrue(operands[0].is_constant)
self.assertFalse(operands[1].is_constant)
self.assertEqual(operands[0].get_constant_value(), 0)
with self.assertRaises(ValueError):
operands[1].get_constant_value()
mod = self.module(asm_sum3)
func = mod.get_function('sum')
insts = list(list(func.blocks)[0].instructions)
posint64 = list(insts[1].operands)[0]
negint64 = list(insts[2].operands)[0]
self.assertEqual(posint64.get_constant_value(), 5)
self.assertEqual(negint64.get_constant_value(signed_int=True), -5)
# Convert from unsigned arbitrary-precision integer to signed i64
as_u64 = negint64.get_constant_value(signed_int=False)
as_i64 = int.from_bytes(as_u64.to_bytes(8, 'little'), 'little',
signed=True)
self.assertEqual(as_i64, -5)
def test_constant_fp(self):
mod = self.module(asm_double_locale)
func = mod.get_function('foo')
insts = list(list(func.blocks)[0].instructions)
self.assertEqual(len(insts), 2)
self.assertEqual(insts[0].opcode, 'fadd')
operands = list(insts[0].operands)
self.assertTrue(operands[0].is_constant)
self.assertAlmostEqual(operands[0].get_constant_value(), 0.0)
self.assertTrue(operands[1].is_constant)
self.assertAlmostEqual(operands[1].get_constant_value(), 3.14)
mod = self.module(asm_double_inaccurate)
func = mod.get_function('foo')
inst = list(list(func.blocks)[0].instructions)[0]
operands = list(inst.operands)
with self.assertRaises(ValueError):
operands[0].get_constant_value()
self.assertAlmostEqual(operands[1].get_constant_value(round_fp=True), 0)
def test_constant_as_string(self):
mod = self.module(asm_null_constant)
func = mod.get_function('bar')
inst = list(list(func.blocks)[0].instructions)[0]
arg = list(inst.operands)[0]
self.assertTrue(arg.is_constant)
self.assertEqual(arg.get_constant_value(), 'ptr null')
def test_incoming_phi_blocks(self):
mod = self.module(asm_phi_blocks)
func = mod.get_function('foo')
blocks = list(func.blocks)
instructions = list(blocks[-1].instructions)
self.assertTrue(instructions[0].is_instruction)
self.assertEqual(instructions[0].opcode, 'phi')
incoming_blocks = list(instructions[0].incoming_blocks)
self.assertEqual(len(incoming_blocks), 2)
self.assertTrue(incoming_blocks[0].is_block)
self.assertTrue(incoming_blocks[1].is_block)
# Test reference to blocks (named or unnamed)
self.assertEqual(incoming_blocks[0], blocks[-1])
self.assertEqual(incoming_blocks[1], blocks[0])
# Test case that should fail
self.assertNotEqual(instructions[1].opcode, 'phi')
with self.assertRaises(ValueError):
instructions[1].incoming_blocks
| TestValueRef |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/errors.py | {
"start": 1160,
"end": 1234
} | class ____:
field_names: Sequence[str]
@record
| FieldsNotDefinedErrorData |
python | python__mypy | test-data/unit/plugins/named_callable.py | {
"start": 174,
"end": 1098
} | class ____(Plugin):
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
if fullname == "m.decorator1":
return decorator_call_hook
if fullname == "m._decorated": # This is a dummy name generated by the plugin
return decorate_hook
return None
def decorator_call_hook(ctx: FunctionContext) -> Type:
default = get_proper_type(ctx.default_return_type)
if isinstance(default, CallableType):
return default.copy_modified(name="m._decorated")
return ctx.default_return_type
def decorate_hook(ctx: FunctionContext) -> Type:
default = get_proper_type(ctx.default_return_type)
if isinstance(default, CallableType):
return default.copy_modified(ret_type=ctx.api.named_generic_type("builtins.str", []))
return ctx.default_return_type
def plugin(version: str) -> type[MyPlugin]:
return MyPlugin
| MyPlugin |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/dagster_run.py | {
"start": 20899,
"end": 24320
} | class ____(IHaveNew):
"""Defines a filter across job runs, for use when querying storage directly.
Each field of the RunsFilter represents a logical AND with each other. For
example, if you specify job_name and tags, then you will receive only runs
with the specified job_name AND the specified tags. If left blank, then
all values will be permitted for that field.
Args:
run_ids (Optional[List[str]]): A list of job run_id values.
job_name (Optional[str]):
Name of the job to query for. If blank, all job_names will be accepted.
statuses (Optional[List[DagsterRunStatus]]):
A list of run statuses to filter by. If blank, all run statuses will be allowed.
tags (Optional[Dict[str, Union[str, List[str]]]]):
A dictionary of run tags to query by. All tags specified here must be present for a given run to pass the filter.
snapshot_id (Optional[str]): The ID of the job snapshot to query for. Intended for internal use.
updated_after (Optional[DateTime]): Filter by runs that were last updated before this datetime.
created_before (Optional[DateTime]): Filter by runs that were created before this datetime.
exclude_subruns (Optional[bool]): If true, runs that were launched to backfill historical data will be excluded from results.
"""
run_ids: Optional[Sequence[str]]
job_name: Optional[str]
statuses: Sequence[DagsterRunStatus]
tags: Mapping[str, Union[str, Sequence[str]]]
snapshot_id: Optional[str]
updated_after: Optional[datetime]
updated_before: Optional[datetime]
created_after: Optional[datetime]
created_before: Optional[datetime]
exclude_subruns: Optional[bool]
def __new__(
cls,
run_ids: Optional[Sequence[str]] = None,
job_name: Optional[str] = None,
statuses: Optional[Sequence[DagsterRunStatus]] = None,
tags: Optional[Mapping[str, Union[str, Sequence[str]]]] = None,
snapshot_id: Optional[str] = None,
updated_after: Optional[datetime] = None,
updated_before: Optional[datetime] = None,
created_after: Optional[datetime] = None,
created_before: Optional[datetime] = None,
exclude_subruns: Optional[bool] = None,
):
check.invariant(run_ids != [], "When filtering on run ids, a non-empty list must be used.")
return super().__new__(
cls,
run_ids=run_ids,
job_name=job_name,
statuses=statuses or [],
tags=tags or {},
snapshot_id=snapshot_id,
updated_after=updated_after,
updated_before=updated_before,
created_after=created_after,
created_before=created_before,
exclude_subruns=exclude_subruns,
)
@staticmethod
def for_schedule(
schedule: Union["RemoteSchedule", "InstigatorState", "ScheduleDefinition"],
) -> "RunsFilter":
return RunsFilter(tags=DagsterRun.tags_for_schedule(schedule))
@staticmethod
def for_sensor(
sensor: Union["RemoteSensor", "InstigatorState", "SensorDefinition"],
) -> "RunsFilter":
return RunsFilter(tags=DagsterRun.tags_for_sensor(sensor))
@staticmethod
def for_backfill(backfill_id: str) -> "RunsFilter":
return RunsFilter(tags=DagsterRun.tags_for_backfill_id(backfill_id))
| RunsFilter |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 4133,
"end": 4256
} | class ____(Web3Exception):
"""
Raised when a fallback function doesn't exist in contract.
"""
| ABIFallbackNotFound |
python | PyCQA__pylint | tests/functional/a/abstract/abstract_class_instantiated.py | {
"start": 1550,
"end": 1628
} | class ____(Container, Iterator, Sizable, Hashable):
pass
| NoMroAbstractMethods |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_gradients_test.py | {
"start": 1554,
"end": 15816
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
config = config_pb2.ConfigProto(graph_options=graph_options)
self.sess = session.Session(config=config)
with self.sess.as_default():
self.u = variables.Variable(2.0, name="u")
self.v = variables.Variable(3.0, name="v")
self.w = math_ops.multiply(self.u.value(), self.v.value(), name="w")
def tearDown(self):
ops.reset_default_graph()
debug_gradients.clear_gradient_debuggers()
def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.identify_gradient(self.w)
with self.assertRaisesRegex(ValueError,
"The graph already contains an op named .*"):
grad_debugger.identify_gradient(self.w)
def testIdentifyGradientWorksOnMultipleLosses(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
y = math_ops.add(self.w, -1.0, name="y")
debug_y = grad_debugger_1.identify_gradient(y)
z1 = math_ops.square(debug_y, name="z1")
debug_y = grad_debugger_2.identify_gradient(y)
z2 = math_ops.sqrt(debug_y, name="z2")
with grad_debugger_1:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
with grad_debugger_2:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, tensor.Tensor)
self.assertIsInstance(dz2_dy, tensor.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0**2, self.sess.run(z1))
self.assertAllClose(5.0**0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger_1.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
# There are >1 gradient debuggers registered, and grad_debugger is not used
# as a context manager here, so the gradient w.r.t. self.w will not be
# registered.
gradients_impl.gradients(y, [self.u, self.v])
with self.assertRaisesRegex(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_1.gradient_tensor(self.w)
with self.assertRaisesRegex(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_2.gradient_tensor(self.w)
def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
grad_debugger = debug_gradients.GradientsDebugger()
with self.assertRaisesRegex(
TypeError,
r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead "
r"has type .*Operation.*"):
grad_debugger.gradient_tensor(variables.global_variables_initializer())
def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
gradient_descent.GradientDescentOptimizer(0.1).minimize(y)
self.sess.run(variables.global_variables_initializer())
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
# The constructrion of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):
y = math_ops.add(self.w, -1.0, name="y")
# The constructrion of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, tensor.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsWorksOnRefTensor(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "u:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
self.assertAllClose(3.0, self.sess.run(
grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsWorksOnMultipleTensors(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph,
"(u|w):0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
self.assertEqual(2, len(grad_debugger.gradient_tensors()))
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.assertIsInstance(grad_debugger.gradient_tensor("w:0"), tensor.Tensor)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(1.0, self.sess.run(
grad_debugger.gradient_tensor("w:0")))
self.assertAllClose(3.0, self.sess.run(
grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsByXTensorsWorks(self):
y = math_ops.add(self.w, -1.0, name="foo/y")
z = math_ops.square(y, name="foo/z")
# The constructrion of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_x_tensors().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
[self.w, self.u, y]):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.assertEqual(3, len(grad_debugger.gradient_tensors()))
u_grad = grad_debugger.gradient_tensor(self.u)
w_grad = grad_debugger.gradient_tensor(self.w)
y_grad = grad_debugger.gradient_tensor(y)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(10.0, self.sess.run(y_grad))
self.assertAllClose(10.0, self.sess.run(w_grad))
self.assertAllClose(30.0, self.sess.run(u_grad))
def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):
y = math_ops.add(self.w, -1.0, name="y")
z1 = math_ops.square(y, name="z1")
z2 = math_ops.sqrt(y, name="z2")
grad_debugger_1 = debug_gradients.GradientsDebugger()
with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
grad_debugger_2 = debug_gradients.GradientsDebugger()
with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, tensor.Tensor)
self.assertIsInstance(dz2_dy, tensor.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0**2, self.sess.run(z1))
self.assertAllClose(5.0**0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
def testGradientsValuesFromDumpWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
z = math_ops.square(y, name="z")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
[self.w, self.u, y]):
train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
dump_dir = tempfile.mkdtemp()
debug_url = "file://" + dump_dir
debug_utils.watch_graph(run_options, self.sess.graph, debug_urls=debug_url)
run_metadata = config_pb2.RunMetadata()
self.assertAllClose(2.0, self.sess.run(self.u))
self.sess.run(train_op, options=run_options, run_metadata=run_metadata)
self.assertAllClose(-1.0, self.sess.run(self.u))
dump = debug_data.DebugDumpDir(
dump_dir, partition_graphs=run_metadata.partition_graphs)
dump.set_python_graph(self.sess.graph)
y_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, y, dump)
self.assertEqual(1, len(y_grad_values))
self.assertAllClose(10.0, y_grad_values[0])
w_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.w, dump)
self.assertEqual(1, len(w_grad_values))
self.assertAllClose(10.0, w_grad_values[0])
u_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.u, dump)
self.assertEqual(1, len(u_grad_values))
self.assertAllClose(30.0, u_grad_values[0])
with self.assertRaisesRegex(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "
r"x-tensor v:0"):
debug_gradients.gradient_values_from_dump(grad_debugger, self.v, dump)
# Cleanup.
file_io.delete_recursively(dump_dir)
if __name__ == "__main__":
googletest.main()
| IdentifyGradientTest |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 11007,
"end": 11060
} | class ____(Exception):
"""Bad archive"""
| BadArchive |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_impls.py | {
"start": 26555,
"end": 29154
} | class ____(AutoMaterializeRule, NamedTuple("_SkipOnParentMissingRule", [])):
@property
def decision_type(self) -> AutoMaterializeDecisionType:
return AutoMaterializeDecisionType.SKIP
@property
def description(self) -> str:
return "waiting on upstream data to be present"
def evaluate_for_asset(
self,
context: "AutomationContext",
) -> "AutomationResult":
from dagster._core.definitions.declarative_automation.automation_condition import (
AutomationResult,
)
asset_partitions_by_evaluation_data = defaultdict(set)
# only need to evaluate net-new candidates and candidates whose parents have changed
subset_to_evaluate = (
context.legacy_context.candidates_not_evaluated_on_previous_tick_subset
| context.legacy_context.candidate_parent_has_or_will_update_subset
)
for candidate in subset_to_evaluate.asset_partitions:
missing_parent_asset_keys = set()
for (
parent
) in context.legacy_context.get_parents_that_will_not_be_materialized_on_current_tick(
asset_partition=candidate
):
# ignore missing or unexecutable assets, which will never have a materialization or
# observation
if not (
context.legacy_context.asset_graph.has(parent.asset_key)
and context.legacy_context.asset_graph.get(parent.asset_key).is_executable
):
continue
if not context.legacy_context.instance_queryer.asset_partition_has_materialization_or_observation(
parent
):
missing_parent_asset_keys.add(parent.asset_key)
if missing_parent_asset_keys:
asset_partitions_by_evaluation_data[
WaitingOnAssetsRuleEvaluationData(
frozenset(missing_parent_asset_keys)
).frozen_metadata
].add(candidate)
true_subset, subsets_with_metadata = (
context.legacy_context.add_evaluation_data_from_previous_tick(
asset_partitions_by_evaluation_data, ignore_subset=subset_to_evaluate
)
)
true_subset = context.asset_graph_view.legacy_get_asset_subset_from_valid_subset(
true_subset
)
return AutomationResult(context, true_subset, subsets_with_metadata=subsets_with_metadata)
@whitelist_for_serdes
| SkipOnParentMissingRule |
python | walkccc__LeetCode | solutions/3342. Find Minimum Time to Reach Last Room II/3342.py | {
"start": 0,
"end": 1078
} | class ____:
# Similar to 3341. Find Minimum Time to Reach Last Room I
def minTimeToReach(self, moveTime: list[list[int]]) -> int:
return self._dijkstra(moveTime,
(0, 0),
(len(moveTime) - 1, len(moveTime[0]) - 1))
def _dijkstra(
self,
moveTime: list[list[int]],
src: tuple[int, int],
dst: tuple[int, int]
) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(moveTime)
n = len(moveTime[0])
dist = [[math.inf] * n for _ in range(m)]
dist[0][0] = 0
minHeap = [(0, src)] # (d, (ux, uy))
while minHeap:
d, u = heapq.heappop(minHeap)
if u == dst:
return d
i, j = u
if d > dist[i][j]:
continue
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
newDist = max(moveTime[x][y], d) + (i + j) % 2 + 1
if newDist < dist[x][y]:
dist[x][y] = newDist
heapq.heappush(minHeap, (newDist, (x, y)))
return -1
| Solution |
python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/select_percentile.py | {
"start": 0,
"end": 590
} | class ____(object):
def fit(self, X, y):
import sklearn.feature_selection
self.preprocessor = sklearn.feature_selection.SelectPercentile(
score_func=self.score_func, percentile=self.percentile
)
self.preprocessor.fit(X, y)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
Xt = self.preprocessor.transform(X)
if Xt.shape[1] == 0:
raise ValueError("%s removed all features." % self.__class__.__name__)
return Xt
| SelectPercentileBase |
python | tensorflow__tensorflow | tensorflow/lite/tools/optimize/debugging/python/debugger_test.py | {
"start": 4418,
"end": 16028
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.tf_model_root, cls.tf_model = _get_model()
cls.float_model = _convert_model(cls.tf_model_root, cls.tf_model)
cls.debug_model_float = _quantize_model(
cls.tf_model_root, cls.tf_model, _calibration_gen, quantized_io=False)
cls.debug_model_int8 = _quantize_model(
cls.tf_model_root, cls.tf_model, _calibration_gen, quantized_io=True)
@parameterized.named_parameters(
('float_io', False, False),
('quantized_io', True, False),
('float_io_from_converter', False, True),
('quantized_io_from_converter', True, True),
)
@test_util.run_v2_only
def test_layer_metrics(self, quantized_io, from_converter):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))})
if not from_converter:
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
debug_dataset=_calibration_gen,
debug_options=options)
else:
options.fully_quantize = quantized_io
quant_debugger = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model_root, self.tf_model,
_calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_quant_io_metrics = {
'num_elements': 9,
'stddev': 0.03850026,
'mean_error': 0.01673192,
'max_abs_error': 0.10039272,
'mean_squared_error': 0.0027558778,
'l1_norm': 0.023704167,
}
expected_float_io_metrics = {
'num_elements': 9,
'stddev': 0.050998904,
'mean_error': 0.007843441,
'max_abs_error': 0.105881885,
'mean_squared_error': 0.004357292,
'l1_norm': 0.035729896,
}
expected_metrics = (
expected_quant_io_metrics
if quantized_io else expected_float_io_metrics)
self.assertLen(quant_debugger.layer_statistics, 1)
actual_metrics = next(iter(quant_debugger.layer_statistics.values()))
self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=5)
buffer = io.StringIO()
quant_debugger.layer_statistics_dump(buffer)
reader = csv.DictReader(buffer.getvalue().split())
actual_values = next(iter(reader))
expected_values = expected_metrics.copy()
expected_values.update({
'op_name': 'CONV_2D',
'tensor_idx': 7,
'scale': 0.15686275,
'zero_point': -128,
'tensor_name': r'Identity[1-9]?$'
})
for key, value in expected_values.items():
if isinstance(value, str):
self.assertIsNotNone(
re.match(value, actual_values[key]),
'String is different from expected string. Please fix test code if'
" it's being affected by graph manipulation changes.")
elif isinstance(value, list):
self.assertAlmostEqual(
value[0], float(actual_values[key][1:-1]), places=5)
else:
self.assertAlmostEqual(value, float(actual_values[key]), places=5)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_model_metrics(self, quantized_io):
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
options = debugger.QuantizationDebugOptions(
model_debug_metrics={'stdev': lambda x, y: np.std(x[0] - y[0])})
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
float_model_content=QuantizationDebuggerTest.float_model,
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_metrics = {'stdev': 0.050998904}
actual_metrics = quant_debugger.model_statistics
self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=5)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_layer_direct_compare_metrics(self, quantized_io):
def _corr(float_values, quant_values, scale, zero_point):
dequant_values = (quant_values.astype(np.int32) - zero_point) * scale
return np.corrcoef(float_values.flatten(), dequant_values.flatten())[0, 1]
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
options = debugger.QuantizationDebugOptions(
layer_direct_compare_metrics={'corr': _corr})
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_metrics = {
'corr': 0.99999,
}
self.assertLen(quant_debugger.layer_statistics, 1)
actual_metrics = next(iter(quant_debugger.layer_statistics.values()))
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=4)
@test_util.run_v2_only
def test_wrong_input_raises_ValueError(self):
def wrong_calibration_gen():
for _ in range(5):
yield [
np.ones((1, 3, 3, 1), dtype=np.float32),
np.ones((1, 3, 3, 1), dtype=np.float32)
]
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=QuantizationDebuggerTest.debug_model_float,
debug_dataset=wrong_calibration_gen)
with self.assertRaisesRegex(
ValueError, r'inputs provided \(2\).+inputs to the model \(1\)'):
quant_debugger.run()
@test_util.run_v2_only
def test_non_debug_model_raises_ValueError(self):
normal_quant_model = _quantize_model(
QuantizationDebuggerTest.tf_model_root,
QuantizationDebuggerTest.tf_model,
_calibration_gen,
debug=False)
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
debugger.QuantizationDebugger(
quant_debug_model_content=normal_quant_model,
debug_dataset=_calibration_gen)
@parameterized.named_parameters(
('empty quantization parameter', {
'quantization_parameters': {}
}, None),
('empty scales/zero points', {
'quantization_parameters': {
'scales': [],
'zero_points': []
}
}, None),
('invalid scales/zero points', {
'quantization_parameters': {
'scales': [1.0],
'zero_points': []
}
}, None),
('correct case', {
'quantization_parameters': {
'scales': [0.5, 1.0],
'zero_points': [42, 7]
}
}, (0.5, 42)),
)
def test_get_quant_params(self, tensor_detail, expected_value):
self.assertEqual(debugger._get_quant_params(tensor_detail), expected_value)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_denylisted_ops_from_option_setter(self, quantized_io):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))},
fully_quantize=quantized_io)
quant_debugger = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model_root, self.tf_model,
_calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
options.denylisted_ops = ['CONV_2D']
# TODO(b/195084873): The exception is expected to check whether selective
# quantization was done properly, since after the selective quantization
# the model will have no quantized layers thus have no NumericVerify ops,
# resulted in this exception. Marked with a bug to fix this in more
# straightforward way.
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
quant_debugger.options = options
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_denylisted_ops_from_option_constructor(self, quantized_io):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))},
fully_quantize=quantized_io,
denylisted_ops=['CONV_2D'])
# TODO(b/195084873): Count the number of NumericVerify op.
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
_ = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model_root, self.tf_model,
_calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_denylisted_nodes_from_option_setter(self, quantized_io):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))},
fully_quantize=quantized_io)
quant_debugger = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model_root, self.tf_model,
_calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
options.denylisted_nodes = ['Identity']
# TODO(b/195084873): Count the number of NumericVerify op.
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
quant_debugger.options = options
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_denylisted_nodes_from_option_constructor(self, quantized_io):
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))},
fully_quantize=quantized_io,
denylisted_nodes=['Identity'])
# TODO(b/195084873): Count the number of NumericVerify op.
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
_ = debugger.QuantizationDebugger(
converter=_quantize_converter(self.tf_model_root, self.tf_model,
_calibration_gen),
debug_dataset=_calibration_gen,
debug_options=options)
@mock.patch.object(metrics.TFLiteMetrics,
'increase_counter_debugger_creation')
def test_creation_counter(self, increase_call):
debug_model = QuantizationDebuggerTest.debug_model_float
debugger.QuantizationDebugger(
quant_debug_model_content=debug_model, debug_dataset=_calibration_gen)
increase_call.assert_called_once()
if __name__ == '__main__':
test.main()
| QuantizationDebuggerTest |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/console.py | {
"start": 17993,
"end": 99169
} | class ____:
"""A high level console interface.
Args:
color_system (str, optional): The color system supported by your terminal,
either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None.
force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None.
force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None.
soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False.
theme (Theme, optional): An optional style theme object, or ``None`` for default theme.
stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False.
file (IO, optional): A file object where the console should write to. Defaults to stdout.
quiet (bool, Optional): Boolean to suppress all output. Defaults to False.
width (int, optional): The width of the terminal. Leave as default to auto-detect width.
height (int, optional): The height of the terminal. Leave as default to auto-detect height.
style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None.
no_color (Optional[bool], optional): Enabled no color mode, or None to auto detect. Defaults to None.
tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8.
record (bool, optional): Boolean to enable recording of terminal output,
required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False.
markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
emoji (bool, optional): Enable emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
highlight (bool, optional): Enable automatic highlighting. Defaults to True.
log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ".
highlighter (HighlighterType, optional): Default highlighter.
legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``.
safe_box (bool, optional): Restrict box options that don't render on legacy Windows.
get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log),
or None for datetime.now.
get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic.
"""
_environ: Mapping[str, str] = os.environ
def __init__(
self,
*,
color_system: Optional[
Literal["auto", "standard", "256", "truecolor", "windows"]
] = "auto",
force_terminal: Optional[bool] = None,
force_jupyter: Optional[bool] = None,
force_interactive: Optional[bool] = None,
soft_wrap: bool = False,
theme: Optional[Theme] = None,
stderr: bool = False,
file: Optional[IO[str]] = None,
quiet: bool = False,
width: Optional[int] = None,
height: Optional[int] = None,
style: Optional[StyleType] = None,
no_color: Optional[bool] = None,
tab_size: int = 8,
record: bool = False,
markup: bool = True,
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
highlight: bool = True,
log_time: bool = True,
log_path: bool = True,
log_time_format: Union[str, FormatTimeCallable] = "[%X]",
highlighter: Optional["HighlighterType"] = ReprHighlighter(),
legacy_windows: Optional[bool] = None,
safe_box: bool = True,
get_datetime: Optional[Callable[[], datetime]] = None,
get_time: Optional[Callable[[], float]] = None,
_environ: Optional[Mapping[str, str]] = None,
):
# Copy of os.environ allows us to replace it for testing
if _environ is not None:
self._environ = _environ
self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter
if self.is_jupyter:
if width is None:
jupyter_columns = self._environ.get("JUPYTER_COLUMNS")
if jupyter_columns is not None and jupyter_columns.isdigit():
width = int(jupyter_columns)
else:
width = JUPYTER_DEFAULT_COLUMNS
if height is None:
jupyter_lines = self._environ.get("JUPYTER_LINES")
if jupyter_lines is not None and jupyter_lines.isdigit():
height = int(jupyter_lines)
else:
height = JUPYTER_DEFAULT_LINES
self.tab_size = tab_size
self.record = record
self._markup = markup
self._emoji = emoji
self._emoji_variant: Optional[EmojiVariant] = emoji_variant
self._highlight = highlight
self.legacy_windows: bool = (
(detect_legacy_windows() and not self.is_jupyter)
if legacy_windows is None
else legacy_windows
)
if width is None:
columns = self._environ.get("COLUMNS")
if columns is not None and columns.isdigit():
width = int(columns) - self.legacy_windows
if height is None:
lines = self._environ.get("LINES")
if lines is not None and lines.isdigit():
height = int(lines)
self.soft_wrap = soft_wrap
self._width = width
self._height = height
self._color_system: Optional[ColorSystem]
self._force_terminal = None
if force_terminal is not None:
self._force_terminal = force_terminal
self._file = file
self.quiet = quiet
self.stderr = stderr
if color_system is None:
self._color_system = None
elif color_system == "auto":
self._color_system = self._detect_color_system()
else:
self._color_system = COLOR_SYSTEMS[color_system]
self._lock = threading.RLock()
self._log_render = LogRender(
show_time=log_time,
show_path=log_path,
time_format=log_time_format,
)
self.highlighter: HighlighterType = highlighter or _null_highlighter
self.safe_box = safe_box
self.get_datetime = get_datetime or datetime.now
self.get_time = get_time or monotonic
self.style = style
self.no_color = (
no_color if no_color is not None else "NO_COLOR" in self._environ
)
self.is_interactive = (
(self.is_terminal and not self.is_dumb_terminal)
if force_interactive is None
else force_interactive
)
self._record_buffer_lock = threading.RLock()
self._thread_locals = ConsoleThreadLocals(
theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme)
)
self._record_buffer: List[Segment] = []
self._render_hooks: List[RenderHook] = []
self._live: Optional["Live"] = None
self._is_alt_screen = False
def __repr__(self) -> str:
return f"<console width={self.width} {self._color_system!s}>"
@property
def file(self) -> IO[str]:
"""Get the file object to write to."""
file = self._file or (sys.stderr if self.stderr else sys.stdout)
file = getattr(file, "rich_proxied_file", file)
if file is None:
file = NULL_FILE
return file
@file.setter
def file(self, new_file: IO[str]) -> None:
"""Set a new file object."""
self._file = new_file
@property
def _buffer(self) -> List[Segment]:
"""Get a thread local buffer."""
return self._thread_locals.buffer
@property
def _buffer_index(self) -> int:
"""Get a thread local buffer."""
return self._thread_locals.buffer_index
@_buffer_index.setter
def _buffer_index(self, value: int) -> None:
self._thread_locals.buffer_index = value
@property
def _theme_stack(self) -> ThemeStack:
"""Get the thread local theme stack."""
return self._thread_locals.theme_stack
def _detect_color_system(self) -> Optional[ColorSystem]:
"""Detect color system from env vars."""
if self.is_jupyter:
return ColorSystem.TRUECOLOR
if not self.is_terminal or self.is_dumb_terminal:
return None
if WINDOWS: # pragma: no cover
if self.legacy_windows: # pragma: no cover
return ColorSystem.WINDOWS
windows_console_features = get_windows_console_features()
return (
ColorSystem.TRUECOLOR
if windows_console_features.truecolor
else ColorSystem.EIGHT_BIT
)
else:
color_term = self._environ.get("COLORTERM", "").strip().lower()
if color_term in ("truecolor", "24bit"):
return ColorSystem.TRUECOLOR
term = self._environ.get("TERM", "").strip().lower()
_term_name, _hyphen, colors = term.rpartition("-")
color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD)
return color_system
def _enter_buffer(self) -> None:
"""Enter in to a buffer context, and buffer all output."""
self._buffer_index += 1
def _exit_buffer(self) -> None:
"""Leave buffer context, and render content if required."""
self._buffer_index -= 1
self._check_buffer()
def set_live(self, live: "Live") -> None:
"""Set Live instance. Used by Live context manager.
Args:
live (Live): Live instance using this Console.
Raises:
errors.LiveError: If this Console has a Live context currently active.
"""
with self._lock:
if self._live is not None:
raise errors.LiveError("Only one live display may be active at once")
self._live = live
def clear_live(self) -> None:
"""Clear the Live instance."""
with self._lock:
self._live = None
def push_render_hook(self, hook: RenderHook) -> None:
"""Add a new render hook to the stack.
Args:
hook (RenderHook): Render hook instance.
"""
with self._lock:
self._render_hooks.append(hook)
def pop_render_hook(self) -> None:
"""Pop the last renderhook from the stack."""
with self._lock:
self._render_hooks.pop()
def __enter__(self) -> "Console":
"""Own context manager to enter buffer context."""
self._enter_buffer()
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
"""Exit buffer context."""
self._exit_buffer()
def begin_capture(self) -> None:
"""Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output."""
self._enter_buffer()
def end_capture(self) -> str:
"""End capture mode and return captured string.
Returns:
str: Console output.
"""
render_result = self._render_buffer(self._buffer)
del self._buffer[:]
self._exit_buffer()
return render_result
def push_theme(self, theme: Theme, *, inherit: bool = True) -> None:
"""Push a new theme on to the top of the stack, replacing the styles from the previous theme.
Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather
than calling this method directly.
Args:
theme (Theme): A theme instance.
inherit (bool, optional): Inherit existing styles. Defaults to True.
"""
self._theme_stack.push_theme(theme, inherit=inherit)
def pop_theme(self) -> None:
"""Remove theme from top of stack, restoring previous theme."""
self._theme_stack.pop_theme()
def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext:
"""Use a different theme for the duration of the context manager.
Args:
theme (Theme): Theme instance to user.
inherit (bool, optional): Inherit existing console styles. Defaults to True.
Returns:
ThemeContext: [description]
"""
return ThemeContext(self, theme, inherit)
@property
def color_system(self) -> Optional[str]:
"""Get color system string.
Returns:
Optional[str]: "standard", "256" or "truecolor".
"""
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
else:
return None
@property
def encoding(self) -> str:
"""Get the encoding of the console file, e.g. ``"utf-8"``.
Returns:
str: A standard encoding string.
"""
return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower()
    @property
    def is_terminal(self) -> bool:
        """Check if the console is writing to a terminal.

        Returns:
            bool: True if the console writing to a device capable of
            understanding terminal codes, otherwise False.
        """
        # An explicit force_terminal value (set at construction, or cached by the
        # FORCE_COLOR branch below) overrides all auto-detection.
        if self._force_terminal is not None:
            return self._force_terminal
        if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith(
            "idlelib"
        ):
            # Return False for Idle which claims to be a tty but can't handle ansi codes
            return False
        if self.is_jupyter:
            # return False for Jupyter, which may have FORCE_COLOR set
            return False
        # If FORCE_COLOR env var has any value at all, we assume a terminal.
        force_color = self._environ.get("FORCE_COLOR")
        if force_color is not None:
            # Cache the decision so subsequent calls take the fast path above.
            self._force_terminal = True
            return True
        # Fall back to asking the output file itself.
        isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None)
        try:
            return False if isatty is None else isatty()
        except ValueError:
            # in some situation (at the end of a pytest run for example) isatty() can raise
            # ValueError: I/O operation on closed file
            # return False because we aren't in a terminal anymore
            return False
@property
def is_dumb_terminal(self) -> bool:
"""Detect dumb terminal.
Returns:
bool: True if writing to a dumb terminal, otherwise False.
"""
_term = self._environ.get("TERM", "")
is_dumb = _term.lower() in ("dumb", "unknown")
return self.is_terminal and is_dumb
@property
def options(self) -> ConsoleOptions:
"""Get default console options."""
return ConsoleOptions(
max_height=self.size.height,
size=self.size,
legacy_windows=self.legacy_windows,
min_width=1,
max_width=self.width,
encoding=self.encoding,
is_terminal=self.is_terminal,
)
    @property
    def size(self) -> ConsoleDimensions:
        """Get the size of the console.

        Returns:
            ConsoleDimensions: A named tuple containing the dimensions.
        """
        # Explicit dimensions (from constructor or the size setter) win outright.
        if self._width is not None and self._height is not None:
            return ConsoleDimensions(self._width - self.legacy_windows, self._height)
        # Dumb terminals get the classic fixed 80x25.
        if self.is_dumb_terminal:
            return ConsoleDimensions(80, 25)
        width: Optional[int] = None
        height: Optional[int] = None
        if WINDOWS:  # pragma: no cover
            try:
                width, height = os.get_terminal_size()
            except (AttributeError, ValueError, OSError):  # Probably not a terminal
                pass
        else:
            # Try each standard stream in turn; any of them may be redirected,
            # so use the first one that reports a terminal size.
            for file_descriptor in _STD_STREAMS:
                try:
                    width, height = os.get_terminal_size(file_descriptor)
                except (AttributeError, ValueError, OSError):
                    pass
                else:
                    break
        # COLUMNS / LINES environment variables override the detected size.
        columns = self._environ.get("COLUMNS")
        if columns is not None and columns.isdigit():
            width = int(columns)
        lines = self._environ.get("LINES")
        if lines is not None and lines.isdigit():
            height = int(lines)
        # get_terminal_size can report 0, 0 if run from pseudo-terminal
        width = width or 80
        height = height or 25
        # A partially-set explicit dimension still takes precedence on its axis.
        return ConsoleDimensions(
            width - self.legacy_windows if self._width is None else self._width,
            height if self._height is None else self._height,
        )
@size.setter
def size(self, new_size: Tuple[int, int]) -> None:
"""Set a new size for the terminal.
Args:
new_size (Tuple[int, int]): New width and height.
"""
width, height = new_size
self._width = width
self._height = height
@property
def width(self) -> int:
"""Get the width of the console.
Returns:
int: The width (in characters) of the console.
"""
return self.size.width
@width.setter
def width(self, width: int) -> None:
"""Set width.
Args:
width (int): New width.
"""
self._width = width
@property
def height(self) -> int:
"""Get the height of the console.
Returns:
int: The height (in lines) of the console.
"""
return self.size.height
@height.setter
def height(self, height: int) -> None:
"""Set height.
Args:
height (int): new height.
"""
self._height = height
def bell(self) -> None:
"""Play a 'bell' sound (if supported by the terminal)."""
self.control(Control.bell())
def capture(self) -> Capture:
"""A context manager to *capture* the result of print() or log() in a string,
rather than writing it to the console.
Example:
>>> from rich.console import Console
>>> console = Console()
>>> with console.capture() as capture:
... console.print("[bold magenta]Hello World[/]")
>>> print(capture.get())
Returns:
Capture: Context manager with disables writing to the terminal.
"""
capture = Capture(self)
return capture
def pager(
self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False
) -> PagerContext:
"""A context manager to display anything printed within a "pager". The pager application
is defined by the system and will typically support at least pressing a key to scroll.
Args:
pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None.
styles (bool, optional): Show styles in pager. Defaults to False.
links (bool, optional): Show links in pager. Defaults to False.
Example:
>>> from rich.console import Console
>>> from rich.__main__ import make_test_card
>>> console = Console()
>>> with console.pager():
console.print(make_test_card())
Returns:
PagerContext: A context manager.
"""
return PagerContext(self, pager=pager, styles=styles, links=links)
def line(self, count: int = 1) -> None:
"""Write new line(s).
Args:
count (int, optional): Number of new lines. Defaults to 1.
"""
assert count >= 0, "count must be >= 0"
self.print(NewLine(count))
def clear(self, home: bool = True) -> None:
"""Clear the screen.
Args:
home (bool, optional): Also move the cursor to 'home' position. Defaults to True.
"""
if home:
self.control(Control.clear(), Control.home())
else:
self.control(Control.clear())
def status(
self,
status: RenderableType,
*,
spinner: str = "dots",
spinner_style: StyleType = "status.spinner",
speed: float = 1.0,
refresh_per_second: float = 12.5,
) -> "Status":
"""Display a status and spinner.
Args:
status (RenderableType): A status renderable (str or Text typically).
spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
Returns:
Status: A Status object that may be used as a context manager.
"""
from .status import Status
status_renderable = Status(
status,
console=self,
spinner=spinner,
spinner_style=spinner_style,
speed=speed,
refresh_per_second=refresh_per_second,
)
return status_renderable
def show_cursor(self, show: bool = True) -> bool:
"""Show or hide the cursor.
Args:
show (bool, optional): Set visibility of the cursor.
"""
if self.is_terminal:
self.control(Control.show_cursor(show))
return True
return False
def set_alt_screen(self, enable: bool = True) -> bool:
"""Enables alternative screen mode.
Note, if you enable this mode, you should ensure that is disabled before
the application exits. See :meth:`~rich.Console.screen` for a context manager
that handles this for you.
Args:
enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True.
Returns:
bool: True if the control codes were written.
"""
changed = False
if self.is_terminal and not self.legacy_windows:
self.control(Control.alt_screen(enable))
changed = True
self._is_alt_screen = enable
return changed
@property
def is_alt_screen(self) -> bool:
"""Check if the alt screen was enabled.
Returns:
bool: True if the alt screen was enabled, otherwise False.
"""
return self._is_alt_screen
def set_window_title(self, title: str) -> bool:
"""Set the title of the console terminal window.
Warning: There is no means within Rich of "resetting" the window title to its
previous value, meaning the title you set will persist even after your application
exits.
``fish`` shell resets the window title before and after each command by default,
negating this issue. Windows Terminal and command prompt will also reset the title for you.
Most other shells and terminals, however, do not do this.
Some terminals may require configuration changes before you can set the title.
Some terminals may not support setting the title at all.
Other software (including the terminal itself, the shell, custom prompts, plugins, etc.)
may also set the terminal window title. This could result in whatever value you write
using this method being overwritten.
Args:
title (str): The new title of the terminal window.
Returns:
bool: True if the control code to change the terminal title was
written, otherwise False. Note that a return value of True
does not guarantee that the window title has actually changed,
since the feature may be unsupported/disabled in some terminals.
"""
if self.is_terminal:
self.control(Control.title(title))
return True
return False
def screen(
self, hide_cursor: bool = True, style: Optional[StyleType] = None
) -> "ScreenContext":
"""Context manager to enable and disable 'alternative screen' mode.
Args:
hide_cursor (bool, optional): Also hide the cursor. Defaults to False.
style (Style, optional): Optional style for screen. Defaults to None.
Returns:
~ScreenContext: Context which enables alternate screen on enter, and disables it on exit.
"""
return ScreenContext(self, hide_cursor=hide_cursor, style=style or "")
def measure(
self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None
) -> Measurement:
"""Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains
information regarding the number of characters required to print the renderable.
Args:
renderable (RenderableType): Any renderable or string.
options (Optional[ConsoleOptions], optional): Options to use when measuring, or None
to use default options. Defaults to None.
Returns:
Measurement: A measurement of the renderable.
"""
measurement = Measurement.get(self, options or self.options, renderable)
return measurement
    def render(
        self, renderable: RenderableType, options: Optional[ConsoleOptions] = None
    ) -> Iterable[Segment]:
        """Render an object in to an iterable of `Segment` instances.

        This method contains the logic for rendering objects with the console protocol.
        You are unlikely to need to use it directly, unless you are extending the library.

        Args:
            renderable (RenderableType): An object supporting the console protocol, or
                an object that may be converted to a string.
            options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None.

        Returns:
            Iterable[Segment]: An iterable of segments that may be rendered.
        """
        _options = options or self.options
        if _options.max_width < 1:
            # No space to render anything. This prevents potential recursion errors.
            return
        render_iterable: RenderResult
        # Give the object a chance to substitute itself (rich cast protocol)
        # before checking for __rich_console__.
        renderable = rich_cast(renderable)
        if hasattr(renderable, "__rich_console__") and not isclass(renderable):
            render_iterable = renderable.__rich_console__(self, _options)  # type: ignore[union-attr]
        elif isinstance(renderable, str):
            # Plain strings are promoted to Text, honoring the options'
            # markup/highlight flags.
            text_renderable = self.render_str(
                renderable, highlight=_options.highlight, markup=_options.markup
            )
            render_iterable = text_renderable.__rich_console__(self, _options)
        else:
            raise errors.NotRenderableError(
                f"Unable to render {renderable!r}; "
                "A str, Segment or object with __rich_console__ method is required"
            )
        try:
            iter_render = iter(render_iterable)
        except TypeError:
            raise errors.NotRenderableError(
                f"object {render_iterable!r} is not renderable"
            )
        _Segment = Segment
        _options = _options.reset_height()
        # A renderable may yield Segments directly or further renderables;
        # recurse to flatten everything into a stream of Segments.
        for render_output in iter_render:
            if isinstance(render_output, _Segment):
                yield render_output
            else:
                yield from self.render(render_output, _options)
    def render_lines(
        self,
        renderable: RenderableType,
        options: Optional[ConsoleOptions] = None,
        *,
        style: Optional[Style] = None,
        pad: bool = True,
        new_lines: bool = False,
    ) -> List[List[Segment]]:
        """Render objects in to a list of lines.

        The output of render_lines is useful when further formatting of rendered console text
        is required, such as the Panel class which draws a border around any renderable object.

        Args:
            renderable (RenderableType): Any object renderable in the console.
            options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``.
            style (Style, optional): Optional style to apply to renderables. Defaults to ``None``.
            pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``.
            new_lines (bool, optional): Include "\n" characters at end of lines.

        Returns:
            List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
        """
        with self._lock:
            render_options = options or self.options
            _rendered = self.render(renderable, render_options)
            if style:
                _rendered = Segment.apply_style(_rendered, style)
            # Clamp a negative requested height to zero for the islice stop below.
            render_height = render_options.height
            if render_height is not None:
                render_height = max(0, render_height)
            # Split the segment stream into lines cropped/padded to max_width,
            # truncated to render_height lines if a height was requested.
            lines = list(
                islice(
                    Segment.split_and_crop_lines(
                        _rendered,
                        render_options.max_width,
                        include_new_lines=new_lines,
                        pad=pad,
                        style=style,
                    ),
                    None,
                    render_height,
                )
            )
            if render_options.height is not None:
                # If the render produced fewer lines than requested, pad the
                # remainder with blank (space-filled) lines.
                extra_lines = render_options.height - len(lines)
                if extra_lines > 0:
                    pad_line = [
                        [Segment(" " * render_options.max_width, style), Segment("\n")]
                        if new_lines
                        else [Segment(" " * render_options.max_width, style)]
                    ]
                    lines.extend(pad_line * extra_lines)
            return lines
    def render_str(
        self,
        text: str,
        *,
        style: Union[str, Style] = "",
        justify: Optional[JustifyMethod] = None,
        overflow: Optional[OverflowMethod] = None,
        emoji: Optional[bool] = None,
        markup: Optional[bool] = None,
        highlight: Optional[bool] = None,
        highlighter: Optional[HighlighterType] = None,
    ) -> "Text":
        """Convert a string to a Text instance. This is called automatically if
        you print or log a string.

        Args:
            text (str): Text to render.
            style (Union[str, Style], optional): Style to apply to rendered text.
            justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``.
            overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``.
            emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default.
            markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default.
            highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default.
            highlighter (HighlighterType, optional): Optional highlighter to apply.

        Returns:
            Text: Renderable object.
        """
        # For each tri-state flag, ``None`` means "use the console default".
        emoji_enabled = emoji or (emoji is None and self._emoji)
        markup_enabled = markup or (markup is None and self._markup)
        highlight_enabled = highlight or (highlight is None and self._highlight)
        if markup_enabled:
            # Parse console markup (render_markup also performs emoji substitution).
            rich_text = render_markup(
                text,
                style=style,
                emoji=emoji_enabled,
                emoji_variant=self._emoji_variant,
            )
            rich_text.justify = justify
            rich_text.overflow = overflow
        else:
            # No markup: build a Text directly, substituting emoji codes if enabled.
            rich_text = Text(
                _emoji_replace(text, default_variant=self._emoji_variant)
                if emoji_enabled
                else text,
                justify=justify,
                overflow=overflow,
                style=style,
            )
        _highlighter = (highlighter or self.highlighter) if highlight_enabled else None
        if _highlighter is not None:
            # Highlight the plain text, then copy the original text's styles on top.
            highlight_text = _highlighter(str(rich_text))
            highlight_text.copy_styles(rich_text)
            return highlight_text
        return rich_text
    def get_style(
        self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None
    ) -> Style:
        """Get a Style instance by its theme name or parse a definition.

        Args:
            name (str): The name of a style or a style definition.
            default (Optional[Union[Style, str]], optional): Fallback style name or
                definition used if *name* cannot be resolved. Defaults to None.

        Returns:
            Style: A Style object.

        Raises:
            MissingStyle: If no style could be parsed from name.
        """
        # Already a Style instance; nothing to look up or parse.
        if isinstance(name, Style):
            return name
        try:
            # First try the theme stack; if not a theme name, parse it as a
            # style definition string.
            style = self._theme_stack.get(name)
            if style is None:
                style = Style.parse(name)
            # Styles carrying a link are copied — presumably so link state is
            # not shared between uses; confirm against Style.copy semantics.
            return style.copy() if style.link else style
        except errors.StyleSyntaxError as error:
            if default is not None:
                return self.get_style(default)
            # `from None` hides the StyleSyntaxError context from the traceback.
            raise errors.MissingStyle(
                f"Failed to get style {name!r}; {error}"
            ) from None
    def _collect_renderables(
        self,
        objects: Iterable[Any],
        sep: str,
        end: str,
        *,
        justify: Optional[JustifyMethod] = None,
        emoji: Optional[bool] = None,
        markup: Optional[bool] = None,
        highlight: Optional[bool] = None,
    ) -> List[ConsoleRenderable]:
        """Combine a number of renderables and text into one renderable.

        Args:
            objects (Iterable[Any]): Anything that Rich can render.
            sep (str): String to write between print data.
            end (str): String to write at end of print data.
            justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
            emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default.
            markup (Optional[bool], optional): Enable markup, or ``None`` to use console default.
            highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default.

        Returns:
            List[ConsoleRenderable]: A list of things to render.
        """
        renderables: List[ConsoleRenderable] = []
        _append = renderables.append
        # Runs of text-like objects accumulate here and are flushed (joined
        # with `sep`) whenever a non-text renderable is encountered.
        text: List[Text] = []
        append_text = text.append
        append = _append
        if justify in ("left", "center", "right"):
            # Wrap each collected renderable in Align to apply justification.
            def align_append(renderable: RenderableType) -> None:
                _append(Align(renderable, cast(AlignMethod, justify)))
            append = align_append
        _highlighter: HighlighterType = _null_highlighter
        if highlight or (highlight is None and self._highlight):
            _highlighter = self.highlighter
        def check_text() -> None:
            # Flush any accumulated text as a single sep-joined Text renderable.
            if text:
                sep_text = Text(sep, justify=justify, end=end)
                append(sep_text.join(text))
                text.clear()
        for renderable in objects:
            renderable = rich_cast(renderable)
            if isinstance(renderable, str):
                append_text(
                    self.render_str(
                        renderable, emoji=emoji, markup=markup, highlighter=_highlighter
                    )
                )
            elif isinstance(renderable, Text):
                append_text(renderable)
            elif isinstance(renderable, ConsoleRenderable):
                check_text()
                append(renderable)
            elif is_expandable(renderable):
                # Containers and similar expandable objects are pretty printed.
                check_text()
                append(Pretty(renderable, highlighter=_highlighter))
            else:
                # Anything else falls back to str() with optional highlighting.
                append_text(_highlighter(str(renderable)))
        check_text()
        if self.style is not None:
            # Apply the console-level default style over everything collected.
            style = self.get_style(self.style)
            renderables = [Styled(renderable, style) for renderable in renderables]
        return renderables
def rule(
self,
title: TextType = "",
*,
characters: str = "─",
style: Union[str, Style] = "rule.line",
align: AlignMethod = "center",
) -> None:
"""Draw a line with optional centered title.
Args:
title (str, optional): Text to render over the rule. Defaults to "".
characters (str, optional): Character(s) to form the line. Defaults to "─".
style (str, optional): Style of line. Defaults to "rule.line".
align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
"""
from .rule import Rule
rule = Rule(title=title, characters=characters, style=style, align=align)
self.print(rule)
def control(self, *control: Control) -> None:
"""Insert non-printing control codes.
Args:
control_codes (str): Control codes, such as those that may move the cursor.
"""
if not self.is_dumb_terminal:
with self:
self._buffer.extend(_control.segment for _control in control)
def out(
self,
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
highlight: Optional[bool] = None,
) -> None:
"""Output to the terminal. This is a low-level way of writing to the terminal which unlike
:meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will
optionally apply highlighting and a basic style.
Args:
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\\\\n".
style (Union[str, Style], optional): A style to apply to output. Defaults to None.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use
console default. Defaults to ``None``.
"""
raw_output: str = sep.join(str(_object) for _object in objects)
self.print(
raw_output,
style=style,
highlight=highlight,
emoji=False,
markup=False,
no_wrap=True,
overflow="ignore",
crop=False,
end=end,
)
    def print(
        self,
        *objects: Any,
        sep: str = " ",
        end: str = "\n",
        style: Optional[Union[str, Style]] = None,
        justify: Optional[JustifyMethod] = None,
        overflow: Optional[OverflowMethod] = None,
        no_wrap: Optional[bool] = None,
        emoji: Optional[bool] = None,
        markup: Optional[bool] = None,
        highlight: Optional[bool] = None,
        width: Optional[int] = None,
        height: Optional[int] = None,
        crop: bool = True,
        soft_wrap: Optional[bool] = None,
        new_line_start: bool = False,
    ) -> None:
        """Print to the console.

        Args:
            objects (positional args): Objects to log to the terminal.
            sep (str, optional): String to write between print data. Defaults to " ".
            end (str, optional): String to write at end of print data. Defaults to "\\\\n".
            style (Union[str, Style], optional): A style to apply to output. Defaults to None.
            justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``.
            overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None.
            no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None.
            emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``.
            markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``.
            highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``.
            width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``.
            crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True.
            soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for
                Console default. Defaults to ``None``.
            new_line_start (bool, False): Insert a new line at the start if the output contains more than one line. Defaults to ``False``.
        """
        # print() with no arguments emits a single blank line, like builtins.print.
        if not objects:
            objects = (NewLine(),)
        if soft_wrap is None:
            soft_wrap = self.soft_wrap
        if soft_wrap:
            # Soft wrap: disable wrapping and cropping unless explicitly overridden.
            if no_wrap is None:
                no_wrap = True
            if overflow is None:
                overflow = "ignore"
            crop = False
        # Snapshot the hooks so mutations during rendering don't affect this call.
        render_hooks = self._render_hooks[:]
        with self:
            renderables = self._collect_renderables(
                objects,
                sep,
                end,
                justify=justify,
                emoji=emoji,
                markup=markup,
                highlight=highlight,
            )
            for hook in render_hooks:
                renderables = hook.process_renderables(renderables)
            render_options = self.options.update(
                justify=justify,
                overflow=overflow,
                width=min(width, self.width) if width is not None else NO_CHANGE,
                height=height,
                no_wrap=no_wrap,
                markup=markup,
                highlight=highlight,
            )
            new_segments: List[Segment] = []
            extend = new_segments.extend
            render = self.render
            if style is None:
                for renderable in renderables:
                    extend(render(renderable, render_options))
            else:
                # Layer the requested style over every rendered segment.
                for renderable in renderables:
                    extend(
                        Segment.apply_style(
                            render(renderable, render_options), self.get_style(style)
                        )
                    )
            if new_line_start:
                # Prepend a newline only when output spans more than one line.
                if (
                    len("".join(segment.text for segment in new_segments).splitlines())
                    > 1
                ):
                    new_segments.insert(0, Segment.line())
            if crop:
                buffer_extend = self._buffer.extend
                for line in Segment.split_and_crop_lines(
                    new_segments, self.width, pad=False
                ):
                    buffer_extend(line)
            else:
                self._buffer.extend(new_segments)
def print_json(
self,
json: Optional[str] = None,
*,
data: Any = None,
indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = False,
check_circular: bool = True,
allow_nan: bool = True,
default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
"""Pretty prints JSON. Output will be valid JSON.
Args:
json (Optional[str]): A string containing JSON.
data (Any): If json is not supplied, then encode this data.
indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2.
highlight (bool, optional): Enable highlighting of output: Defaults to True.
skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
check_circular (bool, optional): Check for circular references. Defaults to True.
allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
default (Callable, optional): A callable that converts values that can not be encoded
in to something that can be JSON encoded. Defaults to None.
sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
"""
from pip._vendor.rich.json import JSON
if json is None:
json_renderable = JSON.from_data(
data,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
else:
if not isinstance(json, str):
raise TypeError(
f"json must be str. Did you mean print_json(data={json!r}) ?"
)
json_renderable = JSON(
json,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
self.print(json_renderable, soft_wrap=True)
def update_screen(
self,
renderable: RenderableType,
*,
region: Optional[Region] = None,
options: Optional[ConsoleOptions] = None,
) -> None:
"""Update the screen at a given offset.
Args:
renderable (RenderableType): A Rich renderable.
region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None.
x (int, optional): x offset. Defaults to 0.
y (int, optional): y offset. Defaults to 0.
Raises:
errors.NoAltScreen: If the Console isn't in alt screen mode.
"""
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
render_options = options or self.options
if region is None:
x = y = 0
render_options = render_options.update_dimensions(
render_options.max_width, render_options.height or self.height
)
else:
x, y, width, height = region
render_options = render_options.update_dimensions(width, height)
lines = self.render_lines(renderable, options=render_options)
self.update_screen_lines(lines, x, y)
def update_screen_lines(
self, lines: List[List[Segment]], x: int = 0, y: int = 0
) -> None:
"""Update lines of the screen at a given offset.
Args:
lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`).
x (int, optional): x offset (column no). Defaults to 0.
y (int, optional): y offset (column no). Defaults to 0.
Raises:
errors.NoAltScreen: If the Console isn't in alt screen mode.
"""
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
screen_update = ScreenUpdate(lines, x, y)
segments = self.render(screen_update)
self._buffer.extend(segments)
self._check_buffer()
def print_exception(
self,
*,
width: Optional[int] = 100,
extra_lines: int = 3,
theme: Optional[str] = None,
word_wrap: bool = False,
show_locals: bool = False,
suppress: Iterable[Union[str, ModuleType]] = (),
max_frames: int = 100,
) -> None:
"""Prints a rich render of the last exception and traceback.
Args:
width (Optional[int], optional): Number of characters used to render code. Defaults to 100.
extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
theme (str, optional): Override pygments theme used in traceback
word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
show_locals (bool, optional): Enable display of local variables. Defaults to False.
suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
"""
from .traceback import Traceback
traceback = Traceback(
width=width,
extra_lines=extra_lines,
theme=theme,
word_wrap=word_wrap,
show_locals=show_locals,
suppress=suppress,
max_frames=max_frames,
)
self.print(traceback)
@staticmethod
def _caller_frame_info(
offset: int,
currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe,
) -> Tuple[str, int, Dict[str, Any]]:
"""Get caller frame information.
Args:
offset (int): the caller offset within the current frame stack.
currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to
retrieve the current frame. Defaults to ``inspect.currentframe``.
Returns:
Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and
the dictionary of local variables associated with the caller frame.
Raises:
RuntimeError: If the stack offset is invalid.
"""
# Ignore the frame of this local helper
offset += 1
frame = currentframe()
if frame is not None:
# Use the faster currentframe where implemented
while offset and frame is not None:
frame = frame.f_back
offset -= 1
assert frame is not None
return frame.f_code.co_filename, frame.f_lineno, frame.f_locals
else:
# Fallback to the slower stack
frame_info = inspect.stack()[offset]
return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals
    def log(
        self,
        *objects: Any,
        sep: str = " ",
        end: str = "\n",
        style: Optional[Union[str, Style]] = None,
        justify: Optional[JustifyMethod] = None,
        emoji: Optional[bool] = None,
        markup: Optional[bool] = None,
        highlight: Optional[bool] = None,
        log_locals: bool = False,
        _stack_offset: int = 1,
    ) -> None:
        """Log rich content to the terminal.

        Args:
            objects (positional args): Objects to log to the terminal.
            sep (str, optional): String to write between print data. Defaults to " ".
            end (str, optional): String to write at end of print data. Defaults to "\\\\n".
            style (Union[str, Style], optional): A style to apply to output. Defaults to None.
            justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
            emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
            markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None.
            highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
            log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
                was called. Defaults to False.
            _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
        """
        # log() with no arguments emits a single blank line, like print().
        if not objects:
            objects = (NewLine(),)
        # Snapshot the hooks so mutations during rendering don't affect this call.
        render_hooks = self._render_hooks[:]
        with self:
            renderables = self._collect_renderables(
                objects,
                sep,
                end,
                justify=justify,
                emoji=emoji,
                markup=markup,
                highlight=highlight,
            )
            if style is not None:
                renderables = [Styled(renderable, style) for renderable in renderables]
            # Locate the calling frame so the log line can show file:line.
            filename, line_no, locals = self._caller_frame_info(_stack_offset)
            # Synthetic filenames (e.g. "<stdin>") get no clickable link.
            link_path = None if filename.startswith("<") else os.path.abspath(filename)
            path = filename.rpartition(os.sep)[-1]
            if log_locals:
                # Drop dunder names from the rendered locals panel.
                locals_map = {
                    key: value
                    for key, value in locals.items()
                    if not key.startswith("__")
                }
                renderables.append(render_scope(locals_map, title="[i]locals"))
            # Wrap everything in the log render (adds timestamp and path column).
            renderables = [
                self._log_render(
                    self,
                    renderables,
                    log_time=self.get_datetime(),
                    path=path,
                    line_no=line_no,
                    link_path=link_path,
                )
            ]
            for hook in render_hooks:
                renderables = hook.process_renderables(renderables)
            new_segments: List[Segment] = []
            extend = new_segments.extend
            render = self.render
            render_options = self.options
            for renderable in renderables:
                extend(render(renderable, render_options))
            buffer_extend = self._buffer.extend
            for line in Segment.split_and_crop_lines(
                new_segments, self.width, pad=False
            ):
                buffer_extend(line)
def _check_buffer(self) -> None:
    """Check if the buffer may be rendered. Render it if it can (e.g. Console.quiet is False)
    Rendering is supported on Windows, Unix and Jupyter environments. For
    legacy Windows consoles, the win32 API is called directly.
    This method will also record what it renders if recording is enabled via Console.record.
    """
    if self.quiet:
        # Quiet mode: discard all pending output without rendering anything.
        del self._buffer[:]
        return
    with self._lock:
        if self.record:
            # Snapshot pending segments for the export_* methods before flushing.
            with self._record_buffer_lock:
                self._record_buffer.extend(self._buffer[:])
        if self._buffer_index == 0:
            # Only flush at the top nesting level; nested capture contexts
            # (buffer_index > 0) keep accumulating segments instead.
            if self.is_jupyter:  # pragma: no cover
                from .jupyter import display

                display(self._buffer, self._render_buffer(self._buffer[:]))
                del self._buffer[:]
            else:
                if WINDOWS:
                    use_legacy_windows_render = False
                    if self.legacy_windows:
                        # The win32 console API is only usable when writing to a
                        # real std stream; other file-like objects get ANSI text.
                        fileno = get_fileno(self.file)
                        if fileno is not None:
                            use_legacy_windows_render = (
                                fileno in _STD_STREAMS_OUTPUT
                            )
                    if use_legacy_windows_render:
                        from pip._vendor.rich._win32_console import LegacyWindowsTerm
                        from pip._vendor.rich._windows_renderer import legacy_windows_render

                        buffer = self._buffer[:]
                        if self.no_color and self._color_system:
                            buffer = list(Segment.remove_color(buffer))

                        legacy_windows_render(buffer, LegacyWindowsTerm(self.file))
                    else:
                        # Either a non-std stream on legacy Windows, or modern Windows.
                        text = self._render_buffer(self._buffer[:])
                        # https://bugs.python.org/issue37871
                        # https://github.com/python/cpython/issues/82052
                        # We need to avoid writing more than 32Kb in a single write, due to the above bug
                        write = self.file.write
                        # Worse case scenario, every character is 4 bytes of utf-8
                        MAX_WRITE = 32 * 1024 // 4
                        try:
                            if len(text) <= MAX_WRITE:
                                write(text)
                            else:
                                # Flush in line-sized batches, each kept under MAX_WRITE.
                                batch: List[str] = []
                                batch_append = batch.append
                                size = 0
                                for line in text.splitlines(True):
                                    if size + len(line) > MAX_WRITE and batch:
                                        write("".join(batch))
                                        batch.clear()
                                        size = 0
                                    batch_append(line)
                                    size += len(line)
                                if batch:
                                    write("".join(batch))
                                    batch.clear()
                        except UnicodeEncodeError as error:
                            # Augment the error with an actionable hint for the user.
                            error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
                            raise
                else:
                    # POSIX / non-Windows path: a single write is fine.
                    text = self._render_buffer(self._buffer[:])
                    try:
                        self.file.write(text)
                    except UnicodeEncodeError as error:
                        error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
                        raise

                self.file.flush()
                del self._buffer[:]
def _render_buffer(self, buffer: Iterable[Segment]) -> str:
    """Render a sequence of segments to a single ANSI string.

    Styled segments are rendered with the console's color system; control
    segments are dropped when the output is not a terminal.
    """
    parts: List[str] = []
    active_color_system = self._color_system
    is_legacy_windows = self.legacy_windows
    terminal = self.is_terminal
    if self.no_color and active_color_system:
        buffer = Segment.remove_color(buffer)
    for text, style, control in buffer:
        if style:
            parts.append(
                style.render(
                    text,
                    color_system=active_color_system,
                    legacy_windows=is_legacy_windows,
                )
            )
        else:
            # Unstyled text is kept, except control codes on non-terminals.
            if terminal or not control:
                parts.append(text)
    return "".join(parts)
def input(
    self,
    prompt: TextType = "",
    *,
    markup: bool = True,
    emoji: bool = True,
    password: bool = False,
    stream: Optional[TextIO] = None,
) -> str:
    """Display a (possibly styled) prompt and read a line of input.

    Behaves like the builtin :func:`input`, with optional hidden input and
    an alternative input stream.

    Args:
        prompt (Union[str, Text]): Text to render in the prompt.
        markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True.
        emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True.
        password: (bool, optional): Hide typed text. Defaults to False.
        stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None.

    Returns:
        str: Text read from stdin.
    """
    if prompt:
        self.print(prompt, markup=markup, emoji=emoji, end="")
    if password:
        # getpass suppresses echo; the prompt was already printed above.
        return getpass("", stream=stream)
    if stream:
        return stream.readline()
    return input()
def export_text(self, *, clear: bool = True, styles: bool = False) -> str:
    """Generate text from the recorded console contents.

    Requires ``record=True`` in the constructor.

    Args:
        clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
        styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text.
            Defaults to ``False``.

    Returns:
        str: String containing console contents.
    """
    assert (
        self.record
    ), "To export console contents set record=True in the constructor or instance"
    with self._record_buffer_lock:
        if styles:
            # Keep ANSI styling by rendering each styled segment.
            fragments = [
                style.render(text) if style else text
                for text, style, _ in self._record_buffer
            ]
        else:
            # Plain text: drop control segments entirely.
            fragments = [
                segment.text
                for segment in self._record_buffer
                if not segment.control
            ]
        text = "".join(fragments)
        if clear:
            del self._record_buffer[:]
    return text
def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
    """Export recorded console contents as text and write them to *path*.

    Requires ``record=True`` in the constructor.

    Args:
        path (str): Path to write text files.
        clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
        styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
            Defaults to ``False``.
    """
    exported = self.export_text(clear=clear, styles=styles)
    with open(path, "wt", encoding="utf-8") as output_file:
        output_file.write(exported)
def export_html(
    self,
    *,
    theme: Optional[TerminalTheme] = None,
    clear: bool = True,
    code_format: Optional[str] = None,
    inline_styles: bool = False,
) -> str:
    """Generate HTML from the recorded console contents.

    Requires ``record=True`` in the constructor.

    Args:
        theme (TerminalTheme, optional): TerminalTheme object containing console colors.
        clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
        code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
            '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
        inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
            larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
            Defaults to False.

    Returns:
        str: String containing console contents as HTML.
    """
    assert (
        self.record
    ), "To export console contents set record=True in the constructor or instance"
    _theme = theme or DEFAULT_TERMINAL_THEME
    html_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
    fragments: List[str] = []
    stylesheet = ""
    with self._record_buffer_lock:
        # Merge adjacent same-style segments and drop control codes once,
        # then branch on how styles should be emitted.
        simplified = Segment.filter_control(Segment.simplify(self._record_buffer))
        if inline_styles:
            for text, style, _ in simplified:
                escaped = escape(text)
                if style:
                    rule = style.get_html_style(_theme)
                    if style.link:
                        escaped = f'<a href="{style.link}">{escaped}</a>'
                    if rule:
                        escaped = f'<span style="{rule}">{escaped}</span>'
                fragments.append(escaped)
        else:
            # Assign each distinct CSS rule a class number and emit a stylesheet.
            styles: Dict[str, int] = {}
            for text, style, _ in simplified:
                escaped = escape(text)
                if style:
                    rule = style.get_html_style(_theme)
                    style_number = styles.setdefault(rule, len(styles) + 1)
                    if style.link:
                        escaped = f'<a class="r{style_number}" href="{style.link}">{escaped}</a>'
                    else:
                        escaped = f'<span class="r{style_number}">{escaped}</span>'
                fragments.append(escaped)
            stylesheet = "\n".join(
                f".r{style_number} {{{style_rule}}}"
                for style_rule, style_number in styles.items()
                if style_rule
            )
        rendered_code = html_format.format(
            code="".join(fragments),
            stylesheet=stylesheet,
            foreground=_theme.foreground_color.hex,
            background=_theme.background_color.hex,
        )
        if clear:
            del self._record_buffer[:]
    return rendered_code
def save_html(
    self,
    path: str,
    *,
    theme: Optional[TerminalTheme] = None,
    clear: bool = True,
    code_format: str = CONSOLE_HTML_FORMAT,
    inline_styles: bool = False,
) -> None:
    """Export recorded console contents as HTML and write them to *path*.

    Requires ``record=True`` in the constructor.

    Args:
        path (str): Path to write html file.
        theme (TerminalTheme, optional): TerminalTheme object containing console colors.
        clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
        code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
            '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
        inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
            larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
            Defaults to False.
    """
    document = self.export_html(
        theme=theme,
        clear=clear,
        code_format=code_format,
        inline_styles=inline_styles,
    )
    with open(path, "wt", encoding="utf-8") as output_file:
        output_file.write(document)
def export_svg(
    self,
    *,
    title: str = "Rich",
    theme: Optional[TerminalTheme] = None,
    clear: bool = True,
    code_format: str = CONSOLE_SVG_FORMAT,
    font_aspect_ratio: float = 0.61,
    unique_id: Optional[str] = None,
) -> str:
    """
    Generate an SVG from the console contents (requires record=True in Console constructor).

    Args:
        title (str, optional): The title of the tab in the output image
        theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
        clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
        code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
            into the string in order to form the final SVG output. The default template used and the variables
            injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
        font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
            string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
            If you aren't specifying a different font inside ``code_format``, you probably don't need this.
        unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
            ids). If not set, this defaults to a computed value based on the recorded content.

    Returns:
        str: The recorded console contents rendered as an SVG document.
    """
    from pip._vendor.rich.cells import cell_len

    # Cache Style -> CSS so identical styles are only converted once.
    style_cache: Dict[Style, str] = {}

    def get_svg_style(style: Style) -> str:
        """Convert a Style to CSS rules for SVG."""
        if style in style_cache:
            return style_cache[style]
        css_rules = []
        color = (
            _theme.foreground_color
            if (style.color is None or style.color.is_default)
            else style.color.get_truecolor(_theme)
        )
        bgcolor = (
            _theme.background_color
            if (style.bgcolor is None or style.bgcolor.is_default)
            else style.bgcolor.get_truecolor(_theme)
        )
        if style.reverse:
            color, bgcolor = bgcolor, color
        if style.dim:
            # Dim text is simulated by blending the foreground towards the background.
            color = blend_rgb(color, bgcolor, 0.4)
        css_rules.append(f"fill: {color.hex}")
        if style.bold:
            css_rules.append("font-weight: bold")
        if style.italic:
            css_rules.append("font-style: italic;")
        if style.underline:
            css_rules.append("text-decoration: underline;")
        if style.strike:
            css_rules.append("text-decoration: line-through;")
        css = ";".join(css_rules)
        style_cache[style] = css
        return css

    _theme = theme or SVG_EXPORT_THEME

    # All layout below derives from the character cell geometry (SVG units).
    width = self.width
    char_height = 20
    char_width = char_height * font_aspect_ratio
    line_height = char_height * 1.22

    margin_top = 1
    margin_right = 1
    margin_bottom = 1
    margin_left = 1

    padding_top = 40
    padding_right = 8
    padding_bottom = 8
    padding_left = 8

    padding_width = padding_left + padding_right
    padding_height = padding_top + padding_bottom
    margin_width = margin_left + margin_right
    margin_height = margin_top + margin_bottom

    text_backgrounds: List[str] = []
    text_group: List[str] = []
    classes: Dict[str, int] = {}
    style_no = 1

    def escape_text(text: str) -> str:
        """HTML escape text and replace spaces with nbsp."""
        return escape(text).replace(" ", "&#160;")

    def make_tag(
        name: str, content: Optional[str] = None, **attribs: object
    ) -> str:
        """Make a tag from name, content, and attributes."""

        def stringify(value: object) -> str:
            # Floats use "g" formatting to avoid trailing zeros in the SVG.
            if isinstance(value, (float)):
                return format(value, "g")
            return str(value)

        # Leading "_" lets callers pass reserved words (e.g. _class -> class);
        # underscores become hyphens per SVG attribute conventions.
        tag_attribs = " ".join(
            f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
            for k, v in attribs.items()
        )
        return (
            f"<{name} {tag_attribs}>{content}</{name}>"
            if content
            else f"<{name} {tag_attribs}/>"
        )

    with self._record_buffer_lock:
        segments = list(Segment.filter_control(self._record_buffer))
        if clear:
            self._record_buffer.clear()

    if unique_id is None:
        # Derive a stable id from content + title so exporting the same
        # content twice yields identical documents.
        unique_id = "terminal-" + str(
            zlib.adler32(
                ("".join(repr(segment) for segment in segments)).encode(
                    "utf-8",
                    "ignore",
                )
                + title.encode("utf-8", "ignore")
            )
        )
    y = 0
    for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)):
        x = 0
        for text, style, _control in line:
            style = style or Style()
            rules = get_svg_style(style)
            if rules not in classes:
                classes[rules] = style_no
                style_no += 1
            class_name = f"r{classes[rules]}"

            if style.reverse:
                has_background = True
                background = (
                    _theme.foreground_color.hex
                    if style.color is None
                    else style.color.get_truecolor(_theme).hex
                )
            else:
                bgcolor = style.bgcolor
                has_background = bgcolor is not None and not bgcolor.is_default
                background = (
                    _theme.background_color.hex
                    if style.bgcolor is None
                    else style.bgcolor.get_truecolor(_theme).hex
                )

            text_length = cell_len(text)
            if has_background:
                text_backgrounds.append(
                    make_tag(
                        "rect",
                        fill=background,
                        x=x * char_width,
                        y=y * line_height + 1.5,
                        width=char_width * text_length,
                        height=line_height + 0.25,
                        shape_rendering="crispEdges",
                    )
                )

            if text != " " * len(text):
                # Skip all-space runs: the background rect (if any) suffices.
                text_group.append(
                    make_tag(
                        "text",
                        escape_text(text),
                        _class=f"{unique_id}-{class_name}",
                        x=x * char_width,
                        y=y * line_height + char_height,
                        textLength=char_width * len(text),
                        clip_path=f"url(#{unique_id}-line-{y})",
                    )
                )
            x += cell_len(text)

    # One clipPath per rendered line prevents text overflowing its row.
    line_offsets = [line_no * line_height + 1.5 for line_no in range(y)]
    lines = "\n".join(
        f"""<clipPath id="{unique_id}-line-{line_no}">
{make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)}
        </clipPath>"""
        for line_no, offset in enumerate(line_offsets)
    )

    styles = "\n".join(
        f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items()
    )
    backgrounds = "".join(text_backgrounds)
    matrix = "".join(text_group)

    terminal_width = ceil(width * char_width + padding_width)
    terminal_height = (y + 1) * line_height + padding_height
    # Window "chrome": rounded background rect, optional title, traffic lights.
    chrome = make_tag(
        "rect",
        fill=_theme.background_color.hex,
        stroke="rgba(255,255,255,0.35)",
        stroke_width="1",
        x=margin_left,
        y=margin_top,
        width=terminal_width,
        height=terminal_height,
        rx=8,
    )

    title_color = _theme.foreground_color.hex
    if title:
        chrome += make_tag(
            "text",
            escape_text(title),
            _class=f"{unique_id}-title",
            fill=title_color,
            text_anchor="middle",
            x=terminal_width // 2,
            y=margin_top + char_height + 6,
        )
    # macOS-style close/minimize/maximize circles.
    chrome += f"""
        <g transform="translate(26,22)">
        <circle cx="0" cy="0" r="7" fill="#ff5f57"/>
        <circle cx="22" cy="0" r="7" fill="#febc2e"/>
        <circle cx="44" cy="0" r="7" fill="#28c840"/>
        </g>
    """

    svg = code_format.format(
        unique_id=unique_id,
        char_width=char_width,
        char_height=char_height,
        line_height=line_height,
        terminal_width=char_width * width - 1,
        terminal_height=(y + 1) * line_height - 1,
        width=terminal_width + margin_width,
        height=terminal_height + margin_height,
        terminal_x=margin_left + padding_left,
        terminal_y=margin_top + padding_top,
        styles=styles,
        chrome=chrome,
        backgrounds=backgrounds,
        matrix=matrix,
        lines=lines,
    )
    return svg
def save_svg(
    self,
    path: str,
    *,
    title: str = "Rich",
    theme: Optional[TerminalTheme] = None,
    clear: bool = True,
    code_format: str = CONSOLE_SVG_FORMAT,
    font_aspect_ratio: float = 0.61,
    unique_id: Optional[str] = None,
) -> None:
    """Export the recorded console contents as SVG and write them to *path*.

    Requires ``record=True`` in the Console constructor.

    Args:
        path (str): The path to write the SVG to.
        title (str, optional): The title of the tab in the output image
        theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
        clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
        code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
            into the string in order to form the final SVG output. The default template used and the variables
            injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
        font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
            string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
            If you aren't specifying a different font inside ``code_format``, you probably don't need this.
        unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
            ids). If not set, this defaults to a computed value based on the recorded content.
    """
    document = self.export_svg(
        title=title,
        theme=theme,
        clear=clear,
        code_format=code_format,
        font_aspect_ratio=font_aspect_ratio,
        unique_id=unique_id,
    )
    with open(path, "wt", encoding="utf-8") as output_file:
        output_file.write(document)
def _svg_hash(svg_main_code: str) -> str:
"""Returns a unique hash for the given SVG main code.
Args:
svg_main_code (str): The content we're going to inject in the SVG envelope.
Returns:
str: a hash of the given content
"""
return str(zlib.adler32(svg_main_code.encode()))
if __name__ == "__main__":  # pragma: no cover
    # Demo / manual smoke test: exercise Console.log and Console.print with a
    # mix of scalars, containers, and nested dicts when run as a script.
    console = Console(record=True)

    console.log(
        "JSONRPC [i]request[/i]",
        5,
        1.3,
        True,
        False,
        None,
        {
            "jsonrpc": "2.0",
            "method": "subtract",
            "params": {"minuend": 42, "subtrahend": 23},
            "id": 3,
        },
    )

    console.log("Hello, World!", "{'a': 1}", repr(console))

    # Pretty-print a nested structure (note: string contents are sample data,
    # reproduced verbatim).
    console.print(
        {
            "name": None,
            "empty": [],
            "quiz": {
                "sport": {
                    "answered": True,
                    "q1": {
                        "question": "Which one is correct team name in NBA?",
                        "options": [
                            "New York Bulls",
                            "Los Angeles Kings",
                            "Golden State Warriors",
                            "Huston Rocket",
                        ],
                        "answer": "Huston Rocket",
                    },
                },
                "maths": {
                    "answered": False,
                    "q1": {
                        "question": "5 + 7 = ?",
                        "options": [10, 11, 12, 13],
                        "answer": 12,
                    },
                    "q2": {
                        "question": "12 - 8 = ?",
                        "options": [1, 2, 3, 4],
                        "answer": 4,
                    },
                },
            },
        }
    )
| Console |
python | pytorch__pytorch | torchgen/api/python.py | {
"start": 23851,
"end": 24610
} | class ____:
# argument name
name: str
# RHS expression to reference PythonArgParser output.
expr: str
# In some special cases we need create different expr, e.g.:
# '_r.isNone(1)' instead of '_r.tensor(1)'.
index: int
# The python argument it maps to.
argument: PythonArgument
@property
def is_none_expr(self) -> str:
return f"_r.isNone({self.index})"
# To pass PythonArgParser output to the lambda wrapper, we need bind
# PythonArgParserOutputExpr to DispatchLambdaArgument.
# They are not always 1-1 mapped, e.g. scattered TensorOptions fields
# need be packed into a TensorOptions object, which is the argument
# that the lambda function wrapper takes.
@dataclass(frozen=True)
| PythonArgParserOutputExpr |
python | allegroai__clearml | clearml/automation/parameters.py | {
"start": 5518,
"end": 7377
} | class ____(UniformParameterRange):
"""
Logarithmic uniform randomly sampled hyperparameter object.
"""
def __init__(
self,
name: str,
min_value: float,
max_value: float,
base: float = 10,
step_size: Optional[float] = None,
include_max_value: bool = True,
) -> ():
"""
Create a parameter to be sampled by the SearchStrategy
:param str name: The parameter name. Match the Task hyperparameter name.
:param float min_value: The minimum exponent sample to use for uniform random sampling.
:param float max_value: The maximum exponent sample to use for uniform random sampling.
:param float base: The base used to raise the sampled exponent.
:param float step_size: If not ``None``, set step size (quantization) for value sampling.
:param bool include_max_value: Range includes the ``max_value``
The values are:
- ``True`` - The range includes the ``max_value`` (Default)
- ``False`` - Does not include.
"""
super().__init__(
name,
min_value,
max_value,
step_size=step_size,
include_max_value=include_max_value,
)
self.base = base
def get_value(self) -> Mapping[str, Any]:
"""
Return uniformly logarithmic sampled value based on object sampling definitions.
:return: ``{self.name: random value self.base^[self.min_value, self.max_value)}``
"""
values_dict = super().get_value()
return {self.name: self.base**v for v in values_dict.values()}
def to_list(self) -> Sequence[Mapping[str, float]]:
values_list = super().to_list()
return [{self.name: self.base ** v[self.name]} for v in values_list]
| LogUniformParameterRange |
python | ray-project__ray | python/ray/autoscaler/_private/cli_logger.py | {
"start": 6746,
"end": 24237
} | class ____:
"""Singleton class for CLI logging.
Without calling 'cli_logger.configure', the CLILogger will default
to 'record' style logging.
Attributes:
color_mode (str):
Can be "true", "false", or "auto".
Enables or disables `colorful`.
If `color_mode` is "auto", is set to `not stdout.isatty()`
indent_level (int):
The current indentation level.
All messages will be indented by prepending `" " * indent_level`
vebosity (int):
Output verbosity.
Low verbosity will disable `verbose` and `very_verbose` messages.
"""
color_mode: str
# color_mode: Union[Literal["auto"], Literal["false"], Literal["true"]]
indent_level: int
interactive: bool
VALID_LOG_STYLES = ("auto", "record", "pretty")
_autodetected_cf_colormode: int
def __init__(self):
self.indent_level = 0
self._verbosity = 0
self._verbosity_overriden = False
self._color_mode = "auto"
self._log_style = "record"
self.pretty = False
self.interactive = False
# store whatever colorful has detected for future use if
# the color ouput is toggled (colorful detects # of supported colors,
# so it has some non-trivial logic to determine this)
self._autodetected_cf_colormode = cf.colorful.colormode
self.set_format()
def set_format(self, format_tmpl=None):
if not format_tmpl:
from ray.autoscaler._private.constants import LOGGER_FORMAT
format_tmpl = LOGGER_FORMAT
self._formatter = logging.Formatter(format_tmpl)
def configure(self, log_style=None, color_mode=None, verbosity=None):
"""Configures the logger according to values."""
if log_style is not None:
self._set_log_style(log_style)
if color_mode is not None:
self._set_color_mode(color_mode)
if verbosity is not None:
self._set_verbosity(verbosity)
self.detect_colors()
@property
def log_style(self):
return self._log_style
def _set_log_style(self, x):
"""Configures interactivity and formatting."""
self._log_style = x.lower()
self.interactive = _isatty()
if self._log_style == "auto":
self.pretty = _isatty()
elif self._log_style == "record":
self.pretty = False
self._set_color_mode("false")
elif self._log_style == "pretty":
self.pretty = True
@property
def color_mode(self):
return self._color_mode
def _set_color_mode(self, x):
self._color_mode = x.lower()
self.detect_colors()
@property
def verbosity(self):
if self._verbosity_overriden:
return self._verbosity
elif not self.pretty:
return 999
return self._verbosity
def _set_verbosity(self, x):
self._verbosity = x
self._verbosity_overriden = True
def detect_colors(self):
"""Update color output settings.
Parse the `color_mode` string and optionally disable or force-enable
color output
(8-color ANSI if no terminal detected to be safe) in colorful.
"""
if self.color_mode == "true":
if self._autodetected_cf_colormode != cf.NO_COLORS:
cf.colormode = self._autodetected_cf_colormode
else:
cf.colormode = cf.ANSI_8_COLORS
return
if self.color_mode == "false":
cf.disable()
return
if self.color_mode == "auto":
# colorful autodetects tty settings
return
raise ValueError("Invalid log color setting: " + self.color_mode)
def newline(self):
"""Print a line feed."""
self.print("")
def _print(
self,
msg: str,
_level_str: str = "INFO",
_linefeed: bool = True,
end: str = None,
):
"""Proxy for printing messages.
Args:
msg: Message to print.
linefeed (bool):
If `linefeed` is `False` no linefeed is printed at the
end of the message.
"""
if self.pretty:
rendered_message = " " * self.indent_level + msg
else:
if msg.strip() == "":
return
caller_info = _external_caller_info()
record = logging.LogRecord(
name="cli",
# We override the level name later
# TODO(maximsmol): give approximate level #s to our log levels
level=0,
# The user-facing logs do not need this information anyway
# and it would be very tedious to extract since _print
# can be at varying depths in the call stack
# TODO(maximsmol): do it anyway to be extra
pathname=caller_info["filename"],
lineno=caller_info["lineno"],
msg=msg,
args={},
# No exception
exc_info=None,
)
record.levelname = _level_str
rendered_message = self._formatter.format(record)
# We aren't using standard python logging convention, so we hardcode
# the log levels for now.
if _level_str in ["WARNING", "ERROR", "PANIC"]:
stream = sys.stderr
else:
stream = sys.stdout
if not _linefeed:
stream.write(rendered_message)
stream.flush()
return
kwargs = {"end": end}
print(rendered_message, file=stream, **kwargs)
def indented(self):
"""Context manager that starts an indented block of output."""
cli_logger = self
class IndentedContextManager:
def __enter__(self):
cli_logger.indent_level += 1
def __exit__(self, type, value, tb):
cli_logger.indent_level -= 1
return IndentedContextManager()
def group(self, msg: str, *args: Any, **kwargs: Any):
"""Print a group title in a special color and start an indented block.
For arguments, see `_format_msg`.
"""
self.print(cf.dodgerBlue(msg), *args, **kwargs)
return self.indented()
def verbatim_error_ctx(self, msg: str, *args: Any, **kwargs: Any):
"""Context manager for printing multi-line error messages.
Displays a start sequence "!!! {optional message}"
and a matching end sequence "!!!".
The string "!!!" can be used as a "tombstone" for searching.
For arguments, see `_format_msg`.
"""
cli_logger = self
class VerbatimErorContextManager:
def __enter__(self):
cli_logger.error(cf.bold("!!! ") + "{}", msg, *args, **kwargs)
def __exit__(self, type, value, tb):
cli_logger.error(cf.bold("!!!"))
return VerbatimErorContextManager()
def labeled_value(self, key: str, msg: str, *args: Any, **kwargs: Any):
"""Displays a key-value pair with special formatting.
Args:
key: Label that is prepended to the message.
For other arguments, see `_format_msg`.
"""
self._print(cf.skyBlue(key) + ": " + _format_msg(cf.bold(msg), *args, **kwargs))
def verbose(self, msg: str, *args: Any, **kwargs: Any):
"""Prints a message if verbosity is not 0.
For arguments, see `_format_msg`.
"""
if self.verbosity > 0:
self.print(msg, *args, _level_str="VINFO", **kwargs)
def verbose_warning(self, msg, *args, **kwargs):
"""Prints a formatted warning if verbosity is not 0.
For arguments, see `_format_msg`.
"""
if self.verbosity > 0:
self._warning(msg, *args, _level_str="VWARN", **kwargs)
def verbose_error(self, msg: str, *args: Any, **kwargs: Any):
"""Logs an error if verbosity is not 0.
For arguments, see `_format_msg`.
"""
if self.verbosity > 0:
self._error(msg, *args, _level_str="VERR", **kwargs)
def very_verbose(self, msg: str, *args: Any, **kwargs: Any):
"""Prints if verbosity is > 1.
For arguments, see `_format_msg`.
"""
if self.verbosity > 1:
self.print(msg, *args, _level_str="VVINFO", **kwargs)
def success(self, msg: str, *args: Any, **kwargs: Any):
"""Prints a formatted success message.
For arguments, see `_format_msg`.
"""
self.print(cf.limeGreen(msg), *args, _level_str="SUCC", **kwargs)
def _warning(self, msg: str, *args: Any, _level_str: str = None, **kwargs: Any):
"""Prints a formatted warning message.
For arguments, see `_format_msg`.
"""
if _level_str is None:
raise ValueError("Log level not set.")
self.print(cf.orange(msg), *args, _level_str=_level_str, **kwargs)
def warning(self, *args, **kwargs):
self._warning(*args, _level_str="WARN", **kwargs)
def _error(self, msg: str, *args: Any, _level_str: str = None, **kwargs: Any):
"""Prints a formatted error message.
For arguments, see `_format_msg`.
"""
if _level_str is None:
raise ValueError("Log level not set.")
self.print(cf.red(msg), *args, _level_str=_level_str, **kwargs)
def error(self, *args, **kwargs):
self._error(*args, _level_str="ERR", **kwargs)
def panic(self, *args, **kwargs):
self._error(*args, _level_str="PANIC", **kwargs)
# Fine to expose _level_str here, since this is a general log function.
def print(
self,
msg: str,
*args: Any,
_level_str: str = "INFO",
end: str = None,
**kwargs: Any,
):
"""Prints a message.
For arguments, see `_format_msg`.
"""
self._print(_format_msg(msg, *args, **kwargs), _level_str=_level_str, end=end)
def info(self, msg: str, no_format=True, *args, **kwargs):
self.print(msg, no_format=no_format, *args, **kwargs)
def abort(
self, msg: Optional[str] = None, *args: Any, exc: Any = None, **kwargs: Any
):
"""Prints an error and aborts execution.
Print an error and throw an exception to terminate the program
(the exception will not print a message).
"""
if msg is not None:
self._error(msg, *args, _level_str="PANIC", **kwargs)
if exc is not None:
raise exc
exc_cls = click.ClickException
if self.pretty:
exc_cls = SilentClickException
if msg is None:
msg = "Exiting due to cli_logger.abort()"
raise exc_cls(msg)
def doassert(self, val: bool, msg: str, *args: Any, **kwargs: Any):
"""Handle assertion without throwing a scary exception.
Args:
val: Value to check.
For other arguments, see `_format_msg`.
"""
if not val:
exc = None
if not self.pretty:
exc = AssertionError()
# TODO(maximsmol): rework asserts so that we get the expression
# that triggered the assert
# to do this, install a global try-catch
# for AssertionError and raise them normally
self.abort(msg, *args, exc=exc, **kwargs)
def render_list(self, xs: List[str], separator: str = cf.reset(", ")):
"""Render a list of bolded values using a non-bolded separator."""
return separator.join([str(cf.bold(x)) for x in xs])
def confirm(
self,
yes: bool,
msg: str,
*args: Any,
_abort: bool = False,
_default: bool = False,
_timeout_s: Optional[float] = None,
**kwargs: Any,
):
"""Display a confirmation dialog.
Valid answers are "y/yes/true/1" and "n/no/false/0".
Args:
yes: If `yes` is `True` the dialog will default to "yes"
and continue without waiting for user input.
_abort (bool):
If `_abort` is `True`,
"no" means aborting the program.
_default (bool):
The default action to take if the user just presses enter
with no input.
_timeout_s (float):
If user has no input within _timeout_s seconds, the default
action is taken. None means no timeout.
"""
should_abort = _abort
default = _default
if not self.interactive and not yes:
# no formatting around --yes here since this is non-interactive
self.error(
"This command requires user confirmation. "
"When running non-interactively, supply --yes to skip."
)
raise ValueError("Non-interactive confirm without --yes.")
if default:
yn_str = "Y/n"
else:
yn_str = "y/N"
confirm_str = cf.underlined("Confirm [" + yn_str + "]:") + " "
rendered_message = _format_msg(msg, *args, **kwargs)
# the rendered message ends with ascii coding
if rendered_message and not msg.endswith("\n"):
rendered_message += " "
msg_len = len(rendered_message.split("\n")[-1])
complete_str = rendered_message + confirm_str
if yes:
self._print(complete_str + "y " + cf.dimmed("[automatic, due to --yes]"))
return True
self._print(complete_str, _linefeed=False)
res = None
yes_answers = ["y", "yes", "true", "1"]
no_answers = ["n", "no", "false", "0"]
try:
while True:
if _timeout_s is None:
ans = sys.stdin.readline()
elif sys.platform == "win32":
# Windows doesn't support select
start_time = time.time()
ans = ""
while True:
if (time.time() - start_time) >= _timeout_s:
self.newline()
ans = "\n"
break
elif msvcrt.kbhit():
ch = msvcrt.getwch()
if ch in ("\n", "\r"):
self.newline()
ans = ans + "\n"
break
elif ch == "\b":
if ans:
ans = ans[:-1]
# Emulate backspace erasing
print("\b \b", end="", flush=True)
else:
ans = ans + ch
print(ch, end="", flush=True)
else:
time.sleep(0.1)
else:
ready, _, _ = select.select([sys.stdin], [], [], _timeout_s)
if not ready:
self.newline()
ans = "\n"
else:
ans = sys.stdin.readline()
ans = ans.lower()
if ans == "\n":
res = default
break
ans = ans.strip()
if ans in yes_answers:
res = True
break
if ans in no_answers:
res = False
break
indent = " " * msg_len
self.error(
"{}Invalid answer: {}. Expected {} or {}",
indent,
cf.bold(ans.strip()),
self.render_list(yes_answers, "/"),
self.render_list(no_answers, "/"),
)
self._print(indent + confirm_str, _linefeed=False)
except KeyboardInterrupt:
self.newline()
res = default
if not res and should_abort:
# todo: make sure we tell the user if they
# need to do cleanup
self._print("Exiting...")
raise SilentClickException(
"Exiting due to the response to confirm(should_abort=True)."
)
return res
def prompt(self, msg: str, *args, **kwargs):
"""Prompt the user for some text input.
Args:
msg: The mesage to display to the user before the prompt.
Returns:
The string entered by the user.
"""
complete_str = cf.underlined(msg)
rendered_message = _format_msg(complete_str, *args, **kwargs)
# the rendered message ends with ascii coding
if rendered_message and not msg.endswith("\n"):
rendered_message += " "
self._print(rendered_message, linefeed=False)
res = ""
try:
ans = sys.stdin.readline()
ans = ans.lower()
res = ans.strip()
except KeyboardInterrupt:
self.newline()
return res
def flush(self):
sys.stdout.flush()
sys.stderr.flush()
| _CliLogger |
python | PyCQA__pylint | doc/data/messages/n/nonlocal-without-binding/bad.py | {
"start": 0,
"end": 92
} | class ____:
def get_color(self):
nonlocal colors # [nonlocal-without-binding]
| Fruit |
python | psf__black | src/black/handle_ipynb_magics.py | {
"start": 11952,
"end": 12131
} | class ____:
col_offset: int
magic: str
# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here
# as mypyc will generate broken code.
| OffsetAndMagic |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_reflection.py | {
"start": 1814,
"end": 2555
} | class ____(fixtures.TestBase):
__only_on__ = "sqlite"
__backend__ = True
@testing.fixture
def db_fixture(self, connection):
connection.exec_driver_sql(
'ATTACH %r AS "default"' % connection.engine.url.database
)
connection.exec_driver_sql(
'CREATE TABLE "default".a (id INTEGER PRIMARY KEY)'
)
try:
yield
finally:
connection.exec_driver_sql('drop table "default".a')
connection.exec_driver_sql('DETACH DATABASE "default"')
def test_reflect(self, connection, db_fixture):
meta = MetaData(schema="default")
meta.reflect(connection)
assert "default.a" in meta.tables
| KeywordInDatabaseNameTest |
python | crytic__slither | slither/visitors/expression/read_var.py | {
"start": 1711,
"end": 5619
} | class ____(ExpressionVisitor):
def __init__(self, expression: Expression) -> None:
self._result: Optional[List[Expression]] = None
super().__init__(expression)
def result(self) -> List[Expression]:
if self._result is None:
self._result = list(set(get(self.expression)))
return self._result
# override assignment
# dont explore if its direct assignment (we explore if its +=, -=, ...)
def _visit_assignement_operation(self, expression: AssignmentOperation) -> None:
if expression.type != AssignmentOperationType.ASSIGN:
self._visit_expression(expression.expression_left)
self._visit_expression(expression.expression_right)
def _post_assignement_operation(self, expression: AssignmentOperation) -> None:
if expression.type != AssignmentOperationType.ASSIGN:
left = get(expression.expression_left)
else:
left = []
right = get(expression.expression_right)
val = left + right
set_val(expression, val)
def _post_binary_operation(self, expression: BinaryOperation) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right
set_val(expression, val)
def _post_call_expression(self, expression: CallExpression) -> None:
called = get(expression.called)
argss = [get(a) for a in expression.arguments if a]
args = [item for sublist in argss for item in sublist]
val = called + args
set_val(expression, val)
def _post_conditional_expression(self, expression: ConditionalExpression) -> None:
if_expr = get(expression.if_expression)
else_expr = get(expression.else_expression)
then_expr = get(expression.then_expression)
val = if_expr + else_expr + then_expr
set_val(expression, val)
def _post_elementary_type_name_expression(
self, expression: ElementaryTypeNameExpression
) -> None:
set_val(expression, [])
# save only identifier expression
def _post_identifier(self, expression: Identifier) -> None:
if isinstance(expression.value, Variable):
set_val(expression, [expression])
elif isinstance(expression.value, SolidityVariable):
# TODO: investigate if this branch can be reached, and if Identifier.value has the correct type
set_val(expression, [expression])
else:
set_val(expression, [])
def _post_index_access(self, expression: IndexAccess) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right + [expression]
set_val(expression, val)
def _post_literal(self, expression: Literal) -> None:
set_val(expression, [])
def _post_member_access(self, expression: MemberAccess) -> None:
expr = get(expression.expression)
val = expr
set_val(expression, val)
def _post_new_array(self, expression: NewArray) -> None:
set_val(expression, [])
def _post_new_contract(self, expression: NewContract) -> None:
set_val(expression, [])
def _post_new_elementary_type(self, expression: NewElementaryType) -> None:
set_val(expression, [])
def _post_tuple_expression(self, expression: TupleExpression) -> None:
expressions = [get(e) for e in expression.expressions if e]
val = [item for sublist in expressions for item in sublist]
set_val(expression, val)
def _post_type_conversion(self, expression: TypeConversion) -> None:
expr = get(expression.expression)
val = expr
set_val(expression, val)
def _post_unary_operation(self, expression: UnaryOperation) -> None:
expr = get(expression.expression)
val = expr
set_val(expression, val)
| ReadVar |
python | dask__dask | dask/dataframe/tests/test_accessors.py | {
"start": 662,
"end": 10856
} | class ____:
def __init__(self, obj):
self.obj = obj
self.item = "item"
@property
def prop(self):
return self.item
def method(self):
return self.item
@pytest.mark.parametrize(
"obj, registrar",
[
(dd.Series, dd.extensions.register_series_accessor),
(dd.DataFrame, dd.extensions.register_dataframe_accessor),
(dd.Index, dd.extensions.register_index_accessor),
],
)
def test_register(obj, registrar):
if obj is dd.Index:
pytest.skip("from_pandas doesn't support Index")
with ensure_removed(obj, "mine"):
before = set(dir(obj))
registrar("mine")(MyAccessor)
instance = dd.from_pandas(obj._partition_type([], dtype=float), 2)
assert instance.mine.prop == "item"
after = set(dir(obj))
assert (before ^ after) == {"mine"}
assert "mine" in obj._accessors
def test_accessor_works():
with ensure_removed(dd.Series, "mine"):
dd.extensions.register_series_accessor("mine")(MyAccessor)
a = pd.Series([1, 2])
b = dd.from_pandas(a, 2)
assert b.mine.obj is b
assert b.mine.prop == "item"
assert b.mine.method() == "item"
@pytest.fixture
def df_ddf():
import numpy as np
df = pd.DataFrame(
{
"str_col": ["abc", "bcd", "cdef", "DEFG"],
"int_col": [1, 2, 3, 4],
"dt_col": np.array(
[int(1e9), int(1.1e9), int(1.2e9), None], dtype="M8[ns]"
),
},
index=["E", "f", "g", "h"],
)
df["string_col"] = df["str_col"].astype("string")
df.loc["E", "string_col"] = pd.NA
ddf = dd.from_pandas(df, 2)
return df, ddf
@pytest.mark.filterwarnings("ignore:The behavior of DatetimeProperties")
@pytest.mark.xfail(PANDAS_GE_300, reason="divisions are incorrect")
def test_dt_accessor(df_ddf):
df, ddf = df_ddf
assert "date" in dir(ddf.dt_col.dt)
# pandas loses Series.name via datetime accessor
# see https://github.com/pydata/pandas/issues/10712
assert_eq(ddf.dt_col.dt.date, df.dt_col.dt.date, check_names=False)
if PANDAS_GE_210:
warning_ctx = pytest.warns(FutureWarning, match="will return a Series")
else:
warning_ctx = contextlib.nullcontext()
# to_pydatetime returns a numpy array in pandas, but a Series in dask
# pandas will start returning a Series with 3.0 as well
with warning_ctx:
ddf_result = ddf.dt_col.dt.to_pydatetime()
with warning_ctx:
pd_result = pd.Series(
df.dt_col.dt.to_pydatetime(), index=df.index, dtype=object
)
assert_eq(ddf_result, pd_result)
assert set(ddf.dt_col.dt.date.dask) == set(ddf.dt_col.dt.date.dask)
assert set(ddf.dt_col.dt.to_pydatetime().dask) == set(
ddf.dt_col.dt.to_pydatetime().dask
)
def test_dt_accessor_not_available(df_ddf):
df, ddf = df_ddf
# Not available on invalid dtypes
with pytest.raises(AttributeError) as exc:
ddf.str_col.dt
assert ".dt accessor" in str(exc.value)
def test_str_accessor(df_ddf):
df, ddf = df_ddf
# implemented methods are present in tab completion
assert "upper" in dir(ddf.str_col.str)
assert "upper" in dir(ddf.string_col.str)
assert "upper" in dir(ddf.index.str)
# not implemented methods don't show up
assert "get_dummies" not in dir(ddf.str_col.str)
assert not hasattr(ddf.str_col.str, "get_dummies")
# Test simple method on both series and index
assert_eq(ddf.str_col.str.upper(), df.str_col.str.upper())
assert set(ddf.str_col.str.upper().dask) == set(ddf.str_col.str.upper().dask)
assert_eq(ddf.string_col.str.upper(), df.string_col.str.upper())
assert set(ddf.string_col.str.upper().dask) == set(ddf.string_col.str.upper().dask)
assert_eq(ddf.index.str.upper(), df.index.str.upper())
assert set(ddf.index.str.upper().dask) == set(ddf.index.str.upper().dask)
# make sure to pass through args & kwargs
# NOTE: when using pyarrow strings, `.str.contains(...)` will return a result
# with `boolean` dtype, while using object strings returns a `bool`. We cast
# the pandas DataFrame here to ensure pandas and Dask return the same dtype.
ctx = contextlib.nullcontext()
if pyarrow_strings_enabled():
df.str_col = to_pyarrow_string(df.str_col)
if not PANDAS_GE_210:
ctx = pytest.warns(
pd.errors.PerformanceWarning, match="Falling back on a non-pyarrow"
)
assert_eq(
ddf.str_col.str.contains("a"),
df.str_col.str.contains("a"),
)
assert_eq(ddf.string_col.str.contains("a"), df.string_col.str.contains("a"))
assert set(ddf.str_col.str.contains("a").dask) == set(
ddf.str_col.str.contains("a").dask
)
with ctx:
expected = df.str_col.str.contains("d", case=False)
assert_eq(
ddf.str_col.str.contains("d", case=False),
expected,
)
assert set(ddf.str_col.str.contains("d", case=False).dask) == set(
ddf.str_col.str.contains("d", case=False).dask
)
for na in [True, False]:
assert_eq(
ddf.str_col.str.contains("a", na=na),
df.str_col.str.contains("a", na=na),
)
assert set(ddf.str_col.str.contains("a", na=na).dask) == set(
ddf.str_col.str.contains("a", na=na).dask
)
for regex in [True, False]:
assert_eq(
ddf.str_col.str.contains("a", regex=regex),
df.str_col.str.contains("a", regex=regex),
)
assert set(ddf.str_col.str.contains("a", regex=regex).dask) == set(
ddf.str_col.str.contains("a", regex=regex).dask
)
def test_str_accessor_not_available(df_ddf):
df, ddf = df_ddf
# Not available on invalid dtypes
with pytest.raises(AttributeError) as exc:
ddf.int_col.str
assert ".str accessor" in str(exc.value)
assert "str" not in dir(ddf.int_col)
def test_str_accessor_getitem(df_ddf):
df, ddf = df_ddf
assert_eq(ddf.str_col.str[:2], df.str_col.str[:2])
assert_eq(ddf.str_col.str[1], df.str_col.str[1])
def test_str_accessor_extractall(df_ddf):
df, ddf = df_ddf
assert_eq(
ddf.str_col.str.extractall("(.*)b(.*)"), df.str_col.str.extractall("(.*)b(.*)")
)
@pytest.mark.parametrize("method", ["removeprefix", "removesuffix"])
def test_str_accessor_removeprefix_removesuffix(df_ddf, method):
df, ddf = df_ddf
prefix = df.str_col.iloc[0][:2]
suffix = df.str_col.iloc[0][-2:]
missing = "definitely a missing prefix/suffix"
def call(df, arg):
return getattr(df.str_col.str, method)(arg)
assert_eq(call(ddf, prefix), call(df, prefix))
assert_eq(call(ddf, suffix), call(df, suffix))
assert_eq(call(ddf, missing), call(df, missing))
def test_str_accessor_cat(df_ddf):
df, ddf = df_ddf
sol = df.str_col.str.cat(df.str_col.str.upper(), sep=":")
assert_eq(ddf.str_col.str.cat(ddf.str_col.str.upper(), sep=":"), sol)
assert_eq(ddf.str_col.str.cat(df.str_col.str.upper(), sep=":"), sol)
assert_eq(
ddf.str_col.str.cat([ddf.str_col.str.upper(), df.str_col.str.lower()], sep=":"),
df.str_col.str.cat([df.str_col.str.upper(), df.str_col.str.lower()], sep=":"),
)
assert_eq(ddf.str_col.str.cat(sep=":"), df.str_col.str.cat(sep=":"))
for o in ["foo", ["foo"]]:
with pytest.raises(TypeError):
ddf.str_col.str.cat(o)
def test_str_accessor_cat_none():
s = pd.Series(["a", "a", "b", "b", "c", np.nan], name="foo")
ds = dd.from_pandas(s, npartitions=2)
assert_eq(ds.str.cat(), s.str.cat())
assert_eq(ds.str.cat(na_rep="-"), s.str.cat(na_rep="-"))
assert_eq(ds.str.cat(sep="_", na_rep="-"), s.str.cat(sep="_", na_rep="-"))
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_str_accessor_split_noexpand(method):
def call(obj, *args, **kwargs):
return getattr(obj.str, method)(*args, **kwargs)
s = pd.Series(["a b c d", "aa bb cc dd", "aaa bbb ccc dddd"], name="foo")
ds = dd.from_pandas(s, npartitions=2)
for n in [1, 2, 3]:
assert_eq(call(s, n=n, expand=False), call(ds, n=n, expand=False))
assert call(ds, n=1, expand=False).name == "foo"
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_str_accessor_split_expand(method):
def call(obj, *args, **kwargs):
return getattr(obj.str, method)(*args, **kwargs)
s = pd.Series(
["a b c d", "aa bb cc dd", "aaa bbb ccc dddd"], index=["row1", "row2", "row3"]
)
ds = dd.from_pandas(s, npartitions=2)
for n in [1, 2, 3]:
assert_eq(call(s, n=n, expand=True), call(ds, n=n, expand=True))
with pytest.raises(NotImplementedError) as info:
call(ds, expand=True)
assert "n=" in str(info.value)
s = pd.Series(["a,bcd,zz,f", "aabb,ccdd,z,kk", "aaabbb,cccdddd,l,pp"])
ds = dd.from_pandas(s, npartitions=2)
for n in [1, 2, 3]:
assert_eq(
call(s, pat=",", n=n, expand=True), call(ds, pat=",", n=n, expand=True)
)
@pytest.mark.xfail(reason="Need to pad columns")
def test_str_accessor_split_expand_more_columns():
s = pd.Series(["a b c d", "aa", "aaa bbb ccc dddd"])
ds = dd.from_pandas(s, npartitions=2)
assert_eq(s.str.split(n=3, expand=True), ds.str.split(n=3, expand=True))
s = pd.Series(["a b c", "aa bb cc", "aaa bbb ccc"])
ds = dd.from_pandas(s, npartitions=2)
assert_eq(
ds.str.split(n=10, expand=True),
s.str.split(n=10, expand=True),
)
@pytest.mark.parametrize("index", [None, [0]], ids=["range_index", "other index"])
def test_str_split_no_warning(index):
df = pd.DataFrame({"a": ["a\nb"]}, index=index)
ddf = dd.from_pandas(df, npartitions=1)
pd_a = df["a"].str.split("\n", n=1, expand=True)
dd_a = ddf["a"].str.split("\n", n=1, expand=True)
assert_eq(dd_a, pd_a)
def test_string_nullable_types(df_ddf):
df, ddf = df_ddf
assert_eq(ddf.string_col.str.count("A"), df.string_col.str.count("A"))
assert_eq(ddf.string_col.str.isalpha(), df.string_col.str.isalpha())
| MyAccessor |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 4748,
"end": 4803
} | class ____(PydanticValueError):
code = 'url'
| UrlError |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 10633,
"end": 14408
} | class ____(ProjectVersionMixin, GenericModelView):
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
version = self.get_object()
if not version.active:
version.built = False
version.save()
log.info("Removing files for version.", version_slug=version.slug)
clean_project_resources(
version.project,
version,
)
else:
return HttpResponseBadRequest(
"Can't delete HTML for an active version.",
)
return HttpResponseRedirect(self.get_success_url())
def show_config_step(wizard):
"""
Decide whether or not show the config step on "Add project" wizard.
If the `.readthedocs.yaml` file already exist in the default branch, we
don't show this step.
"""
# Try to get the cleaned data from the "basics" step only if
# we are in a step after it, otherwise, return True since we don't
# have the data yet, and django-forms calls this function multiple times.
basics_step = "basics"
cleaned_data = wizard.get_cleaned_data_for_step(basics_step) or {}
repo = cleaned_data.get("repo")
remote_repository = cleaned_data.get("remote_repository")
default_branch = cleaned_data.get("default_branch")
if repo and default_branch and remote_repository and remote_repository.vcs_provider == GITHUB:
# I don't know why `show_config_step` is called multiple times (at least 4).
# This is a problem for us because we perform external calls here and add messages to the request.
# Due to that, we are adding this instance variable to prevent this function to run multiple times.
# Maybe related to https://github.com/jazzband/django-formtools/issues/134
if hasattr(wizard, "_show_config_step_executed"):
return False
remote_repository_relations = (
remote_repository.remote_repository_relations.filter(
user=wizard.request.user,
account__isnull=False,
)
.select_related("account", "user")
.only("user", "account")
)
for relation in remote_repository_relations:
service = GitHubService(relation.user, relation.account)
session = service.session
for yaml in [
".readthedocs.yaml",
".readthedocs.yml",
"readthedocs.yaml",
"readthedocs.yml",
]:
try:
querystrings = f"?ref={default_branch}" if default_branch else ""
response = session.head(
f"https://api.github.com/repos/{remote_repository.full_name}/contents/{yaml}{querystrings}",
timeout=1,
)
if response.ok:
log.info(
"Read the Docs YAML file found for this repository.",
filename=yaml,
)
messages.success(
wizard.request,
_(
"We detected a configuration file in your repository and started your project's first build."
),
)
wizard._show_config_step_executed = True
return False
except Exception:
log.warning(
"Failed when hitting GitHub API to check for .readthedocs.yaml file.",
filename=yaml,
)
continue
return True
| ProjectVersionDeleteHTML |
python | google__jax | tests/api_test.py | {
"start": 259074,
"end": 259448
} | class ____(jtu.JaxTestCase):
@jtu.thread_unsafe_test() # GC isn't predictable
def test_xla_gc_callback(self):
# https://github.com/jax-ml/jax/issues/14882
x_np = np.arange(10, dtype='int32')
x_jax = jax.device_put(x_np)
x_np_weakref = weakref.ref(x_np)
del x_np
del x_jax
gc.collect()
assert x_np_weakref() is None
| GarbageCollectionTest |
python | spyder-ide__spyder | spyder/plugins/editor/utils/editor.py | {
"start": 3965,
"end": 5843
} | class ____(object):
"""
Utility class for running job after a certain delay.
If a new request is made during this delay, the previous request is dropped
and the timer is restarted for the new request.
We use this to implement a cooldown effect that prevents jobs from being
executed while the IDE is not idle.
A job is a simple callable.
"""
def __init__(self, delay=500):
"""
:param delay: Delay to wait before running the job. This delay applies
to all requests and cannot be changed afterwards.
"""
self._timer = QTimer()
self.delay = delay
self._timer.timeout.connect(self._exec_requested_job)
self._args = []
self._kwargs = {}
self._job = lambda x: None
def request_job(self, job, *args, **kwargs):
"""
Request a job execution.
The job will be executed after the delay specified in the
DelayJobRunner contructor elapsed if no other job is requested until
then.
:param job: job.
:type job: callable
:param args: job's position arguments
:param kwargs: job's keyworded arguments
"""
self.cancel_requests()
self._job = job
self._args = args
self._kwargs = kwargs
self._timer.start(self.delay)
def cancel_requests(self):
"""Cancels pending requests."""
self._timer.stop()
self._job = None
self._args = None
self._kwargs = None
def _exec_requested_job(self):
"""Execute the requested job after the timer has timeout."""
self._timer.stop()
try:
self._job(*self._args, **self._kwargs)
except KeyError:
# Catching the KeyError above is necessary to avoid
# issue spyder-ide/spyder#15712.
pass
| DelayJobRunner |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 76325,
"end": 77863
} | class ____:
def test_equality(self):
gn = x509.UniformResourceIdentifier("string")
gn2 = x509.UniformResourceIdentifier("string2")
gn3 = x509.UniformResourceIdentifier("string")
assert gn != gn2
assert gn != object()
assert gn == gn3
def test_not_text(self):
with pytest.raises(TypeError):
x509.UniformResourceIdentifier(1.3) # type:ignore[arg-type]
def test_no_parsed_hostname(self):
gn = x509.UniformResourceIdentifier("singlelabel")
assert gn.value == "singlelabel"
def test_with_port(self):
gn = x509.UniformResourceIdentifier("singlelabel:443/test")
assert gn.value == "singlelabel:443/test"
def test_non_a_label(self):
with pytest.raises(ValueError):
x509.UniformResourceIdentifier(
"http://\u043f\u044b\u043a\u0430.cryptography"
)
def test_empty_hostname(self):
gn = x509.UniformResourceIdentifier("ldap:///some-nonsense")
assert gn.value == "ldap:///some-nonsense"
def test_hash(self):
g1 = x509.UniformResourceIdentifier("http://host.com")
g2 = x509.UniformResourceIdentifier("http://host.com")
g3 = x509.UniformResourceIdentifier("http://other.com")
assert hash(g1) == hash(g2)
assert hash(g1) != hash(g3)
def test_repr(self):
gn = x509.UniformResourceIdentifier("string")
assert repr(gn) == ("<UniformResourceIdentifier(value='string')>")
| TestUniformResourceIdentifier |
python | fluentpython__example-code-2e | 19-concurrency/primes/procs.py | {
"start": 396,
"end": 2190
} | class ____(NamedTuple): # <3>
n: int
prime: bool
elapsed: float
JobQueue = queues.SimpleQueue[int] # <4>
ResultQueue = queues.SimpleQueue[PrimeResult] # <5>
def check(n: int) -> PrimeResult: # <6>
t0 = perf_counter()
res = is_prime(n)
return PrimeResult(n, res, perf_counter() - t0)
def worker(jobs: JobQueue, results: ResultQueue) -> None: # <7>
while n := jobs.get(): # <8>
results.put(check(n)) # <9>
results.put(PrimeResult(0, False, 0.0)) # <10>
def start_jobs(
procs: int, jobs: JobQueue, results: ResultQueue # <11>
) -> None:
for n in NUMBERS:
jobs.put(n) # <12>
for _ in range(procs):
proc = Process(target=worker, args=(jobs, results)) # <13>
proc.start() # <14>
jobs.put(0) # <15>
# end::PRIMES_PROC_TOP[]
# tag::PRIMES_PROC_MAIN[]
def main() -> None:
if len(sys.argv) < 2: # <1>
procs = cpu_count()
else:
procs = int(sys.argv[1])
print(f'Checking {len(NUMBERS)} numbers with {procs} processes:')
t0 = perf_counter()
jobs: JobQueue = SimpleQueue() # <2>
results: ResultQueue = SimpleQueue()
start_jobs(procs, jobs, results) # <3>
checked = report(procs, results) # <4>
elapsed = perf_counter() - t0
print(f'{checked} checks in {elapsed:.2f}s') # <5>
def report(procs: int, results: ResultQueue) -> int: # <6>
checked = 0
procs_done = 0
while procs_done < procs: # <7>
n, prime, elapsed = results.get() # <8>
if n == 0: # <9>
procs_done += 1
else:
checked += 1 # <10>
label = 'P' if prime else ' '
print(f'{n:16} {label} {elapsed:9.6f}s')
return checked
if __name__ == '__main__':
main()
# end::PRIMES_PROC_MAIN[]
| PrimeResult |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/bilstm_classifier.py | {
"start": 12639,
"end": 14451
} | class ____(nn.Module):
"""A Text Classification model."""
embedding_size: int
hidden_size: int
vocab_size: int
output_size: int
dropout_rate: float
word_dropout_rate: float
unk_idx: int = 1
deterministic: bool | None = None
def setup(self):
self.embedder = Embedder(
vocab_size=self.vocab_size,
embedding_size=self.embedding_size,
dropout_rate=self.dropout_rate,
word_dropout_rate=self.word_dropout_rate,
unk_idx=self.unk_idx)
self.encoder = SimpleBiLSTM(hidden_size=self.hidden_size)
self.classifier = AttentionClassifier(
hidden_size=self.hidden_size,
output_size=self.output_size,
dropout_rate=self.dropout_rate)
def embed_token_ids(self, token_ids: Array,
deterministic: bool | None = None) -> Array:
deterministic = nn.module.merge_param(
'deterministic', self.deterministic, deterministic)
return self.embedder(token_ids, deterministic=deterministic)
def logits_from_embedded_inputs(
self, embedded_inputs: Array, lengths: Array,
deterministic: bool | None = None) -> Array:
deterministic = nn.module.merge_param(
'deterministic', self.deterministic, deterministic)
encoded_inputs = self.encoder(embedded_inputs, lengths)
return self.classifier(
encoded_inputs, lengths, deterministic=deterministic)
def __call__(self, token_ids: Array, lengths: Array,
deterministic: bool | None = None) -> Array:
"""Embeds the token IDs, encodes them, and classifies with attention."""
embedded_inputs = self.embed_token_ids(
token_ids, deterministic=deterministic)
logits = self.logits_from_embedded_inputs(
embedded_inputs, lengths, deterministic=deterministic)
return logits
| TextClassifier |
python | pytorch__pytorch | torch/utils/data/datapipes/dataframe/datapipes.py | {
"start": 2754,
"end": 3737
} | class ____(DFIterDataPipe):
def __init__(self, source_datapipe, filter_fn) -> None:
self.source_datapipe = source_datapipe
self.filter_fn = filter_fn
def __iter__(self):
size = None
all_buffer = []
filter_res = []
# pyrefly: ignore [bad-assignment]
for df in self.source_datapipe:
if size is None:
size = len(df.index)
for i in range(len(df.index)):
all_buffer.append(df[i : i + 1])
filter_res.append(self.filter_fn(df.iloc[i]))
buffer = []
for df, res in zip(all_buffer, filter_res, strict=True):
if res:
buffer.append(df)
if len(buffer) == size:
yield df_wrapper.concat(buffer)
buffer = []
if buffer:
yield df_wrapper.concat(buffer)
@functional_datapipe("_to_dataframes_pipe", enable_df_api_tracing=True)
| FilterDataFramesPipe |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/forms.py | {
"start": 3973,
"end": 4036
} | class ____(ReprForm):
_slug = forms.SlugField()
| SlugFieldForm |
python | wandb__wandb | tests/system_tests/test_launch/test_launch.py | {
"start": 283,
"end": 3313
} | class ____:
def __init__(self, *args, **kwargs):
pass
async def verify(self):
pass
async def build_image(self, *args, **kwargs):
pass
@pytest.mark.asyncio
async def test_launch_incorrect_backend(runner, user, monkeypatch):
proj = "test1"
entry_point = ["python", "/examples/examples/launch/launch-quickstart/train.py"]
settings = wandb.Settings(project=proj)
api = InternalApi()
monkeypatch.setattr(
wandb.sdk.launch.builder.build,
"LaunchProject",
lambda *args, **kwargs: MagicMock(),
)
monkeypatch.setattr(
wandb.sdk.launch.builder.build,
"validate_docker_installation",
lambda: None,
)
monkeypatch.setattr(
"wandb.docker",
lambda: None,
)
monkeypatch.setattr(
"wandb.sdk.launch.loader.environment_from_config",
lambda *args, **kawrgs: None,
)
(
monkeypatch.setattr(
"wandb.sdk.launch.loader.registry_from_config", lambda *args, **kawrgs: None
),
)
monkeypatch.setattr(
"wandb.sdk.launch.loader.builder_from_config",
lambda *args, **kawrgs: MockBuilder(),
)
r = wandb.init(settings=settings)
r.finish()
with pytest.raises(
LaunchError,
match="Could not create runner from config. Invalid runner name: testing123",
):
await _launch(
api,
docker_image="testimage",
entity=user,
project=proj,
entry_point=entry_point,
resource="testing123",
)
def test_launch_multi_run(runner, user):
with runner.isolated_filesystem(), mock.patch.dict(
"os.environ", {"WANDB_RUN_ID": "test", "WANDB_LAUNCH": "true"}
):
with wandb.init() as run1:
pass
with wandb.init() as run2:
pass
assert run1.id == "test"
assert run2.id == "test"
def test_launch_get_project_queue_error(user):
proj = "projectq32e"
api = InternalApi()
with pytest.raises(
CommError,
match=f"Error fetching run queues for {user}/{proj} check that you have access to this entity and project",
):
api.get_project_run_queues(user, proj)
def test_launch_wandb_init_launch_envs(
wandb_backend_spy,
runner,
user,
):
queue = "test-queue-name"
with runner.isolated_filesystem(), mock.patch.dict(
"os.environ",
{
"WANDB_LAUNCH_QUEUE_NAME": queue,
"WANDB_LAUNCH_QUEUE_ENTITY": user,
"WANDB_LAUNCH_TRACE_ID": "test123",
},
):
with wandb.init() as run:
run.log({"test": 1})
with wandb_backend_spy.freeze() as snapshot:
config = snapshot.config(run_id=run.id)
assert config["_wandb"]["value"]["launch_trace_id"] == "test123"
assert config["_wandb"]["value"]["launch_queue_entity"] == user
assert config["_wandb"]["value"]["launch_queue_name"] == queue
| MockBuilder |
python | django__django | tests/i18n/test_extraction.py | {
"start": 43915,
"end": 46148
} | class ____(ExtractorTests):
work_subdir = "exclude"
LOCALES = ["en", "fr", "it"]
PO_FILE = "locale/%s/LC_MESSAGES/django.po"
def _set_times_for_all_po_files(self):
"""
Set access and modification times to the Unix epoch time for all the
.po files.
"""
for locale in self.LOCALES:
os.utime(self.PO_FILE % locale, (0, 0))
def setUp(self):
super().setUp()
copytree("canned_locale", "locale")
self._set_times_for_all_po_files()
def test_command_help(self):
with captured_stdout(), captured_stderr():
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(["django-admin", "help", "makemessages"])
def test_one_locale_excluded(self):
management.call_command("makemessages", exclude=["it"], verbosity=0)
self.assertRecentlyModified(self.PO_FILE % "en")
self.assertRecentlyModified(self.PO_FILE % "fr")
self.assertNotRecentlyModified(self.PO_FILE % "it")
def test_multiple_locales_excluded(self):
management.call_command("makemessages", exclude=["it", "fr"], verbosity=0)
self.assertRecentlyModified(self.PO_FILE % "en")
self.assertNotRecentlyModified(self.PO_FILE % "fr")
self.assertNotRecentlyModified(self.PO_FILE % "it")
def test_one_locale_excluded_with_locale(self):
management.call_command(
"makemessages", locale=["en", "fr"], exclude=["fr"], verbosity=0
)
self.assertRecentlyModified(self.PO_FILE % "en")
self.assertNotRecentlyModified(self.PO_FILE % "fr")
self.assertNotRecentlyModified(self.PO_FILE % "it")
def test_multiple_locales_excluded_with_locale(self):
management.call_command(
"makemessages", locale=["en", "fr", "it"], exclude=["fr", "it"], verbosity=0
)
self.assertRecentlyModified(self.PO_FILE % "en")
self.assertNotRecentlyModified(self.PO_FILE % "fr")
self.assertNotRecentlyModified(self.PO_FILE % "it")
| ExcludedLocaleExtractionTests |
python | scipy__scipy | scipy/ndimage/tests/test_morphology.py | {
"start": 131428,
"end": 132710
} | class ____:
# pytest's setup_method seems to clash with the autouse `xp` fixture
# so call _setup manually from all methods
def _setup(self, xp):
# dilation related setup
self.array = xp.asarray([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=xp.uint8)
self.sq3x3 = xp.ones((3, 3))
dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3)
if is_numpy(xp):
self.dilated3x3 = dilated3x3.view(xp.uint8)
else:
self.dilated3x3 = xp.astype(dilated3x3, xp.uint8)
def test_dilation_square_structure(self, xp):
self._setup(xp)
result = ndimage.grey_dilation(self.array, structure=self.sq3x3)
# +1 accounts for difference between grey and binary dilation
assert_array_almost_equal(result, self.dilated3x3 + 1)
def test_dilation_scalar_size(self, xp):
self._setup(xp)
result = ndimage.grey_dilation(self.array, size=3)
assert_array_almost_equal(result, self.dilated3x3)
@make_xp_test_case(ndimage.binary_opening, ndimage.binary_closing)
| TestDilateFix |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 6325,
"end": 6497
} | class ____:
def setup(self):
self.s = Series(index=Index([f"i-{i}" for i in range(10000)], dtype=object))
def time_dir_strings(self):
dir(self.s)
| Dir |
python | pypa__pipenv | pipenv/vendor/pythonfinder/exceptions.py | {
"start": 37,
"end": 144
} | class ____(Exception):
"""Raised when parsing an invalid python version"""
pass
| InvalidPythonVersion |
python | walkccc__LeetCode | solutions/794. Valid Tic-Tac-Toe State/794.py | {
"start": 0,
"end": 622
} | class ____:
def validTicTacToe(self, board: list[str]) -> bool:
def isWin(c: str) -> bool:
return (any(row.count(c) == 3 for row in board) or
any(row.count(c) == 3 for row in list(zip(*board))) or
all(board[i][i] == c for i in range(3)) or
all(board[i][2 - i] == c for i in range(3)))
countX = sum(row.count('X') for row in board)
countO = sum(row.count('O') for row in board)
if countX < countO or countX - countO > 1:
return False
if isWin('X') and countX == countO or isWin('O') and countX != countO:
return False
return True
| Solution |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/callback_runner.py | {
"start": 1185,
"end": 1337
} | class ____(Generic[P, R]):
@staticmethod
def run(*args: P.args, **kwargs: P.kwargs) -> R: ... # type: ignore[empty-body]
| _ExecutionCallableRunner |
python | realpython__materials | inheritance-and-composition/choosing/hr.py | {
"start": 1458,
"end": 1897
} | class ____(SalaryPolicy):
def __init__(self, weekly_salary, commission_per_sale):
super().__init__(weekly_salary)
self.commission_per_sale = commission_per_sale
@property
def commission(self):
sales = self.hours_worked / 5
return sales * self.commission_per_sale
def calculate_payroll(self):
fixed = super().calculate_payroll()
return fixed + self.commission
| CommissionPolicy |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_quote_name09.py | {
"start": 314,
"end": 1509
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("quote_name09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
sheet_name = "Sheet_1"
worksheet = workbook.add_worksheet(sheet_name)
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [54437760, 59195776]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.repeat_rows(0, 1)
worksheet.set_portrait()
worksheet.vertical_dpi = 200
chart.add_series({"values": [sheet_name, 0, 0, 4, 0]})
chart.add_series({"values": [sheet_name, 0, 1, 4, 1]})
chart.add_series({"values": [sheet_name, 0, 2, 4, 2]})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | google__jax | jax/experimental/sparse/util.py | {
"start": 1181,
"end": 4337
} | class ____(NamedTuple):
shape: Shape
indices_sorted: bool = False
unique_indices: bool = False
#--------------------------------------------------------------------
# utilities
# TODO: possibly make these primitives, targeting cusparse routines
# csr2coo/coo2csr/SPDDMM
def nfold_vmap(fun, N, *, broadcasted=True, in_axes=0):
"""Convenience function to apply (broadcasted) vmap N times."""
_vmap = broadcasting_vmap if broadcasted else vmap
for _ in range(N):
fun = _vmap(fun, in_axes=in_axes)
return fun
def broadcasting_vmap(fun, in_axes=0, out_axes=0):
@functools.wraps(fun)
def batched_fun(*args):
args_flat, in_tree = tree_util.tree_flatten(args)
in_axes_flat = flatten_axes("vmap in_axes", in_tree, in_axes, kws=False)
size = max(arg.shape[i] for arg, i in safe_zip(args_flat, in_axes_flat) if i is not None)
if size > 1:
if any(i is not None and arg.shape[i] not in (1, size)
for arg, i in safe_zip(args_flat, in_axes_flat)):
raise ValueError("broadcasting_vmap: mismatched input shapes")
args_flat, in_axes_flat = zip(*(
(arg, None) if i is None else (lax.squeeze(arg, (i,)), None) if arg.shape[i] == 1 else (arg, i)
for arg, i in zip(args_flat, in_axes_flat)
))
new_args = tree_util.tree_unflatten(in_tree, args_flat)
new_in_axes = tree_util.tree_unflatten(in_tree, in_axes_flat)
return vmap(fun, in_axes=new_in_axes, out_axes=out_axes)(*new_args)
return batched_fun
@jax.jit
def _csr_to_coo(indices: Array, indptr: Array) -> tuple[Array, Array]:
"""Given CSR (indices, indptr) return COO (row, col)"""
return jnp.cumsum(jnp.zeros_like(indices).at[indptr].add(1)) - 1, indices
def _csr_extract(indices: Array, indptr: Array, mat: Array) -> Array:
"""Extract values of dense matrix mat at given CSR indices."""
row, col = _csr_to_coo(indices, indptr)
return _coo_extract(row, col, mat)
def _coo_extract(row: Array, col: Array, mat: Array) -> Array:
"""Extract values of dense matrix mat at given COO indices."""
return mat[row, col]
def _count_stored_elements_per_batch(mat: Array, n_batch: int = 0, n_dense: int = 0) -> Array:
"""Return per-batch number of stored elements (nse) of a dense matrix."""
mat = jnp.asarray(mat)
mask = (mat != 0)
if n_dense > 0:
mask = mask.any(tuple(-(i + 1) for i in range(n_dense)))
mask = mask.sum(tuple(range(n_batch, mask.ndim)))
return mask
def _count_stored_elements(mat: Array, n_batch: int = 0, n_dense: int = 0) -> Array:
"""Return the number of stored elements (nse) of the given dense matrix."""
return _count_stored_elements_per_batch(mat, n_batch, n_dense).max(initial=0)
def _dot_general_validated_shape(
lhs_shape: tuple[int, ...], rhs_shape: tuple[int, ...],
dimension_numbers: DotDimensionNumbers) -> tuple[int, ...]:
"""Validate the inputs and return the output shape."""
lhs = core.ShapedArray(lhs_shape, np.float32)
rhs = core.ShapedArray(rhs_shape, np.float32)
return _dot_general_shape_rule(
lhs, rhs, dimension_numbers=dimension_numbers,
precision=None, preferred_element_type=None, out_sharding=None)
| SparseInfo |
python | Lightning-AI__lightning | src/lightning/pytorch/core/module.py | {
"start": 78586,
"end": 79203
} | class ____:
"""Intercepts attribute access on LightningModule's trainer reference and redirects it to the Fabric object."""
def __init__(self, fabric: lf.Fabric) -> None:
super().__init__()
self._fabric = fabric
def __getattr__(self, item: Any) -> Any:
try:
return getattr(self._fabric, item)
except AttributeError:
raise AttributeError(
f"Your LightningModule code tried to access `self.trainer.{item}` but this attribute is not available"
f" when using Fabric with a LightningModule."
)
| _TrainerFabricShim |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 18169,
"end": 19641
} | class ____(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.input_unit is None:
return None
return {self.inputs[0]: self.offset.input_unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function."""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box # noqa: B018
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.offset) for x in self.bounding_box
)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function."""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model."""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter."""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"offset": outputs_unit[self.outputs[0]]}
| Shift |
python | realpython__materials | python-magic-methods/point.py | {
"start": 0,
"end": 1343
} | class ____:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f"'other' must be of type '{type(self).__name__}'")
return self.x == other.x and self.y == other.y
def __lt__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f"'other' must be of type '{type(self).__name__}'")
return self.x < other.x and self.y < other.y
def __gt__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f"'other' must be of type '{type(self).__name__}'")
return self.x > other.x and self.y > other.y
def __le__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f"'other' must be of type '{type(self).__name__}'")
return self.x <= other.x and self.y <= other.y
def __ge__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f"'other' must be of type '{type(self).__name__}'")
return self.x >= other.x and self.y >= other.y
def __ne__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f"'other' must be of type '{type(self).__name__}'")
return self.x != other.x and self.y != other.y
| Point |
python | coleifer__peewee | peewee.py | {
"start": 244378,
"end": 246474
} | class ____(_ModelQueryHelper):
def union_all(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'UNION ALL', rhs)
__add__ = union_all
def union(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'UNION', rhs)
__or__ = union
def intersect(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'INTERSECT', rhs)
__and__ = intersect
def except_(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'EXCEPT', rhs)
__sub__ = except_
def __iter__(self):
if not self._cursor_wrapper:
self.execute()
return iter(self._cursor_wrapper)
def prefetch(self, *subqueries, **kwargs):
return prefetch(self, *subqueries, **kwargs)
def get(self, database=None):
clone = self.paginate(1, 1)
clone._cursor_wrapper = None
try:
return clone.execute(database)[0]
except IndexError:
sql, params = clone.sql()
raise self.model.DoesNotExist('%s instance matching query does '
'not exist:\nSQL: %s\nParams: %s' %
(clone.model, sql, params))
def get_or_none(self, database=None):
try:
return self.get(database=database)
except self.model.DoesNotExist:
pass
@Node.copy
def group_by(self, *columns):
grouping = []
for column in columns:
if is_model(column):
grouping.extend(column._meta.sorted_fields)
elif isinstance(column, Table):
if not column._columns:
raise ValueError('Cannot pass a table to group_by() that '
'does not have columns explicitly '
'declared.')
grouping.extend([getattr(column, col_name)
for col_name in column._columns])
else:
grouping.append(column)
self._group_by = grouping
| BaseModelSelect |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_schemas.py | {
"start": 3542,
"end": 10652
} | class ____:
"""A parameter in the function signature that represents an ONNX attribute."""
name: str
type: ir.AttributeType
required: bool
default: ir.Attr | None = None
def __str__(self) -> str:
type_str = self.type.name
if self.has_default():
return f"{self.name}: {type_str} = {self.default}"
return f"{self.name}: {type_str}"
def has_default(self) -> bool:
return self.default is not None
def _get_type_from_str(
type_str: str,
) -> ir.TensorType | ir.SequenceType | ir.OptionalType:
"""Converter a type_str from ONNX Opschema to ir.TypeProtocol.
A type str has the form of "tensor(float)" or composite type like "seq(tensor(float))".
"""
# TODO: Upstream this to IR
# Split the type_str a sequence types and dtypes
# 1. Remove the ending ")"
striped = type_str.rstrip(")")
# 2. Split the type_str by "("
type_parts = striped.split("(")
# Convert the dtype to ir.DataType
dtype = ir.DataType[type_parts[-1].upper()]
# Create a place holder type first
type_: ir.TypeProtocol = ir.TensorType(ir.DataType.UNDEFINED)
# Construct the type
for type_part in reversed(type_parts[:-1]):
if type_part == "tensor":
type_ = ir.TensorType(dtype)
elif type_part == "seq":
type_ = ir.SequenceType(type_)
elif type_part == "optional":
type_ = ir.OptionalType(type_)
else:
raise ValueError(f"Unknown type part: '{type_part}' in type '{type_str}'")
return type_ # type: ignore[return-value]
def _convert_formal_parameter(
param: onnx.defs.OpSchema.FormalParameter,
type_constraints: Mapping[str, TypeConstraintParam],
) -> Parameter:
"""Convert a formal parameter from ONNX Opschema to Parameter."""
if param.type_str in type_constraints:
type_constraint = type_constraints[param.type_str]
else:
# param.type_str can be a plain type like 'int64'.
type_constraint = TypeConstraintParam(
name=param.name,
allowed_types={_get_type_from_str(param.type_str)},
)
return Parameter(
name=param.name,
type_constraint=type_constraint,
required=param.option != onnx.defs.OpSchema.FormalParameterOption.Optional,
variadic=param.option == onnx.defs.OpSchema.FormalParameterOption.Variadic,
)
def _is_optional(type_: type) -> bool:
"""Returns whether a type_ is an Optional."""
origin_type = typing.get_origin(type_)
if origin_type is Union and type(None) in typing.get_args(type_):
# Python < 3.10
return True
if origin_type is Optional:
# Python >= 3.10
return True
if (
hasattr(types, "UnionType")
and origin_type is types.UnionType
and type(None) in typing.get_args(type_)
):
# Python >= 3.10
return True
return False
def _get_attr_type(type_: type) -> ir.AttributeType:
"""Obtain the type of the attribute from a Python class."""
try:
if type_ in _PY_TYPE_TO_ATTR_TYPE:
return _PY_TYPE_TO_ATTR_TYPE[type_]
origin_type = typing.get_origin(type_)
if origin_type is None:
return ir.AttributeType.UNDEFINED
if origin_type in (
collections.abc.Sequence,
Sequence,
list,
list,
tuple,
tuple,
):
inner_type = typing.get_args(type_)[0]
if inner_type in _LIST_TYPE_TO_ATTR_TYPE:
return _LIST_TYPE_TO_ATTR_TYPE[inner_type]
except TypeError:
logger.warning("TypeError when checking %s.", type_, exc_info=True)
return ir.AttributeType.UNDEFINED
def _get_type_constraint_name(type_: TypeAnnotationValue) -> str | None:
"""Returns the name of the type constraint for a given type annotation.
Args:
type_: A Python type.
Returns:
The name of the type constraint if it is a TypeVar.
- Prefixes the name with "Sequence_" if the type annotation is a Sequence[].
"""
if isinstance(type_, TypeVar):
return type_.__name__
if _is_optional(type_):
subtypes = typing.get_args(type_)
for subtype in subtypes:
if subtype is type(None):
continue
type_param_name = _get_type_constraint_name(subtype)
return type_param_name if type_param_name else None
origin_type = typing.get_origin(type_)
if isinstance(origin_type, type) and issubclass(origin_type, Sequence):
subtypes = typing.get_args(type_)
type_param_name = _get_type_constraint_name(subtypes[0])
return f"Sequence_{type_param_name}" if type_param_name else None
return None
def _get_allowed_types_from_type_annotation(
type_: TypeAnnotationValue,
) -> set[ir.TypeProtocol]:
"""Obtain the allowed types from a type annotation."""
if type_ is onnxscript.onnx_types.TensorType:
# Any tensor type
return {ir.TensorType(dtype) for dtype in ir.DataType}
allowed_types: set[ir.TypeProtocol]
if isinstance(type_, TypeVar):
allowed_types = set()
if constraints := type_.__constraints__:
for constraint in constraints:
allowed_types.update(
_get_allowed_types_from_type_annotation(constraint)
)
else:
bound = type_.__bound__
if bound is None:
allowed_types = _ALL_VALUE_TYPES # type: ignore[assignment]
else:
allowed_types.update(_get_allowed_types_from_type_annotation(bound))
return allowed_types
if hasattr(type_, "dtype"):
# A single tensor type like INT64, FLOAT, etc.
return {ir.TensorType(ir.DataType(type_.dtype))}
if _is_optional(type_):
allowed_types = set()
subtypes = typing.get_args(type_)
for subtype in subtypes:
if subtype is type(None):
continue
allowed_types.update(_get_allowed_types_from_type_annotation(subtype))
# NOTE: We do not consider dynamic optional types like optional(float) because they are not very useful.
return allowed_types
origin_type = typing.get_origin(type_)
if origin_type is Union:
allowed_types = set()
subtypes = typing.get_args(type_)
for subtype in subtypes:
assert subtype is not type(None), (
"Union should not contain None type because it is handled by _is_optional."
)
allowed_types.update(_get_allowed_types_from_type_annotation(subtype))
return allowed_types
if isinstance(origin_type, type) and issubclass(origin_type, Sequence):
subtypes = typing.get_args(type_)
return {
ir.SequenceType(t)
for t in _get_allowed_types_from_type_annotation(subtypes[0])
}
# Allow everything by default
return _ALL_VALUE_TYPES # type: ignore[return-value]
@dataclasses.dataclass
| AttributeParameter |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 22129,
"end": 22361
} | class ____(DifferentiableAOTOutput):
"""A plain tensor output at position idx of the output tuple"""
idx: int
def expr(self) -> str:
return f"output[{self.idx}]"
@dataclasses.dataclass(frozen=True)
| PlainAOTOutput |
python | google__jax | tests/multiprocess/all_reduce_test.py | {
"start": 897,
"end": 5281
} | class ____(jt_multiprocess.MultiProcessTest):
def test_psum_simple(self):
mesh = jtu.create_mesh((jax.device_count(),), "x")
spec = jax.P("x")
@jax.shard_map(mesh=mesh, in_specs=spec, out_specs=spec)
def f(x):
return lax.psum(x, "x")
out = f(jnp.array([1] * jax.device_count()))
for o in out.addressable_shards:
self.assertEqual(o.data, np.array([jax.device_count()]))
@parameterized.parameters(
(np.int32,), (jnp.float32,), (jnp.float16,), (jnp.bfloat16,)
)
def test_psum(self, dtype):
mesh_shape = (jax.process_count(), jax.local_device_count())
mesh = jtu.create_mesh(mesh_shape, ("x", "y"))
spec = jax.P("x", "y")
@jax.shard_map(mesh=mesh, in_specs=spec, out_specs=spec)
def f(x):
return lax.psum(x, ("x", "y"))
xs = (
jnp.arange(jax.local_device_count())
+ jax.process_index() * jax.local_device_count()
)
xs = jnp.expand_dims(xs, axis=0).astype(dtype)
sharding = jax.NamedSharding(mesh, spec)
global_xs = jax.make_array_from_process_local_data(sharding, xs, mesh_shape)
local_xs = jnp.sum(jnp.arange(jax.device_count())).reshape(1, 1)
out = f(global_xs)
for actual in out.addressable_shards:
jtu.check_close(actual.data, local_xs)
def test_psum_subset_devices(self):
mesh_shape = (jax.process_count(), jax.local_device_count())
mesh = jtu.create_mesh(mesh_shape, ("x", "y"))
spec = jax.P("x")
@jax.shard_map(mesh=mesh, in_specs=spec, out_specs=spec)
def f(x):
return lax.psum(x, "x")
xs = (
jnp.arange(jax.local_device_count())
+ jax.process_index() * jax.local_device_count()
)
xs = jnp.expand_dims(xs, axis=0)
sharding = jax.NamedSharding(mesh, spec)
global_xs = jax.make_array_from_process_local_data(sharding, xs, mesh_shape)
local_xs = (
jnp.arange(jax.device_count())
.reshape(mesh_shape)
.sum(axis=0, keepdims=True)
)
out = f(global_xs)
for actual in out.addressable_shards:
jtu.check_close(actual.data, local_xs)
def test_psum_multiple_operands(self):
mesh_shape = (jax.process_count(), jax.local_device_count())
mesh = jtu.create_mesh(mesh_shape, ("x", "y"))
spec = jax.P("x", "y")
sharding = jax.NamedSharding(mesh, spec)
x = (
jnp.arange(jax.local_device_count())
+ jax.process_index() * jax.local_device_count()
)
x = jnp.expand_dims(x, axis=(0, -1))
@jax.shard_map(mesh=mesh, in_specs=spec, out_specs=spec)
def f(x):
return lax.psum(x, ("x", "y"))
length = 100
xs = jnp.tile(x, (1, 1, length))
global_shape = mesh_shape + (length,)
global_xs = jax.make_array_from_process_local_data(sharding, xs, global_shape)
local_xs = jnp.sum(jnp.arange(jax.device_count())) * jnp.ones((1, 1, length))
out = f(global_xs)
for actual in out.addressable_shards:
jtu.check_close(actual.data, local_xs)
length = 200
xs = jnp.tile(x, (1, 1, length))
global_shape = mesh_shape + (length,)
global_xs = jax.make_array_from_process_local_data(sharding, xs, global_shape)
local_xs = jnp.sum(jnp.arange(jax.device_count())) * jnp.ones((1, 1, length))
out = f(global_xs)
for actual in out.addressable_shards:
jtu.check_close(actual.data, local_xs)
# TODO(dsuo): Remove this warning once PmapSharding is removed. We don't
# convert this to shard_map since axis_index_groups raises a
# NotImplementedError.
@jtu.ignore_warning(category=DeprecationWarning)
def test_psum_axis_index_groups(self):
devices = list(range(jax.device_count()))
axis_index_groups = [devices[0::2], devices[1::2]]
print(axis_index_groups, jax.devices())
f = jax.pmap(
lambda x: lax.psum(x, "i", axis_index_groups=axis_index_groups),
axis_name="i",
)
xs = randint_sample([jax.process_count(), jax.local_device_count(), 100])
out = f(xs[jax.process_index()])
xs = xs.reshape([jax.device_count(), 100])
group0_expected = sum(xs[0::2, :])
group1_expected = sum(xs[1::2, :])
for i, actual in enumerate(out):
device_id = i + jax.process_index() * jax.local_device_count()
expected = group0_expected if device_id % 2 == 0 else group1_expected
np.testing.assert_array_equal(actual, expected)
if __name__ == "__main__":
jt_multiprocess.main()
| AllReduceTest |
python | huggingface__transformers | src/transformers/models/levit/modeling_levit.py | {
"start": 2882,
"end": 4799
} | class ____(nn.Module):
"""
LeViT patch embeddings, for final embeddings to be passed to transformer blocks. It consists of multiple
`LevitConvEmbeddings`.
"""
def __init__(self, config):
super().__init__()
self.embedding_layer_1 = LevitConvEmbeddings(
config.num_channels, config.hidden_sizes[0] // 8, config.kernel_size, config.stride, config.padding
)
self.activation_layer_1 = nn.Hardswish()
self.embedding_layer_2 = LevitConvEmbeddings(
config.hidden_sizes[0] // 8, config.hidden_sizes[0] // 4, config.kernel_size, config.stride, config.padding
)
self.activation_layer_2 = nn.Hardswish()
self.embedding_layer_3 = LevitConvEmbeddings(
config.hidden_sizes[0] // 4, config.hidden_sizes[0] // 2, config.kernel_size, config.stride, config.padding
)
self.activation_layer_3 = nn.Hardswish()
self.embedding_layer_4 = LevitConvEmbeddings(
config.hidden_sizes[0] // 2, config.hidden_sizes[0], config.kernel_size, config.stride, config.padding
)
self.num_channels = config.num_channels
def forward(self, pixel_values):
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.embedding_layer_1(pixel_values)
embeddings = self.activation_layer_1(embeddings)
embeddings = self.embedding_layer_2(embeddings)
embeddings = self.activation_layer_2(embeddings)
embeddings = self.embedding_layer_3(embeddings)
embeddings = self.activation_layer_3(embeddings)
embeddings = self.embedding_layer_4(embeddings)
return embeddings.flatten(2).transpose(1, 2)
| LevitPatchEmbeddings |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py | {
"start": 4387,
"end": 6032
} | class ____(
test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
gradients = constant_op.constant(
value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
inputs = constant_op.constant(
value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Shapes must be equal rank|must be rank 1"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel_gradient(
gradients=gradients, inputs=inputs, min=[[0.0]], max=[1.0]))
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Dimension 0 in both shapes must be equal|incorrect size"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel_gradient(
gradients=gradients, inputs=inputs, min=[0.0, 0.1], max=[1.0]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Shapes must be equal rank|must be rank 1"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel_gradient(
gradients=gradients, inputs=inputs, min=[1.0], max=[[1.0]]))
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Dimension 0 in both shapes must be equal|incorrect size"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel_gradient(
gradients=gradients, inputs=inputs, min=[0.0], max=[1.0, 1.1]))
| FakeQuantWithMinMaxVarsPerChannelGradientOpTest |
python | getsentry__sentry | tests/sentry/auth/test_access.py | {
"start": 36689,
"end": 42518
} | class ____(AccessFactoryTestCase):
def setUp(self) -> None:
super().setUp()
# Partner's normal Sentry account.
self.user = self.create_user("integration@example.com")
self.org = self.create_organization()
self.org2 = self.create_organization()
self.out_of_scope_org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.team2 = self.create_team(organization=self.org2)
self.out_of_scope_team = self.create_team(organization=self.out_of_scope_org)
self.project = self.create_project(organization=self.org, teams=[self.team])
self.out_of_scope_project = self.create_project(
organization=self.out_of_scope_org, teams=[self.out_of_scope_team]
)
self.sentry_app = self.create_sentry_app(name="SlowDB", organization=self.org)
self.out_of_scope_sentry_app = self.create_sentry_app(
name="Other App", organization=self.out_of_scope_org
)
self.proxy_user = self.sentry_app.proxy_user
self.out_of_scope_proxy_user = self.out_of_scope_sentry_app.proxy_user
self.install = self.create_sentry_app_installation(
organization=self.org, slug=self.sentry_app.slug, user=self.user
)
self.install2 = self.create_sentry_app_installation(
organization=self.org2, slug=self.sentry_app.slug, user=self.user
)
def test_has_access(self) -> None:
request = self.make_request(user=self.proxy_user)
result = self.from_request(request, self.org)
assert result.has_global_access
assert result.has_team_access(self.team)
assert result.team_ids_with_membership == frozenset({self.team.id})
assert result.scopes == frozenset()
assert result.has_project_access(self.project)
assert result.has_project_membership(self.project)
assert not result.has_project_access(self.out_of_scope_project)
assert not result.permissions
def test_no_access_due_to_no_app(self) -> None:
user = self.create_user("integration2@example.com")
request = self.make_request(user=user)
result = self.from_request(request, self.org)
assert not result.has_team_access(self.team)
assert not result.has_team_access(self.team2)
assert not result.has_team_access(self.out_of_scope_team)
assert not result.has_project_access(self.project)
assert not result.has_project_access(self.out_of_scope_project)
def test_no_access_due_to_no_installation_unowned(self) -> None:
request = self.make_request(user=self.proxy_user)
result = self.from_request(request, self.out_of_scope_org)
assert not result.has_team_access(self.team)
assert not result.has_team_access(self.team2)
assert not result.has_team_access(self.out_of_scope_team)
assert not result.has_project_access(self.project)
assert not result.has_project_access(self.out_of_scope_project)
def test_no_access_due_to_no_installation_owned(self) -> None:
request = self.make_request(user=self.out_of_scope_proxy_user)
result = self.from_request(request, self.out_of_scope_org)
assert not result.has_team_access(self.team)
assert not result.has_team_access(self.team2)
assert not result.has_team_access(self.out_of_scope_team)
assert not result.has_project_access(self.project)
assert not result.has_project_access(self.out_of_scope_project)
def test_no_access_due_to_invalid_user(self) -> None:
request = self.make_request(user=self.out_of_scope_proxy_user)
result = self.from_request(request, self.org)
assert not result.has_team_access(self.team)
assert not result.has_team_access(self.team2)
assert not result.has_team_access(self.out_of_scope_team)
assert not result.has_project_access(self.project)
assert not result.has_project_access(self.out_of_scope_project)
def test_no_deleted_projects(self) -> None:
self.create_member(organization=self.org, user=self.user, role="owner", teams=[self.team])
deleted_project = self.create_project(
organization=self.org, status=ObjectStatus.PENDING_DELETION, teams=[self.team]
)
request = self.make_request(user=self.proxy_user)
result = self.from_request(request, self.org)
assert result.has_project_access(deleted_project) is False
assert result.has_project_membership(deleted_project) is False
def test_no_deleted_teams(self) -> None:
deleted_team = self.create_team(organization=self.org, status=TeamStatus.PENDING_DELETION)
self.create_member(
organization=self.org, user=self.user, role="owner", teams=[self.team, deleted_team]
)
request = self.make_request(user=self.proxy_user)
result = self.from_request(request, self.org)
assert result.has_team_access(deleted_team) is False
def test_has_app_scopes(self) -> None:
app_with_scopes = self.create_sentry_app(name="ScopeyTheApp", organization=self.org)
with assume_test_silo_mode(SiloMode.CONTROL):
app_with_scopes.update(scope_list=["team:read", "team:write"])
self.create_sentry_app_installation(
organization=self.org, slug=app_with_scopes.slug, user=self.user
)
request = self.make_request(user=app_with_scopes.proxy_user)
result = self.from_request(request, self.org)
assert result.scopes == frozenset({"team:read", "team:write"})
assert result.has_scope("team:read") is True
assert result.has_scope("team:write") is True
assert result.has_scope("team:admin") is False
@no_silo_test
| FromSentryAppTest |
python | Textualize__textual | tests/tree/test_node_refresh.py | {
"start": 637,
"end": 2462
} | class ____(App[None]):
def compose(self) -> ComposeResult:
yield HistoryTree()
def on_mount(self) -> None:
self.query_one(HistoryTree).root.expand_all()
async def test_initial_state() -> None:
"""Initially all the visible nodes should have had a render call."""
app = RefreshApp()
async with app.run_test():
assert app.query_one(HistoryTree).render_hits == {(0,0), (1,0), (2,0)}
async def test_root_refresh() -> None:
"""A refresh of the root node should cause a subsequent render call."""
async with RefreshApp().run_test() as pilot:
assert (0, 1) not in pilot.app.query_one(HistoryTree).render_hits
pilot.app.query_one(HistoryTree).counter += 1
pilot.app.query_one(HistoryTree).root.refresh()
await pilot.pause()
assert (0, 1) in pilot.app.query_one(HistoryTree).render_hits
async def test_child_refresh() -> None:
"""A refresh of the child node should cause a subsequent render call."""
async with RefreshApp().run_test() as pilot:
assert (1, 1) not in pilot.app.query_one(HistoryTree).render_hits
pilot.app.query_one(HistoryTree).counter += 1
pilot.app.query_one(HistoryTree).root.children[0].refresh()
await pilot.pause()
assert (1, 1) in pilot.app.query_one(HistoryTree).render_hits
async def test_grandchild_refresh() -> None:
"""A refresh of the grandchild node should cause a subsequent render call."""
async with RefreshApp().run_test() as pilot:
assert (2, 1) not in pilot.app.query_one(HistoryTree).render_hits
pilot.app.query_one(HistoryTree).counter += 1
pilot.app.query_one(HistoryTree).root.children[0].children[0].refresh()
await pilot.pause()
assert (2, 1) in pilot.app.query_one(HistoryTree).render_hits
| RefreshApp |
python | great-expectations__great_expectations | great_expectations/_version.py | {
"start": 1608,
"end": 18937
} | class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: dict = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print(f"unable to run {dispcmd}")
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip()
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print(f"unable to run {dispcmd} (error)")
print(f"stdout was {stdout}")
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root) # noqa: PTH119 # FIXME CoP
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level # noqa: PTH120 # FIXME CoP
if verbose:
print(f"Tried directories {rootdirs!s} but none started with prefix {parentdir_prefix}")
raise NotThisMethod("rootdir doesn't start with parentdir_prefix") # noqa: TRY003 # FIXME CoP
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):  # noqa: C901 # too complex
    """Extract version information from the given file.

    Scans ``versionfile_abs`` (a _version.py) for the ``git_refnames``,
    ``git_full`` and ``git_date`` keyword assignments and returns whichever
    were found as a dict. Returns an empty dict if the file cannot be read.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # Context manager guarantees the file is closed even if parsing fails
        # (the original open()/close() pair leaked the handle on error).
        with open(versionfile_abs) as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except OSError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):  # noqa: C901 # too complex
    """Get version information from git keywords.

    ``keywords`` is the dict produced by git_get_keywords(): expanded
    git-archive "$Format" substitutions with "refnames", "full" and
    optionally "date" entries. Returns a version-info dict on success;
    raises NotThisMethod when the keywords are absent or were never
    expanded (i.e. this is not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")  # noqa: TRY003 # FIXME CoP
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # The $Format$ placeholder was never substituted by git-archive.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")  # noqa: TRY003 # FIXME CoP
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r"\d", r)}
        if verbose:
            print(f"discarding '{','.join(refs - tags)}', no digits")
    if verbose:
        print(f"likely tags: {','.join(sorted(tags))}")
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix) :]
            if verbose:
                print(f"picking {r}")
            return {
                "version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False,
                "error": None,
                "date": date,
            }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(  # noqa: C901 # 11
    tag_prefix, root, verbose, run_command=run_command
):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: "long" (full hex revision), "short"
    (abbreviated hex), "closest-tag" (tag with ``tag_prefix`` stripped, or
    None if untagged), "distance" (commits since tag, or total commit count
    when untagged), "dirty" (bool), "date" (ISO-8601-like commit date) and
    "error" (None on success). Raises NotThisMethod if ``root`` is not under
    git control.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False on Windows requires the actual executable names.
        GITS = ["git.cmd", "git.exe"]
    _out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print(f"Directory {root} not under git control")
        raise NotThisMethod("'git rev-parse --git-dir' returned error")  # noqa: TRY003 # FIXME CoP
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            f"{tag_prefix}*",
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")  # noqa: TRY003 # FIXME CoP
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")  # noqa: TRY003 # FIXME CoP
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = f"unable to parse git-describe output: '{describe_out}'"
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): assumes 'git show' succeeds here (the repo check above
    # passed); if it failed, run_command would return None and the
    # `[0].strip()` below would raise AttributeError — confirm acceptable.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a ".".

    Guards against ``closest-tag`` being present but None (``"+" in None``
    would raise TypeError); an absent or None tag means the PEP 440 local
    version separator "+" has not been used yet.
    """
    if "+" in (pieces.get("closest-tag", "") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag at all; everything goes into the local segment
        out = f"0+untagged.{pieces['distance']}.g{pieces['short']}"
        return out + ".dirty" if pieces["dirty"] else out
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        # "." when the tag already carries a "+" local segment, else "+"
        sep = "." if "+" in tag else "+"
        out += f"{sep}{pieces['distance']}.g{pieces['short']}"
        if pieces["dirty"]:
            out += ".dirty"
    return out
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1: no tag at all
        return f"0.post.dev{distance}"
    return f"{tag}.post.dev{distance}" if distance else tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag; distance is the total commit count
        out = f"0.post{pieces['distance']}"
        if pieces["dirty"]:
            out += ".dev0"
        return out + f"+g{pieces['short']}"
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        out += f".post{pieces['distance']}"
        if pieces["dirty"]:
            out += ".dev0"
        # "." when the tag already carries a "+" local segment, else "+"
        sep = "." if "+" in tag else "+"
        out += f"{sep}g{pieces['short']}"
    return out
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    needs_post = pieces["distance"] or pieces["dirty"]
    if tag and not needs_post:
        # Exactly on a clean tag: the tag alone is the version.
        return tag
    prefix = tag if tag else "0"
    suffix = ".dev0" if pieces["dirty"] else ""
    return f"{prefix}.post{pieces['distance']}{suffix}"
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += f"-{pieces['distance']}-g{pieces['short']}"
    else:
        # exception #1: fall back to the bare short hash
        out = pieces["short"]
    return (out + "-dirty") if pieces["dirty"] else out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # distance and hash always appear, even at distance 0
        out = f"{tag}-{pieces['distance']}-g{pieces['short']}"
    else:
        # exception #1: bare short hash
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):  # noqa: C901 # too complex
    """Render the given version pieces into the requested style.

    Returns a dict with "version", "full-revisionid", "dirty", "error" and
    "date" keys. If ``pieces`` carries an error, the version is reported as
    "unknown". Raises ValueError for an unrecognized style.
    """
    if pieces["error"]:
        # Upstream extraction failed; propagate the error verbatim.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    chosen = style
    if not chosen or chosen == "default":
        chosen = "pep440"  # the default

    if chosen == "pep440":
        version = render_pep440(pieces)
    elif chosen == "pep440-pre":
        version = render_pep440_pre(pieces)
    elif chosen == "pep440-post":
        version = render_pep440_post(pieces)
    elif chosen == "pep440-old":
        version = render_pep440_old(pieces)
    elif chosen == "git-describe":
        version = render_git_describe(pieces)
    elif chosen == "git-describe-long":
        version = render_git_describe_long(pieces)
    else:
        raise ValueError(f"unknown style '{style}'")  # noqa: TRY003 # FIXME CoP

    return {
        "version": version,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' against
    the source tree, then the parent-directory-name heuristic. Falls back to
    a "0+unknown" result when every strategy raises NotThisMethod.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    def _unknown(error):
        # Shared shape for every "couldn't determine the version" result.
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": error,
            "date": None,
        }

    # Strategy 1: expanded git-archive keywords.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    # Strategy 2: ask git directly; first locate the source-tree root.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)  # noqa: PTH120 # FIXME CoP
    except NameError:
        return _unknown("unable to find root of source tree")

    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return _unknown("unable to compute version")
| NotThisMethod |
python | TheAlgorithms__Python | data_structures/binary_tree/distribute_coins.py | {
"start": 839,
"end": 938
} | class ____:
data: int
left: TreeNode | None = None
right: TreeNode | None = None
| TreeNode |
python | numpy__numpy | numpy/testing/tests/test_utils.py | {
"start": 33757,
"end": 35280
} | class ____:
def _assert_func(self, *args, **kwargs):
assert_approx_equal(*args, **kwargs)
def test_simple_0d_arrays(self):
x = np.array(1234.22)
y = np.array(1234.23)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_simple_items(self):
x = 1234.22
y = 1234.23
self._assert_func(x, y, significant=4)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_nan_array(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
def test_nan_items(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
| TestApproxEqual |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 132835,
"end": 133230
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| RepositoryOrder |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-sqlite/destination_sqlite/destination.py | {
"start": 435,
"end": 6359
} | class ____(Destination):
@staticmethod
def _get_destination_path(destination_path: str) -> str:
"""
Get a normalized version of the destination path.
Automatically append /local/ to the start of the path
"""
if not destination_path.startswith("/local"):
destination_path = os.path.join("/local", destination_path)
destination_path = os.path.normpath(destination_path)
if not destination_path.startswith("/local"):
raise ValueError(
f"destination_path={destination_path} is not a valid path." "A valid path shall start with /local or no / prefix"
)
return destination_path
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
streams = {s.stream.name for s in configured_catalog.streams}
path = config.get("destination_path", "")
if path is None:
path = ""
path = self._get_destination_path(path)
con = sqlite3.connect(path)
with con:
# create the tables if needed
for configured_stream in configured_catalog.streams:
name = configured_stream.stream.name
table_name = f"_airbyte_raw_{name}"
if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite:
# delete the tables
query = """
DROP TABLE IF EXISTS {}
""".format(table_name)
con.execute(query)
# create the table if needed
query = """
CREATE TABLE IF NOT EXISTS {table_name} (
_airbyte_ab_id TEXT PRIMARY KEY,
_airbyte_emitted_at TEXT,
_airbyte_data TEXT
)
""".format(table_name=table_name)
con.execute(query)
buffer = defaultdict(list)
for message in input_messages:
if message.type == Type.STATE:
# flush the buffer
for stream_name in buffer.keys():
query = """
INSERT INTO {table_name}
VALUES (?,?,?)
""".format(table_name=f"_airbyte_raw_{stream_name}")
con.executemany(query, buffer[stream_name])
con.commit()
buffer = defaultdict(list)
yield message
elif message.type == Type.RECORD:
data = message.record.data
stream = message.record.stream
if stream not in streams:
logger.debug(f"Stream {stream} was not present in configured streams, skipping")
continue
# add to buffer
buffer[stream].append((str(uuid.uuid4()), datetime.datetime.now().isoformat(), json.dumps(data)))
# flush any remaining messages
for stream_name in buffer.keys():
query = """
INSERT INTO {table_name}
VALUES (?,?,?)
""".format(table_name=f"_airbyte_raw_{stream_name}")
con.executemany(query, buffer[stream_name])
con.commit()
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
# parse the destination path
path = config.get("destination_path", "")
if path is None:
path = ""
path = self._get_destination_path(path)
os.makedirs(os.path.dirname(path), exist_ok=True)
con = sqlite3.connect(path)
con.execute("SELECT 1;")
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
| DestinationSqlite |
python | walkccc__LeetCode | solutions/3496. Maximize Score After Pair Deletions/3496.py | {
"start": 0,
"end": 206
} | class ____:
def maxScore(self, nums: list[int]) -> int:
summ = sum(nums)
if len(nums) % 2 == 1:
return summ - min(nums)
return summ - min(a + b for a, b in itertools.pairwise(nums))
| Solution |
python | pytorch__pytorch | torch/fx/passes/reinplace.py | {
"start": 2219,
"end": 34556
} | class ____(torch.fx.Interpreter):
def run_node(self, node: Node):
self.node_counter += 1
result = super().run_node(node)
node.meta["fake_result"] = result
node.meta["node_idx"] = self.node_counter
# (1) Update metadata with the list of nodes that are used by this node
# copy_() doesn't read from its first argument; it writes to it, overwriting previous data.
# We don't want to treat it as "being used as an input".
node_args = node.args
if node.target is torch.ops.aten.copy_.default:
node_args = node_args[1:]
# (2) Update metadata to track aliasing information about view tensor nodes.
if node.op == "call_function":
view_type = _get_view_type(node.target)
if view_type == _ViewType.SingleOutputView:
assert isinstance(node.args[0], Node)
node.meta["view_of"] = node.args[0]
elif view_type == _ViewType.MultiOutputView:
self.multi_output_view_nodes[node] = node.args[0]
# Check if we returned a multi-output view,
# and we're now grabbing the individual views from the output.
#
# For multi-output views, we want to map each output view to the base,
# but this mapping involves two separate nodes in FX IR.
# e.g. "a, b = x_1.split(...)" becomes:
# %split_tensor : [num_users=2] = call_function[target=torch.ops.aten.split.Tensor](args = (%x_1, 2), kwargs = {})
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%split_tensor, 1), kwargs = {})
# And we'd like to set:
# getitem1.meta['view_of'] = x_1
elif node.target is _operator.getitem:
list_arg = node.args[0]
maybe_base_of_view = self.multi_output_view_nodes.get(list_arg, None)
if maybe_base_of_view is not None:
# Note: we could also track indexing info here for multi-output views.
# I don't think this metadata is strictly needed for de-functionalization.
assert isinstance(maybe_base_of_view, Node)
node.meta["view_of"] = maybe_base_of_view
if "view_of" in node.meta:
# We're linking the current node with its first argument as views.
# Assert here that this is actually the case, and their storages are the same.
assert isinstance(node.meta["fake_result"], FakeTensor)
assert isinstance(node.meta["view_of"].meta["fake_result"], FakeTensor)
view_storage = StorageWeakRef(node.meta["fake_result"]._typed_storage())
base_storage = StorageWeakRef(
node.meta["view_of"].meta["fake_result"]._typed_storage()
)
assert view_storage == base_storage
return result
def propagate(self, *args):
self.multi_output_view_nodes = {}
self.node_counter = -1
with FakeTensorMode() as mode:
fake_args = [
mode.from_tensor(a) if isinstance(a, torch.Tensor) else a for a in args
]
return super().run(*fake_args)
def _schemas_match(functional_schema, inplace_schema):
names_match = (
inplace_schema.name.endswith("_")
and inplace_schema.name[:-1] == functional_schema.name
)
arg_types_match = len(functional_schema.arguments) == len(
inplace_schema.arguments
) and all(
a1.type == a2.type
for a1, a2 in zip(functional_schema.arguments, inplace_schema.arguments)
)
# for the inplace op, its first argument should be mutable
assert (
inplace_schema.arguments[0].alias_info is not None
and inplace_schema.arguments[0].alias_info.is_write
)
# and its remaining arguments shouldn't be.
assert all(a.alias_info is None for a in inplace_schema.arguments[1:])
return names_match and arg_types_match
# TODO: this should be beefed up to be able to properly re-inplace with:
# - mutating ops (e.g. _fused_moving_avg_obs_fq_helper)
# - out= ops (e.g. angle -> angle.out)
# TODO: we should also figure this info out using torchgen.
def _maybe_get_inplace_op(op):
# __module__ seems broken; it returns torch._ops.aten which doesn't exist
if not isinstance(op, torch._ops.OpOverload):
return None
# Some view ops have inplace variants (as_strided_, etc),
# but we do NOT want the reinplacing pass to directly add these into the program.
# (they'll require extra special handling, aren't aren't really useful for perf anyway)
if _is_view_op(op):
return None
op_namespace = op.__module__.split(".")[-1]
op_base_name = op.overloadpacket.__name__
maybe_namespace_module = getattr(torch.ops, op_namespace)
maybe_inplace_op = (
None
if maybe_namespace_module is None
else getattr(maybe_namespace_module, f"{op_base_name}_", None)
)
if maybe_inplace_op is None:
return None
inplace_overloads = [
getattr(maybe_inplace_op, overload_name)
for overload_name in maybe_inplace_op.overloads()
]
inplace_overloads_with_matching_schemas = [
f for f in inplace_overloads if _schemas_match(op._schema, f._schema)
]
# Just because foo() and foo_() are both existing operators,
# They aren't guaranteed to have compatible schemas.
# For example, pow.Scalar(Scalar self, Tensor exponent) has no valid inplace variant,
# Even though several overloads of pow_ exist.
if len(inplace_overloads_with_matching_schemas) == 0:
return None
assert len(inplace_overloads_with_matching_schemas) == 1
inplace_op = inplace_overloads_with_matching_schemas[0]
return inplace_op
_VIEW_INVERSE_MAP: dict[Callable[..., Any], Callable[..., Any]] = {
torch.ops.aten.diagonal_scatter.default: torch.ops.aten.diagonal.default,
torch.ops.aten.select_scatter.default: torch.ops.aten.select.int,
torch.ops.aten.slice_scatter.default: torch.ops.aten.slice.Tensor,
torch.ops.aten.as_strided_scatter.default: torch.ops.aten.as_strided.default,
}
# This function, given a set of set of (aliased) tensor nodes,
# Returns any nodes in the graph that *use* any of the aliases, that occur *after* op_index
# in the node ordering.
def _get_all_later_node_usages(tensor_aliases: set[Node], op_index: int):
def _add_if_tensor(x, set_):
if isinstance(x, FakeTensor):
set_.add(StorageWeakRef(x._typed_storage()))
nodes_used_after = set()
for t in tensor_aliases:
# get all nodes that use the current alias
usage_nodes = t.users
for n in usage_nodes:
# We only care about usages after the current node
if "node_idx" not in n.meta or n.meta["node_idx"] <= op_index:
continue
# We also don't care about intermediate view ops.
# They only matter if their output is then used elsewhere
# (either in an out-of-place op, or as an output to the function).
if n in tensor_aliases:
if (
isinstance(n.target, torch._ops.OpOverload)
or n.target is _operator.getitem
):
continue
nodes_used_after.add(n)
return nodes_used_after
# Given an op that we're trying to re-inplace, "b = foo(a)",
# And given a {view}_scatter op that shows up later in the graph, "y = {view}_scatter(base, x, args...)"
# Then re-inplacing `foo()` would allow us to remove the `{view}_scatter` op entirely, IF:
# If there are any aliases in the alias_set(a) that satisfy:
# (1) The base of "alias", "alias_base", has the same size/stride/offset metadata as "base"
# (2) The output of running {view}(alias, args...) gives you the same size/stride/offset metadata
# as "alias"
def _get_view_inverse_node_usages(
later_node_usages: set[Node], self_aliases: set[Node]
) -> set[Node]:
def matching_view_metadata(a, b):
return (
a.size() == b.size()
and a.stride() == b.stride()
and a.storage_offset() == b.storage_offset()
)
view_inverse_nodes = set()
# Go through them in node order, so we can see chains of view_scatter ops.
for n in sorted(later_node_usages, key=lambda x: x.meta["node_idx"]):
if n.target not in _VIEW_INVERSE_MAP:
continue
base = n.args[0]
mutated_view = n.args[1]
assert isinstance(base, Node)
assert isinstance(base.meta["fake_result"], FakeTensor)
assert isinstance(mutated_view, Node)
assert isinstance(mutated_view.meta["fake_result"], FakeTensor)
assert not isinstance(n.target, str)
# Check that this view_inverse op actually corresponds to taking doing the inverse
# of one of our existing self_alias nodes.
original_view = _VIEW_INVERSE_MAP[n.target]
for self_alias in self_aliases:
# We're looking for some alias of the self arg, "alias",
# that was created from some op `alias = foo(base, args...)`
# such that the current _scatter op "inverts" that foo call.
# We can check that by running the original op again, and checking that the strides match.
if "view_of" not in self_alias.meta:
continue
self_alias_base = self_alias.meta["view_of"]
try:
# The we're trying to reuse the args from the view_scatter call inside of the corresponding
# view op, which might throw. This just indicates that view_scatter op isn't a valid inverse
# of the current alias we're looking at.
view_replay_metadata = original_view(
self_alias_base.meta["fake_result"], *n.args[2:], **n.kwargs
)
expected_metadata = self_alias.meta["fake_result"]
# If the alias and its base both have matching metadata, then this view_scatter op is valid to re-inplace.
if matching_view_metadata(
self_alias_base.meta["fake_result"], base.meta["fake_result"]
) and matching_view_metadata(view_replay_metadata, expected_metadata):
view_inverse_nodes.add(n)
except Exception:
continue
return view_inverse_nodes
@compatibility(is_backward_compatible=True)
def reinplace(gm, *sample_args):
"""
Given an fx.GraphModule, modifies it to perform "reinplacing",
mutating the nodes of the graph.
We look for out-of-place op call sites like `b = a.add(...)`,
and convert them to be inplace (`b = a.add_(...)`),
as long as the input to the current operator ("a") isn't reused
anywhere later in the graph.
This pass currently expects to operate on a **functional, ATen** graph.
This can be obtained by running `make_fx(functionalize(f))`.
Sample inputs are needed to determine aliasing relationships of the inputs.
In general, we can't reinplace node `b = a.add(...)` if "a" aliases any of the
inputs to the program.
Given a node "b = foo(a, args...) the algorithm for re-inplacing is as follows:
(1) Perform some initial checks on the metadata of "a" and "args..."
that can disqualify them from being reinplaced.
(1a) Check that the self argument we're attempting to reinplace
has acceptable dtype/size metadata to reinplace with.
For example, if we have:
a = torch.ones(1)
b = torch.ones(10)
out = torch.add(a, b)
We can't turn that into
a.add_(b)
Because that would require resizing "a".
Similarly, we can't convert torch.ge(a, b) into a.ge_(b),
because that would require changing a's dtype (from e.g. float32 to bool).
Note that in this specific example, we could technically do better..
If we see the pattern:
a_1 = a.ge(b)
a_2 = aten._to_copy(a_1, a.dtype)
Then we this should be valid to completely re-inplace
(this is exactly what functionalization will emit when it sees a.ge_(b)).
This optimization is only really important for user programs
that directly use inplace comparison ops though.
We also cannot re-inplace on tensors that have overlapping memory,
e.g. torch.ones(1).expand(4, 4).add_(1)
(1b) Check if "a" is an alias of any of the program inputs.
If it is, skip and move to the next node.
Inplace'ing an op that would cause it to mutate a program is not sound,
because that would be a side effect visible to the user.
NOTE: there's a future optimization that we should make:
if "a" is a (alias of a) program input, but later in the program
there is a node that looks like "a.copy_(...)",
Then re-inplacing is ok to do - we are temporarily reusing a's buffer,
which will later be overwritten by the copy_() call.
This will be an important optimization to have for programs that mutate
their inputs. It currently isn't implemented though.
(1c) Check if "a" and "args..." alias
For example, re-inplacing to create code like the below
isn't guaranteed to be sound:
aten.mul_(a, a)
(2) Check that "a" and all of its outstanding aliases are not used anywhere
later in the graph. If this is the case, then it's safe to re-inplace
to "b = foo_(a)".
There are a few caveats to this, explained in more detail below:
(a) If "a" is used later as an argument to a view op, that is okay.
It's only a problem if "a" (or that view) is later passed
into a normal operator, or if it is returned as the program output.
(b) If "a" is a repeat argument in `foo()`, then don't reinplace.
Most ATen kernels don't make any guarantees that this is sound,
e.g. if you do aten.mul_(a, a).
So we'll just ban re-inplacing in this case.
It's only a problem if "a" (or that view) is later passed
(c) If "a" is used as an input into a view "inverse" / "scatter"
operator, it is potentially fine to re-inplace
(and remove that scatter operator from the graph).
See below for a more detailed example.
NOTE: there is an optimization in this step that is crucial
to fully recovering performance from functionalization.
Given this program:
def f(x):
a = torch.ops.aten.add(x, x)
b = torch.ops.aten.diagonal(a)
torch.ops.aten.fill_(b, 0)
return d
Functionalization will emit the following:
def f(x):
a = torch.ops.aten.add(x, x)
b = torch.ops.aten.diagonal(a, 0, 1)
b_updated = torch.ops.aten.fill(b, 0)
a_updated = torch.ops.aten.diagonal_scatter(a, b_updated, 0, 1)
return a_updated
Ordinarily, we would not be able to reinplace the fill,
because "b" aliases with "a" which is used by the diagonal_scatter call.
"re-inplacing" is on the hook for figuring out that it is ok to
completely, the expensive diagonal_scatter call, if we re-inplace the add().
So, for every `alias in alias_set(a)`, instead of checking
that "alias" is not used anywhere later in the graph,
we check that
EITHER:
(a) alias is not used anywhere later in the graph
OR:
(b) alias is used exactly once later on in the graph,
in the following op:
out = foo_scatter(alias, x, args...)
where the following must hold:
(i) "foo_scatter" is the "inverse" operator for foo.
This only applies to "foo" ops that are view operators,
which view into a subset of the original tensor's memory.
In practice, there are ~4 operators where this applies:
diagonal -> diagonal_scatter
slice -> slice_scatter
select -> select_scatter
as_strided -> as_strided_scatter
(ii) "args..." are the same between the foo() and foo_scatter() calls.
(3) Perform the actual re-inplacing on foo!
(3b) is the common case, but special care is needed for {view}_scatter (3a)
(3a) {view}_scatter ops.
Consider this program:
a = torch.zeros(2, 2)
b = torch.ones(2)
a[0] = b
Post functionalization, that will look like:
a = torch.zeros(2)
b = torch.ones(1)
a_updated = torch.select_scatter(a, b, 0, 0)
In this case though, there is no "functional" op to re-inplace!
Instead, we'd like to directly remove toe select_scatter call.
We already know from (3) that this is valid,
because "a" has no later usages in the graph.
We perform the re-inplacing on the {view}_scatter op like so
Before:
a_updated = torch.select_scatter(a, b, args...)
After:
a_slice = a.select(a, args...)
a_slice.copy_(b)
(3b) Otherwise, replace the functional op with its inplace variant.
Before:
b = foo(a, args...)
After:
a.foo_(args...)
(4) Finally, after converting either:
Before:
b = foo(a)
After:
foo_(a)
or
Before:
b = {slice}_scatter(a, mutated_slice, args...)
After:
slice = {slice}(a, args...)
slice.copy_(mutated_slice)
We now need to find all later nodes that use "b" as an argument
and update them to take in "a" instead.
Note that for the majority of inplace ops, this isn't actually necessary
(because most inplace ops return "self" as their output).
This isn't generally true for all mutable ops though, which is why
we need to actually replace all of the arguments.
We also need to update our metadata of Dict[StorageWeakRef, Set[Node]],
That maps a given tensor storage to the set of all nodes that take in that storage
as an input.
Specifically, re-inplacing `b = foo(a)` causes "a" and "b"'s sets to get fused
together.
(5) Any "view_inverse/scatter" nodes that were identified as "it's ok to ignore them"
during step (3) get manually deleted from the graph.
Their outputs are no longer used, so technically standard DCE would be able
to do this, but we can no longer run FX's DCE pass now that we have mutable
ops in the graph.
"""
_FunctionalizationMetadataProp(gm).propagate(*sample_args)
# Useful debug printing
# def _print(x):
# if isinstance(x, FakeTensor):
# print(f'fake_result: {StorageWeakRef(x._typed_storage()).cdata}')
# for n in gm.graph.nodes:
# print(n.format_node())
# if hasattr(n, 'meta'):
# print(f'node_idx: {n.meta["node_idx"]}')
# if 'fake_result' in n.meta:
# tree_map(_print, n.meta['fake_result'])
# if 'view_of' in n.meta:
# print(f'view_of: {str(n.meta["view_of"])}')
# print()
# We need to know which nodes correspond to inputs (or their aliases)
# so we know not to re-inplace them.
# NOTE: later, we'll need to add an optimization for fully recovering performance
# on programs that mutate inputs.
input_storages = {
StorageWeakRef(node.meta["fake_result"]._typed_storage())
for node in gm.graph.nodes
if (
node.op == "placeholder"
and isinstance(node.meta["fake_result"], torch.Tensor)
)
}
# We also need to know for a given node, what are all of its aliasing nodes.
storage_to_nodes: dict[StorageWeakRef, set[Node]] = defaultdict(set)
for n in gm.graph.nodes:
if "fake_result" in n.meta:
# Tree-mapping because some ops can return lists of tensors.
def _add_to_map(x):
if isinstance(x, FakeTensor):
storage_to_nodes[StorageWeakRef(x._typed_storage())].add(n)
pytree.tree_map_(_add_to_map, n.meta["fake_result"])
# inplace-ify functional ops, subject to the constraints written below.
all_later_view_inverse_nodes_to_delete = set()
for node in gm.graph.nodes:
if node.op == "call_function":
# Today, the re-inplace pass on directly acts on:
# - functional ops with an inplace variant
# - {view}_scatter ops that can be potentially removed from the graph.
# Both of these ops take in tensor first args, so filtering on this condition
# makes the later code simpler.
# We should revisit this at some point though, particularly when we also want
# the reinplacer to be able to handle out= and mutable operators
# and tensorlist first args (like `_foreach_` ops).
if not isinstance(node.target, torch._ops.OpOverload):
continue
if len(node.target._schema.arguments) < 1:
continue
if type(node.target._schema.arguments[0].type) is not torch.TensorType:
continue
# Step 1a: Check that the self argument we're attempting to reinplace
# has the same size/stride as the output.
# For example, we shouldn't try to reinplace torch.add(scalar_tensor, larger_tensor)
# As it would require resizing scalar_tensor.
# (We could potentially swizzle this into larger_tensor.add_(scalar_tensor),
# this is probably an optimization to revisit later).
self_arg = node.args[0]
self_flattened = pytree.tree_leaves(self_arg.meta["fake_result"])
node_flattened = pytree.tree_leaves(node.meta["fake_result"])
self_has_wrong_metadata = False
if len(self_flattened) == len(node_flattened):
for self_meta, node_meta in zip(self_flattened, node_flattened):
if self_meta.numel() != node_meta.numel():
self_has_wrong_metadata = True
if self_meta.dtype != node_meta.dtype:
self_has_wrong_metadata = True
# We also cannot re-inplace on tensors that have internal memory overlap.
# e.g. torch.ones(1).expand(4, 4).add_(1)
if torch._debug_has_internal_overlap(self_meta) == 1:
self_has_wrong_metadata = True
# Here, we (optimistically) assume that a.resize(b) is valid to re-inplace,
# Since users should never really be calling the functional "torch.ops.aten.resize"
# op directly in their programs.
if self_has_wrong_metadata and node.target != torch.ops.aten.resize.default:
continue
# Step 1b: ensure that the op we're trying to re-inplace isn't a program input
self_arg_storage = StorageWeakRef(
self_arg.meta["fake_result"]._typed_storage()
)
if self_arg_storage in input_storages:
# TODO: later, add the optimization for handling `copy_()` calls in the graph.
continue
if len([x for x in node.args if x is self_arg]) > 1:
# Step 1c:
# Calling stuff like aten.mul_(a, a) isn't guaranteed to be sound,
# so we prevent re-inplacing in this case.
continue
self_arg_storage = StorageWeakRef(
self_arg.meta["fake_result"]._typed_storage()
)
self_aliases = storage_to_nodes[self_arg_storage]
# First, we find all later usages of any of the aliases of self_arg.
later_node_usages = _get_all_later_node_usages(
self_aliases, node.meta["node_idx"]
)
# Then, we check if any of those later usages are actually view_scatter ops
# that are safe to fully remove.
later_view_inverse_node_usages = _get_view_inverse_node_usages(
later_node_usages, self_aliases
)
# Step 2: Check to see if the input to the op is reused later in the graph.
# If not (same goes for its aliases), then this op is safe to re-in place.
# This is a slightly roundabout way to check that there are no later usages of the current self argument.
# (later_view_inverse_node_usages corresponds to "view_scatter" nodes that we are allowed to delete)
can_reinplace = len(later_node_usages - later_view_inverse_node_usages) == 0
if not can_reinplace:
continue
# Step 3a: Special handling for when we see *_scatter operators.
# When we see an operator like `b = torch.slice_scatter(a, ...)`,
# instead of trying to "inplace" it into a.slice_scatter_(..._),
# we would prefer to remove it from the graph entirely,
# and instead copy_() the slice directly into the larger tensor.
# See the description of the algorithm for a full example.
if (
node.target in _VIEW_INVERSE_MAP
and node not in all_later_view_inverse_nodes_to_delete
):
view_op = _VIEW_INVERSE_MAP[node.target]
# Before:
# base_updated = torch.ops.aten.slice_scatter.default(base, mutated_slice, args...)
# After:
# slice = torch.ops.aten.slice.default(base, args...)
# slice.copy_(mutated_slice)
with gm.graph.inserting_before(node):
mutated_slice_node = node.args[1]
remaining_slice_args = node.args[2:]
slice_node = gm.graph.create_node(
"call_function",
view_op,
(self_arg,) + tuple(remaining_slice_args),
node.kwargs,
)
gm.graph.create_node(
"call_function",
torch.ops.aten.copy_.default,
(
slice_node,
mutated_slice_node,
),
{},
)
# Add the slice_scatter node to our "nodes to delete" list.
all_later_view_inverse_nodes_to_delete.add(node)
else:
# Step 3b: Check to see if this operator has an inplace variant.
maybe_inplace_op = _maybe_get_inplace_op(node.target)
if maybe_inplace_op is None:
continue
# And if so, replace it with its inplace variant.
node.target = maybe_inplace_op
# At this point, 'storage_to_nodes' will be stale.
# Now that we're inplacing `b = foo(a)`, we need to effectively
# union together the dict values for b and a's storage.
# Hmm... morally I think we also want to keep the `fake_result` metadata
# up to date here, but I'm not sure how easy it is to do.
# Maybe it's fine to wait until the end of the pass to update it.
curr_node_storage = StorageWeakRef(
node.meta["fake_result"]._typed_storage()
)
storage_to_nodes[self_arg_storage].update(
storage_to_nodes[curr_node_storage]
)
storage_to_nodes[curr_node_storage].update(
storage_to_nodes[self_arg_storage]
)
# Need to remember the view_scatter view nodes we found so we can remove them alter.
all_later_view_inverse_nodes_to_delete.update(
later_view_inverse_node_usages
)
# Step 4:
# Now that we've replaced b = a.foo() with a.foo_(),
# We need to replace any later usages of "b" with "a"
for old in itertools.chain([node], later_view_inverse_node_usages):
new = old.args[0]
nodes_to_update = [
n for n in old.users if n.meta["node_idx"] > node.meta["node_idx"]
]
for node_to_update in nodes_to_update:
def replace_arg(a):
if a == old:
return new
return a
# First, replace usages of "b" with "a"
node_to_update.args = tree_map_only(
Node, replace_arg, node_to_update.args
)
node_to_update.kwargs = tree_map_only(
Node, replace_arg, node_to_update.kwargs
)
# Second, update our storage_to_nodes data structure.
old_flattened_res = pytree.tree_leaves(old.meta["fake_result"])
node_flattened_res = pytree.tree_leaves(
node_to_update.meta["fake_result"]
)
old_res_storage = {
StorageWeakRef(x._typed_storage())
for x in old_flattened_res
if isinstance(x, FakeTensor)
}
node_res_storage = {
StorageWeakRef(x._typed_storage())
for x in node_flattened_res
if isinstance(x, FakeTensor)
}
# This will happen if we're updating a view op, e.g.
# e.g. replacing
# x = view(old)
# x = view(new)
# When that happens, we need to make sure to keep our
# storage mapping up to date.
#
# We're checking for len(...) == 1 here because all view ops are guaranteed to return either a single tensor,
# or multiple tensors that all share the same storage.
# We can't just check equality because we might encounter FX nodes that return zero tensor outputs.
if (
len(old_res_storage) == 1
and len(node_res_storage) == 1
and old_res_storage == node_res_storage
):
new_flattened_res = pytree.tree_leaves(new.meta["fake_result"])
new_res_storage = {
StorageWeakRef(x._typed_storage())
for x in new_flattened_res
if isinstance(x, FakeTensor)
}
assert len(new_res_storage) == 1
(new_ref,) = new_res_storage
(node_ref,) = node_res_storage
# Technically, "old_ref" and all its aliases will remain
# in our mapping.
# That should be fine though, since we deleted "old"
# from the graph at this point.
storage_to_nodes[node_ref].update(storage_to_nodes[new_ref])
storage_to_nodes[new_ref].update(storage_to_nodes[node_ref])
# Step 4: delete any _scatter nodes that we de-functionalized
# Need to take care not to delete any of these nodes until after *all* modifications
# to the graph are finished.
for to_delete in all_later_view_inverse_nodes_to_delete:
gm.graph.erase_node(to_delete)
gm.recompile()
return gm
| _FunctionalizationMetadataProp |
python | scikit-learn__scikit-learn | sklearn/gaussian_process/kernels.py | {
"start": 68393,
"end": 73959
} | class ____(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
r"""Exp-Sine-Squared kernel (aka periodic kernel).
The ExpSineSquared kernel allows one to model functions which repeat
themselves exactly. It is parameterized by a length scale
parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
Only the isotropic variant where :math:`l` is a scalar is
supported at the moment. The kernel is given by:
.. math::
k(x_i, x_j) = \text{exp}\left(-
\frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^ 2} \right)
where :math:`l` is the length scale of the kernel, :math:`p` the
periodicity of the kernel and :math:`d(\cdot,\cdot)` is the
Euclidean distance.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default=1.0
The length scale of the kernel.
periodicity : float > 0, default=1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'periodicity'.
If set to "fixed", 'periodicity' cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import ExpSineSquared
>>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
>>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.0144
>>> gpr.predict(X[:2,:], return_std=True)
(array([425.6, 457.5]), array([0.3894, 0.3467]))
"""
def __init__(
self,
length_scale=1.0,
periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5),
):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
"""Returns the length scale"""
return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter("periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric="euclidean"))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric="euclidean")
K = np.exp(
-2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2
)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = (
4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K
)
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity
)
| ExpSineSquared |
python | rapidsai__cudf | python/cudf/cudf/core/udf/groupby_typing.py | {
"start": 9819,
"end": 10046
} | class ____(AttributeTemplate):
def resolve(self, value, attr):
raise UDFError(
f"JIT GroupBy.apply() does not support DataFrame.{attr}(). "
)
@cuda_registry.register_attr
| DataFrameAttributeTemplate |
python | dask__dask | dask/delayed.py | {
"start": 22362,
"end": 27692
} | class ____(DaskMethodsMixin, OperatorMethodMixin):
"""Represents a value to be computed by dask.
Equivalent to the output from a single key in a dask graph.
"""
__slots__ = ("_key", "_dask", "_length", "_layer")
def __init__(self, key, dsk, length=None, layer=None):
self._key = key
self._dask = dsk
self._length = length
# NOTE: Layer is used by `to_delayed` in other collections, but not in normal Delayed use
self._layer = layer or key
if isinstance(dsk, HighLevelGraph) and self._layer not in dsk.layers:
raise ValueError(
f"Layer {self._layer} not in the HighLevelGraph's layers: {list(dsk.layers)}"
)
@property
def key(self):
return self._key
@property
def dask(self):
return self._dask
def __dask_graph__(self) -> Graph:
return self.dask
def __dask_keys__(self) -> NestedKeys:
return [self.key]
def __dask_layers__(self) -> Sequence[str]:
return (self._layer,)
def __dask_tokenize__(self):
return self.key
__dask_scheduler__ = staticmethod(DEFAULT_GET)
__dask_optimize__ = globalmethod(optimize, key="delayed_optimize")
def __dask_postcompute__(self):
return single_key, ()
def __dask_postpersist__(self):
return self._rebuild, ()
def _rebuild(self, dsk, *, rename=None):
key = replace_name_in_key(self.key, rename) if rename else self.key
if isinstance(dsk, HighLevelGraph) and len(dsk.layers) == 1:
# FIXME Delayed is currently the only collection type that supports both high- and low-level graphs.
# The HLG output of `optimize` will have a layer name that doesn't match `key`.
# Remove this when Delayed is HLG-only (because `optimize` will only be passed HLGs, so it won't have
# to generate random layer names).
layer = next(iter(dsk.layers))
else:
layer = None
return Delayed(key, dsk, self._length, layer=layer)
def __repr__(self):
return f"Delayed({self.key!r})"
def __hash__(self):
return hash(self.key)
def __dir__(self):
return dir(type(self))
def __getattr__(self, attr):
if attr.startswith("_"):
raise AttributeError(f"Attribute {attr} not found")
if attr == "visualise":
# added to warn users in case of spelling error
# for more details: https://github.com/dask/dask/issues/5721
warnings.warn(
"dask.delayed objects have no `visualise` method. "
"Perhaps you meant `visualize`?"
)
return DelayedAttr(self, attr)
def __setattr__(self, attr, val):
try:
object.__setattr__(self, attr, val)
except AttributeError:
# attr is neither in type(self).__slots__ nor in the __slots__ of any of its
# parent classes, and all the parent classes define __slots__ too.
# This last bit needs to be unit tested: if any of the parent classes omit
# the __slots__ declaration, self will gain a __dict__ and this branch will
# become unreachable.
raise TypeError("Delayed objects are immutable")
def __setitem__(self, index, val):
raise TypeError("Delayed objects are immutable")
def __iter__(self):
if self._length is None:
raise TypeError("Delayed objects of unspecified length are not iterable")
for i in range(self._length):
yield self[i]
def __len__(self):
if self._length is None:
raise TypeError("Delayed objects of unspecified length have no len()")
return self._length
def __call__(self, *args, pure=None, dask_key_name=None, **kwargs):
func = delayed(apply, pure=pure)
if dask_key_name is not None:
return func(self, args, kwargs, dask_key_name=dask_key_name)
return func(self, args, kwargs)
def __bool__(self):
raise TypeError("Truth of Delayed objects is not supported")
__nonzero__ = __bool__
def __get__(self, instance, cls):
if instance is None:
return self
return types.MethodType(self, instance)
@classmethod
def _get_binary_operator(cls, op, inv=False):
method = delayed(right(op) if inv else op, pure=True)
return lambda *args, **kwargs: method(*args, **kwargs)
_get_unary_operator = _get_binary_operator
def call_function(func, func_token, args, kwargs, pure=None, nout=None):
dask_key_name = kwargs.pop("dask_key_name", None)
pure = kwargs.pop("pure", pure)
if dask_key_name is None:
name = f"{funcname(func)}-{tokenize(func_token, *args, pure=pure, **kwargs)}"
else:
name = dask_key_name
args2, collections = unzip(map(unpack_collections, args), 2)
collections = list(concat(collections))
dask_kwargs, collections2 = unpack_collections(kwargs)
collections.extend(collections2)
task = Task(name, func, *args2, **dask_kwargs)
graph = HighLevelGraph.from_collections(
name, {name: task}, dependencies=collections
)
nout = nout if nout is not None else None
return Delayed(name, graph, length=nout)
| Delayed |
python | psf__black | tests/data/cases/comments2.py | {
"start": 3484,
"end": 7332
} | class ____:
def _init_host(self, parsed) -> None:
if (parsed.hostname is None or # type: ignore
not parsed.hostname.strip()):
pass
#######################
### SECTION COMMENT ###
#######################
instruction()#comment with bad spacing
# END COMMENTS
# MORE END COMMENTS
# output
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent, # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component, # DRY
)
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
"Any",
"Callable",
"ClassVar",
# ABCs (from collections.abc).
"AbstractSet", # collections.abc.Set.
"ByteString",
"Container",
# Concrete collection types.
"Counter",
"Deque",
"Dict",
"DefaultDict",
"List",
"Set",
"FrozenSet",
"NamedTuple", # Not really a type.
"Generator",
]
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
42,
100.0,
"spam",
# user-defined types and objects
Cheese,
Cheese("Wensleydale"),
SubBytes(b"spam"),
]
if "PYTHON" in os.environ:
add_compiler(compiler_from_env())
else:
# for compiler in compilers.values():
# add_compiler(compiler)
add_compiler(compilers[(7.0, 32)])
# add_compiler(compilers[(7.1, 64)])
# Comment before function.
def inline_comments_in_brackets_ruin_everything():
if typedargslist:
parameters.children = [children[0], body, children[-1]] # (1 # )1
parameters.children = [
children[0],
body,
children[-1], # type: ignore
]
else:
parameters.children = [
parameters.children[0], # (2 what if this was actually long
body,
parameters.children[-1], # )2
]
parameters.children = [parameters.what_if_this_was_actually_long.children[0], body, parameters.children[-1]] # type: ignore
if (
self._proc is not None
# has the child process finished?
and self._returncode is None
# the child process has finished, but the
# transport hasn't been notified yet?
and self._proc.poll() is None
):
pass
# no newline before or after
short = [
# one
1,
# two
2,
]
# no newline after
call(
arg1,
arg2,
"""
short
""",
arg3=True,
)
############################################################################
call2(
# short
arg1,
# but
arg2,
# multiline
"""
short
""",
# yup
arg3=True,
)
lcomp = [
element for element in collection if element is not None # yup # yup # right
]
lcomp2 = [
# hello
element
# yup
for element in collection
# right
if element is not None
]
lcomp3 = [
# This one is actually too long to fit in a single line.
element.split("\n", 1)[0]
# yup
for element in collection.select_elements()
# right
if element is not None
]
while True:
if False:
continue
# and round and round we go
# and round and round we go
# let's return
return Node(
syms.simple_stmt,
[Node(statement, result), Leaf(token.NEWLINE, "\n")], # FIXME: \r\n?
)
CONFIG_FILES = (
[
CONFIG_FILE,
]
+ SHARED_CONFIG_FILES
+ USER_CONFIG_FILES
) # type: Final
| Test |
python | python-excel__xlrd | xlrd/xldate.py | {
"start": 1415,
"end": 1497
} | class ____(XLDateError):
"``datemode`` arg is neither 0 nor 1"
| XLDateBadDatemode |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 753617,
"end": 761076
} | class ____(
MarkPropDefnumberArray, NumericArrayMarkPropDef
):
"""
FieldOrDatumDefWithConditionDatumDefnumberArray schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {"$ref": "#/definitions/FieldOrDatumDefWithCondition<DatumDef,number[]>"}
def __init__(
self,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
bandPosition=bandPosition,
condition=condition,
datum=datum,
title=title,
type=type,
**kwds,
)
| FieldOrDatumDefWithConditionDatumDefnumberArray |
python | getsentry__sentry | tests/sentry/notifications/models/test_notificationsettingprovider.py | {
"start": 576,
"end": 2677
} | class ____(TestCase):
def test_remove_for_user(self) -> None:
NotificationSettingProvider.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type="alerts",
value="never",
provider="slack",
)
# Refresh user for actor
self.user = User.objects.get(id=self.user.id)
# Deletion is deferred and tasks aren't run in tests.
with outbox_runner():
self.user.delete()
assert_no_notification_settings()
def test_remove_for_team(self) -> None:
NotificationSettingProvider.objects.create(
team_id=self.team.id,
scope_type="team",
scope_identifier=self.team.id,
type="alerts",
value="never",
provider="slack",
)
# Deletion is deferred and tasks aren't run in tests.
with assume_test_silo_mode(SiloMode.REGION), outbox_runner():
self.team.delete()
with self.tasks():
schedule_hybrid_cloud_foreign_key_jobs_control()
assert_no_notification_settings()
def test_remove_for_project(self) -> None:
NotificationSettingProvider.objects.create(
user_id=self.user.id,
scope_type="project",
scope_identifier=self.project.id,
type="alerts",
value="never",
provider="slack",
)
with assume_test_silo_mode(SiloMode.REGION):
self.project.delete()
assert_no_notification_settings()
def test_remove_for_organization(self) -> None:
NotificationSettingProvider.objects.create(
user_id=self.user.id,
scope_type="organization",
scope_identifier=self.organization.id,
type="alerts",
value="never",
provider="slack",
)
with assume_test_silo_mode(SiloMode.REGION), outbox_runner():
self.organization.delete()
assert_no_notification_settings()
| NotificationSettingTest |
python | huggingface__transformers | src/transformers/models/owlv2/modeling_owlv2.py | {
"start": 17982,
"end": 22957
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scale
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
# apply the causal_attention_mask first
if causal_attention_mask is not None:
if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
f" {causal_attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
# For int8 compatibility, sometimes the `attn_probs` are in `fp32`
attn_probs = attn_probs.to(value_states.dtype)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Owlv2
| Owlv2Attention |
python | pydata__xarray | xarray/coding/frequencies.py | {
"start": 4004,
"end": 9380
} | class ____: # (pd.tseries.frequencies._FrequencyInferer):
def __init__(self, index):
self.index = index
self.values = index.asi8
if len(index) < 3:
raise ValueError("Need at least 3 dates to infer frequency")
self.is_monotonic = (
self.index.is_monotonic_decreasing or self.index.is_monotonic_increasing
)
self._deltas = None
self._year_deltas = None
self._month_deltas = None
def get_freq(self):
"""Find the appropriate frequency string to describe the inferred frequency of self.index
Adapted from `pandas.tsseries.frequencies._FrequencyInferer.get_freq` for CFTimeIndexes.
Returns
-------
str or None
"""
if not self.is_monotonic or not self.index.is_unique:
return None
delta = self.deltas[0] # Smallest delta
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
# There is no possible intraday frequency with a non-unique delta
# Different from pandas: we don't need to manage DST and business offsets in cftime
elif len(self.deltas) != 1:
return None
if _is_multiple(delta, _ONE_HOUR):
return _maybe_add_count("h", delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
return _maybe_add_count("min", delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
return _maybe_add_count("s", delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
return _maybe_add_count("ms", delta / _ONE_MILLI)
else:
return _maybe_add_count("us", delta / _ONE_MICRO)
def _infer_daily_rule(self):
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.year_deltas[0]
month = _MONTH_ABBREVIATIONS[self.index[0].month]
alias = f"{annual_rule}-{month}"
return _maybe_add_count(alias, nyears)
quartely_rule = self._get_quartely_rule()
if quartely_rule:
nquarters = self.month_deltas[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = _MONTH_ABBREVIATIONS[mod_dict[self.index[0].month % 3]]
alias = f"{quartely_rule}-{month}"
return _maybe_add_count(alias, nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
return _maybe_add_count(monthly_rule, self.month_deltas[0])
if len(self.deltas) == 1:
# Daily as there is no "Weekly" offsets with CFTime
days = self.deltas[0] / _ONE_DAY
return _maybe_add_count("D", days)
# CFTime has no business freq and no "week of month" (WOM)
return None
def _get_annual_rule(self):
if len(self.year_deltas) > 1:
return None
if len(np.unique(self.index.month)) > 1:
return None
return {"cs": "YS", "ce": "YE"}.get(month_anchor_check(self.index))
def _get_quartely_rule(self):
if len(self.month_deltas) > 1:
return None
if self.month_deltas[0] % 3 != 0:
return None
return {"cs": "QS", "ce": "QE"}.get(month_anchor_check(self.index))
def _get_monthly_rule(self):
if len(self.month_deltas) > 1:
return None
return {"cs": "MS", "ce": "ME"}.get(month_anchor_check(self.index))
@property
def deltas(self):
"""Sorted unique timedeltas as microseconds."""
if self._deltas is None:
self._deltas = _unique_deltas(self.values)
return self._deltas
@property
def year_deltas(self):
"""Sorted unique year deltas."""
if self._year_deltas is None:
self._year_deltas = _unique_deltas(self.index.year)
return self._year_deltas
@property
def month_deltas(self):
"""Sorted unique month deltas."""
if self._month_deltas is None:
self._month_deltas = _unique_deltas(self.index.year * 12 + self.index.month)
return self._month_deltas
def _unique_deltas(arr):
"""Sorted unique deltas of numpy array"""
return np.sort(np.unique(np.diff(arr)))
def _is_multiple(us, mult: int):
"""Whether us is a multiple of mult"""
return us % mult == 0
def _maybe_add_count(base: str, count: float):
"""If count is greater than 1, add it to the base offset string"""
if count != 1:
assert count == int(count)
count = int(count)
return f"{count}{base}"
else:
return base
def month_anchor_check(dates):
"""Return the monthly offset string.
Return "cs" if all dates are the first days of the month,
"ce" if all dates are the last day of the month,
None otherwise.
Replicated pandas._libs.tslibs.resolution.month_position_check
but without business offset handling.
"""
calendar_end = True
calendar_start = True
for date in dates:
if calendar_start:
calendar_start &= date.day == 1
if calendar_end:
cal = date.day == date.daysinmonth
calendar_end &= cal
elif not calendar_start:
break
if calendar_end:
return "ce"
elif calendar_start:
return "cs"
else:
return None
| _CFTimeFrequencyInferer |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 79545,
"end": 81191
} | class ____(torch.nn.Module):
def forward(
self,
primals_1: "Sym(s47)", # PlainAOTInput(idx=0)
primals_2: "Sym(s16)", # PlainAOTInput(idx=1)
primals_3: "f32[s47, s16]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=2), attr='a')
primals_4: "f32[s47, s16]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=2), attr='b')
primals_5: "Sym(s47)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=2), idx=0)
primals_6: "Sym(s16)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=2), idx=1)
primals_7: "Sym(s16)", # SubclassStrideAOTInput(base=PlainAOTInput(idx=2), idx=0)
):
mul: "f32[s47, s16]" = torch.ops.aten.mul.Tensor(primals_3, primals_1); primals_3 = None
mul_3: "f32[s47, s16]" = torch.ops.aten.mul.Tensor(primals_4, primals_1); primals_4 = None
return (
mul, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='a')
mul_3, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='b')
primals_5, # SubclassSizeAOTOutput(base=PlainAOTOutput(idx=0), idx=0)
primals_7, # SubclassSizeAOTOutput(base=PlainAOTOutput(idx=0), idx=1)
primals_7, # SubclassStrideAOTOutput(base=PlainAOTOutput(idx=0), idx=0)
primals_1, # SavedForBackwardsAOTOutput(idx=0)
primals_5, # SavedForBackwardsAOTOutput(idx=1)
primals_7, # SavedForBackwardsAOTOutput(idx=2)
)
""", # noqa: B950
)
self.assertExpectedInline(
normalize_gm(bw[0].print_readable(print_output=False, expanded_def=True)),
"""\
| GraphModule |
python | getsentry__sentry | src/sentry/dynamic_sampling/rules/utils.py | {
"start": 582,
"end": 922
} | class ____(TypedDict):
"""
A bias that can be activated, where activated means that the bias is enabled.
"""
id: str
active: bool
# These represent the biases that are applied to user by default as part of the adaptive dynamic sampling
# experience. These can be overridden by the project details endpoint
| ActivatableBias |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/literalString3.py | {
"start": 204,
"end": 564
} | class ____(Generic[T]):
def __init__(self, val: T) -> None: ...
def func1(x: T) -> ClassA[T]:
return ClassA(x)
def func2(x: T_LS | None, default: T_LS) -> ClassA[T_LS]:
if x is None:
x = default
reveal_type(x, expected_text="T_LS@func2")
out = func1(x)
reveal_type(out, expected_text="ClassA[T_LS@func2]")
return out
| ClassA |
python | OmkarPathak__pygorithm | tests/test_data_structure.py | {
"start": 1226,
"end": 2502
} | class ____(unittest.TestCase):
def test_minimum_spanning_tree(self):
"""
test inspired from the example at the following link: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
"""
edges_weighted = [((1, 2), 7), ((2, 3), 8), ((1, 4), 5), ((2, 4), 9),
((2, 5), 7), ((3, 5), 5), ((4, 6), 6), ((5, 6), 8),
((5, 7), 9), ((6, 7), 11), ((4, 5), 15)]
wgraph = graph.WeightedGraph()
for (u, v), weight in edges_weighted:
wgraph.add_edge(u, v, weight)
expected = [((1, 4), 5), ((3, 5), 5), ((4, 6), 6), ((1, 2), 7), ((2, 5), 7), ((5, 7), 9)]
self.assertEqual(wgraph.kruskal_mst(), expected)
def test_minimum_spanning_tree_2(self):
"""
Test inspired by the gif at the left of the page https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
"""
edges_weighted = [((1, 2), 3), ((1, 5), 1), ((2, 5), 4), ((2, 3), 5), ((3, 5), 6), ((3, 4), 2), ((4, 5), 7)]
wgraph = graph.WeightedGraph()
for (u, v), weight in edges_weighted:
wgraph.add_edge(u, v, weight)
expected = [((1, 5), 1), ((3, 4), 2), ((1, 2), 3), ((2, 3), 5)]
self.assertEqual(wgraph.kruskal_mst(), expected)
| TestKruskal |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/circular2.py | {
"start": 303,
"end": 326
} | class ____(D):
pass
| E |
python | encode__django-rest-framework | rest_framework/schemas/inspectors.py | {
"start": 294,
"end": 3623
} | class ____:
"""
Descriptor class on APIView.
Provide subclass for per-view schema generation
"""
# Used in _get_description_section()
header_regex = re.compile('^[a-zA-Z][0-9A-Za-z_]*:')
def __init__(self):
self.instance_schemas = WeakKeyDictionary()
def __get__(self, instance, owner):
"""
Enables `ViewInspector` as a Python _Descriptor_.
This is how `view.schema` knows about `view`.
`__get__` is called when the descriptor is accessed on the owner.
(That will be when view.schema is called in our case.)
`owner` is always the owner class. (An APIView, or subclass for us.)
`instance` is the view instance or `None` if accessed from the class,
rather than an instance.
See: https://docs.python.org/3/howto/descriptor.html for info on
descriptor usage.
"""
if instance in self.instance_schemas:
return self.instance_schemas[instance]
self.view = instance
return self
def __set__(self, instance, other):
self.instance_schemas[instance] = other
if other is not None:
other.view = instance
@property
def view(self):
"""View property."""
assert self._view is not None, (
"Schema generation REQUIRES a view instance. (Hint: you accessed "
"`schema` from the view class rather than an instance.)"
)
return self._view
@view.setter
def view(self, value):
self._view = value
@view.deleter
def view(self):
self._view = None
def get_description(self, path, method):
"""
Determine a path description.
This will be based on the method docstring if one exists,
or else the class docstring.
"""
view = self.view
method_name = getattr(view, 'action', method.lower())
method_func = getattr(view, method_name, None)
method_docstring = method_func.__doc__
if method_func and method_docstring:
# An explicit docstring on the method or action.
return self._get_description_section(view, method.lower(), formatting.dedent(smart_str(method_docstring)))
else:
return self._get_description_section(view, getattr(view, 'action', method.lower()),
view.get_view_description())
def _get_description_section(self, view, header, description):
lines = description.splitlines()
current_section = ''
sections = {'': ''}
for line in lines:
if self.header_regex.match(line):
current_section, separator, lead = line.partition(':')
sections[current_section] = lead.strip()
else:
sections[current_section] += '\n' + line
# TODO: SCHEMA_COERCE_METHOD_NAMES appears here and in `SchemaGenerator.get_keys`
coerce_method_names = api_settings.SCHEMA_COERCE_METHOD_NAMES
if header in sections:
return sections[header].strip()
if header in coerce_method_names:
if coerce_method_names[header] in sections:
return sections[coerce_method_names[header]].strip()
return sections[''].strip()
| ViewInspector |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 12558,
"end": 12691
} | class ____(AdsInsights):
action_breakdowns = ["action_carousel_card_id", "action_carousel_card_name"]
| AdsInsightsActionCarouselCard |
python | spack__spack | lib/spack/spack/provider_index.py | {
"start": 391,
"end": 10190
} | class ____:
#: This is a dict of dicts used for finding providers of particular
#: virtual dependencies. The dict of dicts looks like:
#:
#: { vpkg name :
#: { full vpkg spec : set(packages providing spec) } }
#:
#: Callers can use this to first find which packages provide a vpkg,
#: then find a matching full spec. e.g., in this scenario:
#:
#: { 'mpi' :
#: { mpi@:1.1 : set([mpich]),
#: mpi@:2.3 : set([mpich2@1.9:]) } }
#:
#: Calling providers_for(spec) will find specs that provide a
#: matching implementation of MPI. Derived class need to construct
#: this attribute according to the semantics above.
providers: Dict[str, Dict["spack.spec.Spec", Set["spack.spec.Spec"]]]
def __init__(
self,
repository: "spack.repo.RepoType",
specs: Optional[Iterable["spack.spec.Spec"]] = None,
restrict: bool = False,
):
"""Provider index based on a single mapping of providers.
Args:
specs: if provided, will call update on each
single spec to initialize this provider index.
restrict: "restricts" values to the verbatim input specs; do not
pre-apply package's constraints.
TODO: rename this. It is intended to keep things as broad
TODO: as possible without overly restricting results, so it is
TODO: not the best name.
"""
self.repository = repository
self.restrict = restrict
self.providers = {}
specs = specs or []
for spec in specs:
if isinstance(spec, str):
from spack.spec import Spec
spec = Spec(spec)
if self.repository.is_virtual_safe(spec.name):
continue
self.update(spec)
def providers_for(self, virtual: Union[str, "spack.spec.Spec"]) -> List["spack.spec.Spec"]:
"""Return a list of specs of all packages that provide virtual packages with the supplied
spec.
Args:
virtual: either a Spec or a string name of a virtual package
"""
result: Set["spack.spec.Spec"] = set()
if isinstance(virtual, str):
# In the common case where just a package name is passed, we can avoid running the
# spec parser and intersects, since intersects is always true.
if virtual.isalnum():
if virtual in self.providers:
for p_spec, spec_set in self.providers[virtual].items():
result.update(spec_set)
return list(result)
from spack.spec import Spec
virtual = Spec(virtual)
# Add all the providers that satisfy the vpkg spec.
if virtual.name in self.providers:
for p_spec, spec_set in self.providers[virtual.name].items():
if p_spec.intersects(virtual, deps=False):
result.update(spec_set)
return list(result)
def __contains__(self, name):
return name in self.providers
def __eq__(self, other):
return self.providers == other.providers
def _transform(self, transform_fun, out_mapping_type=dict):
"""Transform this provider index dictionary and return it.
Args:
transform_fun: transform_fun takes a (vpkg, pset) mapping and runs
it on each pair in nested dicts.
out_mapping_type: type to be used internally on the
transformed (vpkg, pset)
Returns:
Transformed mapping
"""
return _transform(self.providers, transform_fun, out_mapping_type)
def __str__(self):
return str(self.providers)
def __repr__(self):
return repr(self.providers)
def update(self, spec: Union[str, "spack.spec.Spec"]) -> None:
"""Update the provider index with additional virtual specs.
Args:
spec: spec potentially providing additional virtual specs
"""
if isinstance(spec, str):
from spack.spec import Spec
spec = Spec(spec)
if not spec.name:
# Empty specs do not have a package
return
msg = "cannot update an index passing the virtual spec '{}'".format(spec.name)
assert not self.repository.is_virtual_safe(spec.name), msg
pkg_cls = self.repository.get_pkg_class(spec.name)
for provider_spec_readonly, provided_specs in pkg_cls.provided.items():
for provided_spec in provided_specs:
# TODO: fix this comment.
# We want satisfaction other than flags
provider_spec = provider_spec_readonly.copy()
provider_spec.compiler_flags = spec.compiler_flags.copy()
if spec.intersects(provider_spec, deps=False):
provided_name = provided_spec.name
provider_map = self.providers.setdefault(provided_name, {})
if provided_spec not in provider_map:
provider_map[provided_spec] = set()
if self.restrict:
provider_set = provider_map[provided_spec]
# If this package existed in the index before,
# need to take the old versions out, as they're
# now more constrained.
old = set([s for s in provider_set if s.name == spec.name])
provider_set.difference_update(old)
# Now add the new version.
provider_set.add(spec)
else:
# Before putting the spec in the map, constrain
# it so that it provides what was asked for.
constrained = spec.copy()
constrained.constrain(provider_spec)
provider_map[provided_spec].add(constrained)
def to_json(self, stream=None):
"""Dump a JSON representation of this object.
Args:
stream: stream where to dump
"""
provider_list = self._transform(
lambda vpkg, pset: [vpkg.to_node_dict(), [p.to_node_dict() for p in pset]], list
)
sjson.dump({"provider_index": {"providers": provider_list}}, stream)
def merge(self, other):
"""Merge another provider index into this one.
Args:
other (ProviderIndex): provider index to be merged
"""
other = other.copy() # defensive copy.
for pkg in other.providers:
if pkg not in self.providers:
self.providers[pkg] = other.providers[pkg]
continue
spdict, opdict = self.providers[pkg], other.providers[pkg]
for provided_spec in opdict:
if provided_spec not in spdict:
spdict[provided_spec] = opdict[provided_spec]
continue
spdict[provided_spec] = spdict[provided_spec].union(opdict[provided_spec])
def remove_provider(self, pkg_name):
"""Remove a provider from the ProviderIndex."""
empty_pkg_dict = []
for pkg, pkg_dict in self.providers.items():
empty_pset = []
for provided, pset in pkg_dict.items():
same_name = set(p for p in pset if p.fullname == pkg_name)
pset.difference_update(same_name)
if not pset:
empty_pset.append(provided)
for provided in empty_pset:
del pkg_dict[provided]
if not pkg_dict:
empty_pkg_dict.append(pkg)
for pkg in empty_pkg_dict:
del self.providers[pkg]
def copy(self):
"""Return a deep copy of this index."""
clone = ProviderIndex(repository=self.repository)
clone.providers = self._transform(lambda vpkg, pset: (vpkg, set((p.copy() for p in pset))))
return clone
@staticmethod
def from_json(stream, repository):
"""Construct a provider index from its JSON representation.
Args:
stream: stream where to read from the JSON data
"""
data = sjson.load(stream)
if not isinstance(data, dict):
raise ProviderIndexError("JSON ProviderIndex data was not a dict.")
if "provider_index" not in data:
raise ProviderIndexError("YAML ProviderIndex does not start with 'provider_index'")
index = ProviderIndex(repository=repository)
providers = data["provider_index"]["providers"]
from spack.spec import SpecfileLatest
index.providers = _transform(
providers,
lambda vpkg, plist: (
SpecfileLatest.from_node_dict(vpkg),
set(SpecfileLatest.from_node_dict(p) for p in plist),
),
)
return index
def _transform(providers, transform_fun, out_mapping_type=dict):
"""Syntactic sugar for transforming a providers dict.
Args:
providers: provider dictionary
transform_fun: transform_fun takes a (vpkg, pset) mapping and runs
it on each pair in nested dicts.
out_mapping_type: type to be used internally on the
transformed (vpkg, pset)
Returns:
Transformed mapping
"""
def mapiter(mappings):
if isinstance(mappings, dict):
return mappings.items()
else:
return iter(mappings)
return dict(
(name, out_mapping_type([transform_fun(vpkg, pset) for vpkg, pset in mapiter(mappings)]))
for name, mappings in providers.items()
)
| ProviderIndex |
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_whoosh_backend.py | {
"start": 48076,
"end": 49809
} | class ____(WhooshTestCase):
fixtures = ["bulk_data.json"]
def setUp(self):
super().setUp()
# Stow.
self.old_ui = connections["whoosh"].get_unified_index()
self.ui = UnifiedIndex()
self.wacsi = WhooshAutocompleteMockModelSearchIndex()
self.ui.build(indexes=[self.wacsi])
self.sb = connections["whoosh"].get_backend()
connections["whoosh"]._index = self.ui
# Stow.
import haystack
self.sb.setup()
self.sqs = SearchQuerySet("whoosh")
# Wipe it clean.
self.sqs.query.backend.clear()
self.wacsi.update(using="whoosh")
def tearDown(self):
connections["whoosh"]._index = self.old_ui
super().tearDown()
def test_autocomplete(self):
autocomplete = self.sqs.autocomplete(text_auto="mod")
self.assertEqual(autocomplete.count(), 5)
self.assertEqual(
[result.pk for result in autocomplete], ["1", "12", "6", "7", "14"]
)
self.assertTrue("mod" in autocomplete[0].text.lower())
self.assertTrue("mod" in autocomplete[1].text.lower())
self.assertTrue("mod" in autocomplete[2].text.lower())
self.assertTrue("mod" in autocomplete[3].text.lower())
self.assertTrue("mod" in autocomplete[4].text.lower())
self.assertEqual(len([result.pk for result in autocomplete]), 5)
def test_edgengram_regression(self):
autocomplete = self.sqs.autocomplete(text_auto="ngm")
self.assertEqual(autocomplete.count(), 0)
def test_extra_whitespace(self):
autocomplete = self.sqs.autocomplete(text_auto="mod ")
self.assertEqual(autocomplete.count(), 5)
| LiveWhooshAutocompleteTestCase |
python | catalyst-team__catalyst | catalyst/callbacks/scheduler.py | {
"start": 6318,
"end": 9202
} | class ____(ABC, ISchedulerCallback):
"""Class interface for all Lr updaters."""
def __init__(self, optimizer_key: str = None):
"""
Args:
optimizer_key: which optimizer key to use
for learning rate scheduling
"""
super().__init__()
self.init_lr = 0
self.optimizer_key = optimizer_key
self.optimizer = None
@abstractmethod
def calc_lr(self) -> float:
"""Interface for calculating learning rate."""
pass
@abstractmethod
def calc_momentum(self) -> float:
"""Interface for calculating momentum"""
pass
@staticmethod
def _update_lr(optimizer: TorchOptimizer, new_lr: float) -> None:
for pg in optimizer.param_groups:
pg["lr"] = new_lr
@staticmethod
def _update_momentum(optimizer: TorchOptimizer, new_momentum: float) -> None:
if "betas" in optimizer.param_groups[0]:
for pg in optimizer.param_groups:
pg["betas"] = (new_momentum, pg["betas"][1])
else:
for pg in optimizer.param_groups:
pg["momentum"] = new_momentum
def _update_optimizer(self, optimizer: TorchOptimizer) -> Tuple[float, float]:
new_lr = self.calc_lr()
if new_lr is not None:
self._update_lr(optimizer, new_lr)
else:
new_lr = optimizer.param_groups[0]["lr"]
new_momentum = self.calc_momentum()
if new_momentum is not None:
self._update_momentum(optimizer, new_momentum)
else:
new_momentum = get_optimizer_momentum(optimizer)
return new_lr, new_momentum
def update_optimizer(self, runner: "IRunner") -> None:
"""Update learning rate and momentum in runner.
Args:
runner: current runner
"""
lr, momentum = self._update_optimizer(optimizer=self.optimizer)
if self.optimizer_key is not None:
runner.batch_metrics[f"lr_{self.optimizer_key}"] = lr
runner.batch_metrics[f"momentum_{self.optimizer_key}"] = momentum
else:
runner.batch_metrics["lr"] = lr
runner.batch_metrics["momentum"] = momentum
def on_experiment_start(self, runner: "IRunner") -> None:
"""Event handler."""
self.optimizer = optimizer = get_attr(
runner, key="optimizer", inner_key=self.optimizer_key
)
self.optimizer = optimizer
self.init_lr = optimizer.param_groups[0]["lr"]
def on_loader_start(self, runner: "IRunner") -> None:
"""Event handler."""
if runner.is_train_loader:
self.update_optimizer(runner=runner)
def on_batch_end(self, runner: "IRunner") -> None:
"""Event handler."""
if runner.is_train_loader:
self.update_optimizer(runner=runner)
| ILRUpdater |
python | allegroai__clearml | clearml/utilities/attrs.py | {
"start": 258,
"end": 816
} | class ____(object):
def __init__(self, *args: Any, **kwargs: Any) -> None:
if any(x in kwargs for x in ("eq", "order")):
raise RuntimeError("Only `cmp` is supported for attr.attrs, not `eq` or `order`")
if Version(attr_version) >= Version("19.2"):
cmp = kwargs.pop("cmp", None)
if cmp is not None:
kwargs["eq"] = kwargs["order"] = cmp
self.args = args
self.kwargs = kwargs
def __call__(self, f: Any) -> Any:
return attr.attrs(*self.args, **self.kwargs)(f)
| attrs |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/serdes/objects/definition_metadata.py | {
"start": 944,
"end": 2000
} | class ____:
assets: list[DgAssetMetadata]
asset_checks: list[DgAssetCheckMetadata]
jobs: list[DgJobMetadata]
resources: list[DgResourceMetadata]
schedules: list[DgScheduleMetadata]
sensors: list[DgSensorMetadata]
def to_dict(self) -> Mapping[str, Sequence[Mapping[str, Any]]]:
return {
"assets": [as_dict(asset) for asset in self.assets],
"asset_checks": [as_dict(asset_check) for asset_check in self.asset_checks],
"jobs": [as_dict(job) for job in self.jobs],
"resources": [as_dict(resource) for resource in self.resources],
"schedules": [as_dict(schedule) for schedule in self.schedules],
"sensors": [as_dict(sensor) for sensor in self.sensors],
}
@property
def is_empty(self) -> bool:
return (
not self.assets
and not self.asset_checks
and not self.jobs
and not self.resources
and not self.schedules
and not self.sensors
)
| DgDefinitionMetadata |
python | pytest-dev__pytest-django | tests/test_db_setup.py | {
"start": 13908,
"end": 14875
} | class ____:
db_settings: ClassVar = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"TEST": {"NAME": ":memory:"},
}
}
def test_sqlite_in_memory_used(self, django_pytester: DjangoPytester) -> None:
pytest.importorskip("xdist")
django_pytester.create_test_module(
"""
import pytest
from django.db import connections
@pytest.mark.django_db
def test_a():
(conn, ) = connections.all()
assert conn.vendor == 'sqlite'
db_name = conn.creation._get_test_db_name()
assert 'file:memorydb' in db_name or db_name == ':memory:'
"""
)
result = django_pytester.runpytest_subprocess("--tb=short", "-vv", "-n1")
assert result.ret == 0
result.stdout.fnmatch_lines(["*PASSED*test_a*"])
| TestSqliteInMemoryWithXdist |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 110610,
"end": 113436
} | class ____:
@make_xp_test_case(signal.correlate)
def test_consistency_correlate_funcs(self, xp):
# Compare np.correlate, signal.correlate, signal.correlate2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
a_xp, b_xp = xp.asarray(a), xp.asarray(b)
np_corr_result = np.correlate(a, b, mode=mode)
assert_almost_equal(signal.correlate(a_xp, b_xp, mode=mode),
xp.asarray(np_corr_result))
# See gh-5897
if mode == 'valid':
np_corr_result = np.correlate(b, a, mode=mode)
assert_almost_equal(signal.correlate(b_xp, a_xp, mode=mode),
xp.asarray(np_corr_result))
@skip_xp_backends(np_only=True)
@make_xp_test_case(signal.correlate2d)
def test_consistency_correlate_funcs_2(self, xp):
# Compare np.correlate, signal.correlate, signal.correlate2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.squeeze(signal.correlate2d([a], [b],
mode=mode)),
signal.correlate(a, b, mode=mode))
# See gh-5897
if mode == 'valid':
assert_almost_equal(np.squeeze(signal.correlate2d([b], [a],
mode=mode)),
signal.correlate(b, a, mode=mode))
@skip_xp_backends(np_only=True)
@make_xp_test_case(signal.correlate2d)
def test_invalid_shapes(self, xp):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'})
@make_xp_test_case(signal.correlate2d)
def test_complex_input(self, xp):
xp_assert_equal(signal.correlate2d(xp.asarray([[1]]), xp.asarray([[2j]])),
xp.asarray([-2j]), check_shape=False, check_dtype=False)
xp_assert_equal(signal.correlate2d(xp.asarray([[2j]]), xp.asarray([[3j]])),
xp.asarray([6+0j]), check_shape=False, check_dtype=False)
xp_assert_equal(signal.correlate2d(xp.asarray([[3j]]), xp.asarray([[4]])),
xp.asarray([12j]), check_shape=False, check_dtype=False)
@make_xp_test_case(lfilter_zi)
| TestCorrelate2d |
python | PrefectHQ__prefect | src/prefect/client/schemas/sorting.py | {
"start": 478,
"end": 828
} | class ____(AutoEnum):
"""Defines task run sorting options."""
ID_DESC = AutoEnum.auto()
EXPECTED_START_TIME_ASC = AutoEnum.auto()
EXPECTED_START_TIME_DESC = AutoEnum.auto()
NAME_ASC = AutoEnum.auto()
NAME_DESC = AutoEnum.auto()
NEXT_SCHEDULED_START_TIME_ASC = AutoEnum.auto()
END_TIME_DESC = AutoEnum.auto()
| TaskRunSort |
python | kubernetes-client__python | kubernetes/client/models/v1_counter_set.py | {
"start": 383,
"end": 4878
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'counters': 'dict(str, V1Counter)',
'name': 'str'
}
attribute_map = {
'counters': 'counters',
'name': 'name'
}
def __init__(self, counters=None, name=None, local_vars_configuration=None): # noqa: E501
"""V1CounterSet - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._counters = None
self._name = None
self.discriminator = None
self.counters = counters
self.name = name
@property
def counters(self):
"""Gets the counters of this V1CounterSet. # noqa: E501
Counters defines the set of counters for this CounterSet The name of each counter must be unique in that set and must be a DNS label. The maximum number of counters in all sets is 32. # noqa: E501
:return: The counters of this V1CounterSet. # noqa: E501
:rtype: dict(str, V1Counter)
"""
return self._counters
@counters.setter
def counters(self, counters):
"""Sets the counters of this V1CounterSet.
Counters defines the set of counters for this CounterSet The name of each counter must be unique in that set and must be a DNS label. The maximum number of counters in all sets is 32. # noqa: E501
:param counters: The counters of this V1CounterSet. # noqa: E501
:type: dict(str, V1Counter)
"""
if self.local_vars_configuration.client_side_validation and counters is None: # noqa: E501
raise ValueError("Invalid value for `counters`, must not be `None`") # noqa: E501
self._counters = counters
@property
def name(self):
"""Gets the name of this V1CounterSet. # noqa: E501
Name defines the name of the counter set. It must be a DNS label. # noqa: E501
:return: The name of this V1CounterSet. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1CounterSet.
Name defines the name of the counter set. It must be a DNS label. # noqa: E501
:param name: The name of this V1CounterSet. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CounterSet):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CounterSet):
return True
return self.to_dict() != other.to_dict()
| V1CounterSet |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 19941,
"end": 20046
} | class ____(ApeException):
"""
An error raised whilst managing a subprocess.
"""
| SubprocessError |
python | doocs__leetcode | solution/0700-0799/0773.Sliding Puzzle/Solution.py | {
"start": 0,
"end": 1395
} | class ____:
def slidingPuzzle(self, board: List[List[int]]) -> int:
t = [None] * 6
def gets():
for i in range(2):
for j in range(3):
t[i * 3 + j] = str(board[i][j])
return ''.join(t)
def setb(s):
for i in range(2):
for j in range(3):
board[i][j] = int(s[i * 3 + j])
def f():
res = []
i, j = next((i, j) for i in range(2) for j in range(3) if board[i][j] == 0)
for a, b in [[0, -1], [0, 1], [1, 0], [-1, 0]]:
x, y = i + a, j + b
if 0 <= x < 2 and 0 <= y < 3:
board[i][j], board[x][y] = board[x][y], board[i][j]
res.append(gets())
board[i][j], board[x][y] = board[x][y], board[i][j]
return res
start = gets()
end = "123450"
if start == end:
return 0
vis = {start}
q = deque([start])
ans = 0
while q:
ans += 1
for _ in range(len(q)):
x = q.popleft()
setb(x)
for y in f():
if y == end:
return ans
if y not in vis:
vis.add(y)
q.append(y)
return -1
| Solution |
python | ZoranPandovski__al-go-rithms | data_structures/hash_table/python/heaps.py | {
"start": 0,
"end": 1873
} | class ____:
def __init__(self):
self.size = 11
self.slots = [None] * self.size
self.data = [None] * self.size
def put(self,key,data):
hashvalue = self.hashfunction(key,len(self.slots))
if self.slots[hashvalue] == None:
self.slots[hashvalue] = key
self.data[hashvalue] = data
else:
if self.slots[hashvalue] == key:
self.data[hashvalue] = data #replace
else:
nextslot = self.rehash(hashvalue,len(self.slots))
while self.slots[nextslot] != None and \
self.slots[nextslot] != key:
nextslot = self.rehash(nextslot,len(self.slots))
if self.slots[nextslot] == None:
self.slots[nextslot]=key
self.data[nextslot]=data
else:
self.data[nextslot] = data #replace
def hashfunction(self,key,size):
return key%size
def rehash(self,oldhash,size):
return (oldhash+1)%size
def get(self,key):
startslot = self.hashfunction(key,len(self.slots))
data = None
stop = False
found = False
position = startslot
while self.slots[position] != None and \
not found and not stop:
if self.slots[position] == key:
found = True
data = self.data[position]
else:
position=self.rehash(position,len(self.slots))
if position == startslot:
stop = True
return data
def __getitem__(self,key):
return self.get(key)
def __setitem__(self,key,data):
self.put(key,data)
H=HashTable()
H[54]="cat"
H[26]="dog"
H[93]="lion"
H[17]="tiger"
H[77]="bird"
H[31]="cow"
H[44]="goat"
H[55]="pig"
H[20]="chicken"
print(H.slots)
print(H.data)
print(H[20])
print(H[17])
H[20]='duck'
print(H[20])
print(H[99])
| HashTable |
python | getsentry__sentry | src/sentry/replays/usecases/ingest/__init__.py | {
"start": 3882,
"end": 14002
} | class ____:
actions_event: ParsedEventMeta | None
context: EventContext
filedata: bytes
filename: str
recording_size_uncompressed: int
recording_size: int
replay_event: dict[str, Any] | None
trace_items: list[TraceItem]
video_size: int | None
@sentry_sdk.trace
def process_recording_event(
message: Event, use_new_recording_parser: bool = False
) -> ProcessedEvent:
parsed_output = parse_replay_events(message, use_new_recording_parser)
if parsed_output:
replay_events, trace_items = parsed_output
else:
replay_events = None
trace_items = []
filename = _make_recording_filename(
project_id=message["context"]["project_id"],
replay_id=message["context"]["replay_id"],
segment_id=message["context"]["segment_id"],
retention_days=message["context"]["retention_days"],
)
if message["replay_video"]:
filedata = pack_replay_video(message["payload"], message["replay_video"])
video_size = len(message["replay_video"])
else:
filedata = message["payload_compressed"]
video_size = None
return ProcessedEvent(
actions_event=replay_events,
context=message["context"],
filedata=filedata,
filename=filename,
recording_size_uncompressed=len(message["payload"]),
recording_size=len(message["payload_compressed"]),
replay_event=message["replay_event"],
trace_items=trace_items,
video_size=video_size,
)
def parse_replay_events(message: Event, use_new_recording_parser: bool):
try:
if use_new_recording_parser:
events = parse_recording_data(message["payload"])
else:
events = json.loads(message["payload"])
return parse_events(
{
"organization_id": message["context"]["org_id"],
"project_id": message["context"]["project_id"],
"received": message["context"]["received"],
"replay_id": message["context"]["replay_id"],
"retention_days": message["context"]["retention_days"],
"segment_id": message["context"]["segment_id"],
"trace_id": extract_trace_id(message["replay_event"]),
},
events,
)
except Exception:
logger.exception(
"Failed to parse recording org=%s, project=%s, replay=%s, segment=%s",
message["context"]["org_id"],
message["context"]["project_id"],
message["context"]["replay_id"],
message["context"]["segment_id"],
)
return None
def extract_trace_id(replay_event: dict[str, Any] | None) -> str | None:
"""Return the trace-id if only one trace-id was provided."""
try:
if replay_event:
trace_ids = replay_event.get("trace_ids", [])
return str(trace_ids[0]) if trace_ids and len(trace_ids) == 1 else None
except Exception:
pass
return None
@sentry_sdk.trace
def pack_replay_video(recording: bytes, video: bytes):
return zlib.compress(pack(rrweb=recording, video=video))
@sentry_sdk.trace
def commit_recording_message(recording: ProcessedEvent, context: ProcessorContext) -> None:
# Write to GCS.
storage_kv.set(recording.filename, recording.filedata)
# Write to billing consumer if its a billable event.
if recording.context["segment_id"] == 0:
if context["has_sent_replays_cache"] is not None:
_track_initial_segment_event_new(
recording.context["org_id"],
recording.context["project_id"],
recording.context["replay_id"],
recording.context["key_id"],
recording.context["received"],
context["has_sent_replays_cache"],
)
else:
_track_initial_segment_event_old(
recording.context["org_id"],
recording.context["project_id"],
recording.context["replay_id"],
recording.context["key_id"],
recording.context["received"],
)
metrics.incr(
"replays.should_publish_replay_event",
tags={"value": recording.context["should_publish_replay_event"]},
)
if recording.context["should_publish_replay_event"] and recording.replay_event:
replay_event_kafka_message = {
"start_time": recording.context["received"],
"replay_id": recording.context["replay_id"],
"project_id": recording.context["project_id"],
"retention_days": recording.context["retention_days"],
"payload": recording.replay_event,
}
publish_replay_event(json.dumps(replay_event_kafka_message))
# Write to replay-event consumer.
if recording.actions_event:
emit_replay_events(
recording.actions_event,
recording.context["org_id"],
recording.context["project_id"],
recording.context["replay_id"],
recording.context["retention_days"],
recording.replay_event,
context,
)
emit_trace_items_to_eap(recording.trace_items)
@sentry_sdk.trace
def emit_replay_events(
event_meta: ParsedEventMeta,
org_id: int,
project_id: int,
replay_id: str,
retention_days: int,
replay_event: dict[str, Any] | None,
context: ProcessorContext,
) -> None:
environment = replay_event.get("environment") if replay_event else None
emit_click_events(
event_meta.click_events,
project_id,
replay_id,
retention_days,
start_time=time.time(),
environment=environment,
)
emit_tap_events(
event_meta.tap_events,
project_id,
replay_id,
retention_days,
start_time=time.time(),
environment=environment,
)
emit_request_response_metrics(event_meta)
log_canvas_size(event_meta, org_id, project_id, replay_id)
log_mutation_events(event_meta, project_id, replay_id)
log_option_events(event_meta, project_id, replay_id)
log_multiclick_events(event_meta, project_id, replay_id)
log_rage_click_events(event_meta, project_id, replay_id)
report_hydration_error(event_meta, project_id, replay_id, replay_event, context)
report_rage_click(event_meta, project_id, replay_id, replay_event, context)
def _track_initial_segment_event_old(
org_id: int,
project_id: int,
replay_id,
key_id: int | None,
received: int,
) -> None:
try:
# I have to do this because of looker and amplitude statistics. This could be
# replaced with a simple update statement on the model...
project = Project.objects.get_from_cache(id=project_id)
assert isinstance(project, Project)
except Project.DoesNotExist as exc:
logger.warning(
"Recording segment was received for a project that does not exist.",
extra={
"project_id": project_id,
"replay_id": replay_id,
},
)
raise DropEvent("Could not find project.") from exc
set_project_flag_and_signal(project, "has_replays", first_replay_received)
track_outcome(
org_id=org_id,
project_id=project.id,
key_id=key_id,
outcome=Outcome.ACCEPTED,
reason=None,
timestamp=datetime.fromtimestamp(received, timezone.utc),
event_id=replay_id,
category=DataCategory.REPLAY,
quantity=1,
)
def _track_initial_segment_event_new(
org_id: int,
project_id: int,
replay_id,
key_id: int | None,
received: int,
has_seen_replays: AutoCache[int, bool],
) -> None:
# We'll skip querying for projects if we've seen the project before or the project has already
# recorded its first replay.
if has_seen_replays[project_id] is False:
try:
# I have to do this because of looker and amplitude statistics. This could be
# replaced with a simple update statement on the model...
project = Project.objects.get(id=project_id)
assert isinstance(project, Project)
except Project.DoesNotExist as exc:
logger.warning(
"Recording segment was received for a project that does not exist.",
extra={
"project_id": project_id,
"replay_id": replay_id,
},
)
raise DropEvent("Could not find project.") from exc
set_project_flag_and_signal(project, "has_replays", first_replay_received)
# We've set the has_replays flag and can cache it.
has_seen_replays[project_id] = True
track_outcome(
org_id=org_id,
project_id=project_id,
key_id=key_id,
outcome=Outcome.ACCEPTED,
reason=None,
timestamp=datetime.fromtimestamp(received, timezone.utc),
event_id=replay_id,
category=DataCategory.REPLAY,
quantity=1,
)
@sentry_sdk.trace
def track_recording_metadata(recording: ProcessedEvent) -> None:
# Report size metrics to determine usage patterns.
metrics.distribution(
"replays.usecases.ingest.size_compressed", recording.recording_size, unit="byte"
)
metrics.distribution(
"replays.usecases.ingest.size_uncompressed",
recording.recording_size_uncompressed,
unit="byte",
)
if recording.video_size:
# Track the number of replay-video events we receive.
metrics.incr("replays.recording_consumer.replay_video_count")
# Record video size for COGS analysis.
metrics.distribution(
"replays.recording_consumer.replay_video_size",
recording.video_size,
unit="byte",
)
# Track combined payload size for COGs analysis.
metrics.distribution(
"replays.recording_consumer.replay_video_event_size",
len(recording.filedata),
unit="byte",
)
| ProcessedEvent |
python | getsentry__sentry | src/sentry/sentry_apps/api/bases/sentryapps.py | {
"start": 13732,
"end": 15942
} | class ____(SentryPermission):
scope_map = {
"GET": ("org:read", "org:integrations", "org:write", "org:admin"),
"DELETE": ("org:integrations", "org:write", "org:admin"),
# NOTE(mn): The only POST endpoint right now is to create External
# Issues, which uses this baseclass since it's nested under an
# installation.
#
# The scopes below really only make sense for that endpoint. Any other
# nested endpoints will probably need different scopes - figure out how
# to deal with that when it happens.
"POST": ("org:integrations", "event:write", "event:admin"),
}
def has_permission(self, request: Request, *args, **kwargs) -> bool:
# To let the app mark the installation as installed, we don't care about permissions
if request.user.is_authenticated and request.user.is_sentry_app and request.method == "PUT":
return True
return super().has_permission(request, *args, **kwargs)
def has_object_permission(self, request: Request, view, installation):
if not hasattr(request, "user") or not request.user or not request.user.is_authenticated:
return False
self.determine_access(request, installation.organization_id)
if superuser_has_permission(request):
return True
# if user is an app, make sure it's for that same app
if request.user.is_sentry_app:
return request.user.id == installation.sentry_app.proxy_user_id
org_context = organization_service.get_organization_by_id(
id=installation.organization_id,
user_id=request.user.id,
include_teams=False,
include_projects=False,
)
if (
not org_context
or not org_context.member
or org_context.organization.status != OrganizationStatus.ACTIVE
):
raise SentryAppError(message="Given organization is not valid", status_code=404)
assert request.method, "method must be present in request to get permissions"
return ensure_scoped_permission(request, self.scope_map.get(request.method))
| SentryAppInstallationPermission |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/tryExcept5.py | {
"start": 124,
"end": 612
} | class ____:
async def do_stuff(self):
try:
while True:
await asyncio.sleep(1)
my_var = 3
finally:
# This should generate an error because
# my_var may be unbound at this point.
print(my_var)
self.cleanup()
def cleanup(self):
pass
async def main():
c = asyncio.create_task(MyJob().do_stuff())
await asyncio.sleep(5)
c.cancel()
asyncio.run(main())
| MyJob |
python | tensorflow__tensorflow | tensorflow/compiler/tests/fifo_queue_test.py | {
"start": 961,
"end": 6865
} | class ____(xla_test.XLATestCase):
def testEnqueue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, self.evaluate(q.size()))
def testMultipleDequeues(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue([1]))
self.evaluate(q.enqueue([2]))
self.evaluate(q.enqueue([3]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
def testQueuesDontShare(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegex(ValueError, "must have names"):
q.enqueue({"a": 12.0})
def testParallelEnqueue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in range(len(elems)):
results.append(self.evaluate(dequeued_t))
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in range(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in range(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in range(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], self.evaluate(q.size()))
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
if __name__ == "__main__":
test.main()
| FIFOQueueTest |
python | tensorflow__tensorflow | tensorflow/python/util/variable_utils_test.py | {
"start": 1251,
"end": 1407
} | class ____(composite_tensor.CompositeTensor):
"""A generic CompositeTensor, used for constructing tests."""
@property
def _type_spec(self):
pass
| CT |
python | sympy__sympy | sympy/codegen/rewriting.py | {
"start": 2341,
"end": 5342
} | class ____(Optimization):
""" Rewriting optimization calling replace on expressions.
Explanation
===========
The instance can be used as a function on expressions for which
it will apply the ``replace`` method (see
:meth:`sympy.core.basic.Basic.replace`).
Parameters
==========
query :
First argument passed to replace.
value :
Second argument passed to replace.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.rewriting import ReplaceOptim
>>> from sympy.codegen.cfunctions import exp2
>>> x = Symbol('x')
>>> exp2_opt = ReplaceOptim(lambda p: p.is_Pow and p.base == 2,
... lambda p: exp2(p.exp))
>>> exp2_opt(2**x)
exp2(x)
"""
def __init__(self, query, value, **kwargs):
super().__init__(**kwargs)
self.query = query
self.value = value
def __call__(self, expr):
return expr.replace(self.query, self.value)
def optimize(expr, optimizations):
""" Apply optimizations to an expression.
Parameters
==========
expr : expression
optimizations : iterable of ``Optimization`` instances
The optimizations will be sorted with respect to ``priority`` (highest first).
Examples
========
>>> from sympy import log, Symbol
>>> from sympy.codegen.rewriting import optims_c99, optimize
>>> x = Symbol('x')
>>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99)
log1p(x**2) + log2(x + 3)
"""
for optim in sorted(optimizations, key=lambda opt: opt.priority, reverse=True):
new_expr = optim(expr)
if optim.cost_function is None:
expr = new_expr
else:
expr = optim.cheapest(expr, new_expr)
return expr
exp2_opt = ReplaceOptim(
lambda p: p.is_Pow and p.base == 2,
lambda p: exp2(p.exp)
)
_d = Wild('d', properties=[lambda x: x.is_Dummy])
_u = Wild('u', properties=[lambda x: not x.is_number and not x.is_Add])
_v = Wild('v')
_w = Wild('w')
_n = Wild('n', properties=[lambda x: x.is_number])
sinc_opt1 = ReplaceOptim(
sin(_w)/_w, sinc(_w)
)
sinc_opt2 = ReplaceOptim(
sin(_n*_w)/_w, _n*sinc(_n*_w)
)
sinc_opts = (sinc_opt1, sinc_opt2)
log2_opt = ReplaceOptim(_v*log(_w)/log(2), _v*log2(_w), cost_function=lambda expr: expr.count(
lambda e: ( # division & eval of transcendentals are expensive floating point operations...
e.is_Pow and e.exp.is_negative # division
or (isinstance(e, (log, log2)) and not e.args[0].is_number)) # transcendental
)
)
log2const_opt = ReplaceOptim(log(2)*log2(_w), log(_w))
logsumexp_2terms_opt = ReplaceOptim(
lambda l: (isinstance(l, log)
and l.args[0].is_Add
and len(l.args[0].args) == 2
and all(isinstance(t, exp) for t in l.args[0].args)),
lambda l: (
Max(*[e.args[0] for e in l.args[0].args]) +
log1p(exp(Min(*[e.args[0] for e in l.args[0].args])))
)
)
| ReplaceOptim |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.