language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/smolvlm/test_image_processing_smolvlm.py | {
"start": 6469,
"end": 18145
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = SmolVLMImageProcessor if is_vision_available() else None
fast_image_processing_class = SmolVLMImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = SmolVLMImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "resample"))
self.assertTrue(hasattr(image_processing, "do_image_splitting"))
self.assertTrue(hasattr(image_processing, "max_image_size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "do_image_splitting"))
def test_call_numpy(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for sample_images in image_inputs:
for image in sample_images:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_numpy_4_channels(self):
# SmolVLM always processes images as RGB, so it always returns images with 3 channels
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processor_dict = self.image_processor_dict
image_processing = image_processing_class(**image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for sample_images in image_inputs:
for image in sample_images:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_pil(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for images in image_inputs:
for image in images:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_pytorch(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for images in image_inputs:
for image in images:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(self.image_processor_tester.batch_size, *expected_output_image_shape),
)
@require_vision
@require_torch
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
dummy_image = dummy_image.resize((100, 150))
image_processor_slow = self.image_processing_class(
**self.image_processor_dict, resample=PILImageResampling.BICUBIC
)
image_processor_fast = self.fast_image_processing_class(
**self.image_processor_dict, resample=PILImageResampling.BICUBIC
)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt", return_row_col_info=True)
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt", return_row_col_info=True)
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(
encoding_slow.pixel_attention_mask.float(), encoding_fast.pixel_attention_mask.float()
)
self.assertEqual(encoding_slow.rows, encoding_fast.rows)
self.assertEqual(encoding_slow.cols, encoding_fast.cols)
@require_vision
@require_torch
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, num_images=5, torchify=True
)
# pop some images to have non homogenous batches:
indices_to_pop = [i if np.random.random() < 0.5 else None for i in range(len(dummy_images))]
for i in indices_to_pop:
if i is not None:
dummy_images[i].pop()
image_processor_slow = self.image_processing_class(
**self.image_processor_dict, resample=PILImageResampling.BICUBIC
)
image_processor_fast = self.fast_image_processing_class(
**self.image_processor_dict, resample=PILImageResampling.BICUBIC
)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt", return_row_col_info=True)
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt", return_row_col_info=True)
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=3e-1)
self._assert_slow_fast_tensors_equivalence(
encoding_slow.pixel_attention_mask.float(), encoding_fast.pixel_attention_mask.float()
)
self.assertEqual(encoding_slow.rows, encoding_fast.rows)
self.assertEqual(encoding_slow.cols, encoding_fast.cols)
def test_get_num_patches_without_images(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
num_patches_and_row_cols = image_processing.get_number_of_image_patches(
height=100, width=100, images_kwargs={}
)
self.assertEqual(num_patches_and_row_cols, (5, 2, 2))
num_patches_and_row_cols = image_processing.get_number_of_image_patches(
height=300, width=500, images_kwargs={"do_image_splitting": False}
)
self.assertEqual(num_patches_and_row_cols, (1, 1, 1))
num_patches_and_row_cols = image_processing.get_number_of_image_patches(
height=300, width=500, images_kwargs={"do_image_splitting": True}
)
self.assertEqual(num_patches_and_row_cols, (5, 2, 2))
num_patches_and_row_cols = image_processing.get_number_of_image_patches(
height=300,
width=600,
images_kwargs={"do_image_splitting": True, "max_image_size": {"longest_edge": 30}},
)
self.assertEqual(num_patches_and_row_cols, (3, 1, 2))
| SmolVLMImageProcessingTest |
python | nedbat__coveragepy | coverage/debug.py | {
"start": 13888,
"end": 21753
} | class ____:
"""A file-like object that includes pid and cwd information."""
def __init__(
self,
outfile: IO[str] | None,
filters: Iterable[Callable[[str], str]],
):
self.outfile = outfile
self.filters = list(filters)
self.pid = os.getpid()
@classmethod
def get_one(
cls,
fileobj: IO[str] | None = None,
file_name: str | None = None,
filters: Iterable[Callable[[str], str]] = (),
interim: bool = False,
) -> DebugOutputFile:
"""Get a DebugOutputFile.
If `fileobj` is provided, then a new DebugOutputFile is made with it.
If `fileobj` isn't provided, then a file is chosen (`file_name` if
provided, or COVERAGE_DEBUG_FILE, or stderr), and a process-wide
singleton DebugOutputFile is made.
`filters` are the text filters to apply to the stream to annotate with
pids, etc.
If `interim` is true, then a future `get_one` can replace this one.
"""
if fileobj is not None:
# Make DebugOutputFile around the fileobj passed.
return cls(fileobj, filters)
the_one, is_interim = cls._get_singleton_data()
if the_one is None or is_interim:
if file_name is not None:
fileobj = open(file_name, "a", encoding="utf-8")
else:
# $set_env.py: COVERAGE_DEBUG_FILE - Where to write debug output
file_name = os.getenv("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
if file_name in ["stdout", "stderr"]:
fileobj = getattr(sys, file_name)
elif file_name:
fileobj = open(file_name, "a", encoding="utf-8")
atexit.register(fileobj.close)
else:
fileobj = sys.stderr
the_one = cls(fileobj, filters)
cls._set_singleton_data(the_one, interim)
if not (the_one.filters):
the_one.filters = list(filters)
return the_one
# Because of the way igor.py deletes and re-imports modules,
# this class can be defined more than once. But we really want
# a process-wide singleton. So stash it in sys.modules instead of
# on a class attribute. Yes, this is aggressively gross.
SYS_MOD_NAME: Final[str] = "$coverage.debug.DebugOutputFile.the_one"
SINGLETON_ATTR: Final[str] = "the_one_and_is_interim"
@classmethod
def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None:
"""Set the one DebugOutputFile to rule them all."""
singleton_module = types.ModuleType(cls.SYS_MOD_NAME)
setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim))
sys.modules[cls.SYS_MOD_NAME] = singleton_module
@classmethod
def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]:
"""Get the one DebugOutputFile."""
singleton_module = sys.modules.get(cls.SYS_MOD_NAME)
return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True))
@classmethod
def _del_singleton_data(cls) -> None:
"""Delete the one DebugOutputFile, just for tests to use."""
if cls.SYS_MOD_NAME in sys.modules:
del sys.modules[cls.SYS_MOD_NAME]
def write(self, text: str) -> None:
"""Just like file.write, but filter through all our filters."""
assert self.outfile is not None
if not self.outfile.closed:
self.outfile.write(filter_text(text, self.filters))
self.outfile.flush()
def flush(self) -> None:
"""Flush our file."""
assert self.outfile is not None
if not self.outfile.closed:
self.outfile.flush()
def log(msg: str, stack: bool = False) -> None: # pragma: debugging
"""Write a log message as forcefully as possible."""
out = DebugOutputFile.get_one(interim=True)
out.write(msg + "\n")
if stack:
dump_stack_frames(out=out, skip=1)
def decorate_methods(
decorator: Callable[..., Any],
butnot: Iterable[str] = (),
private: bool = False,
) -> Callable[..., Any]: # pragma: debugging
"""A class decorator to apply a decorator to methods."""
def _decorator(cls): # type: ignore[no-untyped-def]
for name, meth in inspect.getmembers(cls, inspect.isroutine):
if name not in cls.__dict__:
continue
if name != "__init__":
if not private and name.startswith("_"):
continue
if name in butnot:
continue
setattr(cls, name, decorator(meth))
return cls
return _decorator
def break_in_pudb(func: AnyCallable) -> AnyCallable: # pragma: debugging
"""A function decorator to stop in the debugger for each call."""
@functools.wraps(func)
def _wrapper(*args: Any, **kwargs: Any) -> Any:
import pudb
sys.stdout = sys.__stdout__
pudb.set_trace()
return func(*args, **kwargs)
return _wrapper
OBJ_IDS = itertools.count()
CALLS = itertools.count()
OBJ_ID_ATTR = "$coverage.object_id"
def show_calls(
show_args: bool = True,
show_stack: bool = False,
show_return: bool = False,
) -> Callable[..., Any]: # pragma: debugging
"""A method decorator to debug-log each call to the function."""
def _decorator(func: AnyCallable) -> AnyCallable:
@functools.wraps(func)
def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
oid = getattr(self, OBJ_ID_ATTR, None)
if oid is None:
oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}"
setattr(self, OBJ_ID_ATTR, oid)
extra = ""
if show_args:
eargs = ", ".join(map(repr, args))
ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items())
extra += "("
extra += eargs
if eargs and ekwargs:
extra += ", "
extra += ekwargs
extra += ")"
if show_stack:
extra += " @ "
extra += "; ".join(short_stack(short_filenames=True).splitlines())
callid = next(CALLS)
msg = f"{oid} {callid:04d} {func.__name__}{extra}\n"
DebugOutputFile.get_one(interim=True).write(msg)
ret = func(self, *args, **kwargs)
if show_return:
msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n"
DebugOutputFile.get_one(interim=True).write(msg)
return ret
return _wrapper
return _decorator
def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]:
"""Filter environment variables for a debug display.
Select variables to display (with COV or PY in the name, or HOME, TEMP, or
TMP), and also cloak sensitive values with asterisks.
Arguments:
env: a dict of environment variable names and values.
Returns:
A list of pairs (name, value) to show.
"""
SLUGS = {"COV", "PY"}
INCLUDE = {"HOME", "TEMP", "TMP"}
CLOAK = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"}
TRUNCATE = {"COVERAGE_PROCESS_CONFIG"}
TRUNCATE_LEN = 60
to_show = []
for name, val in env.items():
show = False
if name in INCLUDE:
show = True
elif any(slug in name for slug in SLUGS):
show = True
if show:
if any(slug in name for slug in CLOAK):
val = re.sub(r"\w", "*", val)
if name in TRUNCATE:
if len(val) > TRUNCATE_LEN:
val = val[: TRUNCATE_LEN - 3] + "..."
to_show.append((name, val))
return human_sorted_items(to_show)
| DebugOutputFile |
python | ray-project__ray | python/ray/train/tests/test_xgboost_trainer.py | {
"start": 1486,
"end": 6991
} | class ____(XGBoostTrainer):
def training_loop(self) -> None:
pgf = train.get_context().get_trial_resources()
assert pgf.strategy == "SPREAD"
return super().training_loop()
def test_fit_with_advanced_scaling_config(ray_start_4_cpus):
"""Ensure that extra ScalingConfig arguments are respected."""
train_dataset = ray.data.from_pandas(train_df)
valid_dataset = ray.data.from_pandas(test_df)
trainer = ScalingConfigAssertingXGBoostTrainer(
scaling_config=ScalingConfig(
num_workers=2,
placement_strategy="SPREAD",
),
label_column="target",
params=params,
datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset},
)
trainer.fit()
def test_resume_from_checkpoint(ray_start_4_cpus, tmpdir):
train_dataset = ray.data.from_pandas(train_df)
valid_dataset = ray.data.from_pandas(test_df)
trainer = XGBoostTrainer(
scaling_config=scale_config,
label_column="target",
params=params,
num_boost_round=5,
datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset},
)
result = trainer.fit()
checkpoint = result.checkpoint
xgb_model = XGBoostTrainer.get_model(checkpoint)
assert xgb_model.num_boosted_rounds() == 5
trainer = XGBoostTrainer(
scaling_config=scale_config,
label_column="target",
params=params,
num_boost_round=10,
datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset},
resume_from_checkpoint=result.checkpoint,
)
result = trainer.fit()
model = XGBoostTrainer.get_model(result.checkpoint)
assert model.num_boosted_rounds() == 10
@pytest.mark.parametrize(
"freq_end_expected",
[
# With num_boost_round=25 with 0 indexing, the checkpoints will be at:
(4, True, 7), # 3, 7, 11, 15, 19, 23, 24 (end)
(4, False, 6), # 3, 7, 11, 15, 19, 23
(5, True, 5), # 4, 9, 14, 19, 24
(0, True, 1), # 24 (end)
(0, False, 0),
],
)
def test_checkpoint_freq(ray_start_4_cpus, freq_end_expected):
freq, end, expected = freq_end_expected
train_dataset = ray.data.from_pandas(train_df)
valid_dataset = ray.data.from_pandas(test_df)
trainer = XGBoostTrainer(
run_config=ray.train.RunConfig(
checkpoint_config=ray.train.CheckpointConfig(
checkpoint_frequency=freq, checkpoint_at_end=end
)
),
scaling_config=scale_config,
label_column="target",
params=params,
num_boost_round=25,
datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset},
)
result = trainer.fit()
# Assert number of checkpoints
assert len(result.best_checkpoints) == expected, str(
[(metrics["training_iteration"], cp) for cp, metrics in result.best_checkpoints]
)
# Assert checkpoint numbers are increasing
cp_paths = [cp.path for cp, _ in result.best_checkpoints]
assert cp_paths == sorted(cp_paths), str(cp_paths)
@pytest.mark.parametrize("rank", [None, 0, 1])
def test_checkpoint_only_on_rank0(rank):
"""Tests that the callback only reports checkpoints on rank 0,
or if the rank is not available (Tune usage)."""
callback = RayTrainReportCallback(frequency=2, checkpoint_at_end=True)
booster = mock.MagicMock()
with mock.patch("ray.train.get_context") as mock_get_context:
mock_context = mock.MagicMock()
mock_context.get_world_rank.return_value = rank
mock_get_context.return_value = mock_context
with callback._get_checkpoint(booster) as checkpoint:
if rank in (0, None):
assert checkpoint
else:
assert not checkpoint
def test_tune(ray_start_8_cpus):
train_dataset = ray.data.from_pandas(train_df)
valid_dataset = ray.data.from_pandas(test_df)
trainer = XGBoostTrainer(
scaling_config=scale_config,
label_column="target",
params={**params, "max_depth": 1},
datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset},
)
tuner = tune.Tuner(
trainer,
param_space={"params": {"max_depth": tune.grid_search([2, 4])}},
)
results = tuner.fit()
assert sorted([r.config["params"]["max_depth"] for r in results]) == [2, 4]
def test_validation(ray_start_4_cpus):
valid_dataset = ray.data.from_pandas(test_df)
with pytest.raises(ValueError, match=TRAIN_DATASET_KEY):
XGBoostTrainer(
scaling_config=ScalingConfig(num_workers=2),
label_column="target",
params=params,
datasets={"valid": valid_dataset},
)
with pytest.raises(ValueError, match="label_column"):
XGBoostTrainer(
scaling_config=ScalingConfig(num_workers=2),
datasets={"train": valid_dataset},
)
def test_callback_get_model(tmp_path):
custom_filename = "custom.json"
bst = xgb.train(
params,
dtrain=xgb.DMatrix(train_df, label=train_df["target"]),
num_boost_round=1,
)
bst.save_model(tmp_path.joinpath(custom_filename).as_posix())
checkpoint = train.Checkpoint.from_directory(tmp_path.as_posix())
RayTrainReportCallback.get_model(checkpoint, filename=custom_filename)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-x", __file__]))
| ScalingConfigAssertingXGBoostTrainer |
python | sympy__sympy | sympy/printing/pretty/tests/test_pretty.py | {
"start": 4399,
"end": 177713
} | class ____(sym.lowergamma):
pass # testing notation inheritance by a subclass with same name
a, b, c, d, x, y, z, k, n, s, p = symbols('a,b,c,d,x,y,z,k,n,s,p')
f = Function("f")
th = Symbol('theta')
ph = Symbol('phi')
"""
Expressions whose pretty-printing is tested here:
(A '#' to the right of an expression indicates that its various acceptable
orderings are accounted for by the tests.)
BASIC EXPRESSIONS:
oo
(x**2)
1/x
y*x**-2
x**Rational(-5,2)
(-2)**x
Pow(3, 1, evaluate=False)
(x**2 + x + 1) #
1-x #
1-2*x #
x/y
-x/y
(x+2)/y #
(1+x)*y #3
-5*x/(x+10) # correct placement of negative sign
1 - Rational(3,2)*(x+1)
-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5) # issue 5524
ORDERING:
x**2 + x + 1
1 - x
1 - 2*x
2*x**4 + y**2 - x**2 + y**3
RELATIONAL:
Eq(x, y)
Lt(x, y)
Gt(x, y)
Le(x, y)
Ge(x, y)
Ne(x/(y+1), y**2) #
RATIONAL NUMBERS:
y*x**-2
y**Rational(3,2) * x**Rational(-5,2)
sin(x)**3/tan(x)**2
FUNCTIONS (ABS, CONJ, EXP, FUNCTION BRACES, FACTORIAL, FLOOR, CEILING):
(2*x + exp(x)) #
Abs(x)
Abs(x/(x**2+1)) #
Abs(1 / (y - Abs(x)))
factorial(n)
factorial(2*n)
subfactorial(n)
subfactorial(2*n)
factorial(factorial(factorial(n)))
factorial(n+1) #
conjugate(x)
conjugate(f(x+1)) #
f(x)
f(x, y)
f(x/(y+1), y) #
f(x**x**x**x**x**x)
sin(x)**2
conjugate(a+b*I)
conjugate(exp(a+b*I))
conjugate( f(1 + conjugate(f(x))) ) #
f(x/(y+1), y) # denom of first arg
floor(1 / (y - floor(x)))
ceiling(1 / (y - ceiling(x)))
SQRT:
sqrt(2)
2**Rational(1,3)
2**Rational(1,1000)
sqrt(x**2 + 1)
(1 + sqrt(5))**Rational(1,3)
2**(1/x)
sqrt(2+pi)
(2+(1+x**2)/(2+x))**Rational(1,4)+(1+x**Rational(1,1000))/sqrt(3+x**2)
DERIVATIVES:
Derivative(log(x), x, evaluate=False)
Derivative(log(x), x, evaluate=False) + x #
Derivative(log(x) + x**2, x, y, evaluate=False)
Derivative(2*x*y, y, x, evaluate=False) + x**2 #
beta(alpha).diff(alpha)
INTEGRALS:
Integral(log(x), x)
Integral(x**2, x)
Integral((sin(x))**2 / (tan(x))**2)
Integral(x**(2**x), x)
Integral(x**2, (x,1,2))
Integral(x**2, (x,Rational(1,2),10))
Integral(x**2*y**2, x,y)
Integral(x**2, (x, None, 1))
Integral(x**2, (x, 1, None))
Integral(sin(th)/cos(ph), (th,0,pi), (ph, 0, 2*pi))
MATRICES:
Matrix([[x**2+1, 1], [y, x+y]]) #
Matrix([[x/y, y, th], [0, exp(I*k*ph), 1]])
PIECEWISE:
Piecewise((x,x<1),(x**2,True))
ITE:
ITE(x, y, z)
SEQUENCES (TUPLES, LISTS, DICTIONARIES):
()
[]
{}
(1/x,)
[x**2, 1/x, x, y, sin(th)**2/cos(ph)**2]
(x**2, 1/x, x, y, sin(th)**2/cos(ph)**2)
{x: sin(x)}
{1/x: 1/y, x: sin(x)**2} #
[x**2]
(x**2,)
{x**2: 1}
LIMITS:
Limit(x, x, oo)
Limit(x**2, x, 0)
Limit(1/x, x, 0)
Limit(sin(x)/x, x, 0)
UNITS:
joule => kg*m**2/s
SUBS:
Subs(f(x), x, ph**2)
Subs(f(x).diff(x), x, 0)
Subs(f(x).diff(x)/y, (x, y), (0, Rational(1, 2)))
ORDER:
O(1)
O(1/x)
O(x**2 + y**2)
"""
def pretty(expr, order=None):
"""ASCII pretty-printing"""
return xpretty(expr, order=order, use_unicode=False, wrap_line=False)
def upretty(expr, order=None):
"""Unicode pretty-printing"""
return xpretty(expr, order=order, use_unicode=True, wrap_line=False)
def test_pretty_ascii_str():
assert pretty( 'xxx' ) == 'xxx'
assert pretty( "xxx" ) == 'xxx'
assert pretty( 'xxx\'xxx' ) == 'xxx\'xxx'
assert pretty( 'xxx"xxx' ) == 'xxx\"xxx'
assert pretty( 'xxx\"xxx' ) == 'xxx\"xxx'
assert pretty( "xxx'xxx" ) == 'xxx\'xxx'
assert pretty( "xxx\'xxx" ) == 'xxx\'xxx'
assert pretty( "xxx\"xxx" ) == 'xxx\"xxx'
assert pretty( "xxx\"xxx\'xxx" ) == 'xxx"xxx\'xxx'
assert pretty( "xxx\nxxx" ) == 'xxx\nxxx'
def test_pretty_unicode_str():
assert pretty( 'xxx' ) == 'xxx'
assert pretty( 'xxx' ) == 'xxx'
assert pretty( 'xxx\'xxx' ) == 'xxx\'xxx'
assert pretty( 'xxx"xxx' ) == 'xxx\"xxx'
assert pretty( 'xxx\"xxx' ) == 'xxx\"xxx'
assert pretty( "xxx'xxx" ) == 'xxx\'xxx'
assert pretty( "xxx\'xxx" ) == 'xxx\'xxx'
assert pretty( "xxx\"xxx" ) == 'xxx\"xxx'
assert pretty( "xxx\"xxx\'xxx" ) == 'xxx"xxx\'xxx'
assert pretty( "xxx\nxxx" ) == 'xxx\nxxx'
def test_upretty_greek():
assert upretty( oo ) == '∞'
assert upretty( Symbol('alpha^+_1') ) == 'α⁺₁'
assert upretty( Symbol('beta') ) == 'β'
assert upretty(Symbol('lambda')) == 'λ'
def test_upretty_multiindex():
assert upretty( Symbol('beta12') ) == 'β₁₂'
assert upretty( Symbol('Y00') ) == 'Y₀₀'
assert upretty( Symbol('Y_00') ) == 'Y₀₀'
assert upretty( Symbol('F^+-') ) == 'F⁺⁻'
def test_upretty_sub_super():
assert upretty( Symbol('beta_1_2') ) == 'β₁ ₂'
assert upretty( Symbol('beta^1^2') ) == 'β¹ ²'
assert upretty( Symbol('beta_1^2') ) == 'β²₁'
assert upretty( Symbol('beta_10_20') ) == 'β₁₀ ₂₀'
assert upretty( Symbol('beta_ax_gamma^i') ) == 'βⁱₐₓ ᵧ'
assert upretty( Symbol("F^1^2_3_4") ) == 'F¹ ²₃ ₄'
assert upretty( Symbol("F_1_2^3^4") ) == 'F³ ⁴₁ ₂'
assert upretty( Symbol("F_1_2_3_4") ) == 'F₁ ₂ ₃ ₄'
assert upretty( Symbol("F^1^2^3^4") ) == 'F¹ ² ³ ⁴'
def test_upretty_subs_missing_in_24():
assert upretty( Symbol('F_beta') ) == 'Fᵦ'
assert upretty( Symbol('F_gamma') ) == 'Fᵧ'
assert upretty( Symbol('F_rho') ) == 'Fᵨ'
assert upretty( Symbol('F_phi') ) == 'Fᵩ'
assert upretty( Symbol('F_chi') ) == 'Fᵪ'
assert upretty( Symbol('F_a') ) == 'Fₐ'
assert upretty( Symbol('F_e') ) == 'Fₑ'
assert upretty( Symbol('F_i') ) == 'Fᵢ'
assert upretty( Symbol('F_o') ) == 'Fₒ'
assert upretty( Symbol('F_u') ) == 'Fᵤ'
assert upretty( Symbol('F_r') ) == 'Fᵣ'
assert upretty( Symbol('F_v') ) == 'Fᵥ'
assert upretty( Symbol('F_x') ) == 'Fₓ'
def test_missing_in_2X_issue_9047():
assert upretty( Symbol('F_h') ) == 'Fₕ'
assert upretty( Symbol('F_k') ) == 'Fₖ'
assert upretty( Symbol('F_l') ) == 'Fₗ'
assert upretty( Symbol('F_m') ) == 'Fₘ'
assert upretty( Symbol('F_n') ) == 'Fₙ'
assert upretty( Symbol('F_p') ) == 'Fₚ'
assert upretty( Symbol('F_s') ) == 'Fₛ'
assert upretty( Symbol('F_t') ) == 'Fₜ'
def test_upretty_modifiers():
# Accents
assert upretty( Symbol('Fmathring') ) == 'F̊'
assert upretty( Symbol('Fddddot') ) == 'F⃜'
assert upretty( Symbol('Fdddot') ) == 'F⃛'
assert upretty( Symbol('Fddot') ) == 'F̈'
assert upretty( Symbol('Fdot') ) == 'Ḟ'
assert upretty( Symbol('Fcheck') ) == 'F̌'
assert upretty( Symbol('Fbreve') ) == 'F̆'
assert upretty( Symbol('Facute') ) == 'F́'
assert upretty( Symbol('Fgrave') ) == 'F̀'
assert upretty( Symbol('Ftilde') ) == 'F̃'
assert upretty( Symbol('Fhat') ) == 'F̂'
assert upretty( Symbol('Fbar') ) == 'F̅'
assert upretty( Symbol('Fvec') ) == 'F⃗'
assert upretty( Symbol('Fprime') ) == 'F′'
assert upretty( Symbol('Fprm') ) == 'F′'
# No faces are actually implemented, but test to make sure the modifiers are stripped
assert upretty( Symbol('Fbold') ) == 'Fbold'
assert upretty( Symbol('Fbm') ) == 'Fbm'
assert upretty( Symbol('Fcal') ) == 'Fcal'
assert upretty( Symbol('Fscr') ) == 'Fscr'
assert upretty( Symbol('Ffrak') ) == 'Ffrak'
# Brackets
assert upretty( Symbol('Fnorm') ) == '‖F‖'
assert upretty( Symbol('Favg') ) == '⟨F⟩'
assert upretty( Symbol('Fabs') ) == '|F|'
assert upretty( Symbol('Fmag') ) == '|F|'
# Combinations
assert upretty( Symbol('xvecdot') ) == 'x⃗̇'
assert upretty( Symbol('xDotVec') ) == 'ẋ⃗'
assert upretty( Symbol('xHATNorm') ) == '‖x̂‖'
assert upretty( Symbol('xMathring_yCheckPRM__zbreveAbs') ) == 'x̊_y̌′__|z̆|'
assert upretty( Symbol('alphadothat_nVECDOT__tTildePrime') ) == 'α̇̂_n⃗̇__t̃′'
assert upretty( Symbol('x_dot') ) == 'x_dot'
assert upretty( Symbol('x__dot') ) == 'x__dot'
def test_pretty_Cycle():
from sympy.combinatorics.permutations import Cycle
assert pretty(Cycle(1, 2)) == '(1 2)'
assert pretty(Cycle(2)) == '(2)'
assert pretty(Cycle(1, 3)(4, 5)) == '(1 3)(4 5)'
assert pretty(Cycle()) == '()'
def test_pretty_Permutation():
from sympy.combinatorics.permutations import Permutation
p1 = Permutation(1, 2)(3, 4)
assert xpretty(p1, perm_cyclic=True, use_unicode=True) == "(1 2)(3 4)"
assert xpretty(p1, perm_cyclic=True, use_unicode=False) == "(1 2)(3 4)"
assert xpretty(p1, perm_cyclic=False, use_unicode=True) == \
'⎛0 1 2 3 4⎞\n'\
'⎝0 2 1 4 3⎠'
assert xpretty(p1, perm_cyclic=False, use_unicode=False) == \
"/0 1 2 3 4\\\n"\
"\\0 2 1 4 3/"
with warns_deprecated_sympy():
old_print_cyclic = Permutation.print_cyclic
Permutation.print_cyclic = False
assert xpretty(p1, use_unicode=True) == \
'⎛0 1 2 3 4⎞\n'\
'⎝0 2 1 4 3⎠'
assert xpretty(p1, use_unicode=False) == \
"/0 1 2 3 4\\\n"\
"\\0 2 1 4 3/"
Permutation.print_cyclic = old_print_cyclic
def test_pretty_basic():
    """Pretty printing of basic arithmetic: rationals, infinity, powers,
    fractions, polynomial orderings, and negative-sign placement, checked
    in both ASCII (``pretty``) and unicode (``upretty``) modes.

    NOTE(review): the multi-line expected strings are whitespace-sensitive;
    confirm their internal alignment against actual printer output.
    """
    assert pretty( -Rational(1)/2 ) == '-1/2'
    assert pretty( -Rational(13)/22 ) == \
"""\
-13 \n\
----\n\
22 \
"""
    expr = oo
    ascii_str = \
"""\
oo\
"""
    ucode_str = \
"""\
∞\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x**2)
    ascii_str = \
"""\
2\n\
x \
"""
    ucode_str = \
"""\
2\n\
x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 1/x
    ascii_str = \
"""\
1\n\
-\n\
x\
"""
    ucode_str = \
"""\
1\n\
─\n\
x\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # not the same as 1/x
    expr = x**-1.0
    ascii_str = \
"""\
-1.0\n\
x \
"""
    ucode_str = \
"""\
-1.0\n\
x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # see issue #2860
    expr = Pow(S(2), -1.0, evaluate=False)
    ascii_str = \
"""\
-1.0\n\
2 \
"""
    ucode_str = \
"""\
-1.0\n\
2 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = y*x**-2
    ascii_str = \
"""\
y \n\
--\n\
2\n\
x \
"""
    ucode_str = \
"""\
y \n\
──\n\
2\n\
x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # see issue #14033: fractional powers with root notation disabled
    expr = x**Rational(1, 3)
    ascii_str = \
"""\
1/3\n\
x \
"""
    ucode_str = \
"""\
1/3\n\
x \
"""
    assert xpretty(expr, use_unicode=False, wrap_line=False,\
    root_notation = False) == ascii_str
    assert xpretty(expr, use_unicode=True, wrap_line=False,\
    root_notation = False) == ucode_str
    expr = x**Rational(-5, 2)
    ascii_str = \
"""\
1 \n\
----\n\
5/2\n\
x \
"""
    ucode_str = \
"""\
1 \n\
────\n\
5/2\n\
x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (-2)**x
    ascii_str = \
"""\
x\n\
(-2) \
"""
    ucode_str = \
"""\
x\n\
(-2) \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # See issue 4923: unevaluated Pow keeps the exponent visible
    expr = Pow(3, 1, evaluate=False)
    ascii_str = \
"""\
1\n\
3 \
"""
    ucode_str = \
"""\
1\n\
3 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Sums may print in several term orders, so accept any of them.
    expr = (x**2 + x + 1)
    ascii_str_1 = \
"""\
2\n\
1 + x + x \
"""
    ascii_str_2 = \
"""\
2 \n\
x + x + 1\
"""
    ascii_str_3 = \
"""\
2 \n\
x + 1 + x\
"""
    ucode_str_1 = \
"""\
2\n\
1 + x + x \
"""
    ucode_str_2 = \
"""\
2 \n\
x + x + 1\
"""
    ucode_str_3 = \
"""\
2 \n\
x + 1 + x\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3]
    assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3]
    expr = 1 - x
    ascii_str_1 = \
"""\
1 - x\
"""
    ascii_str_2 = \
"""\
-x + 1\
"""
    ucode_str_1 = \
"""\
1 - x\
"""
    ucode_str_2 = \
"""\
-x + 1\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = 1 - 2*x
    ascii_str_1 = \
"""\
1 - 2*x\
"""
    ascii_str_2 = \
"""\
-2*x + 1\
"""
    ucode_str_1 = \
"""\
1 - 2⋅x\
"""
    ucode_str_2 = \
"""\
-2⋅x + 1\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = x/y
    ascii_str = \
"""\
x\n\
-\n\
y\
"""
    ucode_str = \
"""\
x\n\
─\n\
y\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x/y
    ascii_str = \
"""\
-x \n\
---\n\
y \
"""
    ucode_str = \
"""\
-x \n\
───\n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x + 2)/y
    ascii_str_1 = \
"""\
2 + x\n\
-----\n\
y \
"""
    ascii_str_2 = \
"""\
x + 2\n\
-----\n\
y \
"""
    ucode_str_1 = \
"""\
2 + x\n\
─────\n\
y \
"""
    ucode_str_2 = \
"""\
x + 2\n\
─────\n\
y \
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = (1 + x)*y
    ascii_str_1 = \
"""\
y*(1 + x)\
"""
    ascii_str_2 = \
"""\
(1 + x)*y\
"""
    ascii_str_3 = \
"""\
y*(x + 1)\
"""
    ucode_str_1 = \
"""\
y⋅(1 + x)\
"""
    ucode_str_2 = \
"""\
(1 + x)⋅y\
"""
    ucode_str_3 = \
"""\
y⋅(x + 1)\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3]
    assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3]
    # Test for correct placement of the negative sign
    expr = -5*x/(x + 10)
    ascii_str_1 = \
"""\
-5*x \n\
------\n\
10 + x\
"""
    ascii_str_2 = \
"""\
-5*x \n\
------\n\
x + 10\
"""
    ucode_str_1 = \
"""\
-5⋅x \n\
──────\n\
10 + x\
"""
    ucode_str_2 = \
"""\
-5⋅x \n\
──────\n\
x + 10\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = -S.Half - 3*x
    ascii_str = \
"""\
-3*x - 1/2\
"""
    ucode_str = \
"""\
-3⋅x - 1/2\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = S.Half - 3*x
    ascii_str = \
"""\
1/2 - 3*x\
"""
    ucode_str = \
"""\
1/2 - 3⋅x\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -S.Half - 3*x/2
    ascii_str = \
"""\
3*x 1\n\
- --- - -\n\
2 2\
"""
    ucode_str = \
"""\
3⋅x 1\n\
- ─── - ─\n\
2 2\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = S.Half - 3*x/2
    ascii_str = \
"""\
1 3*x\n\
- - ---\n\
2 2 \
"""
    ucode_str = \
"""\
1 3⋅x\n\
─ - ───\n\
2 2 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_negative_fractions():
    """Negative fractions: the minus sign must attach to the numerator
    (or to the whole fraction bar), never float free.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    expr = -x/y
    ascii_str =\
"""\
-x \n\
---\n\
y \
"""
    ucode_str =\
"""\
-x \n\
───\n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x*z/y
    ascii_str =\
"""\
-x*z \n\
-----\n\
y \
"""
    ucode_str =\
"""\
-x⋅z \n\
─────\n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = x**2/y
    ascii_str =\
"""\
2\n\
x \n\
--\n\
y \
"""
    ucode_str =\
"""\
2\n\
x \n\
──\n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x**2/y
    ascii_str =\
"""\
2 \n\
-x \n\
----\n\
y \
"""
    ucode_str =\
"""\
2 \n\
-x \n\
────\n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x/(y*z)
    ascii_str =\
"""\
-x \n\
---\n\
y*z\
"""
    ucode_str =\
"""\
-x \n\
───\n\
y⋅z\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -a/y**2
    ascii_str =\
"""\
-a \n\
---\n\
2 \n\
y \
"""
    ucode_str =\
"""\
-a \n\
───\n\
2 \n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Negative fraction in an exponent position.
    expr = y**(-a/b)
    ascii_str =\
"""\
-a \n\
---\n\
b \n\
y \
"""
    ucode_str =\
"""\
-a \n\
───\n\
b \n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -1/y**2
    ascii_str =\
"""\
-1 \n\
---\n\
2 \n\
y \
"""
    ucode_str =\
"""\
-1 \n\
───\n\
2 \n\
y \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -10/b**2
    ascii_str =\
"""\
-10 \n\
----\n\
2 \n\
b \
"""
    ucode_str =\
"""\
-10 \n\
────\n\
2 \n\
b \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # A plain negative Rational also keeps the sign on the numerator.
    expr = Rational(-200, 37)
    ascii_str =\
"""\
-200 \n\
-----\n\
37 \
"""
    ucode_str =\
"""\
-200 \n\
─────\n\
37 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_Mul():
    """Unevaluated Mul/Add nodes must print their arguments verbatim, in
    order, including redundant 1s and 0s and rational coefficients."""
    cases = [
        (Mul(0, 1, evaluate=False), "0*1", "0⋅1"),
        (Mul(1, 0, evaluate=False), "1*0", "1⋅0"),
        (Mul(1, 1, evaluate=False), "1*1", "1⋅1"),
        (Mul(1, 1, 1, evaluate=False), "1*1*1", "1⋅1⋅1"),
        (Mul(1, 2, evaluate=False), "1*2", "1⋅2"),
        (Add(0, 1, evaluate=False), "0 + 1", "0 + 1"),
        (Mul(1, 1, 2, evaluate=False), "1*1*2", "1⋅1⋅2"),
        (Add(0, 0, 1, evaluate=False), "0 + 0 + 1", "0 + 0 + 1"),
        (Mul(1, -1, evaluate=False), "1*-1", "1⋅-1"),
        (Mul(1.0, x, evaluate=False), "1.0*x", "1.0⋅x"),
        (Mul(1, 1, 2, 3, x, evaluate=False), "1*1*2*3*x", "1⋅1⋅2⋅3⋅x"),
        (Mul(-1, 1, evaluate=False), "-1*1", "-1⋅1"),
        (Mul(4, 3, 2, 1, 0, y, x, evaluate=False),
         "4*3*2*1*0*y*x", "4⋅3⋅2⋅1⋅0⋅y⋅x"),
        (Mul(4, 3, 2, 1+z, 0, y, x, evaluate=False),
         "4*3*2*(z + 1)*0*y*x", "4⋅3⋅2⋅(z + 1)⋅0⋅y⋅x"),
        (Mul(Rational(2, 3), Rational(5, 7), evaluate=False),
         "2/3*5/7", "2/3⋅5/7"),
        (Mul(x + y, Rational(1, 2), evaluate=False),
         "(x + y)*1/2", "(x + y)⋅1/2"),
        # A leading Rational(1, 2) coefficient prints as a stacked fraction.
        (Mul(Rational(1, 2), x + y, evaluate=False),
         "x + y\n-----\n 2 ", "x + y\n─────\n 2 "),
        (Mul(S.One, x + y, evaluate=False), "1*(x + y)", "1⋅(x + y)"),
        (Mul(x - y, S.One, evaluate=False), "(x - y)*1", "(x - y)⋅1"),
        (Mul(Rational(1, 2), x - y, S.One, x + y, evaluate=False),
         "1/2*(x - y)*1*(x + y)", "1/2⋅(x - y)⋅1⋅(x + y)"),
        (Mul(x + y, Rational(3, 4), S.One, y - z, evaluate=False),
         "(x + y)*3/4*1*(y - z)", "(x + y)⋅3/4⋅1⋅(y - z)"),
        (Mul(x + y, Rational(1, 1), Rational(3, 4), Rational(5, 6), evaluate=False),
         "(x + y)*1*3/4*5/6", "(x + y)⋅1⋅3/4⋅5/6"),
        (Mul(Rational(3, 4), x + y, S.One, y - z, evaluate=False),
         "3/4*(x + y)*1*(y - z)", "3/4⋅(x + y)⋅1⋅(y - z)"),
    ]
    for node, ascii_expected, ucode_expected in cases:
        assert pretty(node) == ascii_expected
        assert upretty(node) == ucode_expected
def test_issue_5524():
    """Regression test for issue 5524: sign handling when a product of
    negated factors is expanded for printing.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    assert pretty(-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5)) == \
"""\
2 / ___ \\\n\
- (5 - y) + (x - 5)*\\-x - 2*\\/ 2 + 5/\
"""
    assert upretty(-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5)) == \
"""\
2 \n\
- (5 - y) + (x - 5)⋅(-x - 2⋅√2 + 5)\
"""
def test_pretty_ordering():
    """The ``order`` keyword ('lex', 'rev-lex', None) controls term order
    in printed sums; series with O() terms keep the O() last regardless.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    assert pretty(x**2 + x + 1, order='lex') == \
"""\
2 \n\
x + x + 1\
"""
    assert pretty(x**2 + x + 1, order='rev-lex') == \
"""\
2\n\
1 + x + x \
"""
    assert pretty(1 - x, order='lex') == '-x + 1'
    assert pretty(1 - x, order='rev-lex') == '1 - x'
    assert pretty(1 - 2*x, order='lex') == '-2*x + 1'
    assert pretty(1 - 2*x, order='rev-lex') == '1 - 2*x'
    f = 2*x**4 + y**2 - x**2 + y**3
    assert pretty(f, order=None) == \
"""\
4 2 3 2\n\
2*x - x + y + y \
"""
    assert pretty(f, order='lex') == \
"""\
4 2 3 2\n\
2*x - x + y + y \
"""
    assert pretty(f, order='rev-lex') == \
"""\
2 3 2 4\n\
y + y - x + 2*x \
"""
    # For a series expansion the ordering option must not move the O() term.
    expr = x - x**3/6 + x**5/120 + O(x**6)
    ascii_str = \
"""\
3 5 \n\
x x / 6\\\n\
x - -- + --- + O\\x /\n\
6 120 \
"""
    ucode_str = \
"""\
3 5 \n\
x x ⎛ 6⎞\n\
x - ── + ─── + O⎝x ⎠\n\
6 120 \
"""
    assert pretty(expr, order=None) == ascii_str
    assert upretty(expr, order=None) == ucode_str
    assert pretty(expr, order='lex') == ascii_str
    assert upretty(expr, order='lex') == ucode_str
    assert pretty(expr, order='rev-lex') == ascii_str
    assert upretty(expr, order='rev-lex') == ucode_str
def test_EulerGamma():
    """EulerGamma prints as its full name in ASCII and as γ in unicode."""
    ascii_form = pretty(EulerGamma)
    assert ascii_form == str(EulerGamma)
    assert ascii_form == "EulerGamma"
    assert upretty(EulerGamma) == "γ"
def test_GoldenRatio():
    """GoldenRatio prints as its full name in ASCII and as φ in unicode."""
    ascii_form = pretty(GoldenRatio)
    assert ascii_form == str(GoldenRatio)
    assert ascii_form == "GoldenRatio"
    assert upretty(GoldenRatio) == "φ"
def test_Catalan():
    """Catalan's constant prints as 'G' in both ASCII and unicode modes."""
    assert pretty(Catalan) == "G"
    assert upretty(Catalan) == "G"
def test_pretty_relational():
    """Relational operators: ASCII uses =, <, >, <=, >=, != while unicode
    upgrades <=, >=, != to ≤, ≥, ≠.

    NOTE(review): multi-line expected strings are whitespace-sensitive;
    verify alignment against actual printer output.
    """
    expr = Eq(x, y)
    ascii_str = \
"""\
x = y\
"""
    ucode_str = \
"""\
x = y\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Lt(x, y)
    ascii_str = \
"""\
x < y\
"""
    ucode_str = \
"""\
x < y\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Gt(x, y)
    ascii_str = \
"""\
x > y\
"""
    ucode_str = \
"""\
x > y\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Le(x, y)
    ascii_str = \
"""\
x <= y\
"""
    ucode_str = \
"""\
x ≤ y\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Ge(x, y)
    ascii_str = \
"""\
x >= y\
"""
    ucode_str = \
"""\
x ≥ y\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Inequality between a fraction and a power exercises multi-line layout.
    expr = Ne(x/(y + 1), y**2)
    ascii_str_1 = \
"""\
x 2\n\
----- != y \n\
1 + y \
"""
    ascii_str_2 = \
"""\
x 2\n\
----- != y \n\
y + 1 \
"""
    ucode_str_1 = \
"""\
x 2\n\
───── ≠ y \n\
1 + y \
"""
    ucode_str_2 = \
"""\
x 2\n\
───── ≠ y \n\
y + 1 \
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
def test_Assignment():
    """Codegen Assignment renders with ':=' in both printer modes."""
    stmt = Assignment(x, y)
    expected = "x := y"
    assert pretty(stmt) == expected
    assert upretty(stmt) == expected
def test_AugmentedAssignment():
    """Every augmented-assignment node renders with its in-place operator
    symbol, identically in ASCII and unicode modes."""
    cases = [
        (AddAugmentedAssignment, "x += y"),
        (SubAugmentedAssignment, "x -= y"),
        (MulAugmentedAssignment, "x *= y"),
        (DivAugmentedAssignment, "x /= y"),
        (ModAugmentedAssignment, "x %= y"),
    ]
    for node_cls, expected in cases:
        stmt = node_cls(x, y)
        assert pretty(stmt) == expected
        assert upretty(stmt) == expected
def test_pretty_rational():
    """Rational powers in quotients: negative exponents move factors to
    the denominator; function powers keep the exponent above the name.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    expr = y*x**-2
    ascii_str = \
"""\
y \n\
--\n\
2\n\
x \
"""
    ucode_str = \
"""\
y \n\
──\n\
2\n\
x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = y**Rational(3, 2) * x**Rational(-5, 2)
    ascii_str = \
"""\
3/2\n\
y \n\
----\n\
5/2\n\
x \
"""
    ucode_str = \
"""\
3/2\n\
y \n\
────\n\
5/2\n\
x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Function powers: the exponent sits over the function name.
    expr = sin(x)**3/tan(x)**2
    ascii_str = \
"""\
3 \n\
sin (x)\n\
-------\n\
2 \n\
tan (x)\
"""
    ucode_str = \
"""\
3 \n\
sin (x)\n\
───────\n\
2 \n\
tan (x)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
@_both_exp_pow
def test_pretty_functions():
    """Tests for Abs, conjugate, exp, function braces, and factorial.

    Also covers combinatorial functions (binomial, catalan, bell,
    bernoulli, fibonacci, lucas, tribonacci, euler, stieltjes), Mathieu
    functions, floor/ceiling, and nested function application.

    NOTE(review): the multi-line expected strings are whitespace-sensitive;
    confirm their internal alignment against actual printer output.
    """
    expr = (2*x + exp(x))
    ascii_str_1 = \
"""\
x\n\
2*x + e \
"""
    ascii_str_2 = \
"""\
x \n\
e + 2*x\
"""
    ucode_str_1 = \
"""\
x\n\
2⋅x + ℯ \
"""
    ucode_str_2 = \
"""\
x \n\
ℯ + 2⋅x\
"""
    ucode_str_3 = \
"""\
x \n\
ℯ + 2⋅x\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3]
    expr = Abs(x)
    ascii_str = \
"""\
|x|\
"""
    ucode_str = \
"""\
│x│\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Abs(x/(x**2 + 1))
    ascii_str_1 = \
"""\
| x |\n\
|------|\n\
| 2|\n\
|1 + x |\
"""
    ascii_str_2 = \
"""\
| x |\n\
|------|\n\
| 2 |\n\
|x + 1|\
"""
    ucode_str_1 = \
"""\
│ x │\n\
│──────│\n\
│ 2│\n\
│1 + x │\
"""
    ucode_str_2 = \
"""\
│ x │\n\
│──────│\n\
│ 2 │\n\
│x + 1│\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = Abs(1 / (y - Abs(x)))
    ascii_str = \
"""\
1 \n\
---------\n\
|y - |x||\
"""
    ucode_str = \
"""\
1 \n\
─────────\n\
│y - │x││\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # factorial family: a plain integer symbol needs no parentheses.
    n = Symbol('n', integer=True)
    expr = factorial(n)
    ascii_str = \
"""\
n!\
"""
    ucode_str = \
"""\
n!\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial(2*n)
    ascii_str = \
"""\
(2*n)!\
"""
    ucode_str = \
"""\
(2⋅n)!\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial(factorial(factorial(n)))
    ascii_str = \
"""\
((n!)!)!\
"""
    ucode_str = \
"""\
((n!)!)!\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial(n + 1)
    ascii_str_1 = \
"""\
(1 + n)!\
"""
    ascii_str_2 = \
"""\
(n + 1)!\
"""
    ucode_str_1 = \
"""\
(1 + n)!\
"""
    ucode_str_2 = \
"""\
(n + 1)!\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = subfactorial(n)
    ascii_str = \
"""\
!n\
"""
    ucode_str = \
"""\
!n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = subfactorial(2*n)
    ascii_str = \
"""\
!(2*n)\
"""
    ucode_str = \
"""\
!(2⋅n)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    n = Symbol('n', integer=True)
    expr = factorial2(n)
    ascii_str = \
"""\
n!!\
"""
    ucode_str = \
"""\
n!!\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial2(2*n)
    ascii_str = \
"""\
(2*n)!!\
"""
    ucode_str = \
"""\
(2⋅n)!!\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial2(factorial2(factorial2(n)))
    ascii_str = \
"""\
((n!!)!!)!!\
"""
    ucode_str = \
"""\
((n!!)!!)!!\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial2(n + 1)
    ascii_str_1 = \
"""\
(1 + n)!!\
"""
    ascii_str_2 = \
"""\
(n + 1)!!\
"""
    ucode_str_1 = \
"""\
(1 + n)!!\
"""
    ucode_str_2 = \
"""\
(n + 1)!!\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    # binomial coefficients render as a two-row bracketed column.
    expr = 2*binomial(n, k)
    ascii_str = \
"""\
/n\\\n\
2*| |\n\
\\k/\
"""
    ucode_str = \
"""\
⎛n⎞\n\
2⋅⎜ ⎟\n\
⎝k⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2*binomial(2*n, k)
    ascii_str = \
"""\
/2*n\\\n\
2*| |\n\
\\ k /\
"""
    ucode_str = \
"""\
⎛2⋅n⎞\n\
2⋅⎜ ⎟\n\
⎝ k ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2*binomial(n**2, k)
    ascii_str = \
"""\
/ 2\\\n\
|n |\n\
2*| |\n\
\\k /\
"""
    ucode_str = \
"""\
⎛ 2⎞\n\
⎜n ⎟\n\
2⋅⎜ ⎟\n\
⎝k ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Sequence-style functions print as a letter with a subscript.
    expr = catalan(n)
    ascii_str = \
"""\
C \n\
n\
"""
    ucode_str = \
"""\
C \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = catalan(n)
    ascii_str = \
"""\
C \n\
n\
"""
    ucode_str = \
"""\
C \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = bell(n)
    ascii_str = \
"""\
B \n\
n\
"""
    ucode_str = \
"""\
B \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = bernoulli(n)
    ascii_str = \
"""\
B \n\
n\
"""
    ucode_str = \
"""\
B \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = bernoulli(n, x)
    ascii_str = \
"""\
B (x)\n\
n \
"""
    ucode_str = \
"""\
B (x)\n\
n \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = fibonacci(n)
    ascii_str = \
"""\
F \n\
n\
"""
    ucode_str = \
"""\
F \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = lucas(n)
    ascii_str = \
"""\
L \n\
n\
"""
    ucode_str = \
"""\
L \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = tribonacci(n)
    ascii_str = \
"""\
T \n\
n\
"""
    ucode_str = \
"""\
T \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # stieltjes has no single ASCII letter, so the full name is used there.
    expr = stieltjes(n)
    ascii_str = \
"""\
stieltjes \n\
n\
"""
    ucode_str = \
"""\
γ \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = stieltjes(n, x)
    ascii_str = \
"""\
stieltjes (x)\n\
n \
"""
    ucode_str = \
"""\
γ (x)\n\
n \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = mathieuc(x, y, z)
    ascii_str = 'C(x, y, z)'
    ucode_str = 'C(x, y, z)'
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = mathieus(x, y, z)
    ascii_str = 'S(x, y, z)'
    ucode_str = 'S(x, y, z)'
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = mathieucprime(x, y, z)
    ascii_str = "C'(x, y, z)"
    ucode_str = "C'(x, y, z)"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = mathieusprime(x, y, z)
    ascii_str = "S'(x, y, z)"
    ucode_str = "S'(x, y, z)"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # conjugate draws an overline across the conjugated expression.
    expr = conjugate(x)
    ascii_str = \
"""\
_\n\
x\
"""
    ucode_str = \
"""\
_\n\
x\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    f = Function('f')
    expr = conjugate(f(x + 1))
    ascii_str_1 = \
"""\
________\n\
f(1 + x)\
"""
    ascii_str_2 = \
"""\
________\n\
f(x + 1)\
"""
    ucode_str_1 = \
"""\
________\n\
f(1 + x)\
"""
    ucode_str_2 = \
"""\
________\n\
f(x + 1)\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = f(x)
    ascii_str = \
"""\
f(x)\
"""
    ucode_str = \
"""\
f(x)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = f(x, y)
    ascii_str = \
"""\
f(x, y)\
"""
    ucode_str = \
"""\
f(x, y)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = f(x/(y + 1), y)
    ascii_str_1 = \
"""\
/ x \\\n\
f|-----, y|\n\
\\1 + y /\
"""
    ascii_str_2 = \
"""\
/ x \\\n\
f|-----, y|\n\
\\y + 1 /\
"""
    ucode_str_1 = \
"""\
⎛ x ⎞\n\
f⎜─────, y⎟\n\
⎝1 + y ⎠\
"""
    ucode_str_2 = \
"""\
⎛ x ⎞\n\
f⎜─────, y⎟\n\
⎝y + 1 ⎠\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    # A tall power tower: braces must nest and close correctly.
    expr = f(x**x**x**x**x**x)
    ascii_str = \
"""\
/ / / / / x\\\\\\\\\\
| | | | \\x /||||
| | | \\x /|||
| | \\x /||
| \\x /|
f\\x /\
"""
    ucode_str = \
"""\
⎛ ⎛ ⎛ ⎛ ⎛ x⎞⎞⎞⎞⎞
⎜ ⎜ ⎜ ⎜ ⎝x ⎠⎟⎟⎟⎟
⎜ ⎜ ⎜ ⎝x ⎠⎟⎟⎟
⎜ ⎜ ⎝x ⎠⎟⎟
⎜ ⎝x ⎠⎟
f⎝x ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = sin(x)**2
    ascii_str = \
"""\
2 \n\
sin (x)\
"""
    ucode_str = \
"""\
2 \n\
sin (x)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = conjugate(a + b*I)
    ascii_str = \
"""\
_ _\n\
a - I*b\
"""
    ucode_str = \
"""\
_ _\n\
a - ⅈ⋅b\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = conjugate(exp(a + b*I))
    ascii_str = \
"""\
_ _\n\
a - I*b\n\
e \
"""
    ucode_str = \
"""\
_ _\n\
a - ⅈ⋅b\n\
ℯ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = conjugate( f(1 + conjugate(f(x))) )
    ascii_str_1 = \
"""\
___________\n\
/ ____\\\n\
f\\1 + f(x)/\
"""
    ascii_str_2 = \
"""\
___________\n\
/____ \\\n\
f\\f(x) + 1/\
"""
    ucode_str_1 = \
"""\
___________\n\
⎛ ____⎞\n\
f⎝1 + f(x)⎠\
"""
    ucode_str_2 = \
"""\
___________\n\
⎛____ ⎞\n\
f⎝f(x) + 1⎠\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = f(x/(y + 1), y)
    ascii_str_1 = \
"""\
/ x \\\n\
f|-----, y|\n\
\\1 + y /\
"""
    ascii_str_2 = \
"""\
/ x \\\n\
f|-----, y|\n\
\\y + 1 /\
"""
    ucode_str_1 = \
"""\
⎛ x ⎞\n\
f⎜─────, y⎟\n\
⎝1 + y ⎠\
"""
    ucode_str_2 = \
"""\
⎛ x ⎞\n\
f⎜─────, y⎟\n\
⎝y + 1 ⎠\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    # floor/ceiling: unicode uses ⌊⌋/⌈⌉ brackets, ASCII spells the name.
    expr = floor(1 / (y - floor(x)))
    ascii_str = \
"""\
/ 1 \\\n\
floor|------------|\n\
\\y - floor(x)/\
"""
    ucode_str = \
"""\
⎢ 1 ⎥\n\
⎢───────⎥\n\
⎣y - ⌊x⌋⎦\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = ceiling(1 / (y - ceiling(x)))
    ascii_str = \
"""\
/ 1 \\\n\
ceiling|--------------|\n\
\\y - ceiling(x)/\
"""
    ucode_str = \
"""\
⎡ 1 ⎤\n\
⎢───────⎥\n\
⎢y - ⌈x⌉⎥\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = euler(n)
    ascii_str = \
"""\
E \n\
n\
"""
    ucode_str = \
"""\
E \n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # A continued-fraction subscript exercises tall subscript layout.
    expr = euler(1/(1 + 1/(1 + 1/n)))
    ascii_str = \
"""\
E \n\
1 \n\
---------\n\
1 \n\
1 + -----\n\
1\n\
1 + -\n\
n\
"""
    ucode_str = \
"""\
E \n\
1 \n\
─────────\n\
1 \n\
1 + ─────\n\
1\n\
1 + ─\n\
n\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = euler(n, x)
    ascii_str = \
"""\
E (x)\n\
n \
"""
    ucode_str = \
"""\
E (x)\n\
n \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = euler(n, x/2)
    ascii_str = \
"""\
/x\\\n\
E |-|\n\
n\\2/\
"""
    ucode_str = \
"""\
⎛x⎞\n\
E ⎜─⎟\n\
n⎝2⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_sqrt():
    """Roots: unicode sqrt(2) collapses to √2, other roots draw a radical
    with the index to the left; nested/compound radicands stay aligned.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    expr = sqrt(2)
    ascii_str = \
"""\
___\n\
\\/ 2 \
"""
    ucode_str = \
"√2"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2**Rational(1, 3)
    ascii_str = \
"""\
3 ___\n\
\\/ 2 \
"""
    ucode_str = \
"""\
3 ___\n\
╲╱ 2 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2**Rational(1, 1000)
    ascii_str = \
"""\
1000___\n\
\\/ 2 \
"""
    ucode_str = \
"""\
1000___\n\
╲╱ 2 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = sqrt(x**2 + 1)
    ascii_str = \
"""\
________\n\
/ 2 \n\
\\/ x + 1 \
"""
    ucode_str = \
"""\
________\n\
╱ 2 \n\
╲╱ x + 1 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (1 + sqrt(5))**Rational(1, 3)
    ascii_str = \
"""\
___________\n\
3 / ___ \n\
\\/ 1 + \\/ 5 \
"""
    ucode_str = \
"""\
3 ________\n\
╲╱ 1 + √5 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # A symbolic root index is drawn in the index position too.
    expr = 2**(1/x)
    ascii_str = \
"""\
x ___\n\
\\/ 2 \
"""
    ucode_str = \
"""\
x ___\n\
╲╱ 2 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = sqrt(2 + pi)
    ascii_str = \
"""\
________\n\
\\/ 2 + pi \
"""
    ucode_str = \
"""\
_______\n\
╲╱ 2 + π \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (2 + (
    1 + x**2)/(2 + x))**Rational(1, 4) + (1 + x**Rational(1, 1000))/sqrt(3 + x**2)
    ascii_str = \
"""\
____________ \n\
/ 2 1000___ \n\
/ x + 1 \\/ x + 1\n\
4 / 2 + ------ + -----------\n\
\\/ x + 2 ________\n\
/ 2 \n\
\\/ x + 3 \
"""
    ucode_str = \
"""\
____________ \n\
╱ 2 1000___ \n\
╱ x + 1 ╲╱ x + 1\n\
4 ╱ 2 + ────── + ───────────\n\
╲╱ x + 2 ________\n\
╱ 2 \n\
╲╱ x + 3 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_sqrt_char_knob():
    """The ``use_unicode_sqrt_char`` option toggles between the drawn
    radical and the compact √ character (see PR #9234)."""
    expr = sqrt(2)
    ucode_str1 = \
"""\
___\n\
╲╱ 2 \
"""
    ucode_str2 = \
"√2"
    assert xpretty(expr, use_unicode=True,
    use_unicode_sqrt_char=False) == ucode_str1
    assert xpretty(expr, use_unicode=True,
    use_unicode_sqrt_char=True) == ucode_str2
def test_pretty_sqrt_longsymbol_no_sqrt_char():
    # Do not use unicode sqrt char for long symbols (see PR #9234):
    # multi-character radicands fall back to the drawn radical.
    expr = sqrt(Symbol('C1'))
    ucode_str = \
"""\
____\n\
╲╱ C₁ \
"""
    assert upretty(expr) == ucode_str
def test_pretty_KroneckerDelta():
    """KroneckerDelta prints as d/δ with a comma-joined subscript pair.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    x, y = symbols("x, y")
    expr = KroneckerDelta(x, y)
    ascii_str = \
"""\
d \n\
x,y\
"""
    ucode_str = \
"""\
δ \n\
x,y\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_product():
    """Pretty printing of Product with one and with two limit tuples.

    Bug fix: the expected strings for the single-limit Product were built
    but never asserted — ``expr`` was overwritten by the two-limit case
    before any check ran, leaving the first case silently untested.  The
    two missing asserts are added below.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    n, m, k, l = symbols('n m k l')
    f = symbols('f', cls=Function)
    expr = Product(f((n/3)**2), (n, k**2, l))
    unicode_str = \
"""\
l \n\
─┬──────┬─ \n\
│ │ ⎛ 2⎞\n\
│ │ ⎜n ⎟\n\
│ │ f⎜──⎟\n\
│ │ ⎝9 ⎠\n\
│ │ \n\
2 \n\
n = k """
    ascii_str = \
"""\
l \n\
__________ \n\
| | / 2\\\n\
| | |n |\n\
| | f|--|\n\
| | \\9 /\n\
| | \n\
2 \n\
n = k """
    # Previously missing: without these the single-limit case was untested.
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    expr = Product(f((n/3)**2), (n, k**2, l), (l, 1, m))
    unicode_str = \
"""\
m l \n\
─┬──────┬─ ─┬──────┬─ \n\
│ │ │ │ ⎛ 2⎞\n\
│ │ │ │ ⎜n ⎟\n\
│ │ │ │ f⎜──⎟\n\
│ │ │ │ ⎝9 ⎠\n\
│ │ │ │ \n\
l = 1 2 \n\
n = k """
    ascii_str = \
"""\
m l \n\
__________ __________ \n\
| | | | / 2\\\n\
| | | | |n |\n\
| | | | f|--|\n\
| | | | \\9 /\n\
| | | | \n\
l = 1 2 \n\
n = k """
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
def test_pretty_Lambda():
    # S.IdentityFunction is a special case: Lambda(y, y) canonicalizes to
    # the identity function, which always prints with the variable x.
    expr = Lambda(y, y)
    assert pretty(expr) == "x -> x"
    assert upretty(expr) == "x ↦ x"
    expr = Lambda(x, x+1)
    assert pretty(expr) == "x -> x + 1"
    assert upretty(expr) == "x ↦ x + 1"
    expr = Lambda(x, x**2)
    ascii_str = \
"""\
2\n\
x -> x \
"""
    ucode_str = \
"""\
2\n\
x ↦ x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # A Lambda raised to a power gets parenthesized before the exponent.
    expr = Lambda(x, x**2)**2
    ascii_str = \
"""\
2
/ 2\\ \n\
\\x -> x / \
"""
    ucode_str = \
"""\
2
⎛ 2⎞ \n\
⎝x ↦ x ⎠ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Lambda((x, y), x)
    ascii_str = "(x, y) -> x"
    ucode_str = "(x, y) ↦ x"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Lambda((x, y), x**2)
    ascii_str = \
"""\
2\n\
(x, y) -> x \
"""
    ucode_str = \
"""\
2\n\
(x, y) ↦ x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # A single tuple argument keeps its inner parentheses and trailing comma.
    expr = Lambda(((x, y),), x**2)
    ascii_str = \
"""\
2\n\
((x, y),) -> x \
"""
    ucode_str = \
"""\
2\n\
((x, y),) ↦ x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_TransferFunction():
    """A TransferFunction prints as a numerator/denominator stack joined
    by a unicode fraction bar."""
    first = TransferFunction(s - 1, s + 1, s)
    second = TransferFunction(2*s + 1, 3 - p, s)
    third = TransferFunction(p, p + 1, p)
    assert upretty(first) == "s - 1\n─────\ns + 1"
    assert upretty(second) == "2⋅s + 1\n───────\n 3 - p "
    assert upretty(third) == " p \n─────\np + 1"
def test_pretty_DiscreteTransferFunction():
    """DiscreteTransferFunction appends a ``[st: ...]`` sample-time tag
    after the fraction; the tag shows the default (1), a symbolic, and a
    float sample time respectively.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    tf1 = DiscreteTransferFunction(s - 1, s + 1, s)
    assert upretty(tf1) == \
"""\
s - 1 \n\
───── [st: 1]\n\
s + 1 \
"""
    tf2 = DiscreteTransferFunction(2*s + 1, 3 - p, s, Symbol('T'))
    assert upretty(tf2) == \
"""\
2⋅s + 1 \n\
─────── [st: T]\n\
3 - p \
"""
    tf3 = DiscreteTransferFunction(p, p + 1, p, 0.1)
    assert upretty(tf3) == \
"""\
p \n\
───── [st: 0.100000000000000]\n\
p + 1 \
"""
def test_pretty_Series():
    """Series / MIMOSeries printing: SISO series join factors with ⋅,
    MIMO series print matrices of transfer functions side by side.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    tf1 = TransferFunction(x + y, x - 2*y, y)
    tf2 = TransferFunction(x - y, x + y, y)
    tf3 = TransferFunction(x**2 + y, y - x, y)
    tf4 = TransferFunction(2, 3, y)
    tfm1 = TransferFunctionMatrix([[tf1, tf2], [tf3, tf4]])
    tfm2 = TransferFunctionMatrix([[tf3], [-tf4]])
    tfm3 = TransferFunctionMatrix([[tf1, -tf2, -tf3], [tf3, -tf4, tf2]])
    tfm4 = TransferFunctionMatrix([[tf1, tf2], [tf3, -tf4], [-tf2, -tf1]])
    tfm5 = TransferFunctionMatrix([[-tf2, -tf1], [tf4, -tf3], [tf1, tf2]])
    expected1 = \
"""\
⎛ 2 ⎞\n\
⎛ x + y ⎞ ⎜x + y⎟\n\
⎜───────⎟⋅⎜──────⎟\n\
⎝x - 2⋅y⎠ ⎝-x + y⎠\
"""
    expected2 = \
"""\
⎛-x + y⎞ ⎛-x - y ⎞\n\
⎜──────⎟⋅⎜───────⎟\n\
⎝x + y ⎠ ⎝x - 2⋅y⎠\
"""
    expected3 = \
"""\
⎛ 2 ⎞ \n\
⎜x + y⎟ ⎛ x + y ⎞ ⎛-x - y x - y⎞\n\
⎜──────⎟⋅⎜───────⎟⋅⎜─────── + ─────⎟\n\
⎝-x + y⎠ ⎝x - 2⋅y⎠ ⎝x - 2⋅y x + y⎠\
"""
    expected4 = \
"""\
⎛ 2 ⎞\n\
⎛ x + y x - y⎞ ⎜x - y x + y⎟\n\
⎜─────── + ─────⎟⋅⎜───── + ──────⎟\n\
⎝x - 2⋅y x + y⎠ ⎝x + y -x + y⎠\
"""
    expected5 = \
"""\
⎡ x + y x - y⎤ ⎡ 2 ⎤ \n\
⎢─────── ─────⎥ ⎢x + y⎥ \n\
⎢x - 2⋅y x + y⎥ ⎢──────⎥ \n\
⎢ ⎥ ⎢-x + y⎥ \n\
⎢ 2 ⎥ ⋅⎢ ⎥ \n\
⎢x + y 2 ⎥ ⎢ -2 ⎥ \n\
⎢────── ─ ⎥ ⎢ ─── ⎥ \n\
⎣-x + y 3 ⎦τ ⎣ 3 ⎦τ\
"""
    expected6 = \
"""\
⎛⎡ x + y x - y ⎤ ⎡ x - y x + y ⎤ ⎞\n\
⎜⎢─────── ───── ⎥ ⎢ ───── ───────⎥ ⎟\n\
⎡ x + y x - y⎤ ⎡ 2 ⎤ ⎜⎢x - 2⋅y x + y ⎥ ⎢ x + y x - 2⋅y⎥ ⎟\n\
⎢─────── ─────⎥ ⎢ x + y -x + y - x - y⎥ ⎜⎢ ⎥ ⎢ ⎥ ⎟\n\
⎢x - 2⋅y x + y⎥ ⎢─────── ────── ────────⎥ ⎜⎢ 2 ⎥ ⎢ 2 ⎥ ⎟\n\
⎢ ⎥ ⎢x - 2⋅y x + y -x + y ⎥ ⎜⎢x + y -2 ⎥ ⎢ -2 x + y ⎥ ⎟\n\
⎢ 2 ⎥ ⋅⎢ ⎥ ⋅⎜⎢────── ─── ⎥ + ⎢ ─── ────── ⎥ ⎟\n\
⎢x + y 2 ⎥ ⎢ 2 ⎥ ⎜⎢-x + y 3 ⎥ ⎢ 3 -x + y ⎥ ⎟\n\
⎢────── ─ ⎥ ⎢x + y -2 x - y ⎥ ⎜⎢ ⎥ ⎢ ⎥ ⎟\n\
⎣-x + y 3 ⎦τ ⎢────── ─── ───── ⎥ ⎜⎢-x + y -x - y ⎥ ⎢-x - y -x + y ⎥ ⎟\n\
⎣-x + y 3 x + y ⎦τ ⎜⎢────── ───────⎥ ⎢─────── ────── ⎥ ⎟\n\
⎝⎣x + y x - 2⋅y⎦τ ⎣x - 2⋅y x + y ⎦τ⎠\
"""
    assert upretty(Series(tf1, tf3)) == expected1
    assert upretty(Series(-tf2, -tf1)) == expected2
    assert upretty(Series(tf3, tf1, Parallel(-tf1, tf2))) == expected3
    assert upretty(Series(Parallel(tf1, tf2), Parallel(tf2, tf3))) == expected4
    assert upretty(MIMOSeries(tfm2, tfm1)) == expected5
    assert upretty(MIMOSeries(MIMOParallel(tfm4, -tfm5), tfm3, tfm1)) == expected6
def test_pretty_Parallel():
    """Parallel / MIMOParallel printing: SISO parallel joins terms with +,
    MIMO parallel prints summed transfer-function matrices.

    NOTE(review): expected strings are whitespace-sensitive; verify
    alignment against actual printer output.
    """
    tf1 = TransferFunction(x + y, x - 2*y, y)
    tf2 = TransferFunction(x - y, x + y, y)
    tf3 = TransferFunction(x**2 + y, y - x, y)
    tf4 = TransferFunction(y**2 - x, x**3 + x, y)
    tfm1 = TransferFunctionMatrix([[tf1, tf2], [tf3, -tf4], [-tf2, -tf1]])
    tfm2 = TransferFunctionMatrix([[-tf2, -tf1], [tf4, -tf3], [tf1, tf2]])
    tfm3 = TransferFunctionMatrix([[-tf1, tf2], [-tf3, tf4], [tf2, tf1]])
    tfm4 = TransferFunctionMatrix([[-tf1, -tf2], [-tf3, -tf4]])
    expected1 = \
"""\
x + y x - y\n\
─────── + ─────\n\
x - 2⋅y x + y\
"""
    expected2 = \
"""\
-x + y -x - y \n\
────── + ───────
x + y x - 2⋅y\
"""
    expected3 = \
"""\
2 \n\
x + y x + y ⎛-x - y ⎞ ⎛x - y⎞
────── + ─────── + ⎜───────⎟⋅⎜─────⎟
-x + y x - 2⋅y ⎝x - 2⋅y⎠ ⎝x + y⎠\
"""
    expected4 = \
"""\
⎛ 2 ⎞\n\
⎛ x + y ⎞ ⎛x - y⎞ ⎛x - y⎞ ⎜x + y⎟\n\
⎜───────⎟⋅⎜─────⎟ + ⎜─────⎟⋅⎜──────⎟\n\
⎝x - 2⋅y⎠ ⎝x + y⎠ ⎝x + y⎠ ⎝-x + y⎠\
"""
    expected5 = \
"""\
⎡ x + y -x + y ⎤ ⎡ x - y x + y ⎤ ⎡ x + y x - y ⎤ \n\
⎢─────── ────── ⎥ ⎢ ───── ───────⎥ ⎢─────── ───── ⎥ \n\
⎢x - 2⋅y x + y ⎥ ⎢ x + y x - 2⋅y⎥ ⎢x - 2⋅y x + y ⎥ \n\
⎢ ⎥ ⎢ ⎥ ⎢ ⎥ \n\
⎢ 2 2 ⎥ ⎢ 2 2 ⎥ ⎢ 2 2 ⎥ \n\
⎢x + y x - y ⎥ ⎢x - y x + y ⎥ ⎢x + y x - y ⎥ \n\
⎢────── ────── ⎥ + ⎢────── ────── ⎥ + ⎢────── ────── ⎥ \n\
⎢-x + y 3 ⎥ ⎢ 3 -x + y ⎥ ⎢-x + y 3 ⎥ \n\
⎢ x + x ⎥ ⎢x + x ⎥ ⎢ x + x ⎥ \n\
⎢ ⎥ ⎢ ⎥ ⎢ ⎥ \n\
⎢-x + y -x - y ⎥ ⎢-x - y -x + y ⎥ ⎢-x + y -x - y ⎥ \n\
⎢────── ───────⎥ ⎢─────── ────── ⎥ ⎢────── ───────⎥ \n\
⎣x + y x - 2⋅y⎦τ ⎣x - 2⋅y x + y ⎦τ ⎣x + y x - 2⋅y⎦τ\
"""
    expected6 = \
"""\
⎡ x - y x + y ⎤ ⎡-x + y -x - y ⎤ \n\
⎢ ───── ───────⎥ ⎢────── ─────── ⎥ \n\
⎢ x + y x - 2⋅y⎥ ⎡-x - y -x + y⎤ ⎢x + y x - 2⋅y ⎥ \n\
⎢ ⎥ ⎢─────── ──────⎥ ⎢ ⎥ \n\
⎢ 2 2 ⎥ ⎢x - 2⋅y x + y ⎥ ⎢ 2 2 ⎥ \n\
⎢x - y x + y ⎥ ⎢ ⎥ ⎢-x + y - x - y⎥ \n\
⎢────── ────── ⎥ ⋅⎢ 2 2⎥ + ⎢─────── ────────⎥ \n\
⎢ 3 -x + y ⎥ ⎢- x - y x - y ⎥ ⎢ 3 -x + y ⎥ \n\
⎢x + x ⎥ ⎢──────── ──────⎥ ⎢x + x ⎥ \n\
⎢ ⎥ ⎢ -x + y 3 ⎥ ⎢ ⎥ \n\
⎢-x - y -x + y ⎥ ⎣ x + x⎦τ ⎢ x + y x - y ⎥ \n\
⎢─────── ────── ⎥ ⎢─────── ───── ⎥ \n\
⎣x - 2⋅y x + y ⎦τ ⎣x - 2⋅y x + y ⎦τ\
"""
    assert upretty(Parallel(tf1, tf2)) == expected1
    assert upretty(Parallel(-tf2, -tf1)) == expected2
    assert upretty(Parallel(tf3, tf1, Series(-tf1, tf2))) == expected3
    assert upretty(Parallel(Series(tf1, tf2), Series(tf2, tf3))) == expected4
    assert upretty(MIMOParallel(-tfm3, -tfm2, tfm1)) == expected5
    assert upretty(MIMOParallel(MIMOSeries(tfm4, -tfm2), tfm2)) == expected6
def test_pretty_Feedback():
    """Unicode pretty printing of SISO ``Feedback`` objects.

    The plant appears as the numerator over a ``1/1 +- loop`` denominator;
    the sign in the denominator flips for positive feedback (``sign=1``).
    """
    tf = TransferFunction(1, 1, y)
    tf1 = TransferFunction(x + y, x - 2*y, y)
    tf2 = TransferFunction(x - y, x + y, y)
    tf3 = TransferFunction(y**2 - 2*y + 1, y + 5, y)
    tf4 = TransferFunction(x - 2*y**3, x + y, x)
    tf5 = TransferFunction(1 - x, x - y, y)
    tf6 = TransferFunction(2, 2, x)
    expected1 = \
        """\
     ⎛1⎞     \n\
     ⎜─⎟     \n\
     ⎝1⎠     \n\
─────────────\n\
1   ⎛ x + y ⎞\n\
─ + ⎜───────⎟\n\
1   ⎝x - 2⋅y⎠\
"""
    expected2 = \
        """\
                ⎛1⎞                 \n\
                ⎜─⎟                 \n\
                ⎝1⎠                 \n\
────────────────────────────────────\n\
                      ⎛ 2          ⎞\n\
1   ⎛x - y⎞ ⎛ x + y ⎞ ⎜y  - 2⋅y + 1⎟\n\
─ + ⎜─────⎟⋅⎜───────⎟⋅⎜────────────⎟\n\
1   ⎝x + y⎠ ⎝x - 2⋅y⎠ ⎝   y + 5    ⎠\
"""
    expected3 = \
        """\
                 ⎛ x + y ⎞                  \n\
                 ⎜───────⎟                  \n\
                 ⎝x - 2⋅y⎠                  \n\
────────────────────────────────────────────\n\
                      ⎛ 2          ⎞        \n\
1   ⎛ x + y ⎞ ⎛x - y⎞ ⎜y  - 2⋅y + 1⎟ ⎛1 - x⎞\n\
─ + ⎜───────⎟⋅⎜─────⎟⋅⎜────────────⎟⋅⎜─────⎟\n\
1   ⎝x - 2⋅y⎠ ⎝x + y⎠ ⎝   y + 5    ⎠ ⎝x - y⎠\
"""
    expected4 = \
        """\
  ⎛ x + y ⎞ ⎛x - y⎞  \n\
  ⎜───────⎟⋅⎜─────⎟  \n\
  ⎝x - 2⋅y⎠ ⎝x + y⎠  \n\
─────────────────────\n\
1   ⎛ x + y ⎞ ⎛x - y⎞\n\
─ + ⎜───────⎟⋅⎜─────⎟\n\
1   ⎝x - 2⋅y⎠ ⎝x + y⎠\
"""
    expected5 = \
        """\
      ⎛ x + y ⎞ ⎛x - y⎞      \n\
      ⎜───────⎟⋅⎜─────⎟      \n\
      ⎝x - 2⋅y⎠ ⎝x + y⎠      \n\
─────────────────────────────\n\
1   ⎛ x + y ⎞ ⎛x - y⎞ ⎛1 - x⎞\n\
─ + ⎜───────⎟⋅⎜─────⎟⋅⎜─────⎟\n\
1   ⎝x - 2⋅y⎠ ⎝x + y⎠ ⎝x - y⎠\
"""
    expected6 = \
        """\
           ⎛ 2          ⎞                   \n\
           ⎜y  - 2⋅y + 1⎟ ⎛1 - x⎞           \n\
           ⎜────────────⎟⋅⎜─────⎟           \n\
           ⎝   y + 5    ⎠ ⎝x - y⎠           \n\
────────────────────────────────────────────\n\
    ⎛ 2          ⎞                          \n\
1   ⎜y  - 2⋅y + 1⎟ ⎛1 - x⎞ ⎛x - y⎞ ⎛ x + y ⎞\n\
─ + ⎜────────────⎟⋅⎜─────⎟⋅⎜─────⎟⋅⎜───────⎟\n\
1   ⎝   y + 5    ⎠ ⎝x - y⎠ ⎝x + y⎠ ⎝x - 2⋅y⎠\
"""
    expected7 = \
        """\
    ⎛       3⎞    \n\
    ⎜x - 2⋅y ⎟    \n\
    ⎜────────⎟    \n\
    ⎝ x + y  ⎠    \n\
──────────────────\n\
    ⎛       3⎞    \n\
1   ⎜x - 2⋅y ⎟ ⎛2⎞\n\
─ + ⎜────────⎟⋅⎜─⎟\n\
1   ⎝ x + y  ⎠ ⎝2⎠\
"""
    expected8 = \
        """\
  ⎛1 - x⎞  \n\
  ⎜─────⎟  \n\
  ⎝x - y⎠  \n\
───────────\n\
1   ⎛1 - x⎞\n\
─ + ⎜─────⎟\n\
1   ⎝x - y⎠\
"""
    expected9 = \
        """\
      ⎛ x + y ⎞ ⎛x - y⎞      \n\
      ⎜───────⎟⋅⎜─────⎟      \n\
      ⎝x - 2⋅y⎠ ⎝x + y⎠      \n\
─────────────────────────────\n\
1   ⎛ x + y ⎞ ⎛x - y⎞ ⎛1 - x⎞\n\
─ - ⎜───────⎟⋅⎜─────⎟⋅⎜─────⎟\n\
1   ⎝x - 2⋅y⎠ ⎝x + y⎠ ⎝x - y⎠\
"""
    expected10 = \
        """\
  ⎛1 - x⎞  \n\
  ⎜─────⎟  \n\
  ⎝x - y⎠  \n\
───────────\n\
1   ⎛1 - x⎞\n\
─ - ⎜─────⎟\n\
1   ⎝x - y⎠\
"""
    assert upretty(Feedback(tf, tf1)) == expected1
    assert upretty(Feedback(tf, tf2*tf1*tf3)) == expected2
    assert upretty(Feedback(tf1, tf2*tf3*tf5)) == expected3
    assert upretty(Feedback(tf1*tf2, tf)) == expected4
    assert upretty(Feedback(tf1*tf2, tf5)) == expected5
    assert upretty(Feedback(tf3*tf5, tf2*tf1)) == expected6
    assert upretty(Feedback(tf4, tf6)) == expected7
    assert upretty(Feedback(tf5, tf)) == expected8
    # sign=1 selects positive feedback: the denominator uses '-'.
    assert upretty(Feedback(tf1*tf2, tf5, 1)) == expected9
    assert upretty(Feedback(tf5, tf, 1)) == expected10
def test_pretty_MIMOFeedback():
    """Unicode pretty printing of ``MIMOFeedback``.

    Prints as ``(I -+ G*H)^-1 * G`` with the sign determined by the
    feedback sign argument (positive vs. the default negative feedback).
    """
    tf1 = TransferFunction(x + y, x - 2*y, y)
    tf2 = TransferFunction(x - y, x + y, y)
    tfm_1 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
    tfm_2 = TransferFunctionMatrix([[tf2, tf1], [tf1, tf2]])
    tfm_3 = TransferFunctionMatrix([[tf1, tf1], [tf2, tf2]])
    expected1 = \
        """\
⎛    ⎡ x + y    x - y  ⎤  ⎡ x - y    x + y  ⎤ ⎞-1   ⎡ x + y    x - y  ⎤ \n\
⎜    ⎢───────   ─────  ⎥  ⎢ ─────   ───────⎥ ⎟     ⎢───────   ─────  ⎥ \n\
⎜    ⎢x - 2⋅y   x + y  ⎥  ⎢ x + y   x - 2⋅y⎥ ⎟     ⎢x - 2⋅y   x + y  ⎥ \n\
⎜I - ⎢                 ⎥ ⋅⎢                 ⎥ ⎟   ⋅ ⎢                 ⎥ \n\
⎜    ⎢ x - y    x + y  ⎥  ⎢ x + y    x - y  ⎥ ⎟     ⎢ x - y    x + y  ⎥ \n\
⎜    ⎢ ─────   ───────⎥  ⎢───────   ─────  ⎥ ⎟     ⎢ ─────   ───────⎥ \n\
⎝    ⎣ x + y   x - 2⋅y⎦τ ⎣x - 2⋅y   x + y  ⎦τ⎠     ⎣ x + y   x - 2⋅y⎦τ\
"""
    expected2 = \
        """\
⎛    ⎡ x + y    x - y  ⎤  ⎡ x - y    x + y  ⎤  ⎡ x + y    x + y  ⎤ ⎞-1   ⎡ x + y    x - y  ⎤  ⎡ x - y    x + y  ⎤ \n\
⎜    ⎢───────   ─────  ⎥  ⎢ ─────   ───────⎥  ⎢───────  ───────⎥ ⎟     ⎢───────   ─────  ⎥  ⎢ ─────   ───────⎥ \n\
⎜    ⎢x - 2⋅y   x + y  ⎥  ⎢ x + y   x - 2⋅y⎥  ⎢x - 2⋅y  x - 2⋅y⎥ ⎟     ⎢x - 2⋅y   x + y  ⎥  ⎢ x + y   x - 2⋅y⎥ \n\
⎜I + ⎢                 ⎥ ⋅⎢                 ⎥ ⋅⎢                 ⎥ ⎟   ⋅ ⎢                 ⎥ ⋅⎢                 ⎥ \n\
⎜    ⎢ x - y    x + y  ⎥  ⎢ x + y    x - y  ⎥  ⎢ x - y    x - y  ⎥ ⎟     ⎢ x - y    x + y  ⎥  ⎢ x + y    x - y  ⎥ \n\
⎜    ⎢ ─────   ───────⎥  ⎢───────   ─────  ⎥  ⎢ ─────    ─────  ⎥ ⎟     ⎢ ─────   ───────⎥  ⎢───────   ─────  ⎥ \n\
⎝    ⎣ x + y   x - 2⋅y⎦τ ⎣x - 2⋅y   x + y  ⎦τ ⎣ x + y    x + y  ⎦τ⎠     ⎣ x + y   x - 2⋅y⎦τ ⎣x - 2⋅y   x + y  ⎦τ\
"""
    assert upretty(MIMOFeedback(tfm_1, tfm_2, 1)) == \
        expected1  # Positive MIMOFeedback
    assert upretty(MIMOFeedback(tfm_1*tfm_2, tfm_3)) == \
        expected2  # Negative MIMOFeedback (Default)
def test_pretty_TransferFunctionMatrix():
    """Unicode pretty printing of ``TransferFunctionMatrix``.

    Continuous-time matrices carry a tau (τ) subscript on the closing
    bracket; ``DiscreteTransferFunction`` entries instead produce a ``{k}``
    subscript followed by a ``[st: ...]`` sample-time annotation line.
    """
    tf1 = TransferFunction(x + y, x - 2*y, y)
    tf2 = TransferFunction(x - y, x + y, y)
    tf3 = TransferFunction(y**2 - 2*y + 1, y + 5, y)
    tf4 = TransferFunction(y, x**2 + x + 1, y)
    tf5 = TransferFunction(1 - x, x - y, y)
    tf6 = TransferFunction(2, 2, y)
    expected1 = \
        """\
⎡ x + y ⎤ \n\
⎢───────⎥ \n\
⎢x - 2⋅y⎥ \n\
⎢       ⎥ \n\
⎢ x - y ⎥ \n\
⎢ ───── ⎥ \n\
⎣ x + y ⎦τ\
"""
    expected2 = \
        """\
⎡    x + y     ⎤ \n\
⎢   ───────    ⎥ \n\
⎢   x - 2⋅y    ⎥ \n\
⎢              ⎥ \n\
⎢    x - y     ⎥ \n\
⎢    ─────     ⎥ \n\
⎢    x + y     ⎥ \n\
⎢              ⎥ \n\
⎢   2          ⎥ \n\
⎢- y  + 2⋅y - 1⎥ \n\
⎢──────────────⎥ \n\
⎣    y + 5     ⎦τ\
"""
    expected3 = \
        """\
⎡   x + y        x - y   ⎤ \n\
⎢  ───────       ─────   ⎥ \n\
⎢  x - 2⋅y       x + y   ⎥ \n\
⎢                        ⎥ \n\
⎢ 2                      ⎥ \n\
⎢y  - 2⋅y + 1      y     ⎥ \n\
⎢────────────  ──────────⎥ \n\
⎢   y + 5       2        ⎥ \n\
⎢              x  + x + 1⎥ \n\
⎢                        ⎥ \n\
⎢   1 - x          2     ⎥ \n\
⎢   ─────          ─     ⎥ \n\
⎣   x - y          2     ⎦τ\
"""
    expected4 = \
        """\
⎡    x - y        x + y       y     ⎤ \n\
⎢    ─────       ───────  ──────────⎥ \n\
⎢    x + y       x - 2⋅y   2        ⎥ \n\
⎢                         x  + x + 1⎥ \n\
⎢                                   ⎥ \n\
⎢   2                               ⎥ \n\
⎢- y  + 2⋅y - 1   x - 1      -2     ⎥ \n\
⎢──────────────   ─────      ───    ⎥ \n\
⎣    y + 5        x - y       2     ⎦τ\
"""
    expected5 = \
        """\
⎡ x + y  x - y   x + y       y     ⎤ \n\
⎢───────⋅─────  ───────  ──────────⎥ \n\
⎢x - 2⋅y x + y  x - 2⋅y   2        ⎥ \n\
⎢                        x  + x + 1⎥ \n\
⎢                                  ⎥ \n\
⎢  1 - x   2     x + y      -2     ⎥ \n\
⎢  ───── + ─    ───────     ───    ⎥ \n\
⎣  x - y   2    x - 2⋅y      2     ⎦τ\
"""
    assert upretty(TransferFunctionMatrix([[tf1], [tf2]])) == expected1
    assert upretty(TransferFunctionMatrix([[tf1], [tf2], [-tf3]])) == expected2
    assert upretty(TransferFunctionMatrix([[tf1, tf2], [tf3, tf4], [tf5, tf6]])) == expected3
    assert upretty(TransferFunctionMatrix([[tf2, tf1, tf4], [-tf3, -tf5, -tf6]])) == expected4
    assert upretty(TransferFunctionMatrix([[Series(tf2, tf1), tf1, tf4], [Parallel(tf6, tf5), tf1, -tf6]])) == \
        expected5
    dtf1 = DiscreteTransferFunction(x + y, x - 2*y, y, 0.1)
    dtf2 = DiscreteTransferFunction(x - y, x + y, y, 0.1)
    expected6 = \
        """\
⎡ x + y ⎤    \n\
⎢───────⎥    \n\
⎢x - 2⋅y⎥    \n\
⎢       ⎥    \n\
⎢ x - y ⎥    \n\
⎢ ───── ⎥    \n\
⎣ x + y ⎦{k} \n\
[st: 0.100000000000000]\
"""
    assert upretty(TransferFunctionMatrix([[dtf1], [dtf2]])) == expected6
def test_pretty_StateSpace():
    """Unicode pretty printing of ``StateSpace``.

    The A, B, C, D matrices are laid out as a 2x2 block matrix
    ``[[A, B], [C, D]]``.
    """
    ss1 = StateSpace(Matrix([a]), Matrix([b]), Matrix([c]), Matrix([d]))
    A = Matrix([[0, 1], [1, 0]])
    B = Matrix([1, 0])
    C = Matrix([[0, 1]])
    D = Matrix([0])
    ss2 = StateSpace(A, B, C, D)
    ss3 = StateSpace(Matrix([[-1.5, -2], [1, 0]]),
                     Matrix([[0.5, 0], [0, 1]]),
                     Matrix([[0, 1], [0, 2]]),
                     Matrix([[2, 2], [1, 1]]))
    expected1 = \
        """\
⎡[a]  [b]⎤\n\
⎢        ⎥\n\
⎣[c]  [d]⎦\
"""
    expected2 = \
        """\
⎡⎡0  1⎤  ⎡1⎤⎤\n\
⎢⎢    ⎥  ⎢ ⎥⎥\n\
⎢⎣1  0⎦  ⎣0⎦⎥\n\
⎢           ⎥\n\
⎣[0  1]  [0]⎦\
"""
    expected3 = \
        """\
⎡⎡-1.5  -2⎤  ⎡0.5  0⎤⎤\n\
⎢⎢        ⎥  ⎢      ⎥⎥\n\
⎢⎣ 1    0 ⎦  ⎣ 0   1⎦⎥\n\
⎢                    ⎥\n\
⎢ ⎡0  1⎤     ⎡2  2⎤  ⎥\n\
⎢ ⎢    ⎥     ⎢    ⎥  ⎥\n\
⎣ ⎣0  2⎦     ⎣1  1⎦  ⎦\
"""
    assert upretty(ss1) == expected1
    assert upretty(ss2) == expected2
    assert upretty(ss3) == expected3
def test_pretty_DiscreteStateSpace():
    """Unicode pretty printing of ``DiscreteStateSpace``.

    Same block-matrix layout as ``StateSpace`` plus a trailing
    ``[st: <sample time>]`` annotation (default sample time is 1).
    """
    ss1 = DiscreteStateSpace(Matrix([a]), Matrix([b]), Matrix([c]), Matrix([d]))
    A = Matrix([[0, 1], [1, 0]])
    B = Matrix([1, 0])
    C = Matrix([[0, 1]])
    D = Matrix([0])
    ss2 = DiscreteStateSpace(A, B, C, D, Symbol('T'))
    ss3 = DiscreteStateSpace(Matrix([[-1.5, -2], [1, 0]]),
                     Matrix([[0.5, 0], [0, 1]]),
                     Matrix([[0, 1], [0, 2]]),
                     Matrix([[2, 2], [1, 1]]), 0.1)
    expected1 = \
        """\
⎡[a]  [b]⎤\n\
⎢        ⎥\n\
⎣[c]  [d]⎦\n\
         \n\
 [st: 1] \
"""
    expected2 = \
        """\
⎡⎡0  1⎤  ⎡1⎤⎤\n\
⎢⎢    ⎥  ⎢ ⎥⎥\n\
⎢⎣1  0⎦  ⎣0⎦⎥\n\
⎢           ⎥\n\
⎣[0  1]  [0]⎦\n\
            \n\
  [st: T]   \
"""
    expected3 = \
        """\
⎡⎡-1.5  -2⎤  ⎡0.5  0⎤⎤ \n\
⎢⎢        ⎥  ⎢      ⎥⎥ \n\
⎢⎣ 1    0 ⎦  ⎣ 0   1⎦⎥ \n\
⎢                    ⎥ \n\
⎢ ⎡0  1⎤     ⎡2  2⎤  ⎥ \n\
⎢ ⎢    ⎥     ⎢    ⎥  ⎥ \n\
⎣ ⎣0  2⎦     ⎣1  1⎦  ⎦ \n\
                       \n\
[st: 0.100000000000000]\
"""
    assert upretty(ss1) == expected1
    assert upretty(ss2) == expected2
    assert upretty(ss3) == expected3
def test_pretty_order():
    """ASCII and unicode pretty printing of big-O ``Order`` terms.

    The limit point is shown only when it is not the default (0 for a
    single variable); multivariate orders always print their point.
    """
    expr = O(1)
    ascii_str = \
        """\
O(1)\
"""
    ucode_str = \
        """\
O(1)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(1/x)
    ascii_str = \
        """\
 /1\\\n\
O|-|\n\
 \\x/\
"""
    ucode_str = \
        """\
 ⎛1⎞\n\
O⎜─⎟\n\
 ⎝x⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(x**2 + y**2)
    ascii_str = \
        """\
 / 2    2                  \\\n\
O\\x  + y ; (x, y) -> (0, 0)/\
"""
    ucode_str = \
        """\
 ⎛ 2    2                 ⎞\n\
O⎝x  + y ; (x, y) → (0, 0)⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(1, (x, oo))
    ascii_str = \
        """\
O(1; x -> oo)\
"""
    ucode_str = \
        """\
O(1; x → ∞)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(1/x, (x, oo))
    ascii_str = \
        """\
 /1         \\\n\
O|-; x -> oo|\n\
 \\x         /\
"""
    ucode_str = \
        """\
 ⎛1       ⎞\n\
O⎜─; x → ∞⎟\n\
 ⎝x       ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(x**2 + y**2, (x, oo), (y, oo))
    ascii_str = \
        """\
 / 2    2                    \\\n\
O\\x  + y ; (x, y) -> (oo, oo)/\
"""
    ucode_str = \
        """\
 ⎛ 2    2                 ⎞\n\
O⎝x  + y ; (x, y) → (∞, ∞)⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_derivatives():
    """ASCII and unicode pretty printing of ``Derivative``.

    Covers single and mixed partials (d vs. ∂ in unicode), repeated
    differentiation with numeric and symbolic orders, Greek-letter
    symbols, and derivatives embedded in sums (where the printer may
    order terms either way, hence the ``in [str_1, str_2]`` checks).
    """
    # Simple
    expr = Derivative(log(x), x, evaluate=False)
    ascii_str = \
        """\
d         \n\
--(log(x))\n\
dx        \
"""
    ucode_str = \
        """\
d         \n\
──(log(x))\n\
dx        \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Derivative(log(x), x, evaluate=False) + x
    ascii_str_1 = \
        """\
    d         \n\
x + --(log(x))\n\
    dx        \
"""
    ascii_str_2 = \
        """\
d             \n\
--(log(x)) + x\n\
dx            \
"""
    ucode_str_1 = \
        """\
    d         \n\
x + ──(log(x))\n\
    dx        \
"""
    ucode_str_2 = \
        """\
d             \n\
──(log(x)) + x\n\
dx            \
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    # basic partial derivatives
    expr = Derivative(log(x + y) + x, x)
    ascii_str_1 = \
        """\
d                 \n\
--(log(x + y) + x)\n\
dx                \
"""
    ascii_str_2 = \
        """\
d                 \n\
--(x + log(x + y))\n\
dx                \
"""
    ucode_str_1 = \
        """\
∂                 \n\
──(log(x + y) + x)\n\
∂x                \
"""
    ucode_str_2 = \
        """\
∂                 \n\
──(x + log(x + y))\n\
∂x                \
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2], upretty(expr)
    # Multiple symbols
    expr = Derivative(log(x) + x**2, x, y)
    ascii_str_1 = \
        """\
   2              \n\
  d  /          2\\\n\
-----\\log(x) + x /\n\
dy dx             \
"""
    ascii_str_2 = \
        """\
   2              \n\
  d  / 2         \\\n\
-----\\x  + log(x)/\n\
dy dx             \
"""
    ascii_str_3 = \
        """\
   2              \n\
  d  / 2         \\\n\
-----\\x  + log(x)/\n\
dy dx             \
"""
    ucode_str_1 = \
        """\
   2              \n\
  d  ⎛          2⎞\n\
─────⎝log(x) + x ⎠\n\
dy dx             \
"""
    ucode_str_2 = \
        """\
   2              \n\
  d  ⎛ 2         ⎞\n\
─────⎝x  + log(x)⎠\n\
dy dx             \
"""
    ucode_str_3 = \
        """\
   2              \n\
  d  ⎛ 2         ⎞\n\
─────⎝x  + log(x)⎠\n\
dy dx             \
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3]
    assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3]
    expr = Derivative(2*x*y, y, x) + x**2
    ascii_str_1 = \
        """\
   2             \n\
  d             2\n\
-----(2*x*y) + x \n\
dx dy            \
"""
    ascii_str_2 = \
        """\
        2        \n\
 2     d         \n\
x  + -----(2*x*y)\n\
     dx dy       \
"""
    ascii_str_3 = \
        """\
        2        \n\
 2     d         \n\
x  + -----(2*x*y)\n\
     dx dy       \
"""
    ucode_str_1 = \
        """\
   2             \n\
  ∂             2\n\
─────(2⋅x⋅y) + x \n\
∂x ∂y            \
"""
    ucode_str_2 = \
        """\
        2        \n\
 2     ∂         \n\
x  + ─────(2⋅x⋅y)\n\
     ∂x ∂y       \
"""
    ucode_str_3 = \
        """\
        2        \n\
 2     ∂         \n\
x  + ─────(2⋅x⋅y)\n\
     ∂x ∂y       \
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3]
    assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3]
    expr = Derivative(2*x*y, x, x)
    ascii_str = \
        """\
  2       \n\
 d        \n\
---(2*x*y)\n\
  2       \n\
dx        \
"""
    ucode_str = \
        """\
  2       \n\
 ∂        \n\
───(2⋅x⋅y)\n\
  2       \n\
∂x        \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Derivative(2*x*y, x, 17)
    ascii_str = \
        """\
  17        \n\
 d          \n\
----(2*x*y)\n\
  17        \n\
dx          \
"""
    ucode_str = \
        """\
  17        \n\
 ∂          \n\
────(2⋅x⋅y)\n\
  17        \n\
∂x          \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Derivative(2*x*y, x, x, y)
    ascii_str = \
        """\
   3         \n\
  d          \n\
------(2*x*y)\n\
     2       \n\
dy dx        \
"""
    ucode_str = \
        """\
   3         \n\
  ∂          \n\
──────(2⋅x⋅y)\n\
     2       \n\
∂y ∂x        \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Greek letters
    alpha = Symbol('alpha')
    beta = Function('beta')
    expr = beta(alpha).diff(alpha)
    ascii_str = \
        """\
  d                \n\
------(beta(alpha))\n\
dalpha             \
"""
    ucode_str = \
        """\
d       \n\
──(β(α))\n\
dα      \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Derivative(f(x), (x, n))
    ascii_str = \
        """\
  n      \n\
 d       \n\
---(f(x))\n\
  n      \n\
dx       \
"""
    ucode_str = \
        """\
  n      \n\
 d       \n\
───(f(x))\n\
  n      \n\
dx       \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_integrals():
    """ASCII and unicode pretty printing of ``Integral``.

    Covers indefinite, definite (with numeric and rational bounds),
    nested multiple integrals, and Greek-symbol integration variables.
    """
    expr = Integral(log(x), x)
    ascii_str = \
        """\
  /         \n\
 |          \n\
 | log(x) dx\n\
 |          \n\
/           \
"""
    ucode_str = \
        """\
⌠          \n\
⎮ log(x) dx\n\
⌡          \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Integral(x**2, x)
    ascii_str = \
        """\
  /     \n\
 |      \n\
 |  2   \n\
 | x  dx\n\
 |      \n\
/       \
"""
    ucode_str = \
        """\
⌠      \n\
⎮  2   \n\
⎮ x  dx\n\
⌡      \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Integral((sin(x))**2 / (tan(x))**2)
    ascii_str = \
        """\
  /          \n\
 |           \n\
 |    2      \n\
 | sin (x)   \n\
 | ------- dx\n\
 |    2      \n\
 | tan (x)   \n\
 |           \n\
/            \
"""
    ucode_str = \
        """\
⌠           \n\
⎮    2      \n\
⎮ sin (x)   \n\
⎮ ─────── dx\n\
⎮    2      \n\
⎮ tan (x)   \n\
⌡           \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Integral(x**(2**x), x)
    ascii_str = \
        """\
  /        \n\
 |         \n\
 |  / x\\   \n\
 |  \\2 /   \n\
 | x     dx\n\
 |         \n\
/          \
"""
    ucode_str = \
        """\
⌠         \n\
⎮  ⎛ x⎞   \n\
⎮  ⎝2 ⎠   \n\
⎮ x     dx\n\
⌡         \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Integral(x**2, (x, 1, 2))
    ascii_str = \
        """\
  2      \n\
  /      \n\
 |       \n\
 |   2   \n\
 |  x  dx\n\
 |       \n\
/        \n\
1        \
"""
    ucode_str = \
        """\
2      \n\
⌠      \n\
⎮  2   \n\
⎮ x  dx\n\
⌡      \n\
1      \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Integral(x**2, (x, Rational(1, 2), 10))
    ascii_str = \
        """\
 10      \n\
  /      \n\
 |       \n\
 |   2   \n\
 |  x  dx\n\
 |       \n\
/        \n\
1/2      \
"""
    ucode_str = \
        """\
 10      \n\
 ⌠      \n\
 ⎮   2   \n\
 ⎮  x  dx\n\
 ⌡      \n\
1/2      \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Integral(x**2*y**2, x, y)
    ascii_str = \
        """\
  /  /           \n\
 |  |            \n\
 |  |  2  2      \n\
 |  | x *y  dx dy\n\
 |  |            \n\
/  /             \
"""
    ucode_str = \
        """\
⌠ ⌠            \n\
⎮ ⎮  2  2      \n\
⎮ ⎮ x ⋅y  dx dy\n\
⌡ ⌡            \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Integral(sin(th)/cos(ph), (th, 0, pi), (ph, 0, 2*pi))
    ascii_str = \
        """\
 2*pi pi                           \n\
   /   /                           \n\
  |   |                            \n\
  |   |  sin(theta)                \n\
  |   |  ---------- d(theta) d(phi)\n\
  |   |   cos(phi)                 \n\
  |   |                            \n\
 /   /                             \n\
 0   0                             \
"""
    ucode_str = \
        """\
2⋅π π             \n\
 ⌠  ⌠             \n\
 ⎮  ⎮ sin(θ)      \n\
 ⎮  ⎮ ────── dθ dφ\n\
 ⎮  ⎮ cos(φ)      \n\
 ⌡  ⌡             \n\
 0  0             \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_matrix():
    """ASCII and unicode pretty printing of dense ``Matrix`` objects.

    Includes degenerate empty shapes (0x0, 2x0, 0x2 — all print ``[]``),
    symbolic entries (term order may vary, hence the ``in [...]`` checks),
    and symbols with combining characters (``vdot`` → v with an overdot).
    """
    # Empty Matrix
    expr = Matrix()
    ascii_str = "[]"
    unicode_str = "[]"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    expr = Matrix(2, 0, lambda i, j: 0)
    ascii_str = "[]"
    unicode_str = "[]"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    expr = Matrix(0, 2, lambda i, j: 0)
    ascii_str = "[]"
    unicode_str = "[]"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    expr = Matrix([[x**2 + 1, 1], [y, x + y]])
    ascii_str_1 = \
        """\
[     2       ]
[1 + x       1]
[             ]
[     y  x + y]\
"""
    ascii_str_2 = \
        """\
[ 2           ]
[x  + 1      1]
[             ]
[     y  x + y]\
"""
    ucode_str_1 = \
        """\
⎡     2       ⎤
⎢1 + x       1⎥
⎢             ⎥
⎣     y  x + y⎦\
"""
    ucode_str_2 = \
        """\
⎡ 2           ⎤
⎢x  + 1      1⎥
⎢             ⎥
⎣     y  x + y⎦\
"""
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = Matrix([[x/y, y, th], [0, exp(I*k*ph), 1]])
    ascii_str = \
        """\
[x                 ]
[-     y      theta]
[y                 ]
[                  ]
[    I*k*phi       ]
[0  e             1]\
"""
    ucode_str = \
        """\
⎡x           ⎤
⎢─    y     θ⎥
⎢y           ⎥
⎢            ⎥
⎢    ⅈ⋅k⋅φ   ⎥
⎣0  ℯ       1⎦\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    unicode_str = \
        """\
⎡v̇_msc_00     0         0    ⎤
⎢                            ⎥
⎢   0      v̇_msc_01     0    ⎥
⎢                            ⎥
⎣   0         0      v̇_msc_02⎦\
"""
    expr = diag(*MatrixSymbol('vdot_msc',1,3))
    assert upretty(expr) == unicode_str
def test_pretty_ndim_arrays():
    """ASCII and unicode pretty printing of N-dimensional arrays.

    Exercised over all four array classes (dense/sparse x mutable/
    immutable): scalars, 1-D and 2-D arrays, tensor products (rank-3/4,
    printed as nested bracketed blocks), and row/column shapes.
    """
    x, y, z, w = symbols("x y z w")
    for ArrayType in (ImmutableDenseNDimArray, ImmutableSparseNDimArray, MutableDenseNDimArray, MutableSparseNDimArray):
        # Basic: scalar array
        M = ArrayType(x)
        assert pretty(M) == "x"
        assert upretty(M) == "x"
        M = ArrayType([[1/x, y], [z, w]])
        M1 = ArrayType([1/x, y, z])
        M2 = tensorproduct(M1, M)
        M3 = tensorproduct(M, M)
        ascii_str = \
            """\
[1   ]\n\
[-  y]\n\
[x   ]\n\
[    ]\n\
[z  w]\
"""
        ucode_str = \
            """\
⎡1   ⎤\n\
⎢─  y⎥\n\
⎢x   ⎥\n\
⎢    ⎥\n\
⎣z  w⎦\
"""
        assert pretty(M) == ascii_str
        assert upretty(M) == ucode_str
        ascii_str = \
            """\
[1      ]\n\
[-  y  z]\n\
[x      ]\
"""
        ucode_str = \
            """\
⎡1      ⎤\n\
⎢─  y  z⎥\n\
⎣x      ⎦\
"""
        assert pretty(M1) == ascii_str
        assert upretty(M1) == ucode_str
        ascii_str = \
            """\
[[1   y]                       ]\n\
[[--  -]   [z      ]           ]\n\
[[ 2  x]   [ y   2 ]   [-  y*z]]\n\
[[x    ]   [ -  y  ]   [x     ]]\n\
[[     ]   [ x     ]   [      ]]\n\
[[z   w]   [       ]   [ 2    ]]\n\
[[-   -]   [y*z  w*y]   [z   w*z]]\n\
[[x   x]                       ]\
"""
        ucode_str = \
            """\
⎡⎡1   y⎤                       ⎤\n\
⎢⎢──  ─⎥   ⎡z      ⎤           ⎥\n\
⎢⎢ 2  x⎥   ⎡ y   2 ⎤   ⎢─  y⋅z⎥⎥\n\
⎢⎢x    ⎥   ⎢ ─  y  ⎥   ⎢x     ⎥⎥\n\
⎢⎢     ⎥   ⎢ x     ⎥   ⎢      ⎥⎥\n\
⎢⎢z   w⎥   ⎢       ⎥   ⎢ 2    ⎥⎥\n\
⎢⎢─   ─⎥   ⎣y⋅z  w⋅y⎦   ⎣z   w⋅z⎦⎥\n\
⎣⎣x   x⎦                       ⎦\
"""
        assert pretty(M2) == ascii_str
        assert upretty(M2) == ucode_str
        ascii_str = \
            """\
[ [1   y]             ]\n\
[ [--  -]             ]\n\
[ [ 2  x]   [ y   2 ]]\n\
[ [x    ]   [ -  y  ]]\n\
[ [     ]   [ x     ]]\n\
[ [z   w]   [       ]]\n\
[ [-   -]   [y*z  w*y]]\n\
[ [x   x]             ]\n\
[                     ]\n\
[[z      ]  [ w      ]]\n\
[[-  y*z]  [ -  w*y]]\n\
[[x     ]  [ x     ]]\n\
[[      ]  [       ]]\n\
[[ 2    ]  [ 2     ]]\n\
[[z   w*z]  [w*z  w  ]]\
"""
        ucode_str = \
            """\
⎡ ⎡1   y⎤             ⎤\n\
⎢ ⎢──  ─⎥             ⎥\n\
⎢ ⎢ 2  x⎥   ⎡ y   2 ⎤⎥\n\
⎢ ⎢x    ⎥   ⎢ ─  y  ⎥⎥\n\
⎢ ⎢     ⎥   ⎢ x     ⎥⎥\n\
⎢ ⎢z   w⎥   ⎢       ⎥⎥\n\
⎢ ⎢─   ─⎥   ⎣y⋅z  w⋅y⎦⎥\n\
⎢ ⎣x   x⎦             ⎥\n\
⎢                     ⎥\n\
⎢⎡z      ⎤  ⎡ w      ⎤⎥\n\
⎢⎢─  y⋅z⎥  ⎢ ─  w⋅y⎥⎥\n\
⎢⎢x     ⎥  ⎢ x     ⎥⎥\n\
⎢⎢      ⎥  ⎢       ⎥⎥\n\
⎢⎢ 2    ⎥  ⎢ 2     ⎥⎥\n\
⎣⎣z   w⋅z⎦  ⎣w⋅z  w  ⎦⎦\
"""
        assert pretty(M3) == ascii_str
        assert upretty(M3) == ucode_str
        Mrow = ArrayType([[x, y, 1 / z]])
        Mcolumn = ArrayType([[x], [y], [1 / z]])
        Mcol2 = ArrayType([Mcolumn.tolist()])
        ascii_str = \
            """\
[[      1]]\n\
[[x  y  -]]\n\
[[      z]]\
"""
        ucode_str = \
            """\
⎡⎡      1⎤⎤\n\
⎢⎢x  y  ─⎥⎥\n\
⎣⎣      z⎦⎦\
"""
        assert pretty(Mrow) == ascii_str
        assert upretty(Mrow) == ucode_str
        ascii_str = \
            """\
[x]\n\
[ ]\n\
[y]\n\
[ ]\n\
[1]\n\
[-]\n\
[z]\
"""
        ucode_str = \
            """\
⎡x⎤\n\
⎢ ⎥\n\
⎢y⎥\n\
⎢ ⎥\n\
⎢1⎥\n\
⎢─⎥\n\
⎣z⎦\
"""
        assert pretty(Mcolumn) == ascii_str
        assert upretty(Mcolumn) == ucode_str
        ascii_str = \
            """\
[[x]]\n\
[[ ]]\n\
[[y]]\n\
[[ ]]\n\
[[1]]\n\
[[-]]\n\
[[z]]\
"""
        ucode_str = \
            """\
⎡⎡x⎤⎤\n\
⎢⎢ ⎥⎥\n\
⎢⎢y⎥⎥\n\
⎢⎢ ⎥⎥\n\
⎢⎢1⎥⎥\n\
⎢⎢─⎥⎥\n\
⎣⎣z⎦⎦\
"""
        assert pretty(Mcol2) == ascii_str
        assert upretty(Mcol2) == ucode_str
def test_tensor_TensorProduct():
    """Matrix ``TensorProduct`` joins its factors with U+2297 (⊗) in unicode."""
    A, B = (MatrixSymbol(name, 3, 3) for name in ("A", "B"))
    # Two and three factors, including a repeated one.
    assert upretty(TensorProduct(A, B)) == "A\u2297B"
    assert upretty(TensorProduct(A, B, A)) == "A\u2297B\u2297A"
def test_diffgeom_print_WedgeProduct():
    """``WedgeProduct`` prints with ∧ in unicode and ``/\\`` in ascii."""
    from sympy.diffgeom.rn import R2
    from sympy.diffgeom import WedgeProduct
    form = WedgeProduct(R2.dx, R2.dy)
    assert upretty(form) == "ⅆ x∧ⅆ y"
    assert pretty(form) == r"d x/\d y"
def test_Adjoint():
    """ASCII ('+') and unicode ('†') pretty printing of ``Adjoint``.

    Covers precedence (sums/products get parenthesized, symbols do not),
    interaction with ``Inverse``/``Transpose``/powers, explicit matrices,
    and block matrices with ``OneMatrix``/``ZeroMatrix`` entries.
    """
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert pretty(Adjoint(X)) == " +\nX "
    assert pretty(Adjoint(X + Y)) == "        +\n(X + Y) "
    assert pretty(Adjoint(X) + Adjoint(Y)) == " +    +\nX  + Y "
    assert pretty(Adjoint(X*Y)) == "      +\n(X*Y) "
    assert pretty(Adjoint(Y)*Adjoint(X)) == " +  +\nY *X "
    assert pretty(Adjoint(X**2)) == "    +\n/ 2\\ \n\\X / "
    assert pretty(Adjoint(X)**2) == "    2\n/ +\\ \n\\X / "
    assert pretty(Adjoint(Inverse(X))) == "     +\n/ -1\\ \n\\X  / "
    assert pretty(Inverse(Adjoint(X))) == "    -1\n/ +\\  \n\\X /  "
    assert pretty(Adjoint(Transpose(X))) == "    +\n/ T\\ \n\\X / "
    assert pretty(Transpose(Adjoint(X))) == "    T\n/ +\\ \n\\X / "
    assert upretty(Adjoint(X)) == " †\nX "
    assert upretty(Adjoint(X + Y)) == "        †\n(X + Y) "
    assert upretty(Adjoint(X) + Adjoint(Y)) == " †    †\nX  + Y "
    assert upretty(Adjoint(X*Y)) == "      †\n(X⋅Y) "
    assert upretty(Adjoint(Y)*Adjoint(X)) == " †  †\nY ⋅X "
    assert upretty(Adjoint(X**2)) == \
        "    †\n⎛ 2⎞ \n⎝X ⎠ "
    assert upretty(Adjoint(X)**2) == \
        "    2\n⎛ †⎞ \n⎝X ⎠ "
    assert upretty(Adjoint(Inverse(X))) == \
        "     †\n⎛ -1⎞ \n⎝X  ⎠ "
    assert upretty(Inverse(Adjoint(X))) == \
        "    -1\n⎛ †⎞  \n⎝X ⎠  "
    assert upretty(Adjoint(Transpose(X))) == \
        "    †\n⎛ T⎞ \n⎝X ⎠ "
    assert upretty(Transpose(Adjoint(X))) == \
        "    T\n⎛ †⎞ \n⎝X ⎠ "
    m = Matrix(((1, 2), (3, 4)))
    assert upretty(Adjoint(m)) == \
        '      †\n'\
        '⎡1  2⎤ \n'\
        '⎢    ⎥ \n'\
        '⎣3  4⎦ '
    assert upretty(Adjoint(m+X)) == \
        '            †\n'\
        '⎛⎡1  2⎤    ⎞ \n'\
        '⎜⎢    ⎥ + X⎟ \n'\
        '⎝⎣3  4⎦    ⎠ '
    assert upretty(Adjoint(BlockMatrix(((OneMatrix(2, 2), X),
                                        (m, ZeroMatrix(2, 2)))))) == \
        '           †\n'\
        '⎡  𝟙     X⎤ \n'\
        '⎢         ⎥ \n'\
        '⎢⎡1  2⎤   ⎥ \n'\
        '⎢⎢    ⎥  𝟘⎥ \n'\
        '⎣⎣3  4⎦   ⎦ '
def test_Transpose():
    """ASCII and unicode pretty printing of ``Transpose`` (superscript 'T').

    Mirrors ``test_Adjoint``: precedence, ``Inverse`` interaction, powers,
    explicit matrices, and block matrices.
    """
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert pretty(Transpose(X)) == " T\nX "
    assert pretty(Transpose(X + Y)) == "        T\n(X + Y) "
    assert pretty(Transpose(X) + Transpose(Y)) == " T    T\nX  + Y "
    assert pretty(Transpose(X*Y)) == "      T\n(X*Y) "
    assert pretty(Transpose(Y)*Transpose(X)) == " T  T\nY *X "
    assert pretty(Transpose(X**2)) == "    T\n/ 2\\ \n\\X / "
    assert pretty(Transpose(X)**2) == "    2\n/ T\\ \n\\X / "
    assert pretty(Transpose(Inverse(X))) == "     T\n/ -1\\ \n\\X  / "
    assert pretty(Inverse(Transpose(X))) == "    -1\n/ T\\  \n\\X /  "
    assert upretty(Transpose(X)) == " T\nX "
    assert upretty(Transpose(X + Y)) == "        T\n(X + Y) "
    assert upretty(Transpose(X) + Transpose(Y)) == " T    T\nX  + Y "
    assert upretty(Transpose(X*Y)) == "      T\n(X⋅Y) "
    assert upretty(Transpose(Y)*Transpose(X)) == " T  T\nY ⋅X "
    assert upretty(Transpose(X**2)) == \
        "    T\n⎛ 2⎞ \n⎝X ⎠ "
    assert upretty(Transpose(X)**2) == \
        "    2\n⎛ T⎞ \n⎝X ⎠ "
    assert upretty(Transpose(Inverse(X))) == \
        "     T\n⎛ -1⎞ \n⎝X  ⎠ "
    assert upretty(Inverse(Transpose(X))) == \
        "    -1\n⎛ T⎞  \n⎝X ⎠  "
    m = Matrix(((1, 2), (3, 4)))
    assert upretty(Transpose(m)) == \
        '      T\n'\
        '⎡1  2⎤ \n'\
        '⎢    ⎥ \n'\
        '⎣3  4⎦ '
    assert upretty(Transpose(m+X)) == \
        '            T\n'\
        '⎛⎡1  2⎤    ⎞ \n'\
        '⎜⎢    ⎥ + X⎟ \n'\
        '⎝⎣3  4⎦    ⎠ '
    assert upretty(Transpose(BlockMatrix(((OneMatrix(2, 2), X),
                                          (m, ZeroMatrix(2, 2)))))) == \
        '           T\n'\
        '⎡  𝟙     X⎤ \n'\
        '⎢         ⎥ \n'\
        '⎢⎡1  2⎤   ⎥ \n'\
        '⎢⎢    ⎥  𝟘⎥ \n'\
        '⎣⎣3  4⎦   ⎦ '
def test_pretty_Trace_issue_9044():
    """Regression test for issue 9044: ``Trace`` of explicit matrices.

    The matrix argument is wrapped in parentheses after ``tr``, and sums
    of traces print each operand independently.
    """
    X = Matrix([[1, 2], [3, 4]])
    Y = Matrix([[2, 4], [6, 8]])
    ascii_str_1 = \
        """\
  /[1  2]\\
tr|[    ]|
  \\[3  4]/\
"""
    ucode_str_1 = \
        """\
  ⎛⎡1  2⎤⎞
tr⎜⎢    ⎥⎟
  ⎝⎣3  4⎦⎠\
"""
    ascii_str_2 = \
        """\
  /[1  2]\\     /[2  4]\\
tr|[    ]| + tr|[    ]|
  \\[3  4]/     \\[6  8]/\
"""
    ucode_str_2 = \
        """\
  ⎛⎡1  2⎤⎞     ⎛⎡2  4⎤⎞
tr⎜⎢    ⎥⎟ + tr⎜⎢    ⎥⎟
  ⎝⎣3  4⎦⎠     ⎝⎣6  8⎦⎠\
"""
    assert pretty(Trace(X)) == ascii_str_1
    assert upretty(Trace(X)) == ucode_str_1
    assert pretty(Trace(X) + Trace(Y)) == ascii_str_2
    assert upretty(Trace(X) + Trace(Y)) == ucode_str_2
def test_MatrixSlice():
    """Pretty printing of matrix slices.

    Slice output is plain one-line text, identical in ascii and unicode,
    so each case is checked against a single expected string. Default
    bounds (0, dimension, step 1) are elided; a scalar row index prints
    as a one-element slice (``5`` → ``5:6``).
    """
    n = Symbol('n', integer=True)
    x, y, z, w, t, = symbols('x y z w t')
    X = MatrixSymbol('X', n, n)
    Y = MatrixSymbol('Y', 10, 10)
    Z = MatrixSymbol('Z', 10, 10)
    cases = [
        # explicit all-None slices collapse to ':'
        (MatrixSlice(X, (None, None, None), (None, None, None)), 'X[:, :]'),
        (X[x:x + 1, y:y + 1], 'X[x:x + 1, y:y + 1]'),
        (X[x:x + 1:2, y:y + 1:2], 'X[x:x + 1:2, y:y + 1:2]'),
        (X[:x, y:], 'X[:x, y:]'),
        (X[:x, y:], 'X[:x, y:]'),
        (X[x:, :y], 'X[x:, :y]'),
        (X[x:y, z:w], 'X[x:y, z:w]'),
        (X[x:y:t, w:t:x], 'X[x:y:t, w:t:x]'),
        (X[x::y, t::w], 'X[x::y, t::w]'),
        (X[:x:y, :t:w], 'X[:x:y, :t:w]'),
        (X[::x, ::y], 'X[::x, ::y]'),
        # default bounds in any position are elided
        (MatrixSlice(X, (0, None, None), (0, None, None)), 'X[:, :]'),
        (MatrixSlice(X, (None, n, None), (None, n, None)), 'X[:, :]'),
        (MatrixSlice(X, (0, n, None), (0, n, None)), 'X[:, :]'),
        (MatrixSlice(X, (0, n, 2), (0, n, 2)), 'X[::2, ::2]'),
        (X[1:2:3, 4:5:6], 'X[1:2:3, 4:5:6]'),
        (X[1:3:5, 4:6:8], 'X[1:3:5, 4:6:8]'),
        # single row slice fills the column slice with ':'
        (X[1:10:2], 'X[1:10:2, :]'),
        (Y[:5, 1:9:2], 'Y[:5, 1:9:2]'),
        (Y[:5, 1:10:2], 'Y[:5, 1::2]'),
        # scalar index becomes a width-one slice
        (Y[5, :5:2], 'Y[5:6, :5:2]'),
        (X[0:1, 0:1], 'X[:1, :1]'),
        (X[0:1:2, 0:1:2], 'X[:1:2, :1:2]'),
        # slicing a compound expression parenthesizes it
        ((Y + Z)[2:, 2:], '(Y + Z)[2:, 2:]'),
    ]
    for expr, expected in cases:
        assert pretty(expr) == upretty(expr) == expected
def test_MatrixExpressions():
    """Pretty printing of matrix expressions.

    Covers a bare ``MatrixSymbol`` and ``ElementwiseApplyFunc`` (shown as
    a lambda applied with '.' in ascii, '˳' in unicode).
    """
    n = Symbol('n', integer=True)
    X = MatrixSymbol('X', n, n)
    assert pretty(X) == upretty(X) == "X"
    # Apply function elementwise (`ElementwiseApplyFunc`):
    expr = (X.T*X).applyfunc(sin)
    ascii_str = """\
              / T  \\\n\
(d -> sin(d)).\\X *X/\
"""
    ucode_str = """\
             ⎛ T  ⎞\n\
(d ↦ sin(d))˳⎝X ⋅X⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    lamda = Lambda(x, 1/x)
    expr = (n*X).applyfunc(lamda)
    ascii_str = """\
/     1\\      \n\
|x -> -|.(n*X)\n\
\\     x/      \
"""
    ucode_str = """\
⎛    1⎞      \n\
⎜x ↦ ─⎟˳(n⋅X)\n\
⎝    x⎠      \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_dotproduct():
    """``DotProduct`` joins operands with '*' in ascii and '⋅' in unicode."""
    from sympy.matrices.expressions.dotproduct import DotProduct
    n = symbols("n", integer=True)
    # Symbolic column vectors and small explicit row matrices.
    vec_a = MatrixSymbol('A', n, 1)
    vec_b = MatrixSymbol('B', n, 1)
    row_c = Matrix(1, 3, [1, 2, 3])
    row_d = Matrix(1, 3, [1, 3, 4])
    assert pretty(DotProduct(vec_a, vec_b)) == "A*B"
    assert pretty(DotProduct(row_c, row_d)) == "[1  2  3]*[1  3  4]"
    assert upretty(DotProduct(vec_a, vec_b)) == "A⋅B"
    assert upretty(DotProduct(row_c, row_d)) == "[1  2  3]⋅[1  3  4]"
def test_pretty_Determinant():
    """Unicode pretty printing of ``Determinant`` using vertical bars (│).

    Covers explicit matrices, inverses, symbols, sums, and block matrices.
    """
    from sympy.matrices import Determinant, Inverse, BlockMatrix, OneMatrix, ZeroMatrix
    m = Matrix(((1, 2), (3, 4)))
    assert upretty(Determinant(m)) == '│1  2│\n│    │\n│3  4│'
    assert upretty(Determinant(Inverse(m))) == \
        '│      -1│\n'\
        '│⎡1  2⎤  │\n'\
        '│⎢    ⎥  │\n'\
        '│⎣3  4⎦  │'
    X = MatrixSymbol('X', 2, 2)
    assert upretty(Determinant(X)) == '│X│'
    assert upretty(Determinant(X + m)) == \
        '│⎡1  2⎤    │\n'\
        '│⎢    ⎥ + X│\n'\
        '│⎣3  4⎦    │'
    assert upretty(Determinant(BlockMatrix(((OneMatrix(2, 2), X),
                                            (m, ZeroMatrix(2, 2)))))) == \
        '│  𝟙     X│\n'\
        '│         │\n'\
        '│⎡1  2⎤   │\n'\
        '│⎢    ⎥  𝟘│\n'\
        '│⎣3  4⎦   │'
def test_pretty_piecewise():
    """ASCII and unicode pretty printing of ``Piecewise``.

    Covers a bare Piecewise, negation, sums/products of Piecewise
    expressions (each wrapped in parentheses), a condition involving
    ``meijerg``, and an unevaluated power of a Piecewise.
    """
    expr = Piecewise((x, x < 1), (x**2, True))
    ascii_str = \
        """\
/x   for x < 1\n\
|             \n\
< 2           \n\
|x   otherwise\n\
\\             \
"""
    ucode_str = \
        """\
⎧x   for x < 1\n\
⎪             \n\
⎨ 2           \n\
⎪x   otherwise\n\
⎩             \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -Piecewise((x, x < 1), (x**2, True))
    ascii_str = \
        """\
 //x   for x < 1\\\n\
 ||             |\n\
-|< 2           |\n\
 ||x   otherwise|\n\
 \\\\             /\
"""
    ucode_str = \
        """\
 ⎛⎧x   for x < 1⎞\n\
 ⎜⎪             ⎟\n\
-⎜⎨ 2           ⎟\n\
 ⎜⎪x   otherwise⎟\n\
 ⎝⎩             ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = x + Piecewise((x, x > 0), (y, True)) + Piecewise((x/y, x < 2),
    (y**2, x > 2), (1, True)) + 1
    ascii_str = \
        """\
                      //x            \\    \n\
                      ||-   for x < 2|    \n\
                      ||y            |    \n\
    //x  for x > 0\\   ||             |    \n\
x + |<            | + |< 2           | + 1\n\
    \\\\y  otherwise/   ||y   for x > 2|    \n\
                      ||             |    \n\
                      ||1   otherwise|    \n\
                      \\\\             /    \
"""
    ucode_str = \
        """\
                      ⎛⎧x            ⎞    \n\
                      ⎜⎪─   for x < 2⎟    \n\
                      ⎜⎪y            ⎟    \n\
    ⎛⎧x  for x > 0⎞   ⎜⎪             ⎟    \n\
x + ⎜⎨            ⎟ + ⎜⎨ 2           ⎟ + 1\n\
    ⎝⎩y  otherwise⎠   ⎜⎪y   for x > 2⎟    \n\
                      ⎜⎪             ⎟    \n\
                      ⎜⎪1   otherwise⎟    \n\
                      ⎝⎩             ⎠    \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = x - Piecewise((x, x > 0), (y, True)) + Piecewise((x/y, x < 2),
    (y**2, x > 2), (1, True)) + 1
    ascii_str = \
        """\
                      //x            \\    \n\
                      ||-   for x < 2|    \n\
                      ||y            |    \n\
    //x  for x > 0\\   ||             |    \n\
x - |<            | + |< 2           | + 1\n\
    \\\\y  otherwise/   ||y   for x > 2|    \n\
                      ||             |    \n\
                      ||1   otherwise|    \n\
                      \\\\             /    \
"""
    ucode_str = \
        """\
                      ⎛⎧x            ⎞    \n\
                      ⎜⎪─   for x < 2⎟    \n\
                      ⎜⎪y            ⎟    \n\
    ⎛⎧x  for x > 0⎞   ⎜⎪             ⎟    \n\
x - ⎜⎨            ⎟ + ⎜⎨ 2           ⎟ + 1\n\
    ⎝⎩y  otherwise⎠   ⎜⎪y   for x > 2⎟    \n\
                      ⎜⎪             ⎟    \n\
                      ⎜⎪1   otherwise⎟    \n\
                      ⎝⎩             ⎠    \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = x*Piecewise((x, x > 0), (y, True))
    ascii_str = \
        """\
  //x  for x > 0\\\n\
x*|<            |\n\
  \\\\y  otherwise/\
"""
    ucode_str = \
        """\
  ⎛⎧x  for x > 0⎞\n\
x⋅⎜⎨            ⎟\n\
  ⎝⎩y  otherwise⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Piecewise((x, x > 0), (y, True))*Piecewise((x/y, x < 2), (y**2, x >
    2), (1, True))
    ascii_str = \
        """\
                //x            \\\n\
                ||-   for x < 2|\n\
                ||y            |\n\
//x  for x > 0\\ ||             |\n\
|<            |*|< 2           |\n\
\\\\y  otherwise/ ||y   for x > 2|\n\
                ||             |\n\
                ||1   otherwise|\n\
                \\\\             /\
"""
    ucode_str = \
        """\
                ⎛⎧x            ⎞\n\
                ⎜⎪─   for x < 2⎟\n\
                ⎜⎪y            ⎟\n\
⎛⎧x  for x > 0⎞ ⎜⎪             ⎟\n\
⎜⎨            ⎟⋅⎜⎨ 2           ⎟\n\
⎝⎩y  otherwise⎠ ⎜⎪y   for x > 2⎟\n\
                ⎜⎪             ⎟\n\
                ⎜⎪1   otherwise⎟\n\
                ⎝⎩             ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -Piecewise((x, x > 0), (y, True))*Piecewise((x/y, x < 2), (y**2, x
    > 2), (1, True))
    ascii_str = \
        """\
                 //x            \\\n\
                 ||-   for x < 2|\n\
                 ||y            |\n\
 //x  for x > 0\\ ||             |\n\
-|<            |*|< 2           |\n\
 \\\\y  otherwise/ ||y   for x > 2|\n\
                 ||             |\n\
                 ||1   otherwise|\n\
                 \\\\             /\
"""
    ucode_str = \
        """\
                 ⎛⎧x            ⎞\n\
                 ⎜⎪─   for x < 2⎟\n\
                 ⎜⎪y            ⎟\n\
 ⎛⎧x  for x > 0⎞ ⎜⎪             ⎟\n\
-⎜⎨            ⎟⋅⎜⎨ 2           ⎟\n\
 ⎝⎩y  otherwise⎠ ⎜⎪y   for x > 2⎟\n\
                 ⎜⎪             ⎟\n\
                 ⎜⎪1   otherwise⎟\n\
                 ⎝⎩             ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Piecewise((0, Abs(1/y) < 1), (1, Abs(y) < 1), (y*meijerg(((2, 1),
    ()), ((), (1, 0)), 1/y), True))
    ascii_str = \
        """\
/                                 1     \n\
|            0               for --- < 1\n\
|                                |y|    \n\
|                                       \n\
<            1               for |y| < 1\n\
|                                       \n\
|   __0, 2 /1, 2       | 1\\             \n\
|y*/__     |           | -|   otherwise \n\
\\  \\_|2, 2 \\      0, 1 | y/             \
"""
    ucode_str = \
        """\
⎧                                 1     \n\
⎪            0               for ─── < 1\n\
⎪                                │y│    \n\
⎪                                       \n\
⎨            1               for │y│ < 1\n\
⎪                                       \n\
⎪  ╭─╮0, 2 ⎛1, 2       │ 1⎞             \n\
⎪y⋅│╶┐     ⎜           │ ─⎟   otherwise \n\
⎩  ╰─╯2, 2 ⎝      0, 1 │ y⎠             \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # XXX: We have to use evaluate=False here because Piecewise._eval_power
    # denests the power.
    expr = Pow(Piecewise((x, x > 0), (y, True)), 2, evaluate=False)
    ascii_str = \
        """\
               2\n\
//x  for x > 0\\ \n\
|<            | \n\
\\\\y  otherwise/ \
"""
    ucode_str = \
        """\
               2\n\
⎛⎧x  for x > 0⎞ \n\
⎜⎨            ⎟ \n\
⎝⎩y  otherwise⎠ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_ITE():
    """``ITE`` (if-then-else) prints as a two-branch Piecewise."""
    expr = ITE(x, y, z)
    assert pretty(expr) == (
        '/y    for x  \n'
        '<            \n'
        '\\z  otherwise'
        )
    assert upretty(expr) == """\
⎧y    for x  \n\
⎨            \n\
⎩z  otherwise\
"""
def test_pretty_seq():
    """Pretty printing of python sequences (tuple/list/dict) and ``Tuple``/``Dict``.

    Empty containers, singletons (note the trailing comma in one-tuples),
    and multi-line elements that force tall brackets. Also guards the old
    bug with sequences of even pretty-height.
    """
    expr = ()
    ascii_str = \
        """\
()\
"""
    ucode_str = \
        """\
()\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = []
    ascii_str = \
        """\
[]\
"""
    ucode_str = \
        """\
[]\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = {}
    expr_2 = {}
    ascii_str = \
        """\
{}\
"""
    ucode_str = \
        """\
{}\
"""
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
    expr = (1/x,)
    ascii_str = \
        """\
 1  \n\
(-,)\n\
 x  \
"""
    ucode_str = \
        """\
⎛1 ⎞\n\
⎜─,⎟\n\
⎝x ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = [x**2, 1/x, x, y, sin(th)**2/cos(ph)**2]
    ascii_str = \
        """\
                 2        \n\
  2  1        sin (theta) \n\
[x , -, x, y, -----------]\n\
     x            2       \n\
               cos (phi)  \
"""
    ucode_str = \
        """\
⎡                2   ⎤\n\
⎢ 2  1        sin (θ)⎥\n\
⎢x , ─, x, y, ───────⎥\n\
⎢    x           2   ⎥\n\
⎣             cos (φ)⎦\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x**2, 1/x, x, y, sin(th)**2/cos(ph)**2)
    ascii_str = \
        """\
                 2        \n\
  2  1        sin (theta) \n\
(x , -, x, y, -----------)\n\
     x            2       \n\
               cos (phi)  \
"""
    ucode_str = \
        """\
⎛                2   ⎞\n\
⎜ 2  1        sin (θ)⎟\n\
⎜x , ─, x, y, ───────⎟\n\
⎜    x           2   ⎟\n\
⎝             cos (φ)⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Tuple(x**2, 1/x, x, y, sin(th)**2/cos(ph)**2)
    ascii_str = \
        """\
                 2        \n\
  2  1        sin (theta) \n\
(x , -, x, y, -----------)\n\
     x            2       \n\
               cos (phi)  \
"""
    ucode_str = \
        """\
⎛                2   ⎞\n\
⎜ 2  1        sin (θ)⎟\n\
⎜x , ─, x, y, ───────⎟\n\
⎜    x           2   ⎟\n\
⎝             cos (φ)⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = {x: sin(x)}
    expr_2 = Dict({x: sin(x)})
    ascii_str = \
        """\
{x: sin(x)}\
"""
    ucode_str = \
        """\
{x: sin(x)}\
"""
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
    expr = {1/x: 1/y, x: sin(x)**2}
    expr_2 = Dict({1/x: 1/y, x: sin(x)**2})
    ascii_str = \
        """\
 1  1        2    \n\
{-: -, x: sin (x)}\n\
 x  y             \
"""
    ucode_str = \
        """\
⎧1  1        2   ⎫\n\
⎨─: ─, x: sin (x)⎬\n\
⎩x  y            ⎭\
"""
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
    # There used to be a bug with pretty-printing sequences of even height.
    expr = [x**2]
    ascii_str = \
        """\
  2 \n\
[x ]\
"""
    ucode_str = \
        """\
⎡ 2⎤\n\
⎣x ⎦\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x**2,)
    ascii_str = \
        """\
  2  \n\
(x ,)\
"""
    ucode_str = \
        """\
⎛ 2 ⎞\n\
⎝x ,⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Tuple(x**2)
    ascii_str = \
        """\
  2  \n\
(x ,)\
"""
    ucode_str = \
        """\
⎛ 2 ⎞\n\
⎝x ,⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = {x**2: 1}
    expr_2 = Dict({x**2: 1})
    ascii_str = \
        """\
  2    \n\
{x : 1}\
"""
    ucode_str = \
        """\
⎧ 2   ⎫\n\
⎨x : 1⎬\n\
⎩     ⎭\
"""
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
def test_any_object_in_sequence():
    """Generic Basic objects inside lists/sets/dicts print via their str form."""
    # Cf. issue 5306
    b1 = Basic()
    b2 = Basic(Basic())
    expr = [b2, b1]
    assert pretty(expr) == "[Basic(Basic()), Basic()]"
    assert upretty(expr) == "[Basic(Basic()), Basic()]"
    # set ordering is canonicalized by the printer, not insertion order
    expr = {b2, b1}
    assert pretty(expr) == "{Basic(), Basic(Basic())}"
    assert upretty(expr) == "{Basic(), Basic(Basic())}"
    expr = {b2: b1, b1: b2}
    expr2 = Dict({b2: b1, b1: b2})
    assert pretty(expr) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
    assert pretty(
        expr2) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
    assert upretty(
        expr) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
    assert upretty(
        expr2) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
def test_print_builtin_set():
    """Builtin set/frozenset: empty ones print by name; non-empty use brace art."""
    assert pretty(set()) == 'set()'
    assert upretty(set()) == 'set()'
    assert pretty(frozenset()) == 'frozenset()'
    assert upretty(frozenset()) == 'frozenset()'
    s1 = {1/x, x}
    s2 = frozenset(s1)
    assert pretty(s1) == \
"""\
1 \n\
{-, x}
x \
"""
    assert upretty(s1) == \
"""\
⎧1 ⎫
⎨─, x⎬
⎩x ⎭\
"""
    assert pretty(s2) == \
"""\
1 \n\
frozenset({-, x})
x \
"""
    assert upretty(s2) == \
"""\
⎛⎧1 ⎫⎞
frozenset⎜⎨─, x⎬⎟
⎝⎩x ⎭⎠\
"""
def test_pretty_sets():
    """FiniteSet, builtin set/frozenset and Range printing, incl. ellipsis for long/infinite Ranges."""
    s = FiniteSet
    assert pretty(s(*[x*y, x**2])) == \
"""\
2 \n\
{x , x*y}\
"""
    assert pretty(s(*range(1, 6))) == "{1, 2, 3, 4, 5}"
    assert pretty(s(*range(1, 13))) == "{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}"
    assert pretty({x*y, x**2}) == \
"""\
2 \n\
{x , x*y}\
"""
    assert pretty(set(range(1, 6))) == "{1, 2, 3, 4, 5}"
    assert pretty(set(range(1, 13))) == \
        "{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}"
    assert pretty(frozenset([x*y, x**2])) == \
"""\
2 \n\
frozenset({x , x*y})\
"""
    assert pretty(frozenset(range(1, 6))) == "frozenset({1, 2, 3, 4, 5})"
    assert pretty(frozenset(range(1, 13))) == \
        "frozenset({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})"
    assert pretty(Range(0, 3, 1)) == '{0, 1, 2}'
    # long Ranges elide the middle with '...' (ASCII) / '…' (Unicode)
    ascii_str = '{0, 1, ..., 29}'
    ucode_str = '{0, 1, …, 29}'
    assert pretty(Range(0, 30, 1)) == ascii_str
    assert upretty(Range(0, 30, 1)) == ucode_str
    ascii_str = '{30, 29, ..., 2}'
    ucode_str = '{30, 29, …, 2}'
    assert pretty(Range(30, 1, -1)) == ascii_str
    assert upretty(Range(30, 1, -1)) == ucode_str
    ascii_str = '{0, 2, ...}'
    ucode_str = '{0, 2, …}'
    assert pretty(Range(0, oo, 2)) == ascii_str
    assert upretty(Range(0, oo, 2)) == ucode_str
    ascii_str = '{..., 2, 0}'
    ucode_str = '{…, 2, 0}'
    assert pretty(Range(oo, -2, -2)) == ascii_str
    assert upretty(Range(oo, -2, -2)) == ucode_str
    ascii_str = '{-2, -3, ...}'
    ucode_str = '{-2, -3, …}'
    assert pretty(Range(-2, -oo, -1)) == ascii_str
    assert upretty(Range(-2, -oo, -1)) == ucode_str
def test_pretty_SetExpr():
    """SetExpr over an interval renders identically in ASCII and Unicode modes."""
    expected = "SetExpr([1, 3])"
    wrapped = SetExpr(Interval(1, 3))
    assert pretty(wrapped) == expected
    assert upretty(wrapped) == expected
def test_pretty_ImageSet():
    """ImageSet set-builder notation: '{expr | var in base}' with ∊ in Unicode."""
    imgset = ImageSet(Lambda((x, y), x + y), {1, 2, 3}, {3, 4})
    ascii_str = '{x + y | x in {1, 2, 3}, y in {3, 4}}'
    ucode_str = '{x + y │ x ∊ {1, 2, 3}, y ∊ {3, 4}}'
    assert pretty(imgset) == ascii_str
    assert upretty(imgset) == ucode_str
    # tuple-valued lambda over a ProductSet binds (x, y) jointly
    imgset = ImageSet(Lambda(((x, y),), x + y), ProductSet({1, 2, 3}, {3, 4}))
    ascii_str = '{x + y | (x, y) in {1, 2, 3} x {3, 4}}'
    ucode_str = '{x + y │ (x, y) ∊ {1, 2, 3} × {3, 4}}'
    assert pretty(imgset) == ascii_str
    assert upretty(imgset) == ucode_str
    imgset = ImageSet(Lambda(x, x**2), S.Naturals)
    ascii_str = '''\
2 \n\
{x | x in Naturals}'''
    ucode_str = '''\
⎧ 2 │ ⎫\n\
⎨x │ x ∊ ℕ⎬\n\
⎩ │ ⎭'''
    assert pretty(imgset) == ascii_str
    assert upretty(imgset) == ucode_str
    # TODO: The "x in N" parts below should be centered independently of the
    # 1/x**2 fraction
    imgset = ImageSet(Lambda(x, 1/x**2), S.Naturals)
    ascii_str = '''\
1 \n\
{-- | x in Naturals}
2 \n\
x '''
    ucode_str = '''\
⎧1 │ ⎫\n\
⎪── │ x ∊ ℕ⎪\n\
⎨ 2 │ ⎬\n\
⎪x │ ⎪\n\
⎩ │ ⎭'''
    assert pretty(imgset) == ascii_str
    assert upretty(imgset) == ucode_str
    imgset = ImageSet(Lambda((x, y), 1/(x + y)**2), S.Naturals, S.Naturals)
    ascii_str = '''\
1 \n\
{-------- | x in Naturals, y in Naturals}
2 \n\
(x + y) '''
    ucode_str = '''\
⎧ 1 │ ⎫
⎪──────── │ x ∊ ℕ, y ∊ ℕ⎪
⎨ 2 │ ⎬
⎪(x + y) │ ⎪
⎩ │ ⎭'''
    assert pretty(imgset) == ascii_str
    assert upretty(imgset) == ucode_str
    # issue 23449 centering issue
    assert upretty([Symbol("ihat") / (Symbol("i") + 1)]) == '''\
⎡ î ⎤
⎢─────⎥
⎣i + 1⎦\
'''
    assert upretty(Matrix([Symbol("ihat"), Symbol("i") + 1])) == '''\
⎡ î ⎤
⎢ ⎥
⎣i + 1⎦\
'''
def test_pretty_ConditionSet():
    """ConditionSet printing; evaluated cases collapse to the resulting set."""
    ascii_str = '{x | x in (-oo, oo) and sin(x) = 0}'
    ucode_str = '{x │ x ∊ ℝ ∧ (sin(x) = 0)}'
    assert pretty(ConditionSet(x, Eq(sin(x), 0), S.Reals)) == ascii_str
    assert upretty(ConditionSet(x, Eq(sin(x), 0), S.Reals)) == ucode_str
    # the following evaluate before printing, so plain set output is expected
    assert pretty(ConditionSet(x, Contains(x, S.Reals, evaluate=False), FiniteSet(1))) == '{1}'
    assert upretty(ConditionSet(x, Contains(x, S.Reals, evaluate=False), FiniteSet(1))) == '{1}'
    assert pretty(ConditionSet(x, And(x > 1, x < -1), FiniteSet(1, 2, 3))) == "EmptySet"
    assert upretty(ConditionSet(x, And(x > 1, x < -1), FiniteSet(1, 2, 3))) == "∅"
    assert pretty(ConditionSet(x, Or(x > 1, x < -1), FiniteSet(1, 2))) == '{2}'
    assert upretty(ConditionSet(x, Or(x > 1, x < -1), FiniteSet(1, 2))) == '{2}'
    condset = ConditionSet(x, 1/x**2 > 0)
    ascii_str = '''\
1 \n\
{x | -- > 0}
2 \n\
x '''
    ucode_str = '''\
⎧ │ ⎛1 ⎞⎫
⎪x │ ⎜── > 0⎟⎪
⎨ │ ⎜ 2 ⎟⎬
⎪ │ ⎝x ⎠⎪
⎩ │ ⎭'''
    assert pretty(condset) == ascii_str
    assert upretty(condset) == ucode_str
    condset = ConditionSet(x, 1/x**2 > 0, S.Reals)
    ascii_str = '''\
1 \n\
{x | x in (-oo, oo) and -- > 0}
2 \n\
x '''
    ucode_str = '''\
⎧ │ ⎛1 ⎞⎫
⎪x │ x ∊ ℝ ∧ ⎜── > 0⎟⎪
⎨ │ ⎜ 2 ⎟⎬
⎪ │ ⎝x ⎠⎪
⎩ │ ⎭'''
    assert pretty(condset) == ascii_str
    assert upretty(condset) == ucode_str
def test_pretty_ComplexRegion():
    """ComplexRegion printing in rectangular and polar forms."""
    from sympy.sets.fancysets import ComplexRegion
    cregion = ComplexRegion(Interval(3, 5)*Interval(4, 6))
    ascii_str = '{x + y*I | x, y in [3, 5] x [4, 6]}'
    ucode_str = '{x + y⋅ⅈ │ x, y ∊ [3, 5] × [4, 6]}'
    assert pretty(cregion) == ascii_str
    assert upretty(cregion) == ucode_str
    cregion = ComplexRegion(Interval(0, 1)*Interval(0, 2*pi), polar=True)
    ascii_str = '{r*(I*sin(theta) + cos(theta)) | r, theta in [0, 1] x [0, 2*pi)}'
    ucode_str = '{r⋅(ⅈ⋅sin(θ) + cos(θ)) │ r, θ ∊ [0, 1] × [0, 2⋅π)}'
    assert pretty(cregion) == ascii_str
    assert upretty(cregion) == ucode_str
    # symbolic interval bound forces multi-line bracket art
    cregion = ComplexRegion(Interval(3, 1/a**2)*Interval(4, 6))
    ascii_str = '''\
1 \n\
{x + y*I | x, y in [3, --] x [4, 6]}
2 \n\
a '''
    ucode_str = '''\
⎧ │ ⎡ 1 ⎤ ⎫
⎪x + y⋅ⅈ │ x, y ∊ ⎢3, ──⎥ × [4, 6]⎪
⎨ │ ⎢ 2⎥ ⎬
⎪ │ ⎣ a ⎦ ⎪
⎩ │ ⎭'''
    assert pretty(cregion) == ascii_str
    assert upretty(cregion) == ucode_str
    cregion = ComplexRegion(Interval(0, 1/a**2)*Interval(0, 2*pi), polar=True)
    ascii_str = '''\
1 \n\
{r*(I*sin(theta) + cos(theta)) | r, theta in [0, --] x [0, 2*pi)}
2 \n\
a '''
    ucode_str = '''\
⎧ │ ⎡ 1 ⎤ ⎫
⎪r⋅(ⅈ⋅sin(θ) + cos(θ)) │ r, θ ∊ ⎢0, ──⎥ × [0, 2⋅π)⎪
⎨ │ ⎢ 2⎥ ⎬
⎪ │ ⎣ a ⎦ ⎪
⎩ │ ⎭'''
    assert pretty(cregion) == ascii_str
    assert upretty(cregion) == ucode_str
def test_pretty_Union_issue_10414():
    """Union of intervals prints with 'U' (ASCII) and '∪' (Unicode); cf. issue 10414."""
    joined = Union(Interval(2, 3), Interval(4, 7))
    assert upretty(joined) == '[2, 3] ∪ [4, 7]'
    assert pretty(joined) == '[2, 3] U [4, 7]'
def test_pretty_Intersection_issue_10414():
    """Intersection of intervals prints with 'n' (ASCII) and '∩' (Unicode); cf. issue 10414."""
    x, y, z, w = symbols('x, y, z, w')
    common = Intersection(Interval(x, y), Interval(z, w))
    assert upretty(common) == '[x, y] ∩ [z, w]'
    assert pretty(common) == '[x, y] n [z, w]'
def test_ProductSet_exponent():
    """An interval raised to an integer power prints the exponent above the set."""
    unit = Interval(0, 1)
    assert upretty(unit**1) == ' 1\n[0, 1] '
    assert upretty(unit**2) == ' 2\n[0, 1] '
def test_ProductSet_parenthesis():
    """Product sets inside a Union are parenthesized to keep precedence visible."""
    lo, hi = Interval(2, 3), Interval(4, 7)
    combined = Union(lo*hi, hi*FiniteSet(1, 2))
    assert upretty(combined) == '([4, 7] × {1, 2}) ∪ ([2, 3] × [4, 7])'
def test_ProductSet_prod_char_issue_10413():
    """Cartesian product separator: 'x' in ASCII, '×' in Unicode; cf. issue 10413."""
    prod = Interval(2, 3) * Interval(4, 7)
    assert pretty(prod) == '[2, 3] x [4, 7]'
    assert upretty(prod) == '[2, 3] × [4, 7]'
def test_pretty_sequences():
    """SeqFormula/SeqPer and their Add/Mul combinations, with ellipsis for infinite ends."""
    s1 = SeqFormula(a**2, (0, oo))
    s2 = SeqPer((1, 2))
    ascii_str = '[0, 1, 4, 9, ...]'
    ucode_str = '[0, 1, 4, 9, …]'
    assert pretty(s1) == ascii_str
    assert upretty(s1) == ucode_str
    ascii_str = '[1, 2, 1, 2, ...]'
    ucode_str = '[1, 2, 1, 2, …]'
    assert pretty(s2) == ascii_str
    assert upretty(s2) == ucode_str
    s3 = SeqFormula(a**2, (0, 2))
    s4 = SeqPer((1, 2), (0, 2))
    ascii_str = '[0, 1, 4]'
    ucode_str = '[0, 1, 4]'
    assert pretty(s3) == ascii_str
    assert upretty(s3) == ucode_str
    ascii_str = '[1, 2, 1]'
    ucode_str = '[1, 2, 1]'
    assert pretty(s4) == ascii_str
    assert upretty(s4) == ucode_str
    # sequences unbounded below put the ellipsis on the left
    s5 = SeqFormula(a**2, (-oo, 0))
    s6 = SeqPer((1, 2), (-oo, 0))
    ascii_str = '[..., 9, 4, 1, 0]'
    ucode_str = '[…, 9, 4, 1, 0]'
    assert pretty(s5) == ascii_str
    assert upretty(s5) == ucode_str
    ascii_str = '[..., 2, 1, 2, 1]'
    ucode_str = '[…, 2, 1, 2, 1]'
    assert pretty(s6) == ascii_str
    assert upretty(s6) == ucode_str
    ascii_str = '[1, 3, 5, 11, ...]'
    ucode_str = '[1, 3, 5, 11, …]'
    assert pretty(SeqAdd(s1, s2)) == ascii_str
    assert upretty(SeqAdd(s1, s2)) == ucode_str
    ascii_str = '[1, 3, 5]'
    ucode_str = '[1, 3, 5]'
    assert pretty(SeqAdd(s3, s4)) == ascii_str
    assert upretty(SeqAdd(s3, s4)) == ucode_str
    ascii_str = '[..., 11, 5, 3, 1]'
    ucode_str = '[…, 11, 5, 3, 1]'
    assert pretty(SeqAdd(s5, s6)) == ascii_str
    assert upretty(SeqAdd(s5, s6)) == ucode_str
    ascii_str = '[0, 2, 4, 18, ...]'
    ucode_str = '[0, 2, 4, 18, …]'
    assert pretty(SeqMul(s1, s2)) == ascii_str
    assert upretty(SeqMul(s1, s2)) == ucode_str
    ascii_str = '[0, 2, 4]'
    ucode_str = '[0, 2, 4]'
    assert pretty(SeqMul(s3, s4)) == ascii_str
    assert upretty(SeqMul(s3, s4)) == ucode_str
    ascii_str = '[..., 18, 4, 2, 0]'
    ucode_str = '[…, 18, 4, 2, 0]'
    assert pretty(SeqMul(s5, s6)) == ascii_str
    assert upretty(SeqMul(s5, s6)) == ucode_str
    # Sequences with symbolic limits, issue 12629
    s7 = SeqFormula(a**2, (a, 0, x))
    raises(NotImplementedError, lambda: pretty(s7))
    raises(NotImplementedError, lambda: upretty(s7))
    b = Symbol('b')
    s8 = SeqFormula(b*a**2, (a, 0, 2))
    ascii_str = '[0, b, 4*b]'
    ucode_str = '[0, b, 4⋅b]'
    assert pretty(s8) == ascii_str
    assert upretty(s8) == ucode_str
def test_pretty_FourierSeries():
    """Truncated FourierSeries shows the leading terms followed by an ellipsis."""
    f = fourier_series(x, (x, -pi, pi))
    ascii_str = \
"""\
2*sin(3*x) \n\
2*sin(x) - sin(2*x) + ---------- + ...\n\
3 \
"""
    ucode_str = \
"""\
2⋅sin(3⋅x) \n\
2⋅sin(x) - sin(2⋅x) + ────────── + …\n\
3 \
"""
    assert pretty(f) == ascii_str
    assert upretty(f) == ucode_str
def test_pretty_FormalPowerSeries():
    """FormalPowerSeries prints as a Sum with big-sigma ASCII/Unicode art."""
    f = fps(log(1 + x))
    ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ -k k \n\
\\ -(-1) *x \n\
/ -----------\n\
/ k \n\
/___, \n\
k = 1 \
"""
    ucode_str = \
"""\
∞ \n\
____ \n\
╲ \n\
╲ -k k \n\
╲ -(-1) ⋅x \n\
╱ ───────────\n\
╱ k \n\
╱ \n\
‾‾‾‾ \n\
k = 1 \
"""
    assert pretty(f) == ascii_str
    assert upretty(f) == ucode_str
def test_pretty_limits():
    """Limit printing: direction markers (+/-/two-sided), parenthesization and nesting."""
    expr = Limit(x, x, oo)
    ascii_str = \
"""\
lim x\n\
x->oo \
"""
    ucode_str = \
"""\
lim x\n\
x─→∞ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Limit(x**2, x, 0)
    ascii_str = \
"""\
2\n\
lim x \n\
x->0+ \
"""
    ucode_str = \
"""\
2\n\
lim x \n\
x─→0⁺ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Limit(1/x, x, 0)
    ascii_str = \
"""\
1\n\
lim -\n\
x->0+x\
"""
    ucode_str = \
"""\
1\n\
lim ─\n\
x─→0⁺x\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Limit(sin(x)/x, x, 0)
    ascii_str = \
"""\
/sin(x)\\\n\
lim |------|\n\
x->0+\\ x /\
"""
    ucode_str = \
"""\
⎛sin(x)⎞\n\
lim ⎜──────⎟\n\
x─→0⁺⎝ x ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # explicit left-sided limit
    expr = Limit(sin(x)/x, x, 0, "-")
    ascii_str = \
"""\
/sin(x)\\\n\
lim |------|\n\
x->0-\\ x /\
"""
    ucode_str = \
"""\
⎛sin(x)⎞\n\
lim ⎜──────⎟\n\
x─→0⁻⎝ x ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Limit(x + sin(x), x, 0)
    ascii_str = \
"""\
lim (x + sin(x))\n\
x->0+ \
"""
    ucode_str = \
"""\
lim (x + sin(x))\n\
x─→0⁺ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # a power of a Limit must be parenthesized
    expr = Limit(x, x, 0)**2
    ascii_str = \
"""\
2\n\
/ lim x\\ \n\
\\x->0+ / \
"""
    ucode_str = \
"""\
2\n\
⎛ lim x⎞ \n\
⎝x─→0⁺ ⎠ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Limit(x*Limit(y/2,y,0), x, 0)
    ascii_str = \
"""\
/ /y\\\\\n\
lim |x* lim |-||\n\
x->0+\\ y->0+\\2//\
"""
    ucode_str = \
"""\
⎛ ⎛y⎞⎞\n\
lim ⎜x⋅ lim ⎜─⎟⎟\n\
x─→0⁺⎝ y─→0⁺⎝2⎠⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2*Limit(x*Limit(y/2,y,0), x, 0)
    ascii_str = \
"""\
/ /y\\\\\n\
2* lim |x* lim |-||\n\
x->0+\\ y->0+\\2//\
"""
    ucode_str = \
"""\
⎛ ⎛y⎞⎞\n\
2⋅ lim ⎜x⋅ lim ⎜─⎟⎟\n\
x─→0⁺⎝ y─→0⁺⎝2⎠⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # two-sided limit carries no direction superscript
    expr = Limit(sin(x), x, 0, dir='+-')
    ascii_str = \
"""\
lim sin(x)\n\
x->0 \
"""
    ucode_str = \
"""\
lim sin(x)\n\
x─→0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_ComplexRootOf():
    """CRootOf prints as a function-style wrapper around the polynomial and index."""
    expr = rootof(x**5 + 11*x - 2, 0)
    ascii_str = \
"""\
/ 5 \\\n\
CRootOf\\x + 11*x - 2, 0/\
"""
    ucode_str = \
"""\
⎛ 5 ⎞\n\
CRootOf⎝x + 11⋅x - 2, 0⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_RootSum():
    """RootSum printing, with and without an explicit Lambda mapping."""
    expr = RootSum(x**5 + 11*x - 2, auto=False)
    ascii_str = \
"""\
/ 5 \\\n\
RootSum\\x + 11*x - 2/\
"""
    ucode_str = \
"""\
⎛ 5 ⎞\n\
RootSum⎝x + 11⋅x - 2⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # the Lambda arrow is '->' in ASCII and '↦' in Unicode
    expr = RootSum(x**5 + 11*x - 2, Lambda(z, exp(z)))
    ascii_str = \
"""\
/ 5 z\\\n\
RootSum\\x + 11*x - 2, z -> e /\
"""
    ucode_str = \
"""\
⎛ 5 z⎞\n\
RootSum⎝x + 11⋅x - 2, z ↦ ℯ ⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_GroebnerBasis():
    """GroebnerBasis printing: empty basis, grlex ordering, and fglm conversion to lex."""
    expr = groebner([], x, y)
    ascii_str = \
"""\
GroebnerBasis([], x, y, domain=ZZ, order=lex)\
"""
    ucode_str = \
"""\
GroebnerBasis([], x, y, domain=ℤ, order=lex)\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
    expr = groebner(F, x, y, order='grlex')
    ascii_str = \
"""\
/[ 2 2 ] \\\n\
GroebnerBasis\\[x - x - 3*y + 1, y - 2*x + y - 1], x, y, domain=ZZ, order=grlex/\
"""
    ucode_str = \
"""\
⎛⎡ 2 2 ⎤ ⎞\n\
GroebnerBasis⎝⎣x - x - 3⋅y + 1, y - 2⋅x + y - 1⎦, x, y, domain=ℤ, order=grlex⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # fglm converts the basis to lex order; polynomials change accordingly
    expr = expr.fglm('lex')
    ascii_str = \
"""\
/[ 2 4 3 2 ] \\\n\
GroebnerBasis\\[2*x - y - y + 1, y + 2*y - 3*y - 16*y + 7], x, y, domain=ZZ, order=lex/\
"""
    ucode_str = \
"""\
⎛⎡ 2 4 3 2 ⎤ ⎞\n\
GroebnerBasis⎝⎣2⋅x - y - y + 1, y + 2⋅y - 3⋅y - 16⋅y + 7⎦, x, y, domain=ℤ, order=lex⎠\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_UniversalSet():
    """UniversalSet prints by name in ASCII and as the double-struck U in Unicode."""
    universe = S.UniversalSet
    assert pretty(universe) == "UniversalSet"
    assert upretty(universe) == '𝕌'
def test_pretty_Boolean():
    """Boolean operators: function-call form in ASCII, logic symbols in Unicode."""
    expr = Not(x, evaluate=False)
    assert pretty(expr) == "Not(x)"
    assert upretty(expr) == "¬x"
    expr = And(x, y)
    assert pretty(expr) == "And(x, y)"
    assert upretty(expr) == "x ∧ y"
    expr = Or(x, y)
    assert pretty(expr) == "Or(x, y)"
    assert upretty(expr) == "x ∨ y"
    syms = symbols('a:f')
    expr = And(*syms)
    assert pretty(expr) == "And(a, b, c, d, e, f)"
    assert upretty(expr) == "a ∧ b ∧ c ∧ d ∧ e ∧ f"
    expr = Or(*syms)
    assert pretty(expr) == "Or(a, b, c, d, e, f)"
    assert upretty(expr) == "a ∨ b ∨ c ∨ d ∨ e ∨ f"
    expr = Xor(x, y, evaluate=False)
    assert pretty(expr) == "Xor(x, y)"
    assert upretty(expr) == "x ⊻ y"
    expr = Nand(x, y, evaluate=False)
    assert pretty(expr) == "Nand(x, y)"
    assert upretty(expr) == "x ⊼ y"
    expr = Nor(x, y, evaluate=False)
    assert pretty(expr) == "Nor(x, y)"
    assert upretty(expr) == "x ⊽ y"
    expr = Implies(x, y, evaluate=False)
    assert pretty(expr) == "Implies(x, y)"
    assert upretty(expr) == "x → y"
    # don't sort args
    expr = Implies(y, x, evaluate=False)
    assert pretty(expr) == "Implies(y, x)"
    assert upretty(expr) == "y → x"
    expr = Equivalent(x, y, evaluate=False)
    assert pretty(expr) == "Equivalent(x, y)"
    assert upretty(expr) == "x ⇔ y"
    # Equivalent is commutative: args ARE canonically sorted, unlike Implies
    expr = Equivalent(y, x, evaluate=False)
    assert pretty(expr) == "Equivalent(x, y)"
    assert upretty(expr) == "x ⇔ y"
def test_pretty_Domain():
    """Polynomial domains: ASCII names vs. double-struck Unicode letters."""
    expr = FF(23)
    assert pretty(expr) == "GF(23)"
    assert upretty(expr) == "ℤ₂₃"
    expr = ZZ
    assert pretty(expr) == "ZZ"
    assert upretty(expr) == "ℤ"
    expr = QQ
    assert pretty(expr) == "QQ"
    assert upretty(expr) == "ℚ"
    expr = RR
    assert pretty(expr) == "RR"
    assert upretty(expr) == "ℝ"
    expr = QQ[x]
    assert pretty(expr) == "QQ[x]"
    assert upretty(expr) == "ℚ[x]"
    expr = QQ[x, y]
    assert pretty(expr) == "QQ[x, y]"
    assert upretty(expr) == "ℚ[x, y]"
    expr = ZZ.frac_field(x)
    assert pretty(expr) == "ZZ(x)"
    assert upretty(expr) == "ℤ(x)"
    expr = ZZ.frac_field(x, y)
    assert pretty(expr) == "ZZ(x, y)"
    assert upretty(expr) == "ℤ(x, y)"
    # non-default monomial orders are shown explicitly
    expr = QQ.poly_ring(x, y, order=grlex)
    assert pretty(expr) == "QQ[x, y, order=grlex]"
    assert upretty(expr) == "ℚ[x, y, order=grlex]"
    expr = QQ.poly_ring(x, y, order=ilex)
    assert pretty(expr) == "QQ[x, y, order=ilex]"
    assert upretty(expr) == "ℚ[x, y, order=ilex]"
def test_pretty_prec():
    """full_prec setting: True prints all digits, False/'auto' trims inside products."""
    assert xpretty(S("0.3"), full_prec=True, wrap_line=False) == "0.300000000000000"
    assert xpretty(S("0.3"), full_prec="auto", wrap_line=False) == "0.300000000000000"
    assert xpretty(S("0.3"), full_prec=False, wrap_line=False) == "0.3"
    # product term order is not guaranteed, so accept either factor order
    assert xpretty(S("0.3")*x, full_prec=True, use_unicode=False, wrap_line=False) in [
        "0.300000000000000*x",
        "x*0.300000000000000"
    ]
    assert xpretty(S("0.3")*x, full_prec="auto", use_unicode=False, wrap_line=False) in [
        "0.3*x",
        "x*0.3"
    ]
    assert xpretty(S("0.3")*x, full_prec=False, use_unicode=False, wrap_line=False) in [
        "0.3*x",
        "x*0.3"
    ]
def test_pprint():
    """pprint writes to sys.stdout; capture it and check pi renders as 'pi' in ASCII mode."""
    import sys
    from io import StringIO
    captured = StringIO()
    original_stdout = sys.stdout
    sys.stdout = captured
    try:
        pprint(pi, use_unicode=False, wrap_line=False)
    finally:
        # always restore stdout, even if pprint raises
        sys.stdout = original_stdout
    assert captured.getvalue() == 'pi\n'
def test_pretty_class():
    """Test that the printer dispatcher correctly handles classes."""
    class C:
        pass  # C has no .__class__ and this was causing problems
    class D:
        pass
    # classes should fall back to their plain str() representation
    for klass in (C, D):
        assert pretty(klass) == str(klass)
def test_pretty_no_wrap_line():
    """A wide expression wraps across lines by default but not with wrap_line=False."""
    wide = sum(i*sin(i + x) for i in range(20))
    assert '\n' in xpretty(wide, num_columns=80)
    assert '\n' not in xpretty(wide, num_columns=80, wrap_line=False)
def test_settings():
    """An unknown printer method name must raise TypeError."""
    bad_call = lambda: pretty(S(4), method="garbage")
    raises(TypeError, bad_call)
def test_pretty_sum():
    """Sum printing: sigma art scaling, complicated limits, and nested Integrals."""
    from sympy.abc import x, a, b, k, m, n
    expr = Sum(k**k, (k, 0, n))
    ascii_str = \
"""\
n \n\
___ \n\
\\ ` \n\
\\ k\n\
/ k \n\
/__, \n\
k = 0 \
"""
    ucode_str = \
"""\
n \n\
___ \n\
╲ \n\
╲ k\n\
╱ k \n\
╱ \n\
‾‾‾ \n\
k = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(k**k, (k, oo, n))
    ascii_str = \
"""\
n \n\
___ \n\
\\ ` \n\
\\ k\n\
/ k \n\
/__, \n\
k = oo \
"""
    ucode_str = \
"""\
n \n\
___ \n\
╲ \n\
╲ k\n\
╱ k \n\
╱ \n\
‾‾‾ \n\
k = ∞ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # an Integral in the summand makes the sigma grow vertically
    expr = Sum(k**(Integral(x**n, (x, -oo, oo))), (k, 0, n**n))
    ascii_str = \
"""\
n \n\
n \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
k = 0 \
"""
    ucode_str = \
"""\
n \n\
n \n\
______ \n\
╲ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╱ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
‾‾‾‾‾‾ \n\
k = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Integral as the UPPER limit sits on top of the sigma
    expr = Sum(k**(
        Integral(x**n, (x, -oo, oo))), (k, 0, Integral(x**x, (x, -oo, oo))))
    ascii_str = \
"""\
oo \n\
/ \n\
| \n\
| x \n\
| x dx \n\
| \n\
/ \n\
-oo \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
k = 0 \
"""
    ucode_str = \
"""\
∞ \n\
⌠ \n\
⎮ x \n\
⎮ x dx \n\
⌡ \n\
-∞ \n\
______ \n\
╲ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╱ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
‾‾‾‾‾‾ \n\
k = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # long symbolic LOWER limit is laid out below the sigma
    expr = Sum(k**(Integral(x**n, (x, -oo, oo))), (
        k, x + n + x**2 + n**2 + (x/n) + (1/x), Integral(x**x, (x, -oo, oo))))
    ascii_str = \
"""\
oo \n\
/ \n\
| \n\
| x \n\
| x dx \n\
| \n\
/ \n\
-oo \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
2 2 1 x \n\
k = n + n + x + x + - + - \n\
x n \
"""
    ucode_str = \
"""\
∞ \n\
⌠ \n\
⎮ x \n\
⎮ x dx \n\
⌡ \n\
-∞ \n\
______ \n\
╲ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╱ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
‾‾‾‾‾‾ \n\
2 2 1 x \n\
k = n + n + x + x + ─ + ─ \n\
x n \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(k**(
        Integral(x**n, (x, -oo, oo))), (k, 0, x + n + x**2 + n**2 + (x/n) + (1/x)))
    ascii_str = \
"""\
2 2 1 x \n\
n + n + x + x + - + - \n\
x n \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
k = 0 \
"""
    ucode_str = \
"""\
2 2 1 x \n\
n + n + x + x + ─ + ─ \n\
x n \n\
______ \n\
╲ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╱ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
‾‾‾‾‾‾ \n\
k = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(x, (x, 0, oo))
    ascii_str = \
"""\
oo \n\
__ \n\
\\ ` \n\
) x\n\
/_, \n\
x = 0 \
"""
    ucode_str = \
"""\
∞ \n\
___ \n\
╲ \n\
╲ \n\
╱ x\n\
╱ \n\
‾‾‾ \n\
x = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(x**2, (x, 0, oo))
    ascii_str = \
"""\
oo \n\
___ \n\
\\ ` \n\
\\ 2\n\
/ x \n\
/__, \n\
x = 0 \
"""
    ucode_str = \
"""\
∞ \n\
___ \n\
╲ \n\
╲ 2\n\
╱ x \n\
╱ \n\
‾‾‾ \n\
x = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(x/2, (x, 0, oo))
    ascii_str = \
"""\
oo \n\
___ \n\
\\ ` \n\
\\ x\n\
) -\n\
/ 2\n\
/__, \n\
x = 0 \
"""
    ucode_str = \
"""\
∞ \n\
____ \n\
╲ \n\
╲ \n\
╲ x\n\
╱ ─\n\
╱ 2\n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(x**3/2, (x, 0, oo))
    ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ 3\n\
\\ x \n\
/ --\n\
/ 2 \n\
/___, \n\
x = 0 \
"""
    ucode_str = \
"""\
∞ \n\
____ \n\
╲ \n\
╲ 3\n\
╲ x \n\
╱ ──\n\
╱ 2 \n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum((x**3*y**(x/2))**n, (x, 0, oo))
    ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ n\n\
\\ / x\\ \n\
) | -| \n\
/ | 3 2| \n\
/ \\x *y / \n\
/___, \n\
x = 0 \
"""
    ucode_str = \
"""\
∞ \n\
_____ \n\
╲ \n\
╲ \n\
╲ n\n\
╲ ⎛ x⎞ \n\
╱ ⎜ ─⎟ \n\
╱ ⎜ 3 2⎟ \n\
╱ ⎝x ⋅y ⎠ \n\
╱ \n\
‾‾‾‾‾ \n\
x = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(1/x**2, (x, 0, oo))
    ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ 1 \n\
\\ --\n\
/ 2\n\
/ x \n\
/___, \n\
x = 0 \
"""
    ucode_str = \
"""\
∞ \n\
____ \n\
╲ \n\
╲ 1 \n\
╲ ──\n\
╱ 2\n\
╱ x \n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(1/y**(a/b), (x, 0, oo))
    ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ -a \n\
\\ ---\n\
/ b \n\
/ y \n\
/___, \n\
x = 0 \
"""
    ucode_str = \
"""\
∞ \n\
____ \n\
╲ \n\
╲ -a \n\
╲ ───\n\
╱ b \n\
╱ y \n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Sum(1/y**(a/b), (x, 0, oo), (y, 1, 2))
    ascii_str = \
"""\
2 oo \n\
____ ____ \n\
\\ ` \\ ` \n\
\\ \\ -a\n\
\\ \\ --\n\
/ / b \n\
/ / y \n\
/___, /___, \n\
y = 1 x = 0 \
"""
    ucode_str = \
"""\
2 ∞ \n\
____ ____ \n\
╲ ╲ \n\
╲ ╲ -a\n\
╲ ╲ ──\n\
╱ ╱ b \n\
╱ ╱ y \n\
╱ ╱ \n\
‾‾‾‾ ‾‾‾‾ \n\
y = 1 x = 0 \
"""
    # NOTE(review): ascii_str/ucode_str above are built but never asserted —
    # the double-Sum case appears to be missing its two asserts. Verify the
    # expected strings against current output before enabling them.
    expr = Sum(1/(1 + 1/(
        1 + 1/k)) + 1, (k, 111, 1 + 1/n), (k, 1/(1 + m), oo)) + 1/(1 + 1/k)
    ascii_str = \
"""\
1 \n\
1 + - \n\
oo n \n\
_____ _____ \n\
\\ ` \\ ` \n\
\\ \\ / 1 \\ \n\
\\ \\ |1 + ---------| \n\
\\ \\ | 1 | 1 \n\
) ) | 1 + -----| + -----\n\
/ / | 1| 1\n\
/ / | 1 + -| 1 + -\n\
/ / \\ k/ k\n\
/____, /____, \n\
1 k = 111 \n\
k = ----- \n\
m + 1 \
"""
    ucode_str = \
"""\
1 \n\
1 + ─ \n\
∞ n \n\
______ ______ \n\
╲ ╲ \n\
╲ ╲ \n\
╲ ╲ ⎛ 1 ⎞ \n\
╲ ╲ ⎜1 + ─────────⎟ \n\
╲ ╲ ⎜ 1 ⎟ 1 \n\
╱ ╱ ⎜ 1 + ─────⎟ + ─────\n\
╱ ╱ ⎜ 1⎟ 1\n\
╱ ╱ ⎜ 1 + ─⎟ 1 + ─\n\
╱ ╱ ⎝ k⎠ k\n\
╱ ╱ \n\
‾‾‾‾‾‾ ‾‾‾‾‾‾ \n\
1 k = 111 \n\
k = ───── \n\
m + 1 \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_units():
    """Unit printing: joule keeps its name unless converted to base SI units."""
    expr = joule
    ascii_str1 = \
"""\
2\n\
kilogram*meter \n\
---------------\n\
2 \n\
second \
"""
    unicode_str1 = \
"""\
2\n\
kilogram⋅meter \n\
───────────────\n\
2 \n\
second \
"""
    ascii_str2 = \
"""\
2\n\
3*x*y*kilogram*meter \n\
---------------------\n\
2 \n\
second \
"""
    unicode_str2 = \
"""\
2\n\
3⋅x⋅y⋅kilogram⋅meter \n\
─────────────────────\n\
2 \n\
second \
"""
    from sympy.physics.units import kg, m, s
    assert upretty(expr) == "joule"
    assert pretty(expr) == "joule"
    assert upretty(expr.convert_to(kg*m**2/s**2)) == unicode_str1
    assert pretty(expr.convert_to(kg*m**2/s**2)) == ascii_str1
    assert upretty(3*kg*x*m**2*y/s**2) == unicode_str2
    assert pretty(3*kg*x*m**2*y/s**2) == ascii_str2
def test_pretty_Subs():
    """Subs printing: expression followed by a vertical bar with the substitutions."""
    f = Function('f')
    expr = Subs(f(x), x, ph**2)
    ascii_str = \
"""\
(f(x))| 2\n\
|x=phi \
"""
    unicode_str = \
"""\
(f(x))│ 2\n\
│x=φ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    expr = Subs(f(x).diff(x), x, 0)
    ascii_str = \
"""\
/d \\| \n\
|--(f(x))|| \n\
\\dx /|x=0\
"""
    unicode_str = \
"""\
⎛d ⎞│ \n\
⎜──(f(x))⎟│ \n\
⎝dx ⎠│x=0\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    # multiple substitutions are comma-separated after the bar
    expr = Subs(f(x).diff(x)/y, (x, y), (0, Rational(1, 2)))
    ascii_str = \
"""\
/d \\| \n\
|--(f(x))|| \n\
|dx || \n\
|--------|| \n\
\\ y /|x=0, y=1/2\
"""
    unicode_str = \
"""\
⎛d ⎞│ \n\
⎜──(f(x))⎟│ \n\
⎜dx ⎟│ \n\
⎜────────⎟│ \n\
⎝ y ⎠│x=0, y=1/2\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
def test_gammas():
    """Incomplete/complete gamma use Greek letters; a user Function named gamma gets lowercase."""
    assert upretty(lowergamma(x, y)) == "γ(x, y)"
    assert upretty(uppergamma(x, y)) == "Γ(x, y)"
    assert xpretty(gamma(x), use_unicode=True) == 'Γ(x)'
    assert xpretty(gamma, use_unicode=True) == 'Γ'
    user_gamma = symbols('gamma', cls=Function)
    assert xpretty(user_gamma(x), use_unicode=True) == 'γ(x)'
    assert xpretty(user_gamma, use_unicode=True) == 'γ'
def test_beta():
    """beta prints as capital 'Β'/'B'; a user Function named beta gets lowercase 'β'."""
    for obj, uni, asc in ((beta(x, y), 'Β(x, y)', 'B(x, y)'),
                          (beta, 'Β', 'B')):
        assert xpretty(obj, use_unicode=True) == uni
        assert xpretty(obj, use_unicode=False) == asc
    mybeta = Function('beta')
    assert xpretty(mybeta(x), use_unicode=True) == 'β(x)'
    assert xpretty(mybeta(x, y, z), use_unicode=False) == 'beta(x, y, z)'
    assert xpretty(mybeta, use_unicode=True) == 'β'
# test that notation passes to subclasses of the same name only
def test_function_subclass_different_name():
    """A gamma subclass with a different name must NOT inherit the 'Γ' notation."""
    class mygamma(gamma):
        pass
    assert xpretty(mygamma, use_unicode=True) == r"mygamma"
    assert xpretty(mygamma(x), use_unicode=True) == r"mygamma(x)"
def test_SingularityFunction():
    """SingularityFunction prints as Macaulay brackets <x - a>**n in both modes."""
    assert xpretty(SingularityFunction(x, 0, n), use_unicode=True) == (
"""\
n\n\
<x> \
""")
    assert xpretty(SingularityFunction(x, 1, n), use_unicode=True) == (
"""\
n\n\
<x - 1> \
""")
    assert xpretty(SingularityFunction(x, -1, n), use_unicode=True) == (
"""\
n\n\
<x + 1> \
""")
    assert xpretty(SingularityFunction(x, a, n), use_unicode=True) == (
"""\
n\n\
<-a + x> \
""")
    assert xpretty(SingularityFunction(x, y, n), use_unicode=True) == (
"""\
n\n\
<x - y> \
""")
    # ASCII output is identical to the Unicode output for these cases
    assert xpretty(SingularityFunction(x, 0, n), use_unicode=False) == (
"""\
n\n\
<x> \
""")
    assert xpretty(SingularityFunction(x, 1, n), use_unicode=False) == (
"""\
n\n\
<x - 1> \
""")
    assert xpretty(SingularityFunction(x, -1, n), use_unicode=False) == (
"""\
n\n\
<x + 1> \
""")
    assert xpretty(SingularityFunction(x, a, n), use_unicode=False) == (
"""\
n\n\
<-a + x> \
""")
    assert xpretty(SingularityFunction(x, y, n), use_unicode=False) == (
"""\
n\n\
<x - y> \
""")
def test_deltas():
    """DiracDelta prints as δ, with the derivative order as a parenthesized superscript."""
    assert xpretty(DiracDelta(x), use_unicode=True) == 'δ(x)'
    assert xpretty(DiracDelta(x, 1), use_unicode=True) == \
"""\
(1) \n\
δ (x)\
"""
    assert xpretty(x*DiracDelta(x, 1), use_unicode=True) == \
"""\
(1) \n\
x⋅δ (x)\
"""
def test_hyper():
    """Generalized hypergeometric pFq printing: subscripts, parameter stacking, tall args."""
    expr = hyper((), (), z)
    ucode_str = \
"""\
┌─ ⎛ │ ⎞\n\
├─ ⎜ │ z⎟\n\
0╵ 0 ⎝ │ ⎠\
"""
    ascii_str = \
"""\
_ \n\
|_ / | \\\n\
| | | z|\n\
0 0 \\ | /\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = hyper((), (1,), x)
    ucode_str = \
"""\
┌─ ⎛ │ ⎞\n\
├─ ⎜ │ x⎟\n\
0╵ 1 ⎝1 │ ⎠\
"""
    ascii_str = \
"""\
_ \n\
|_ / | \\\n\
| | | x|\n\
0 1 \\1 | /\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = hyper([2], [1], x)
    ucode_str = \
"""\
┌─ ⎛2 │ ⎞\n\
├─ ⎜ │ x⎟\n\
1╵ 1 ⎝1 │ ⎠\
"""
    ascii_str = \
"""\
_ \n\
|_ /2 | \\\n\
| | | x|\n\
1 1 \\1 | /\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # tall parameters (a fraction among them) stretch the surrounding parens
    expr = hyper((pi/3, -2*k), (3, 4, 5, -3), x)
    ucode_str = \
"""\
⎛ π │ ⎞\n\
┌─ ⎜ ─, -2⋅k │ ⎟\n\
├─ ⎜ 3 │ x⎟\n\
2╵ 4 ⎜ │ ⎟\n\
⎝-3, 3, 4, 5 │ ⎠\
"""
    ascii_str = \
"""\
\n\
_ / pi | \\\n\
|_ | --, -2*k | |\n\
| | 3 | x|\n\
2 4 | | |\n\
\\-3, 3, 4, 5 | /\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = hyper((pi, S('2/3'), -2*k), (3, 4, 5, -3), x**2)
    ucode_str = \
"""\
┌─ ⎛2/3, π, -2⋅k │ 2⎞\n\
├─ ⎜ │ x ⎟\n\
3╵ 4 ⎝-3, 3, 4, 5 │ ⎠\
"""
    ascii_str = \
"""\
_ \n\
|_ /2/3, pi, -2*k | 2\\
| | | x |
3 4 \\ -3, 3, 4, 5 | /"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # deeply nested continued-fraction argument produces very tall art
    expr = hyper([1, 2], [3, 4], 1/(1/(1/(1/x + 1) + 1) + 1))
    ucode_str = \
"""\
⎛ │ 1 ⎞\n\
⎜ │ ─────────────⎟\n\
⎜ │ 1 ⎟\n\
┌─ ⎜1, 2 │ 1 + ─────────⎟\n\
├─ ⎜ │ 1 ⎟\n\
2╵ 2 ⎜3, 4 │ 1 + ─────⎟\n\
⎜ │ 1⎟\n\
⎜ │ 1 + ─⎟\n\
⎝ │ x⎠\
"""
    ascii_str = \
"""\
\n\
/ | 1 \\\n\
| | -------------|\n\
_ | | 1 |\n\
|_ |1, 2 | 1 + ---------|\n\
| | | 1 |\n\
2 2 |3, 4 | 1 + -----|\n\
| | 1|\n\
| | 1 + -|\n\
\\ | x/\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_meijerg():
    """Meijer G-function printing: index pairs, parameter rows, and nesting in Integral."""
    expr = meijerg([pi, pi, x], [1], [0, 1], [1, 2, 3], z)
    ucode_str = \
"""\
╭─╮2, 3 ⎛π, π, x 1 │ ⎞\n\
│╶┐ ⎜ │ z⎟\n\
╰─╯4, 5 ⎝ 0, 1 1, 2, 3 │ ⎠\
"""
    ascii_str = \
"""\
__2, 3 /pi, pi, x 1 | \\\n\
/__ | | z|\n\
\\_|4, 5 \\ 0, 1 1, 2, 3 | /\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = meijerg([1, pi/7], [2, pi, 5], [], [], z**2)
    ucode_str = \
"""\
⎛ π │ ⎞\n\
╭─╮0, 2 ⎜1, ─ 2, 5, π │ 2⎟\n\
│╶┐ ⎜ 7 │ z ⎟\n\
╰─╯5, 0 ⎜ │ ⎟\n\
⎝ │ ⎠\
"""
    ascii_str = \
"""\
/ pi | \\\n\
__0, 2 |1, -- 2, 5, pi | 2|\n\
/__ | 7 | z |\n\
\\_|5, 0 | | |\n\
\\ | /\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # expected strings defined before expr here; asserted just below
    ucode_str = \
"""\
╭─╮ 1, 10 ⎛1, 1, 1, 1, 1, 1, 1, 1, 1, 1 1 │ ⎞\n\
│╶┐ ⎜ │ z⎟\n\
╰─╯11, 2 ⎝ 1 1 │ ⎠\
"""
    ascii_str = \
"""\
__ 1, 10 /1, 1, 1, 1, 1, 1, 1, 1, 1, 1 1 | \\\n\
/__ | | z|\n\
\\_|11, 2 \\ 1 1 | /\
"""
    expr = meijerg([1]*10, [1], [1], [1], z)
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = meijerg([1, 2, ], [4, 3], [3], [4, 5], 1/(1/(1/(1/x + 1) + 1) + 1))
    ucode_str = \
"""\
⎛ │ 1 ⎞\n\
⎜ │ ─────────────⎟\n\
⎜ │ 1 ⎟\n\
╭─╮1, 2 ⎜1, 2 3, 4 │ 1 + ─────────⎟\n\
│╶┐ ⎜ │ 1 ⎟\n\
╰─╯4, 3 ⎜ 3 4, 5 │ 1 + ─────⎟\n\
⎜ │ 1⎟\n\
⎜ │ 1 + ─⎟\n\
⎝ │ x⎠\
"""
    ascii_str = \
"""\
/ | 1 \\\n\
| | -------------|\n\
| | 1 |\n\
__1, 2 |1, 2 3, 4 | 1 + ---------|\n\
/__ | | 1 |\n\
\\_|4, 3 | 3 4, 5 | 1 + -----|\n\
| | 1|\n\
| | 1 + -|\n\
\\ | x/\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # G-function embedded in an unevaluated Integral
    expr = Integral(expr, x)
    ucode_str = \
"""\
⌠ \n\
⎮ ⎛ │ 1 ⎞ \n\
⎮ ⎜ │ ─────────────⎟ \n\
⎮ ⎜ │ 1 ⎟ \n\
⎮ ╭─╮1, 2 ⎜1, 2 3, 4 │ 1 + ─────────⎟ \n\
⎮ │╶┐ ⎜ │ 1 ⎟ dx\n\
⎮ ╰─╯4, 3 ⎜ 3 4, 5 │ 1 + ─────⎟ \n\
⎮ ⎜ │ 1⎟ \n\
⎮ ⎜ │ 1 + ─⎟ \n\
⎮ ⎝ │ x⎠ \n\
⌡ \
"""
    ascii_str = \
"""\
/ \n\
| \n\
| / | 1 \\ \n\
| | | -------------| \n\
| | | 1 | \n\
| __1, 2 |1, 2 3, 4 | 1 + ---------| \n\
| /__ | | 1 | dx\n\
| \\_|4, 3 | 3 4, 5 | 1 + -----| \n\
| | | 1| \n\
| | | 1 + -| \n\
| \\ | x/ \n\
| \n\
/ \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_noncommutative():
    """Noncommutative symbols: factor order is preserved; inverses print as powers."""
    A, B, C = symbols('A,B,C', commutative=False)
    expr = A*B*C**-1
    ascii_str = \
"""\
-1\n\
A*B*C \
"""
    ucode_str = \
"""\
-1\n\
A⋅B⋅C \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = C**-1*A*B
    ascii_str = \
"""\
-1 \n\
C *A*B\
"""
    ucode_str = \
"""\
-1 \n\
C ⋅A⋅B\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = A*C**-1*B
    ascii_str = \
"""\
-1 \n\
A*C *B\
"""
    ucode_str = \
"""\
-1 \n\
A⋅C ⋅B\
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # division by a commutative symbol renders as a fraction bar
    expr = A*C**-1*B/x
    ascii_str = \
"""\
-1 \n\
A*C *B\n\
-------\n\
x \
"""
    ucode_str = \
"""\
-1 \n\
A⋅C ⋅B\n\
───────\n\
x \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_special_functions():
x, y = symbols("x y")
# atan2
expr = atan2(y/sqrt(200), sqrt(x))
ascii_str = \
"""\
/ ___ \\\n\
|\\/ 2 *y ___|\n\
atan2|-------, \\/ x |\n\
\\ 20 /\
"""
ucode_str = \
"""\
⎛√2⋅y ⎞\n\
atan2⎜────, √x⎟\n\
⎝ 20 ⎠\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_pretty_geometry():
e = Segment((0, 1), (0, 2))
assert pretty(e) == 'Segment2D(Point2D(0, 1), Point2D(0, 2))'
e = Ray((1, 1), angle=4.02*pi)
assert pretty(e) == 'Ray2D(Point2D(1, 1), Point2D(2, tan(pi/50) + 1))'
def test_expint():
    # Ei has no special glyph: identical output in ASCII and unicode modes.
    expr = Ei(x)
    string = 'Ei(x)'
    assert pretty(expr) == string
    assert upretty(expr) == string
    # expint gets the subscripted E-notation only in unicode mode; the
    # ASCII printer falls back to the function name.
    expr = expint(1, z)
    ucode_str = "E₁(z)"
    ascii_str = "expint(1, z)"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # The trigonometric/hyperbolic integrals print by name in both modes.
    assert pretty(Shi(x)) == 'Shi(x)'
    assert pretty(Si(x)) == 'Si(x)'
    assert pretty(Ci(x)) == 'Ci(x)'
    assert pretty(Chi(x)) == 'Chi(x)'
    assert upretty(Shi(x)) == 'Shi(x)'
    assert upretty(Si(x)) == 'Si(x)'
    assert upretty(Ci(x)) == 'Ci(x)'
    assert upretty(Chi(x)) == 'Chi(x)'
def test_elliptic_functions():
ascii_str = \
"""\
/ 1 \\\n\
K|-----|\n\
\\z + 1/\
"""
ucode_str = \
"""\
⎛ 1 ⎞\n\
K⎜─────⎟\n\
⎝z + 1⎠\
"""
expr = elliptic_k(1/(z + 1))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ | 1 \\\n\
F|1|-----|\n\
\\ |z + 1/\
"""
ucode_str = \
"""\
⎛ │ 1 ⎞\n\
F⎜1│─────⎟\n\
⎝ │z + 1⎠\
"""
expr = elliptic_f(1, 1/(1 + z))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ 1 \\\n\
E|-----|\n\
\\z + 1/\
"""
ucode_str = \
"""\
⎛ 1 ⎞\n\
E⎜─────⎟\n\
⎝z + 1⎠\
"""
expr = elliptic_e(1/(z + 1))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ | 1 \\\n\
E|1|-----|\n\
\\ |z + 1/\
"""
ucode_str = \
"""\
⎛ │ 1 ⎞\n\
E⎜1│─────⎟\n\
⎝ │z + 1⎠\
"""
expr = elliptic_e(1, 1/(1 + z))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ |4\\\n\
Pi|3|-|\n\
\\ |x/\
"""
ucode_str = \
"""\
⎛ │4⎞\n\
Π⎜3│─⎟\n\
⎝ │x⎠\
"""
expr = elliptic_pi(3, 4/x)
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ 4| \\\n\
Pi|3; -|6|\n\
\\ x| /\
"""
ucode_str = \
"""\
⎛ 4│ ⎞\n\
Π⎜3; ─│6⎟\n\
⎝ x│ ⎠\
"""
expr = elliptic_pi(3, 4/x, 6)
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
X = Normal('x1', 0, 1)
assert upretty(where(X > 0)) == "Domain: 0 < x₁ ∧ x₁ < ∞"
D = Die('d1', 6)
assert upretty(where(D > 4)) == 'Domain: d₁ = 5 ∨ d₁ = 6'
A = Exponential('a', 1)
B = Exponential('b', 1)
assert upretty(pspace(Tuple(A, B)).domain) == \
'Domain: 0 ≤ a ∧ 0 ≤ b ∧ a < ∞ ∧ b < ∞'
def test_PrettyPoly():
    # Elements of polynomial fraction fields and polynomial rings print via
    # their expression form, identically in ASCII and unicode modes.
    F = QQ.frac_field(x, y)
    R = QQ.poly_ring(x, y)
    expr = F.convert(x/(x + y))
    assert pretty(expr) == "x/(x + y)"
    assert upretty(expr) == "x/(x + y)"
    expr = R.convert(x + y)
    assert pretty(expr) == "x + y"
    assert upretty(expr) == "x + y"
def test_issue_6285():
assert pretty(Pow(2, -5, evaluate=False)) == '1 \n--\n 5\n2 '
assert pretty(Pow(x, (1/pi))) == \
' 1 \n'\
' --\n'\
' pi\n'\
'x '
def test_issue_6359():
assert pretty(Integral(x**2, x)**2) == \
"""\
2
/ / \\ \n\
| | | \n\
| | 2 | \n\
| | x dx| \n\
| | | \n\
\\/ / \
"""
assert upretty(Integral(x**2, x)**2) == \
"""\
2
⎛⌠ ⎞ \n\
⎜⎮ 2 ⎟ \n\
⎜⎮ x dx⎟ \n\
⎝⌡ ⎠ \
"""
assert pretty(Sum(x**2, (x, 0, 1))**2) == \
"""\
2\n\
/ 1 \\ \n\
|___ | \n\
|\\ ` | \n\
| \\ 2| \n\
| / x | \n\
|/__, | \n\
\\x = 0 / \
"""
assert upretty(Sum(x**2, (x, 0, 1))**2) == \
"""\
2
⎛ 1 ⎞ \n\
⎜ ___ ⎟ \n\
⎜ ╲ ⎟ \n\
⎜ ╲ 2⎟ \n\
⎜ ╱ x ⎟ \n\
⎜ ╱ ⎟ \n\
⎜ ‾‾‾ ⎟ \n\
⎝x = 0 ⎠ \
"""
assert pretty(Product(x**2, (x, 1, 2))**2) == \
"""\
2
/ 2 \\ \n\
|______ | \n\
| | | 2| \n\
| | | x | \n\
| | | | \n\
\\x = 1 / \
"""
assert upretty(Product(x**2, (x, 1, 2))**2) == \
"""\
2
⎛ 2 ⎞ \n\
⎜─┬──┬─ ⎟ \n\
⎜ │ │ 2⎟ \n\
⎜ │ │ x ⎟ \n\
⎜ │ │ ⎟ \n\
⎝x = 1 ⎠ \
"""
f = Function('f')
assert pretty(Derivative(f(x), x)**2) == \
"""\
2
/d \\ \n\
|--(f(x))| \n\
\\dx / \
"""
assert upretty(Derivative(f(x), x)**2) == \
"""\
2
⎛d ⎞ \n\
⎜──(f(x))⎟ \n\
⎝dx ⎠ \
"""
def test_issue_6739():
ascii_str = \
"""\
1 \n\
-----\n\
___\n\
\\/ x \
"""
ucode_str = \
"""\
1 \n\
──\n\
√x\
"""
assert pretty(1/sqrt(x)) == ascii_str
assert upretty(1/sqrt(x)) == ucode_str
def test_complicated_symbol_unchanged():
    # Symbol names that contain both '_' and '^' markers must round-trip
    # through the ASCII printer without being reformatted.
    names = ("dexpr2_d1tau", "dexpr2^d1tau")
    assert all(pretty(Symbol(name)) == name for name in names)
def test_categories():
from sympy.categories import (Object, IdentityMorphism,
NamedMorphism, Category, Diagram, DiagramGrid)
A1 = Object("A1")
A2 = Object("A2")
A3 = Object("A3")
f1 = NamedMorphism(A1, A2, "f1")
f2 = NamedMorphism(A2, A3, "f2")
id_A1 = IdentityMorphism(A1)
K1 = Category("K1")
assert pretty(A1) == "A1"
assert upretty(A1) == "A₁"
assert pretty(f1) == "f1:A1-->A2"
assert upretty(f1) == "f₁:A₁——▶A₂"
assert pretty(id_A1) == "id:A1-->A1"
assert upretty(id_A1) == "id:A₁——▶A₁"
assert pretty(f2*f1) == "f2*f1:A1-->A3"
assert upretty(f2*f1) == "f₂∘f₁:A₁——▶A₃"
assert pretty(K1) == "K1"
assert upretty(K1) == "K₁"
# Test how diagrams are printed.
d = Diagram()
assert pretty(d) == "EmptySet"
assert upretty(d) == "∅"
d = Diagram({f1: "unique", f2: S.EmptySet})
assert pretty(d) == "{f2*f1:A1-->A3: EmptySet, id:A1-->A1: " \
"EmptySet, id:A2-->A2: EmptySet, id:A3-->A3: " \
"EmptySet, f1:A1-->A2: {unique}, f2:A2-->A3: EmptySet}"
assert upretty(d) == "{f₂∘f₁:A₁——▶A₃: ∅, id:A₁——▶A₁: ∅, " \
"id:A₂——▶A₂: ∅, id:A₃——▶A₃: ∅, f₁:A₁——▶A₂: {unique}, f₂:A₂——▶A₃: ∅}"
d = Diagram({f1: "unique", f2: S.EmptySet}, {f2 * f1: "unique"})
assert pretty(d) == "{f2*f1:A1-->A3: EmptySet, id:A1-->A1: " \
"EmptySet, id:A2-->A2: EmptySet, id:A3-->A3: " \
"EmptySet, f1:A1-->A2: {unique}, f2:A2-->A3: EmptySet}" \
" ==> {f2*f1:A1-->A3: {unique}}"
assert upretty(d) == "{f₂∘f₁:A₁——▶A₃: ∅, id:A₁——▶A₁: ∅, id:A₂——▶A₂: " \
"∅, id:A₃——▶A₃: ∅, f₁:A₁——▶A₂: {unique}, f₂:A₂——▶A₃: ∅}" \
" ══▶ {f₂∘f₁:A₁——▶A₃: {unique}}"
grid = DiagramGrid(d)
assert pretty(grid) == "A1 A2\n \nA3 "
assert upretty(grid) == "A₁ A₂\n \nA₃ "
def test_PrettyModules():
    # Pretty printing of free modules, submodules, ideals and quotient
    # modules over the (old-style) polynomial ring QQ[x, y].
    R = QQ.old_poly_ring(x, y)
    F = R.free_module(2)
    M = F.submodule([x, y], [1, x**2])

    # Free module: base ring with a superscript rank.
    ucode_str = \
"""\
       2\n\
ℚ[x, y] \
"""
    ascii_str = \
"""\
        2\n\
QQ[x, y] \
"""
    assert upretty(F) == ucode_str
    assert pretty(F) == ascii_str

    # Submodule: angle brackets around the generators.
    ucode_str = \
"""\
╱        ⎡    2⎤╲\n\
╲[x, y], ⎣1, x ⎦╱\
"""
    ascii_str = \
"""\
              2  \n\
<[x, y], [1, x ]>\
"""
    assert upretty(M) == ucode_str
    assert pretty(M) == ascii_str

    I = R.ideal(x**2, y)
    ucode_str = \
"""\
╱ 2   ╲\n\
╲x , y╱\
"""
    ascii_str = \
"""\
  2    \n\
<x , y>\
"""
    assert upretty(I) == ucode_str
    assert pretty(I) == ascii_str

    # Quotient module prints as a fraction of module over submodule.
    Q = F / M
    ucode_str = \
"""\
            2    \n\
     ℚ[x, y]     \n\
─────────────────\n\
╱        ⎡    2⎤╲\n\
╲[x, y], ⎣1, x ⎦╱\
"""
    ascii_str = \
"""\
            2    \n\
    QQ[x, y]     \n\
-----------------\n\
              2  \n\
<[x, y], [1, x ]>\
"""
    assert upretty(Q) == ucode_str
    assert pretty(Q) == ascii_str

    ucode_str = \
"""\
╱⎡    3⎤                                                ╲\n\
│⎢   x ⎥   ╱        ⎡    2⎤╲           ╱        ⎡    2⎤╲│\n\
│⎢1, ──⎥ + ╲[x, y], ⎣1, x ⎦╱, [2, y] + ╲[x, y], ⎣1, x ⎦╱│\n\
╲⎣   2 ⎦                                                ╱\
"""
    ascii_str = \
"""\
              3                                          \n\
             x     2                           2         \n\
<[1, --] + <[x, y], [1, x ]>, [2, y] + <[x, y], [1, x ]>>\n\
     2                                                   \
"""
    # Previously the two fixture strings above were dead code: no expression
    # was built and nothing was asserted against them.  Restore the intended
    # checks for a submodule of a quotient module, whose generators print as
    # cosets (generator + submodule).
    assert upretty(Q.submodule([1, x**3/2], [2, y])) == ucode_str
    assert pretty(Q.submodule([1, x**3/2], [2, y])) == ascii_str
def test_QuotientRing():
R = QQ.old_poly_ring(x)/[x**2 + 1]
ucode_str = \
"""\
ℚ[x] \n\
────────\n\
╱ 2 ╲\n\
╲x + 1╱\
"""
ascii_str = \
"""\
QQ[x] \n\
--------\n\
2 \n\
<x + 1>\
"""
assert upretty(R) == ucode_str
assert pretty(R) == ascii_str
ucode_str = \
"""\
╱ 2 ╲\n\
1 + ╲x + 1╱\
"""
ascii_str = \
"""\
2 \n\
1 + <x + 1>\
"""
assert upretty(R.one) == ucode_str
assert pretty(R.one) == ascii_str
def test_Homomorphism():
from sympy.polys.agca import homomorphism
R = QQ.old_poly_ring(x)
expr = homomorphism(R.free_module(1), R.free_module(1), [0])
ucode_str = \
"""\
1 1\n\
[0] : ℚ[x] ──> ℚ[x] \
"""
ascii_str = \
"""\
1 1\n\
[0] : QQ[x] --> QQ[x] \
"""
assert upretty(expr) == ucode_str
assert pretty(expr) == ascii_str
expr = homomorphism(R.free_module(2), R.free_module(2), [0, 0])
ucode_str = \
"""\
⎡0 0⎤ 2 2\n\
⎢ ⎥ : ℚ[x] ──> ℚ[x] \n\
⎣0 0⎦ \
"""
ascii_str = \
"""\
[0 0] 2 2\n\
[ ] : QQ[x] --> QQ[x] \n\
[0 0] \
"""
assert upretty(expr) == ucode_str
assert pretty(expr) == ascii_str
expr = homomorphism(R.free_module(1), R.free_module(1) / [[x]], [0])
ucode_str = \
"""\
1\n\
1 ℚ[x] \n\
[0] : ℚ[x] ──> ─────\n\
<[x]>\
"""
ascii_str = \
"""\
1\n\
1 QQ[x] \n\
[0] : QQ[x] --> ------\n\
<[x]> \
"""
assert upretty(expr) == ucode_str
assert pretty(expr) == ascii_str
def test_Tr():
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert pretty(t) == r'Tr(A*B)'
assert upretty(t) == 'Tr(A⋅B)'
def test_pretty_Add():
eq = Mul(-2, x - 2, evaluate=False) + 5
assert pretty(eq) == '5 - 2*(x - 2)'
def test_issue_7179():
assert upretty(Not(Equivalent(x, y))) == 'x ⇎ y'
assert upretty(Not(Implies(x, y))) == 'x ↛ y'
def test_issue_7180():
assert upretty(Equivalent(x, y)) == 'x ⇔ y'
def test_pretty_Complement():
assert pretty(S.Reals - S.Naturals) == '(-oo, oo) \\ Naturals'
assert upretty(S.Reals - S.Naturals) == 'ℝ \\ ℕ'
assert pretty(S.Reals - S.Naturals0) == '(-oo, oo) \\ Naturals0'
assert upretty(S.Reals - S.Naturals0) == 'ℝ \\ ℕ₀'
def test_pretty_SymmetricDifference():
from sympy.sets.sets import SymmetricDifference
assert upretty(SymmetricDifference(Interval(2,3), Interval(3,5), \
evaluate = False)) == '[2, 3] ∆ [3, 5]'
with raises(NotImplementedError):
pretty(SymmetricDifference(Interval(2,3), Interval(3,5), evaluate = False))
def test_pretty_Contains():
assert pretty(Contains(x, S.Integers)) == 'Contains(x, Integers)'
assert upretty(Contains(x, S.Integers)) == 'x ∈ ℤ'
def test_issue_8292():
from sympy.core import sympify
e = sympify('((x+x**4)/(x-1))-(2*(x-1)**4/(x-1)**4)', evaluate=False)
ucode_str = \
"""\
4 4 \n\
2⋅(x - 1) x + x\n\
- ────────── + ──────\n\
4 x - 1 \n\
(x - 1) \
"""
ascii_str = \
"""\
4 4 \n\
2*(x - 1) x + x\n\
- ---------- + ------\n\
4 x - 1 \n\
(x - 1) \
"""
assert pretty(e) == ascii_str
assert upretty(e) == ucode_str
def test_issue_4335():
y = Function('y')
expr = -y(x).diff(x)
ucode_str = \
"""\
d \n\
-──(y(x))\n\
dx \
"""
ascii_str = \
"""\
d \n\
- --(y(x))\n\
dx \
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_issue_8344():
from sympy.core import sympify
e = sympify('2*x*y**2/1**2 + 1', evaluate=False)
ucode_str = \
"""\
2 \n\
2⋅x⋅y \n\
────── + 1\n\
2 \n\
1 \
"""
assert upretty(e) == ucode_str
def test_issue_6324():
x = Pow(2, 3, evaluate=False)
y = Pow(10, -2, evaluate=False)
e = Mul(x, y, evaluate=False)
ucode_str = \
"""\
3 \n\
2 \n\
───\n\
2\n\
10 \
"""
assert upretty(e) == ucode_str
def test_issue_7927():
e = sin(x/2)**cos(x/2)
ucode_str = \
"""\
⎛x⎞\n\
cos⎜─⎟\n\
⎝2⎠\n\
⎛ ⎛x⎞⎞ \n\
⎜sin⎜─⎟⎟ \n\
⎝ ⎝2⎠⎠ \
"""
assert upretty(e) == ucode_str
e = sin(x)**(S(11)/13)
ucode_str = \
"""\
11\n\
──\n\
13\n\
(sin(x)) \
"""
assert upretty(e) == ucode_str
def test_issue_6134():
from sympy.abc import lamda, t
phi = Function('phi')
e = lamda*x*Integral(phi(t)*pi*sin(pi*t), (t, 0, 1)) + lamda*x**2*Integral(phi(t)*2*pi*sin(2*pi*t), (t, 0, 1))
ucode_str = \
"""\
1 1 \n\
2 ⌠ ⌠ \n\
λ⋅x ⋅⎮ 2⋅π⋅φ(t)⋅sin(2⋅π⋅t) dt + λ⋅x⋅⎮ π⋅φ(t)⋅sin(π⋅t) dt\n\
⌡ ⌡ \n\
0 0 \
"""
assert upretty(e) == ucode_str
def test_issue_9877():
ucode_str1 = '(2, 3) ∪ ([1, 2] \\ {x})'
a, b, c = Interval(2, 3, True, True), Interval(1, 2), FiniteSet(x)
assert upretty(Union(a, Complement(b, c))) == ucode_str1
ucode_str2 = '{x} ∩ {y} ∩ ({z} \\ [1, 2])'
d, e, f, g = FiniteSet(x), FiniteSet(y), FiniteSet(z), Interval(1, 2)
assert upretty(Intersection(d, e, Complement(f, g))) == ucode_str2
def test_issue_13651():
expr1 = c + Mul(-1, a + b, evaluate=False)
assert pretty(expr1) == 'c - (a + b)'
expr2 = c + Mul(-1, a - b + d, evaluate=False)
assert pretty(expr2) == 'c - (a - b + d)'
def test_pretty_primenu():
from sympy.functions.combinatorial.numbers import primenu
ascii_str1 = "nu(n)"
ucode_str1 = "ν(n)"
n = symbols('n', integer=True)
assert pretty(primenu(n)) == ascii_str1
assert upretty(primenu(n)) == ucode_str1
def test_pretty_primeomega():
from sympy.functions.combinatorial.numbers import primeomega
ascii_str1 = "Omega(n)"
ucode_str1 = "Ω(n)"
n = symbols('n', integer=True)
assert pretty(primeomega(n)) == ascii_str1
assert upretty(primeomega(n)) == ucode_str1
def test_pretty_Mod():
    from sympy.core import Mod
    # Mod prints infix as "a mod b"; only the multiplication symbol
    # ('*' vs '⋅') differs between the ASCII and unicode printers.
    ascii_str1 = "x mod 7"
    ucode_str1 = "x mod 7"
    ascii_str2 = "(x + 1) mod 7"
    ucode_str2 = "(x + 1) mod 7"
    ascii_str3 = "2*x mod 7"
    ucode_str3 = "2⋅x mod 7"
    # When Mod appears as a sub-expression it must be parenthesised.
    ascii_str4 = "(x mod 7) + 1"
    ucode_str4 = "(x mod 7) + 1"
    ascii_str5 = "2*(x mod 7)"
    ucode_str5 = "2⋅(x mod 7)"
    x = symbols('x', integer=True)
    assert pretty(Mod(x, 7)) == ascii_str1
    assert upretty(Mod(x, 7)) == ucode_str1
    assert pretty(Mod(x + 1, 7)) == ascii_str2
    assert upretty(Mod(x + 1, 7)) == ucode_str2
    assert pretty(Mod(2 * x, 7)) == ascii_str3
    assert upretty(Mod(2 * x, 7)) == ucode_str3
    assert pretty(Mod(x, 7) + 1) == ascii_str4
    assert upretty(Mod(x, 7) + 1) == ucode_str4
    assert pretty(2 * Mod(x, 7)) == ascii_str5
    assert upretty(2 * Mod(x, 7)) == ucode_str5
def test_issue_11801():
    # A Symbol with an empty name must print as the empty string in both
    # the ASCII and the unicode printers.
    empty = Symbol("")
    for printer in (pretty, upretty):
        assert printer(empty) == ""
def test_pretty_UnevaluatedExpr():
x = symbols('x')
he = UnevaluatedExpr(1/x)
ucode_str = \
"""\
1\n\
─\n\
x\
"""
assert upretty(he) == ucode_str
ucode_str = \
"""\
2\n\
⎛1⎞ \n\
⎜─⎟ \n\
⎝x⎠ \
"""
assert upretty(he**2) == ucode_str
ucode_str = \
"""\
1\n\
1 + ─\n\
x\
"""
assert upretty(he + 1) == ucode_str
ucode_str = \
('''\
1\n\
x⋅─\n\
x\
''')
assert upretty(x*he) == ucode_str
def test_issue_10472():
M = (Matrix([[0, 0], [0, 0]]), Matrix([0, 0]))
ucode_str = \
"""\
⎛⎡0 0⎤ ⎡0⎤⎞
⎜⎢ ⎥, ⎢ ⎥⎟
⎝⎣0 0⎦ ⎣0⎦⎠\
"""
assert upretty(M) == ucode_str
def test_MatrixElement_printing():
# test cases for issue #11821
A = MatrixSymbol("A", 1, 3)
B = MatrixSymbol("B", 1, 3)
C = MatrixSymbol("C", 1, 3)
ascii_str1 = "A_00"
ucode_str1 = "A₀₀"
assert pretty(A[0, 0]) == ascii_str1
assert upretty(A[0, 0]) == ucode_str1
ascii_str1 = "3*A_00"
ucode_str1 = "3⋅A₀₀"
assert pretty(3*A[0, 0]) == ascii_str1
assert upretty(3*A[0, 0]) == ucode_str1
ascii_str1 = "(-B + A)[0, 0]"
ucode_str1 = "(-B + A)[0, 0]"
F = C[0, 0].subs(C, A - B)
assert pretty(F) == ascii_str1
assert upretty(F) == ucode_str1
def test_issue_12675():
x, y, t, j = symbols('x y t j')
e = CoordSys3D('e')
ucode_str = \
"""\
⎛ t⎞ \n\
⎜⎛x⎞ ⎟ j_e\n\
⎜⎜─⎟ ⎟ \n\
⎝⎝y⎠ ⎠ \
"""
assert upretty((x/y)**t*e.j) == ucode_str
ucode_str = \
"""\
⎛1⎞ \n\
⎜─⎟ j_e\n\
⎝y⎠ \
"""
assert upretty((1/y)*e.j) == ucode_str
def test_MatrixSymbol_printing():
# test cases for issue #14237
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
C = MatrixSymbol("C", 3, 3)
assert pretty(-A*B*C) == "-A*B*C"
assert pretty(A - B) == "-B + A"
assert pretty(A*B*C - A*B - B*C) == "-A*B -B*C + A*B*C"
# issue #14814
x = MatrixSymbol('x', n, n)
y = MatrixSymbol('y*', n, n)
assert pretty(x + y) == "x + y*"
ascii_str = \
"""\
2 \n\
-2*y* -a*x\
"""
assert pretty(-a*x + -2*y*y) == ascii_str
def test_degree_printing():
expr1 = 90*degree
assert pretty(expr1) == '90°'
expr2 = x*degree
assert pretty(expr2) == 'x°'
expr3 = cos(x*degree + 90*degree)
assert pretty(expr3) == 'cos(x° + 90°)'
def test_vector_expr_pretty_printing():
A = CoordSys3D('A')
assert upretty(Cross(A.i, A.x*A.i+3*A.y*A.j)) == "(i_A)×((x_A) i_A + (3⋅y_A) j_A)"
assert upretty(x*Cross(A.i, A.j)) == 'x⋅(i_A)×(j_A)'
assert upretty(Curl(A.x*A.i + 3*A.y*A.j)) == "∇×((x_A) i_A + (3⋅y_A) j_A)"
assert upretty(Divergence(A.x*A.i + 3*A.y*A.j)) == "∇⋅((x_A) i_A + (3⋅y_A) j_A)"
assert upretty(Dot(A.i, A.x*A.i+3*A.y*A.j)) == "(i_A)⋅((x_A) i_A + (3⋅y_A) j_A)"
assert upretty(Gradient(A.x+3*A.y)) == "∇(x_A + 3⋅y_A)"
assert upretty(Laplacian(A.x+3*A.y)) == "∆(x_A + 3⋅y_A)"
# TODO: add support for ASCII pretty.
def test_pretty_print_tensor_expr():
L = TensorIndexType("L")
i, j, k = tensor_indices("i j k", L)
i0 = tensor_indices("i_0", L)
A, B, C, D = tensor_heads("A B C D", [L])
A0 = tensor_heads("A_0", [L])
H = TensorHead("H", [L, L])
expr = -i
ascii_str = \
"""\
-i\
"""
ucode_str = \
"""\
-i\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A(i)
ascii_str = \
"""\
i\n\
A \n\
\
"""
ucode_str = \
"""\
i\n\
A \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A(i0)
ascii_str = \
"""\
i_0\n\
A \n\
\
"""
ucode_str = \
"""\
i₀\n\
A \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A0(i0)
ascii_str = \
"""\
i_0\n\
A_0 \n\
\
"""
ucode_str = \
"""\
i₀\n\
A₀ \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A(-i)
ascii_str = \
"""\
\n\
A \n\
i\
"""
ucode_str = \
"""\
\n\
A \n\
i\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = -3*A(-i)
ascii_str = \
"""\
\n\
-3*A \n\
i\
"""
ucode_str = \
"""\
\n\
-3⋅A \n\
i\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = H(i, -j)
ascii_str = \
"""\
i \n\
H \n\
j\
"""
ucode_str = \
"""\
i \n\
H \n\
j\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = H(i, -i)
ascii_str = \
"""\
L_0 \n\
H \n\
L_0\
"""
ucode_str = \
"""\
L₀ \n\
H \n\
L₀\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = H(i, -j)*A(j)*B(k)
ascii_str = \
"""\
i L_0 k\n\
H *A *B \n\
L_0 \
"""
ucode_str = \
"""\
i L₀ k\n\
H ⋅A ⋅B \n\
L₀ \
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = (1+x)*A(i)
ascii_str = \
"""\
i\n\
(x + 1)*A \n\
\
"""
ucode_str = \
"""\
i\n\
(x + 1)⋅A \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A(i) + 3*B(i)
ascii_str = \
"""\
i i\n\
3*B + A \n\
\
"""
ucode_str = \
"""\
i i\n\
3⋅B + A \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_pretty_print_tensor_partial_deriv():
from sympy.tensor.toperators import PartialDerivative
L = TensorIndexType("L")
i, j, k = tensor_indices("i j k", L)
A, B, C, D = tensor_heads("A B C D", [L])
H = TensorHead("H", [L, L])
expr = PartialDerivative(A(i), A(j))
ascii_str = \
"""\
d / i\\\n\
---|A |\n\
j\\ /\n\
dA \n\
\
"""
ucode_str = \
"""\
∂ ⎛ i⎞\n\
───⎜A ⎟\n\
j⎝ ⎠\n\
∂A \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A(i)*PartialDerivative(H(k, -i), A(j))
ascii_str = \
"""\
L_0 d / k \\\n\
A *---|H |\n\
j\\ L_0/\n\
dA \n\
\
"""
ucode_str = \
"""\
L₀ ∂ ⎛ k ⎞\n\
A ⋅───⎜H ⎟\n\
j⎝ L₀⎠\n\
∂A \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A(i)*PartialDerivative(B(k)*C(-i) + 3*H(k, -i), A(j))
ascii_str = \
"""\
L_0 d / k k \\\n\
A *---|3*H + B *C |\n\
j\\ L_0 L_0/\n\
dA \n\
\
"""
ucode_str = \
"""\
L₀ ∂ ⎛ k k ⎞\n\
A ⋅───⎜3⋅H + B ⋅C ⎟\n\
j⎝ L₀ L₀⎠\n\
∂A \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = (A(i) + B(i))*PartialDerivative(C(j), D(j))
ascii_str = \
"""\
/ i i\\ d / L_0\\\n\
|A + B |*-----|C |\n\
\\ / L_0\\ /\n\
dD \n\
\
"""
ucode_str = \
"""\
⎛ i i⎞ ∂ ⎛ L₀⎞\n\
⎜A + B ⎟⋅────⎜C ⎟\n\
⎝ ⎠ L₀⎝ ⎠\n\
∂D \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = (A(i) + B(i))*PartialDerivative(C(-i), D(j))
ascii_str = \
"""\
/ L_0 L_0\\ d / \\\n\
|A + B |*---|C |\n\
\\ / j\\ L_0/\n\
dD \n\
\
"""
ucode_str = \
"""\
⎛ L₀ L₀⎞ ∂ ⎛ ⎞\n\
⎜A + B ⎟⋅───⎜C ⎟\n\
⎝ ⎠ j⎝ L₀⎠\n\
∂D \n\
\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = PartialDerivative(B(-i) + A(-i), A(-j), A(-n))
ucode_str = """\
2 \n\
∂ ⎛ ⎞\n\
───────⎜A + B ⎟\n\
⎝ i i⎠\n\
∂A ∂A \n\
n j \
"""
assert upretty(expr) == ucode_str
expr = PartialDerivative(3*A(-i), A(-j), A(-n))
ucode_str = """\
2 \n\
∂ ⎛ ⎞\n\
───────⎜3⋅A ⎟\n\
⎝ i⎠\n\
∂A ∂A \n\
n j \
"""
assert upretty(expr) == ucode_str
expr = TensorElement(H(i, j), {i:1})
ascii_str = \
"""\
i=1,j\n\
H \n\
\
"""
ucode_str = ascii_str
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = TensorElement(H(i, j), {i: 1, j: 1})
ascii_str = \
"""\
i=1,j=1\n\
H \n\
\
"""
ucode_str = ascii_str
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = TensorElement(H(i, j), {j: 1})
ascii_str = \
"""\
i,j=1\n\
H \n\
\
"""
ucode_str = ascii_str
expr = TensorElement(H(-i, j), {-i: 1})
ascii_str = \
"""\
j\n\
H \n\
i=1 \
"""
ucode_str = ascii_str
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_issue_15560():
a = MatrixSymbol('a', 1, 1)
e = pretty(a*(KroneckerProduct(a, a)))
result = 'a*(a x a)'
assert e == result
def test_print_polylog():
# Part of issue 6013
uresult = 'Li₂(3)'
aresult = 'polylog(2, 3)'
assert pretty(polylog(2, 3)) == aresult
assert upretty(polylog(2, 3)) == uresult
# Issue #25312
def test_print_expint_polylog_symbolic_order():
s, z = symbols("s, z")
uresult = 'Liₛ(z)'
aresult = 'polylog(s, z)'
assert pretty(polylog(s, z)) == aresult
assert upretty(polylog(s, z)) == uresult
# TODO: TBD polylog(s - 1, z)
uresult = 'Eₛ(z)'
aresult = 'expint(s, z)'
assert pretty(expint(s, z)) == aresult
assert upretty(expint(s, z)) == uresult
def test_print_polylog_long_order_issue_25309():
s, z = symbols("s, z")
ucode_str = \
"""\
⎛ 2 ⎞\n\
polylog⎝s , z⎠\
"""
assert upretty(polylog(s**2, z)) == ucode_str
def test_print_lerchphi():
    # Part of issue 6013: the Lerch transcendent prints as Φ(...) in unicode
    # mode and by function name in ASCII mode.
    # (A redundant un-asserted pretty() smoke call was removed; the
    # assertions below evaluate the same expression.)
    a = Symbol('a')
    uresult = 'Φ(a, 1, 2)'
    aresult = 'lerchphi(a, 1, 2)'
    assert pretty(lerchphi(a, 1, 2)) == aresult
    assert upretty(lerchphi(a, 1, 2)) == uresult
def test_issue_15583():
N = mechanics.ReferenceFrame('N')
result = '(n_x, n_y, n_z)'
e = pretty((N.x, N.y, N.z))
assert e == result
def test_matrixSymbolBold():
# Issue 15871
def boldpretty(expr):
return xpretty(expr, use_unicode=True, wrap_line=False, mat_symbol_style="bold")
from sympy.matrices.expressions.trace import trace
A = MatrixSymbol("A", 2, 2)
assert boldpretty(trace(A)) == 'tr(𝐀)'
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
C = MatrixSymbol("C", 3, 3)
assert boldpretty(-A) == '-𝐀'
assert boldpretty(A - A*B - B) == '-𝐁 -𝐀⋅𝐁 + 𝐀'
assert boldpretty(-A*B - A*B*C - B) == '-𝐁 -𝐀⋅𝐁 -𝐀⋅𝐁⋅𝐂'
A = MatrixSymbol("Addot", 3, 3)
assert boldpretty(A) == '𝐀̈'
omega = MatrixSymbol("omega", 3, 3)
assert boldpretty(omega) == 'ω'
omega = MatrixSymbol("omeganorm", 3, 3)
assert boldpretty(omega) == '‖ω‖'
a = Symbol('alpha')
b = Symbol('b')
c = MatrixSymbol("c", 3, 1)
d = MatrixSymbol("d", 3, 1)
assert boldpretty(a*B*c+b*d) == 'b⋅𝐝 + α⋅𝐁⋅𝐜'
d = MatrixSymbol("delta", 3, 1)
B = MatrixSymbol("Beta", 3, 3)
assert boldpretty(a*B*c+b*d) == 'b⋅δ + α⋅Β⋅𝐜'
A = MatrixSymbol("A_2", 3, 3)
assert boldpretty(A) == '𝐀₂'
def test_center_accent():
    # The combining character is attached to the middle character of the
    # base string; for even lengths it lands on the right-of-center
    # character ('aa' -> 'aã', 'aaaa' -> 'aaãa').
    assert center_accent('a', '\N{COMBINING TILDE}') == 'ã'
    assert center_accent('aa', '\N{COMBINING TILDE}') == 'aã'
    assert center_accent('aaa', '\N{COMBINING TILDE}') == 'aãa'
    assert center_accent('aaaa', '\N{COMBINING TILDE}') == 'aaãa'
    assert center_accent('aaaaa', '\N{COMBINING TILDE}') == 'aaãaa'
    assert center_accent('abcdefg', '\N{COMBINING FOUR DOTS ABOVE}') == 'abcd⃜efg'
def test_imaginary_unit():
    from sympy.printing.pretty import pretty  # b/c it was redefined above
    # The imaginary_unit option only affects unicode output ('ⅈ' vs 'ⅉ');
    # ASCII output always uses 'I'.
    assert pretty(1 + I, use_unicode=False) == '1 + I'
    assert pretty(1 + I, use_unicode=True) == '1 + ⅈ'
    assert pretty(1 + I, use_unicode=False, imaginary_unit='j') == '1 + I'
    assert pretty(1 + I, use_unicode=True, imaginary_unit='j') == '1 + ⅉ'
    # A non-string unit raises TypeError; an unknown string raises ValueError.
    raises(TypeError, lambda: pretty(I, imaginary_unit=I))
    raises(ValueError, lambda: pretty(I, imaginary_unit="kkk"))
def test_str_special_matrices():
    from sympy.matrices import Identity, ZeroMatrix, OneMatrix
    from sympy.matrices.expressions.special import MatrixUnit
    # Special matrices use double-struck glyphs in unicode mode and plain
    # ASCII characters otherwise.
    assert pretty(Identity(4)) == 'I'
    assert upretty(Identity(4)) == '𝕀'
    assert pretty(ZeroMatrix(2, 2)) == '0'
    assert upretty(ZeroMatrix(2, 2)) == '𝟘'
    assert pretty(OneMatrix(2, 2)) == '1'
    assert upretty(OneMatrix(2, 2)) == '𝟙'
    # MatrixUnit carries its (row, col) indices as a subscript in unicode.
    assert pretty(MatrixUnit(3, 3, 1, 2)) == 'E_12'
    assert upretty(MatrixUnit(3, 3, 1, 2)) == '𝔼₁₂'
def test_pretty_misc_functions():
assert pretty(LambertW(x)) == 'W(x)'
assert upretty(LambertW(x)) == 'W(x)'
assert pretty(LambertW(x, y)) == 'W(x, y)'
assert upretty(LambertW(x, y)) == 'W(x, y)'
assert pretty(airyai(x)) == 'Ai(x)'
assert upretty(airyai(x)) == 'Ai(x)'
assert pretty(airybi(x)) == 'Bi(x)'
assert upretty(airybi(x)) == 'Bi(x)'
assert pretty(airyaiprime(x)) == "Ai'(x)"
assert upretty(airyaiprime(x)) == "Ai'(x)"
assert pretty(airybiprime(x)) == "Bi'(x)"
assert upretty(airybiprime(x)) == "Bi'(x)"
assert pretty(fresnelc(x)) == 'C(x)'
assert upretty(fresnelc(x)) == 'C(x)'
assert pretty(fresnels(x)) == 'S(x)'
assert upretty(fresnels(x)) == 'S(x)'
assert pretty(Heaviside(x)) == 'Heaviside(x)'
assert upretty(Heaviside(x)) == 'θ(x)'
assert pretty(Heaviside(x, y)) == 'Heaviside(x, y)'
assert upretty(Heaviside(x, y)) == 'θ(x, y)'
assert pretty(dirichlet_eta(x)) == 'dirichlet_eta(x)'
assert upretty(dirichlet_eta(x)) == 'η(x)'
def test_hadamard_power():
m, n, p = symbols('m, n, p', integer=True)
A = MatrixSymbol('A', m, n)
B = MatrixSymbol('B', m, n)
# Testing printer:
expr = hadamard_power(A, n)
ascii_str = \
"""\
.n\n\
A \
"""
ucode_str = \
"""\
∘n\n\
A \
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = hadamard_power(A, 1+n)
ascii_str = \
"""\
.(n + 1)\n\
A \
"""
ucode_str = \
"""\
∘(n + 1)\n\
A \
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = hadamard_power(A*B.T, 1+n)
ascii_str = \
"""\
.(n + 1)\n\
/ T\\ \n\
\\A*B / \
"""
ucode_str = \
"""\
∘(n + 1)\n\
⎛ T⎞ \n\
⎝A⋅B ⎠ \
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_issue_17258():
n = Symbol('n', integer=True)
assert pretty(Sum(n, (n, -oo, 1))) == \
' 1 \n'\
' __ \n'\
' \\ ` \n'\
' ) n\n'\
' /_, \n'\
'n = -oo '
assert upretty(Sum(n, (n, -oo, 1))) == \
"""\
1 \n\
___ \n\
╲ \n\
╲ \n\
╱ n\n\
╱ \n\
‾‾‾ \n\
n = -∞ \
"""
def test_is_combining():
    # The test string is 'v' + COMBINING DOT ABOVE + '_' + 'm'; only the
    # combining mark (second code point) should be flagged.
    expected = [False, True, False, False]
    for sym, flag in zip("v̇_m", expected):
        assert is_combining(sym) == flag
def test_issue_17616():
assert pretty(pi**(1/exp(1))) == \
' / -1\\\n'\
' \\e /\n'\
'pi '
assert upretty(pi**(1/exp(1))) == \
' ⎛ -1⎞\n'\
' ⎝ℯ ⎠\n'\
'π '
assert pretty(pi**(1/pi)) == \
' 1 \n'\
' --\n'\
' pi\n'\
'pi '
assert upretty(pi**(1/pi)) == \
' 1\n'\
' ─\n'\
' π\n'\
'π '
assert pretty(pi**(1/EulerGamma)) == \
' 1 \n'\
' ----------\n'\
' EulerGamma\n'\
'pi '
assert upretty(pi**(1/EulerGamma)) == \
' 1\n'\
' ─\n'\
' γ\n'\
'π '
z = Symbol("x_17")
assert upretty(7**(1/z)) == \
'x₁₇___\n'\
' ╲╱ 7 '
assert pretty(7**(1/z)) == \
'x_17___\n'\
' \\/ 7 '
def test_issue_17857():
assert pretty(Range(-oo, oo)) == '{..., -1, 0, 1, ...}'
assert pretty(Range(oo, -oo, -1)) == '{..., 1, 0, -1, ...}'
def test_issue_18272():
x = Symbol('x')
n = Symbol('n')
assert upretty(ConditionSet(x, Eq(-x + exp(x), 0), S.Complexes)) == \
'⎧ │ ⎛ x ⎞⎫\n'\
'⎨x │ x ∊ ℂ ∧ ⎝-x + ℯ = 0⎠⎬\n'\
'⎩ │ ⎭'
assert upretty(ConditionSet(x, Contains(n/2, Interval(0, oo)), FiniteSet(-n/2, n/2))) == \
'⎧ │ ⎧-n n⎫ ⎛n ⎞⎫\n'\
'⎨x │ x ∊ ⎨───, ─⎬ ∧ ⎜─ ∈ [0, ∞)⎟⎬\n'\
'⎩ │ ⎩ 2 2⎭ ⎝2 ⎠⎭'
assert upretty(ConditionSet(x, Eq(Piecewise((1, x >= 3), (x/2 - 1/2, x >= 2), (1/2, x >= 1),
(x/2, True)) - 1/2, 0), Interval(0, 3))) == \
'⎧ │ ⎛⎛⎧ 1 for x ≥ 3⎞ ⎞⎫\n'\
'⎪ │ ⎜⎜⎪ ⎟ ⎟⎪\n'\
'⎪ │ ⎜⎜⎪x ⎟ ⎟⎪\n'\
'⎪ │ ⎜⎜⎪─ - 0.5 for x ≥ 2⎟ ⎟⎪\n'\
'⎪ │ ⎜⎜⎪2 ⎟ ⎟⎪\n'\
'⎨x │ x ∊ [0, 3] ∧ ⎜⎜⎨ ⎟ - 0.5 = 0⎟⎬\n'\
'⎪ │ ⎜⎜⎪ 0.5 for x ≥ 1⎟ ⎟⎪\n'\
'⎪ │ ⎜⎜⎪ ⎟ ⎟⎪\n'\
'⎪ │ ⎜⎜⎪ x ⎟ ⎟⎪\n'\
'⎪ │ ⎜⎜⎪ ─ otherwise⎟ ⎟⎪\n'\
'⎩ │ ⎝⎝⎩ 2 ⎠ ⎠⎭'
def test_Str():
from sympy.core.symbol import Str
assert pretty(Str('x')) == 'x'
def test_symbolic_probability():
    # Symbolic probability wrappers print with their conventional notation:
    # E[X], Var(X), P(cond), Cov(X, Y).
    mu = symbols("mu")
    sigma = symbols("sigma", positive=True)
    X = Normal("X", mu, sigma)
    assert pretty(Expectation(X)) == r'E[X]'
    assert pretty(Variance(X)) == r'Var(X)'
    assert pretty(Probability(X > 0)) == r'P(X > 0)'
    Y = Normal("Y", mu, sigma)
    assert pretty(Covariance(X, Y)) == 'Cov(X, Y)'
def test_issue_21758():
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series.fourier import FourierSeries
x = Symbol('x')
k, n = symbols('k n')
fo = FourierSeries(x, (x, -pi, pi), (0, SeqFormula(0, (k, 1, oo)), SeqFormula(
Piecewise((-2*pi*cos(n*pi)/n + 2*sin(n*pi)/n**2, (n > -oo) & (n < oo) & Ne(n, 0)),
(0, True))*sin(n*x)/pi, (n, 1, oo))))
assert upretty(piecewise_fold(fo)) == \
'⎧ 2⋅sin(3⋅x) \n'\
'⎪2⋅sin(x) - sin(2⋅x) + ────────── + … for n > -∞ ∧ n < ∞ ∧ n ≠ 0\n'\
'⎨ 3 \n'\
'⎪ \n'\
'⎩ 0 otherwise '
assert pretty(FourierSeries(x, (x, -pi, pi), (0, SeqFormula(0, (k, 1, oo)),
SeqFormula(0, (n, 1, oo))))) == '0'
def test_diffgeom():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField
x,y = symbols('x y', real=True)
m = Manifold('M', 2)
assert pretty(m) == 'M'
p = Patch('P', m)
assert pretty(p) == "P"
rect = CoordSystem('rect', p, [x, y])
assert pretty(rect) == "rect"
b = BaseScalarField(rect, 0)
assert pretty(b) == "x"
def test_deprecated_prettyForm():
with warns_deprecated_sympy():
from sympy.printing.pretty.pretty_symbology import xstr
assert xstr(1) == '1'
with warns_deprecated_sympy():
from sympy.printing.pretty.stringpict import prettyForm
p = prettyForm('s', unicode='s')
with warns_deprecated_sympy():
assert p.unicode == p.s == 's'
def test_center():
    # (args, expected): centre the string in the given width, optionally
    # with an explicit fill character (default is a space).
    cases = [
        (('1', 2), '1 '),
        (('1', 3), ' 1 '),
        (('1', 3, '-'), '-1-'),
        (('1', 5, '-'), '--1--'),
    ]
    for args, expected in cases:
        assert center(*args) == expected
| lowergamma |
python | openai__openai-python | src/openai/types/realtime/realtime_transcription_session_audio.py | {
"start": 295,
"end": 414
} | class ____(BaseModel):
input: Optional[RealtimeTranscriptionSessionAudioInput] = None
| RealtimeTranscriptionSessionAudio |
python | huggingface__transformers | src/transformers/models/idefics/configuration_idefics.py | {
"start": 6838,
"end": 15304
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
additional_vocab_size (`int`, *optional*, defaults to 0):
Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
are always trainable whereas regular vocab tokens can be frozen or not.
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`~IdeficsModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
Initialization type for the alphas.
alphas_initializer_range (`float`, *optional*, defaults to 0.0):
The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
Attention.
alpha_type (`str`, *optional*, defaults to `"float"`):
Whether the gating alphas should be vectors or single floats.
rms_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0)
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1)
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2)
End of stream token id.
tie_word_embeddings(`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
cross_layer_interval (`int`, *optional*, default to 1)
Interval for cross attention (from text to image) layers.
qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing text layers when `freeze_text_layers` is `True`
freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
Example:
```python
>>> from transformers import IdeficsModel, IdeficsConfig
>>> # Initializing a Idefics idefics-9b style configuration
>>> configuration = IdeficsConfig()
>>> # Initializing a model from the idefics-9b style configuration
>>> model = IdeficsModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "idefics"
sub_configs = {"perceiver_config": IdeficsPerceiverConfig, "vision_config": IdeficsVisionConfig}
def __init__(
self,
vocab_size=32000,
additional_vocab_size=0,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
dropout=0.0,
hidden_act="silu",
initializer_range=0.02,
alpha_initializer="zeros",
alphas_initializer_range=0.0,
alpha_type="float",
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
cross_layer_interval=1,
qk_layer_norms=False,
freeze_text_layers=True,
freeze_text_module_exceptions=[],
freeze_lm_head=False,
freeze_vision_layers=True,
freeze_vision_module_exceptions=[],
use_resampler=False,
vision_config=None,
perceiver_config=None,
**kwargs,
):
self.vocab_size = vocab_size
self.additional_vocab_size = additional_vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.alpha_initializer = alpha_initializer
self.alphas_initializer_range = alphas_initializer_range
self.alpha_type = alpha_type
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.cross_layer_interval = cross_layer_interval
self.qk_layer_norms = qk_layer_norms
self.freeze_vision_layers = freeze_vision_layers
self.freeze_text_layers = freeze_text_layers
self.freeze_text_module_exceptions = freeze_text_module_exceptions
self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
self.freeze_lm_head = freeze_lm_head
self.use_resampler = use_resampler
if perceiver_config is None:
self.perceiver_config = IdeficsPerceiverConfig()
elif isinstance(perceiver_config, dict):
self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
elif isinstance(perceiver_config, IdeficsPerceiverConfig):
self.perceiver_config = perceiver_config
if vision_config is None:
self.vision_config = IdeficsVisionConfig()
elif isinstance(vision_config, dict):
self.vision_config = IdeficsVisionConfig(**vision_config)
elif isinstance(vision_config, IdeficsVisionConfig):
self.vision_config = vision_config
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
# PreTrainedConfig.from_dict first instantiates the class with the config dict and only then
# updates the config object with `kwargs` from from_pretrained, so during the instantiation
# of this object many attributes have default values and haven't yet been overridden.
# Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
__all__ = ["IdeficsConfig"]
| IdeficsConfig |
python | run-llama__llama_index | llama-index-core/llama_index/core/evaluation/batch_runner.py | {
"start": 2237,
"end": 15469
} | class ____:
"""
Batch evaluation runner.
Args:
evaluators (Dict[str, BaseEvaluator]): Dictionary of evaluators.
workers (int): Number of workers to use for parallelization.
Defaults to 2.
show_progress (bool): Whether to show progress bars. Defaults to False.
"""
def __init__(
self,
evaluators: Dict[str, BaseEvaluator],
workers: int = 2,
show_progress: bool = False,
):
self.evaluators = evaluators
self.workers = workers
self.semaphore = asyncio.Semaphore(self.workers)
self.show_progress = show_progress
self.asyncio_mod = asyncio_module(show_progress=self.show_progress)
def _format_results(
self, results: List[Tuple[str, EvaluationResult]]
) -> Dict[str, List[EvaluationResult]]:
"""Format results."""
# Format results
results_dict: Dict[str, List[EvaluationResult]] = {
name: [] for name in self.evaluators
}
for name, result in results:
results_dict[name].append(result)
return results_dict
def _validate_and_clean_inputs(
self,
*inputs_list: Any,
) -> List[Any]:
"""
Validate and clean input lists.
Enforce that at least one of the inputs is not None.
Make sure that all inputs have the same length.
Make sure that None inputs are replaced with [None] * len(inputs).
"""
assert len(inputs_list) > 0
# first, make sure at least one of queries or response_strs is not None
input_len: Optional[int] = None
for inputs in inputs_list:
if inputs is not None:
input_len = len(inputs)
break
if input_len is None:
raise ValueError("At least one item in inputs_list must be provided.")
new_inputs_list = []
for inputs in inputs_list:
if inputs is None:
new_inputs_list.append([None] * input_len)
else:
if len(inputs) != input_len:
raise ValueError("All inputs must have the same length.")
new_inputs_list.append(inputs)
return new_inputs_list
def _validate_nested_eval_kwargs_types(
self, eval_kwargs_lists: Dict[str, Any]
) -> Dict[str, Any]:
"""
Ensure eval kwargs are acceptable format.
either a Dict[str, List] or a Dict[str, Dict[str, List]].
Allows use of different kwargs (e.g. references) with different evaluators
while keeping backwards compatibility for single evaluators
"""
if not isinstance(eval_kwargs_lists, dict):
raise ValueError(
f"eval_kwargs_lists must be a dict. Got {eval_kwargs_lists}"
)
for evaluator, eval_kwargs in eval_kwargs_lists.items():
if isinstance(eval_kwargs, list):
# maintain backwards compatibility - for use with single evaluator
eval_kwargs_lists[evaluator] = self._validate_and_clean_inputs(
eval_kwargs
)[0]
elif isinstance(eval_kwargs, dict):
# for use with multiple evaluators
for k in eval_kwargs:
v = eval_kwargs[k]
if not isinstance(v, list):
raise ValueError(
f"nested inner values in eval_kwargs must be a list. Got {evaluator}: {k}: {v}"
)
eval_kwargs_lists[evaluator][k] = self._validate_and_clean_inputs(
v
)[0]
else:
raise ValueError(
f"eval_kwargs must be a list or a dict. Got {evaluator}: {eval_kwargs}"
)
return eval_kwargs_lists
def _get_eval_kwargs(
self, eval_kwargs_lists: Dict[str, Any], idx: int
) -> Dict[str, Any]:
"""
Get eval kwargs from eval_kwargs_lists at a given idx.
Since eval_kwargs_lists is a dict of lists, we need to get the
value at idx for each key.
"""
return {k: v[idx] for k, v in eval_kwargs_lists.items()}
async def aevaluate_response_strs(
self,
queries: Optional[List[str]] = None,
response_strs: Optional[List[str]] = None,
contexts_list: Optional[List[List[str]]] = None,
**eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
"""
Evaluate query, response pairs.
This evaluates queries, responses, contexts as string inputs.
Can supply additional kwargs to the evaluator in eval_kwargs_lists.
Args:
queries (Optional[List[str]]): List of query strings. Defaults to None.
response_strs (Optional[List[str]]): List of response strings.
Defaults to None.
contexts_list (Optional[List[List[str]]]): List of context lists.
Defaults to None.
**eval_kwargs_lists (Dict[str, Any]): Dict of either dicts or lists
of kwargs to pass to evaluator. Defaults to None.
multiple evaluators: {evaluator: {kwarg: [list of values]},...}
single evaluator: {kwarg: [list of values]}
"""
queries, response_strs, contexts_list = self._validate_and_clean_inputs(
queries, response_strs, contexts_list
)
eval_kwargs_lists = self._validate_nested_eval_kwargs_types(eval_kwargs_lists)
# boolean to check if using multi kwarg evaluator
multi_kwargs = len(eval_kwargs_lists) > 0 and isinstance(
next(iter(eval_kwargs_lists.values())), dict
)
# run evaluations
eval_jobs = []
for idx, query in enumerate(cast(List[str], queries)):
response_str = cast(List, response_strs)[idx]
contexts = cast(List, contexts_list)[idx]
for name, evaluator in self.evaluators.items():
if multi_kwargs:
# multi-evaluator - get appropriate runtime kwargs if present
kwargs = (
eval_kwargs_lists[name] if name in eval_kwargs_lists else {}
)
else:
# single evaluator (maintain backwards compatibility)
kwargs = eval_kwargs_lists
eval_kwargs = self._get_eval_kwargs(kwargs, idx)
eval_jobs.append(
eval_worker(
self.semaphore,
evaluator,
name,
query=query,
response_str=response_str,
contexts=contexts,
eval_kwargs=eval_kwargs,
)
)
results = await self.asyncio_mod.gather(*eval_jobs)
# Format results
return self._format_results(results)
async def aevaluate_responses(
self,
queries: Optional[List[str]] = None,
responses: Optional[List[Response]] = None,
**eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
"""
Evaluate query, response pairs.
This evaluates queries and response objects.
Args:
queries (Optional[List[str]]): List of query strings. Defaults to None.
responses (Optional[List[Response]]): List of response objects.
Defaults to None.
**eval_kwargs_lists (Dict[str, Any]): Dict of either dicts or lists
of kwargs to pass to evaluator. Defaults to None.
multiple evaluators: {evaluator: {kwarg: [list of values]},...}
single evaluator: {kwarg: [list of values]}
"""
queries, responses = self._validate_and_clean_inputs(queries, responses)
eval_kwargs_lists = self._validate_nested_eval_kwargs_types(eval_kwargs_lists)
# boolean to check if using multi kwarg evaluator
multi_kwargs = len(eval_kwargs_lists) > 0 and isinstance(
next(iter(eval_kwargs_lists.values())), dict
)
# run evaluations
eval_jobs = []
for idx, query in enumerate(cast(List[str], queries)):
response = cast(List, responses)[idx]
for name, evaluator in self.evaluators.items():
if multi_kwargs:
# multi-evaluator - get appropriate runtime kwargs if present
kwargs = (
eval_kwargs_lists[name] if name in eval_kwargs_lists else {}
)
else:
# single evaluator (maintain backwards compatibility)
kwargs = eval_kwargs_lists
eval_kwargs = self._get_eval_kwargs(kwargs, idx)
eval_jobs.append(
eval_response_worker(
self.semaphore,
evaluator,
name,
query=query,
response=response,
eval_kwargs=eval_kwargs,
)
)
results = await self.asyncio_mod.gather(*eval_jobs)
# Format results
return self._format_results(results)
async def aevaluate_queries(
self,
query_engine: BaseQueryEngine,
queries: Optional[List[str]] = None,
**eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
"""
Evaluate queries.
Args:
query_engine (BaseQueryEngine): Query engine.
queries (Optional[List[str]]): List of query strings. Defaults to None.
**eval_kwargs_lists (Dict[str, Any]): Dict of lists of kwargs to
pass to evaluator. Defaults to None.
"""
if queries is None:
raise ValueError("`queries` must be provided")
# gather responses
response_jobs = []
for query in queries:
response_jobs.append(response_worker(self.semaphore, query_engine, query))
responses = await self.asyncio_mod.gather(*response_jobs)
return await self.aevaluate_responses(
queries=queries,
responses=responses,
**eval_kwargs_lists,
)
def evaluate_response_strs(
self,
queries: Optional[List[str]] = None,
response_strs: Optional[List[str]] = None,
contexts_list: Optional[List[List[str]]] = None,
**eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
"""
Evaluate query, response pairs.
Sync version of aevaluate_response_strs.
"""
return asyncio_run(
self.aevaluate_response_strs(
queries=queries,
response_strs=response_strs,
contexts_list=contexts_list,
**eval_kwargs_lists,
)
)
def evaluate_responses(
self,
queries: Optional[List[str]] = None,
responses: Optional[List[Response]] = None,
**eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
"""
Evaluate query, response objs.
Sync version of aevaluate_responses.
"""
return asyncio_run(
self.aevaluate_responses(
queries=queries,
responses=responses,
**eval_kwargs_lists,
)
)
def evaluate_queries(
self,
query_engine: BaseQueryEngine,
queries: Optional[List[str]] = None,
**eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
"""
Evaluate queries.
Sync version of aevaluate_queries.
"""
return asyncio_run(
self.aevaluate_queries(
query_engine=query_engine,
queries=queries,
**eval_kwargs_lists,
)
)
def upload_eval_results(
self,
project_name: str,
app_name: str,
results: Dict[str, List[EvaluationResult]],
) -> None:
"""
Upload the evaluation results to LlamaCloud.
Args:
project_name (str): The name of the project.
app_name (str): The name of the app.
results (Dict[str, List[EvaluationResult]]):
The evaluation results, a mapping of metric name to a list of EvaluationResult objects.
Examples:
```python
results = batch_runner.evaluate_responses(...)
batch_runner.upload_eval_results(
project_name="my_project",
app_name="my_app",
results=results
)
```
"""
from llama_index.core.evaluation.eval_utils import upload_eval_results
upload_eval_results(
project_name=project_name, app_name=app_name, results=results
)
| BatchEvalRunner |
python | pytorch__pytorch | test/inductor/test_group_batch_fusion.py | {
"start": 7517,
"end": 8531
} | class ____(torch.nn.Module):
def __init__(self, device, has_bias=True):
super().__init__()
self.device = device
def forward(self, x):
inputs = torch.split(x.to(self.device), 500, dim=1)
x_split = torch.split(inputs[0].to(self.device), 50, dim=1)
y_split = torch.split(inputs[1].to(self.device), 50, dim=1)
sigmoid_1 = [torch.sigmoid(x_split[i]) for i in range(len(x_split))]
sigmoid_2 = [torch.sigmoid(y_split[i]) for i in range(len(y_split))]
relu_1 = [torch.nn.functional.relu(sigmoid_1[i]) for i in range(len(sigmoid_1))]
relu_2 = [torch.nn.functional.relu(sigmoid_2[i]) for i in range(len(sigmoid_2))]
add = [torch.add(relu_1[i], relu_2[i]) for i in range(len(relu_1))]
mul = [torch.mul(add[i], add[i]) for i in range(len(add))]
sub = [torch.sub(mul[i], mul[i]) for i in range(len(mul))]
div = [torch.div(sub[i], sub[i]) for i in range(len(sub))]
return torch.cat(div, dim=1)
| TestPoitwiseOps |
python | networkx__networkx | networkx/classes/reportviews.py | {
"start": 4365,
"end": 9191
} | class ____(Mapping, Set):
"""A NodeView class to act as G.nodes for a NetworkX Graph
Set operations act on the nodes without considering data.
Iteration is over nodes. Node data can be looked up like a dict.
Use NodeDataView to iterate over node data or to specify a data
attribute for lookup. NodeDataView is created by calling the NodeView.
Parameters
----------
graph : NetworkX graph-like class
Examples
--------
>>> G = nx.path_graph(3)
>>> NV = G.nodes()
>>> 2 in NV
True
>>> for n in NV:
... print(n)
0
1
2
>>> assert NV & {1, 2, 3} == {1, 2}
>>> G.add_node(2, color="blue")
>>> NV[2]
{'color': 'blue'}
>>> G.add_node(8, color="red")
>>> NDV = G.nodes(data=True)
>>> (2, NV[2]) in NDV
True
>>> for n, dd in NDV:
... print((n, dd.get("color", "aqua")))
(0, 'aqua')
(1, 'aqua')
(2, 'blue')
(8, 'red')
>>> NDV[2] == NV[2]
True
>>> NVdata = G.nodes(data="color", default="aqua")
>>> (2, NVdata[2]) in NVdata
True
>>> for n, dd in NVdata:
... print((n, dd))
(0, 'aqua')
(1, 'aqua')
(2, 'blue')
(8, 'red')
>>> NVdata[2] == NV[2] # NVdata gets 'color', NV gets datadict
False
"""
__slots__ = ("_nodes",)
def __getstate__(self):
return {"_nodes": self._nodes}
def __setstate__(self, state):
self._nodes = state["_nodes"]
def __init__(self, graph):
self._nodes = graph._node
# Mapping methods
def __len__(self):
return len(self._nodes)
def __iter__(self):
return iter(self._nodes)
def __getitem__(self, n):
if isinstance(n, slice):
raise nx.NetworkXError(
f"{type(self).__name__} does not support slicing, "
f"try list(G.nodes)[{n.start}:{n.stop}:{n.step}]"
)
return self._nodes[n]
# Set methods
def __contains__(self, n):
return n in self._nodes
@classmethod
def _from_iterable(cls, it):
return set(it)
# DataView method
def __call__(self, data=False, default=None):
if data is False:
return self
return NodeDataView(self._nodes, data, default)
def data(self, data=True, default=None):
"""
Return a read-only view of node data.
Parameters
----------
data : bool or node data key, default=True
If ``data=True`` (the default), return a `NodeDataView` object that
maps each node to *all* of its attributes. `data` may also be an
arbitrary key, in which case the `NodeDataView` maps each node to
the value for the keyed attribute. In this case, if a node does
not have the `data` attribute, the `default` value is used.
default : object, default=None
The value used when a node does not have a specific attribute.
Returns
-------
NodeDataView
The layout of the returned NodeDataView depends on the value of the
`data` parameter.
Notes
-----
If ``data=False``, returns a `NodeView` object without data.
See Also
--------
NodeDataView
Examples
--------
>>> G = nx.Graph()
>>> G.add_nodes_from(
... [
... (0, {"color": "red", "weight": 10}),
... (1, {"color": "blue"}),
... (2, {"color": "yellow", "weight": 2}),
... ]
... )
Accessing node data with ``data=True`` (the default) returns a
NodeDataView mapping each node to all of its attributes:
>>> G.nodes.data()
NodeDataView({0: {'color': 'red', 'weight': 10}, 1: {'color': 'blue'}, 2: {'color': 'yellow', 'weight': 2}})
If `data` represents a key in the node attribute dict, a NodeDataView mapping
the nodes to the value for that specific key is returned:
>>> G.nodes.data("color")
NodeDataView({0: 'red', 1: 'blue', 2: 'yellow'}, data='color')
If a specific key is not found in an attribute dict, the value specified
by `default` is returned:
>>> G.nodes.data("weight", default=-999)
NodeDataView({0: 10, 1: -999, 2: 2}, data='weight')
Note that there is no check that the `data` key is in any of the
node attribute dictionaries:
>>> G.nodes.data("height")
NodeDataView({0: None, 1: None, 2: None}, data='height')
"""
if data is False:
return self
return NodeDataView(self._nodes, data, default)
def __str__(self):
return str(list(self))
def __repr__(self):
return f"{self.__class__.__name__}({tuple(self)})"
| NodeView |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 684957,
"end": 685284
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("VerifiableDomain", graphql_name="node")
| VerifiableDomainEdge |
python | pytorch__pytorch | test/inductor/test_pallas.py | {
"start": 22377,
"end": 22570
} | class ____(PallasTestsMixin, TestCase):
DEVICE = "cpu"
@unittest.skipUnless(has_jax_tpu_backend(), "requires JAX TPU backend")
@config.patch({"_debug_cpu_to_tpu_pallas": True})
| PallasTestsCPU |
python | kamyu104__LeetCode-Solutions | Python/132-pattern.py | {
"start": 29,
"end": 472
} | class ____(object):
def find132pattern(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
ak = float("-inf")
stk = []
for i in reversed(xrange(len(nums))):
if nums[i] < ak:
return True
while stk and stk[-1] < nums[i]:
ak = stk.pop()
stk.append(nums[i])
return False
# Time: O(n^2)
# Space: O(1)
| Solution |
python | ApeWorX__ape | tests/functional/test_coverage.py | {
"start": 3382,
"end": 3810
} | class ____:
def test_function_rate(self, source_contract):
assert source_contract.function_rate == 0.5
def test_lines_coverage(self, source_contract):
assert source_contract.lines_covered == 4
def test_miss_count(self, source_contract):
assert source_contract.miss_count == 2
def test_line_rate(self, source_contract):
assert source_contract.line_rate == 2 / 3
| TestSourceCoverage |
python | great-expectations__great_expectations | great_expectations/core/factory/validation_definition_factory.py | {
"start": 673,
"end": 4094
} | class ____(Factory[ValidationDefinition]):
"""
Responsible for basic CRUD operations on a Data Context's ValidationDefinitions.
"""
def __init__(self, store: ValidationDefinitionStore) -> None:
self._store = store
@public_api
@override
def add(self, validation: ValidationDefinition) -> ValidationDefinition:
"""Add a ValidationDefinition to the collection.
Parameters:
validation: ValidationDefinition to add
Raises:
DataContextError: if ValidationDefinition already exists
"""
key = self._store.get_key(name=validation.name, id=None)
if self._store.has_key(key=key):
raise DataContextError( # noqa: TRY003 # FIXME CoP
f"Cannot add ValidationDefinition with name {validation.name} because it already exists." # noqa: E501 # FIXME CoP
)
self._store.add(key=key, value=validation)
return validation
@public_api
@override
def delete(self, name: str) -> None:
"""Delete a ValidationDefinition from the collection.
Parameters:
name: The name of the ValidationDefinition to delete
Raises:
DataContextError: if ValidationDefinition doesn't exist
"""
try:
validation_definition = self.get(name=name)
except DataContextError as e:
raise DataContextError( # noqa: TRY003 # FIXME CoP
f"Cannot delete ValidationDefinition with name {name} because it cannot be found."
) from e
key = self._store.get_key(name=validation_definition.name, id=validation_definition.id)
self._store.remove_key(key=key)
@public_api
@override
def get(self, name: str) -> ValidationDefinition:
"""Get a ValidationDefinition from the collection by name.
Parameters:
name: Name of ValidationDefinition to get
Raises:
DataContextError: when ValidationDefinition is not found.
"""
key = self._store.get_key(name=name, id=None)
if not self._store.has_key(key=key):
raise DataContextError(f"ValidationDefinition with name {name} was not found.") # noqa: TRY003 # FIXME CoP
return cast("ValidationDefinition", self._store.get(key=key))
@public_api
@override
def all(self) -> Iterable[ValidationDefinition]:
"""Get all ValidationDefinitions."""
return self._store.get_all()
@public_api
@override
def add_or_update(self, validation: ValidationDefinition) -> ValidationDefinition:
"""Add or update an ValidationDefinition by name.
If an ValidationDefinition with the same name exists, overwrite it, otherwise
create a new ValidationDefinition.
Args:
validation: ValidationDefinition to add or update
"""
# Always add or update underlying suite to avoid freshness issues
suite_factory = project_manager.get_suite_factory()
validation.suite = suite_factory.add_or_update(suite=validation.suite)
validation.data.save()
try:
existing_validation = self.get(name=validation.name)
except DataContextError:
return self.add(validation=validation)
validation.id = existing_validation.id
validation.save()
return validation
| ValidationDefinitionFactory |
python | getsentry__sentry | src/sentry/api/serializers/models/exploresavedquery.py | {
"start": 556,
"end": 740
} | class ____(TypedDict, total=False):
environment: list[str]
query: str
range: str
start: str
end: str
interval: str
mode: str
| ExploreSavedQueryResponseOptional |
python | ApeWorX__ape | src/ape/plugins/_utils.py | {
"start": 24077,
"end": 26034
} | class ____:
"""
A str-builder for all installed Ape plugins.
"""
def __init__(
self,
metadata: PluginMetadataList,
include: Optional[Sequence[PluginType]] = None,
include_version: bool = True,
output_format: Optional[OutputFormat] = None,
):
self.include = include or (PluginType.INSTALLED, PluginType.THIRD_PARTY)
self.metadata = metadata
self.include_version = include_version
self.output_format = output_format
@log_instead_of_fail(default="<ApePluginsRepr>")
def __repr__(self) -> str:
to_display_str = ", ".join([x.value for x in self.include])
return f"<PluginMap to_display='{to_display_str}'>"
def __str__(self) -> str:
sections = []
if PluginType.CORE in self.include and self.metadata.core:
sections.append(self.metadata.core)
if PluginType.INSTALLED in self.include and self.metadata.installed:
sections.append(self.metadata.installed)
if PluginType.THIRD_PARTY in self.include and self.metadata.third_party:
sections.append(self.metadata.third_party)
if PluginType.AVAILABLE in self.include and self.metadata.available:
sections.append(self.metadata.available)
if not sections:
return ""
# Use a single max length for all the sections.
max_length = max(x.max_name_length for x in sections)
version_skips = (PluginType.CORE, PluginType.AVAILABLE)
def include_version(section):
return section.plugin_type not in version_skips if self.include_version else False
formatted_sections = [
x.to_str(
max_length=max_length,
include_version=include_version(x),
output_format=self.output_format,
)
for x in sections
]
return "\n\n".join(formatted_sections)
| ApePluginsRepr |
python | sympy__sympy | sympy/matrices/immutable.py | {
"start": 2237,
"end": 3917
} | class ____(DenseMatrix, ImmutableRepMatrix): # type: ignore
"""Create an immutable version of a matrix.
Examples
========
>>> from sympy import eye, ImmutableMatrix
>>> ImmutableMatrix(eye(3))
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> _[0, 0] = 42
Traceback (most recent call last):
...
TypeError: Cannot set values of ImmutableDenseMatrix
"""
# MatrixExpr is set as NotIterable, but we want explicit matrices to be
# iterable
_iterable = True
_class_priority = 8
_op_priority = 10.001
@classmethod
def _new(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], ImmutableDenseMatrix):
return args[0]
if kwargs.get('copy', True) is False:
if len(args) != 3:
raise TypeError("'copy=False' requires a matrix be initialized as rows,cols,[list]")
rows, cols, flat_list = args
else:
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
flat_list = list(flat_list) # create a shallow copy
rep = cls._flat_list_to_DomainMatrix(rows, cols, flat_list)
return cls._fromrep(rep)
@classmethod
def _fromrep(cls, rep):
rows, cols = rep.shape
flat_list = rep.to_sympy().to_list_flat()
obj = Basic.__new__(cls,
Integer(rows),
Integer(cols),
Tuple(*flat_list, sympify=False))
obj._rows = rows
obj._cols = cols
obj._rep = rep
return obj
# make sure ImmutableDenseMatrix is aliased as ImmutableMatrix
ImmutableMatrix = ImmutableDenseMatrix
| ImmutableDenseMatrix |
python | pytest-dev__pytest | src/_pytest/pathlib.py | {
"start": 33886,
"end": 37879
} | class ____(Exception):
"""Custom exception raised by resolve_pkg_root_and_module_name."""
def scandir(
path: str | os.PathLike[str],
sort_key: Callable[[os.DirEntry[str]], object] = lambda entry: entry.name,
) -> list[os.DirEntry[str]]:
"""Scan a directory recursively, in breadth-first order.
The returned entries are sorted according to the given key.
The default is to sort by name.
If the directory does not exist, return an empty list.
"""
entries = []
# Attempt to create a scandir iterator for the given path.
try:
scandir_iter = os.scandir(path)
except FileNotFoundError:
# If the directory does not exist, return an empty list.
return []
# Use the scandir iterator in a context manager to ensure it is properly closed.
with scandir_iter as s:
for entry in s:
try:
entry.is_file()
except OSError as err:
if _ignore_error(err):
continue
# Reraise non-ignorable errors to avoid hiding issues.
raise
entries.append(entry)
entries.sort(key=sort_key) # type: ignore[arg-type]
return entries
def visit(
path: str | os.PathLike[str], recurse: Callable[[os.DirEntry[str]], bool]
) -> Iterator[os.DirEntry[str]]:
"""Walk a directory recursively, in breadth-first order.
The `recurse` predicate determines whether a directory is recursed.
Entries at each directory level are sorted.
"""
entries = scandir(path)
yield from entries
for entry in entries:
if entry.is_dir() and recurse(entry):
yield from visit(entry.path, recurse)
def absolutepath(path: str | os.PathLike[str]) -> Path:
"""Convert a path to an absolute path using os.path.abspath.
Prefer this over Path.resolve() (see #6523).
Prefer this over Path.absolute() (not public, doesn't normalize).
"""
return Path(os.path.abspath(path))
def commonpath(path1: Path, path2: Path) -> Path | None:
"""Return the common part shared with the other path, or None if there is
no common part.
If one path is relative and one is absolute, returns None.
"""
try:
return Path(os.path.commonpath((str(path1), str(path2))))
except ValueError:
return None
def bestrelpath(directory: Path, dest: Path) -> str:
"""Return a string which is a relative path from directory to dest such
that directory/bestrelpath == dest.
The paths must be either both absolute or both relative.
If no such path can be determined, returns dest.
"""
assert isinstance(directory, Path)
assert isinstance(dest, Path)
if dest == directory:
return os.curdir
# Find the longest common directory.
base = commonpath(directory, dest)
# Can be the case on Windows for two absolute paths on different drives.
# Can be the case for two relative paths without common prefix.
# Can be the case for a relative path and an absolute path.
if not base:
return str(dest)
reldirectory = directory.relative_to(base)
reldest = dest.relative_to(base)
return os.path.join(
# Back from directory to base.
*([os.pardir] * len(reldirectory.parts)),
# Forward from base to dest.
*reldest.parts,
)
def safe_exists(p: Path) -> bool:
"""Like Path.exists(), but account for input arguments that might be too long (#11394)."""
try:
return p.exists()
except (ValueError, OSError):
# ValueError: stat: path too long for Windows
# OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect
return False
def samefile_nofollow(p1: Path, p2: Path) -> bool:
"""Test whether two paths reference the same actual file or directory.
Unlike Path.samefile(), does not resolve symlinks.
"""
return os.path.samestat(p1.lstat(), p2.lstat())
| CouldNotResolvePathError |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/fsevents2.py | {
"start": 5824,
"end": 8862
} | class ____(EventEmitter):
"""
FSEvents based event emitter. Handles conversion of native events.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._fsevents = FSEventsQueue(watch.path)
self._fsevents.start()
def on_thread_stop(self):
self._fsevents.stop()
def queue_events(self, timeout):
events = self._fsevents.read_events()
if events is None:
return
i = 0
while i < len(events):
event = events[i]
# For some reason the create and remove flags are sometimes also
# set for rename and modify type events, so let those take
# precedence.
if event.is_renamed:
# Internal moves appears to always be consecutive in the same
# buffer and have IDs differ by exactly one (while others
# don't) making it possible to pair up the two events coming
# from a singe move operation. (None of this is documented!)
# Otherwise, guess whether file was moved in or out.
#TODO: handle id wrapping
if (i+1 < len(events) and events[i+1].is_renamed and
events[i+1].event_id == event.event_id + 1):
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(event.path, events[i+1].path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
self.queue_event(DirModifiedEvent(os.path.dirname(events[i+1].path)))
i += 1
elif os.path.exists(event.path):
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
else:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
#TODO: generate events for tree
elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod :
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(event.path))
elif event.is_created:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
elif event.is_removed:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
i += 1
| FSEventsEmitter |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/alloy_db.py | {
"start": 1310,
"end": 1514
} | class ____(BaseGoogleLink):
"""Helper class for constructing AlloyDB cluster Link."""
name = "AlloyDB Cluster"
key = "alloy_db_cluster"
format_str = ALLOY_DB_CLUSTER_LINK
| AlloyDBClusterLink |
python | lepture__authlib | authlib/oidc/core/errors.py | {
"start": 41,
"end": 514
} | class ____(OAuth2Error):
"""The Authorization Server requires End-User interaction of some form
to proceed. This error MAY be returned when the prompt parameter value
in the Authentication Request is none, but the Authentication Request
cannot be completed without displaying a user interface for End-User
interaction.
http://openid.net/specs/openid-connect-core-1_0.html#AuthError
"""
error = "interaction_required"
| InteractionRequiredError |
python | kubernetes-client__python | kubernetes/client/models/v1_resource_quota_list.py | {
"start": 383,
"end": 7128
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1ResourceQuota]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceQuotaList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ResourceQuotaList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ResourceQuotaList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ResourceQuotaList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ResourceQuotaList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ResourceQuotaList. # noqa: E501
Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
:return: The items of this V1ResourceQuotaList. # noqa: E501
:rtype: list[V1ResourceQuota]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ResourceQuotaList.
Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
:param items: The items of this V1ResourceQuotaList. # noqa: E501
:type: list[V1ResourceQuota]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ResourceQuotaList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ResourceQuotaList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ResourceQuotaList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ResourceQuotaList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ResourceQuotaList. # noqa: E501
:return: The metadata of this V1ResourceQuotaList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ResourceQuotaList.
:param metadata: The metadata of this V1ResourceQuotaList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceQuotaList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceQuotaList):
return True
return self.to_dict() != other.to_dict()
| V1ResourceQuotaList |
python | google__jax | tests/pallas/pipelining/schedule_api_test.py | {
"start": 1348,
"end": 3997
} | class ____(absltest.TestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cpu"]):
self.skipTest("Only works on CPU")
def test_basic_pipeline(self):
# Use reads/writes to mimic the Ref effects of DMAs.
copy_in = schedule_api.AsyncStage(max_in_flight=2)
@copy_in.def_start
def copy_in_start(_, x_ref, o_ref):
del o_ref
# dma_start creates a write_effect to x_ref
x_ref[...] = jnp.ones_like(x_ref)
@copy_in.def_end
def copy_in_end(_, x_ref, o_ref):
del o_ref
# dma_end creates a write_effect to x_ref
x_ref[...] = jnp.ones_like(x_ref)
@schedule_api.stage(max_in_flight=2)
def kernel_body(_, x_ref, o_ref):
o_ref[...] = x_ref[...] + 1.0
copy_out = schedule_api.AsyncStage(max_in_flight=2)
@copy_out.def_start
def copy_out_start(_, x_ref, o_ref):
del x_ref
# dma_start creates a read_effect to o_ref
_ = o_ref[...]
@copy_out.def_end
def copy_out_end(_, x_ref, o_ref):
del x_ref
# dma_end creates a read_effect to o_ref
_ = o_ref[...]
pipeline = schedule_api.schedule_pipeline(
stages=(copy_in, kernel_body, copy_out),
grid=(4,),
args=(
MemoryRef(shape=(128, 128), dtype=jnp.dtype(jnp.float32),
memory_space="VMEM"),
MemoryRef(shape=(128, 128), dtype=jnp.dtype(jnp.float32),
memory_space="VMEM"),
),
eval_fn=test_util.print_stage,
)
ref = jnp.ones((128, 128), jnp.float32)
ref = jax.new_ref(ref)
with jtu.capture_stdout() as stdout:
pipeline(ref, ref)
output = stdout().strip().split("\n")
expected = [
# step
"[itr=0] copy_in_start",
"[itr=1] copy_in_start",
# step
"[itr=0] copy_in_end",
"[itr=0] kernel_body",
"[itr=0] copy_out_start",
"[itr=2] copy_in_start",
# step
"[itr=1] copy_in_end",
"[itr=1] kernel_body",
"[itr=1] copy_out_start",
"[itr=3] copy_in_start",
# step
test_util.AnyOrder([
"[itr=0] copy_out_end",
"[itr=2] copy_in_end"]),
"[itr=2] kernel_body",
"[itr=2] copy_out_start",
# step
test_util.AnyOrder([
"[itr=1] copy_out_end",
"[itr=3] copy_in_end"]),
"[itr=3] kernel_body",
"[itr=3] copy_out_start",
# step
"[itr=2] copy_out_end",
"[itr=3] copy_out_end",
]
self.assertTrue(test_util.compare_lists(output, expected))
if __name__ == "__main__":
absltest.main()
| ApiTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 422391,
"end": 424669
} | class ____(sgqlc.types.Interface):
"""Represents an author of discussions in repositories."""
__schema__ = github_schema
__field_names__ = ("repository_discussions",)
repository_discussions = sgqlc.types.Field(
sgqlc.types.non_null("DiscussionConnection"),
graphql_name="repositoryDiscussions",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(DiscussionOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "DESC"}),
),
("repository_id", sgqlc.types.Arg(ID, graphql_name="repositoryId", default=None)),
("answered", sgqlc.types.Arg(Boolean, graphql_name="answered", default=None)),
("states", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(DiscussionState)), graphql_name="states", default=())),
)
),
)
"""Discussions this user has started.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`DiscussionOrder`): Ordering options for discussions
returned from the connection. (default: `{field: CREATED_AT,
direction: DESC}`)
* `repository_id` (`ID`): Filter discussions to only those in a
specific repository.
* `answered` (`Boolean`): Filter discussions to only those that
have been answered or not. Defaults to including both answered
and unanswered discussions. (default: `null`)
* `states` (`[DiscussionState!]`): A list of states to filter the
discussions by. (default: `[]`)
"""
| RepositoryDiscussionAuthor |
python | huggingface__transformers | src/transformers/models/siglip/modeling_siglip.py | {
"start": 36291,
"end": 39759
} | class ____(SiglipPreTrainedModel):
main_input_name = "pixel_values"
input_modalities = ("image",)
def __init__(self, config: SiglipConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
# Create the vision model with proper attention
# and take only vision_model submodule (for backward compatibility)
vision_model = SiglipVisionModel._from_config(config.vision_config)
self.vision_model = vision_model.vision_model
# Classifier head
self.classifier = (
nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, SiglipForImageClassification
>>> import torch
>>> from PIL import Image
>>> import requests
>>> torch.manual_seed(3) # doctest: +IGNORE_RESULT
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # note: we are loading a `SiglipModel` from the hub here,
>>> # so the head will be randomly initialized, hence the predictions will be random if seed is not set above.
>>> image_processor = AutoImageProcessor.from_pretrained("google/siglip-base-patch16-224")
>>> model = SiglipForImageClassification.from_pretrained("google/siglip-base-patch16-224")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> # model predicts one of the two classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: LABEL_1
```"""
outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
**kwargs,
)
sequence_output = outputs.last_hidden_state
# average pool the patch tokens
sequence_output = torch.mean(sequence_output, dim=1)
# apply classifier
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
return ImageClassifierOutput(
loss=loss,
logits=logits,
)
__all__ = [
"SiglipModel",
"SiglipPreTrainedModel",
"SiglipTextModel",
"SiglipVisionModel",
"SiglipForImageClassification",
]
| SiglipForImageClassification |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_os_login.py | {
"start": 4261,
"end": 5687
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = OSLoginHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_2),
)
@mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.import_ssh_public_key(
user=TEST_USER,
ssh_public_key=TEST_BODY,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
request=dict(parent=TEST_PARENT, ssh_public_key=TEST_BODY, project_id=TEST_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
TEST_MESSAGE = re.escape(
"The project id must be passed either as keyword project_id parameter or as project_id extra in "
"Google Cloud connection definition. Both are not set!"
)
| TestOSLoginHookWithoutDefaultProjectIdHook |
python | getsentry__sentry | tests/sentry/integrations/jira/utils/test_create_issue_schema_transformers.py | {
"start": 469,
"end": 7611
} | class ____(TestCase):
def setUp(self) -> None:
# TODO(Gabe): Add an interface for the Jira client to share among the different impls
self.client: Any = StubJiraApiClient()
def test_transform_with_empty_fields_set(self) -> None:
transformed_data = transform_fields(
self.client.user_id_field(),
[],
**{"field1": "abcd", "field2": "1234", "field3": "foobar"},
)
assert transformed_data == {}
def create_standard_field(
self,
name: str,
schema_type: JiraSchemaTypes,
is_array: bool = False,
key: str | None = None,
required: bool = False,
) -> JiraField:
if is_array:
jira_schema = JiraSchema(
schema_type=JiraSchemaTypes.array,
items=schema_type,
)
else:
jira_schema = JiraSchema(
schema_type=schema_type,
)
return JiraField(
name=name,
key=key or name,
operations=[],
has_default_value=False,
required=required,
schema=jira_schema,
)
def test_multi_user_array(self) -> None:
field = self.create_standard_field(
name="Foo Bar", key="foobar", schema_type=JiraSchemaTypes.user, is_array=True
)
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"foobar": "abcd"}
)
assert transformed_data == {"foobar": [{"accountId": "abcd"}]}
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"foobar": ["abcd", "efgh"]}
)
assert transformed_data == {"foobar": [{"accountId": "abcd"}, {"accountId": "efgh"}]}
def test_transform_single_user(self) -> None:
field = self.create_standard_field(schema_type=JiraSchemaTypes.user, name="barfoo")
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"barfoo": "abcd"}
)
assert transformed_data == {"barfoo": {"accountId": "abcd"}}
def test_transform_number_field(self) -> None:
field = self.create_standard_field(schema_type=JiraSchemaTypes.number, name="num_field")
with pytest.raises(IntegrationFormError) as exc:
transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"num_field": "abcd"}
)
assert exc.value.field_errors == {
"num_field": "Invalid number value provided for field: 'abcd'"
}
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"num_field": "1.5"}
)
assert transformed_data == {"num_field": 1.5}
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"num_field": "5"}
)
assert transformed_data == {"num_field": 5}
def test_transform_issue_type_field(self) -> None:
field = self.create_standard_field(name="issue", schema_type=JiraSchemaTypes.issue_type)
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"issue": "abcd"}
)
assert transformed_data == {"issue": {"id": "abcd"}}
def test_transform_option_field(self) -> None:
field = self.create_standard_field(name="option_thing", schema_type=JiraSchemaTypes.option)
transformed_data = transform_fields(
self.client.user_id_field(),
jira_fields=[field],
**{"option_thing": "abcd"},
)
assert transformed_data == {"option_thing": {"value": "abcd"}}
def test_transform_issue_link_field(self) -> None:
field = self.create_standard_field(name="link", schema_type=JiraSchemaTypes.issue_link)
transformed_data = transform_fields(
self.client.user_id_field(),
jira_fields=[field],
**{"link": "abcd"},
)
assert transformed_data == {"link": {"key": "abcd"}}
def test_transform_project_field(self) -> None:
field = self.create_standard_field(name="project", schema_type=JiraSchemaTypes.project)
transformed_data = transform_fields(
self.client.user_id_field(),
jira_fields=[field],
**{"project": "abcd"},
)
assert transformed_data == {"project": {"id": "abcd"}}
def test_sprint_custom_field(self) -> None:
sprint_field = JiraField(
schema=JiraSchema(
custom_id=1001,
custom=JIRA_CUSTOM_FIELD_TYPES["sprint"],
schema_type=JiraSchemaTypes.array,
items=JiraSchemaTypes.json,
),
name="sprint",
key="sprint",
required=False,
has_default_value=False,
operations=[],
)
transformed_data = transform_fields(
self.client.user_id_field(),
jira_fields=[sprint_field],
**{"sprint": 2},
)
assert transformed_data == {"sprint": 2}
def test_version_custom_field(self) -> None:
version_field = JiraField(
schema=JiraSchema(
schema_type=JiraSchemaTypes.version,
),
name="fixVersion",
key="fixVersion",
required=False,
has_default_value=False,
operations=[],
)
transformed_data = transform_fields(
self.client.user_id_field(),
jira_fields=[version_field],
**{"fixVersion": 2},
)
assert transformed_data == {"fixVersion": {"id": 2}}
transformed_data = transform_fields(
self.client.user_id_field(),
jira_fields=[version_field],
**{"fixVersion": ""},
)
assert transformed_data == {}
transformed_data = transform_fields(
self.client.user_id_field(),
jira_fields=[version_field],
**{"fixVersion": 0},
)
assert transformed_data == {"fixVersion": {"id": 0}}
def test_title_field(self) -> None:
field = self.create_standard_field(name="summary", schema_type=JiraSchemaTypes.string)
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"title": "a" * 512}
)
assert transformed_data == {"summary": "a" * 255}
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"title": "Test Title"}
)
assert transformed_data == {"summary": "Test Title"}
def test_field_capitalization(self) -> None:
field = self.create_standard_field(name="issuetype", schema_type=JiraSchemaTypes.issue_type)
transformed_data = transform_fields(
self.client.user_id_field(), jira_fields=[field], **{"issueType": "1122"}
)
assert transformed_data == {"issuetype": {"id": "1122"}}
| TestDataTransformer |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/coordinated_read_ft_test.py | {
"start": 1128,
"end": 4585
} | class ____(data_service_test_base.TestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(workers_to_add=[1, 3, 10])))
def testAddWorkers(self, workers_to_add):
starting_workers = 3
cluster = data_service_test_base.TestCluster(num_workers=starting_workers)
num_consumers = 7
ds = self.make_coordinated_read_dataset(cluster, num_consumers)
get_next = self.getNext(ds, requires_initialization=True)
results = []
zeros_seen = 0
for _ in range(25):
results.append(self.evaluate(get_next()))
if results[-1] == 0:
zeros_seen += 1
for _ in range(workers_to_add):
cluster.add_worker()
# Read until all new workers have joined.
while zeros_seen < starting_workers + workers_to_add:
results.append(self.evaluate(get_next()))
if results[-1] == 0:
zeros_seen += 1
# Read some more.
for _ in range(25):
results.append(self.evaluate(get_next()))
self.checkCoordinatedReadGroups(results, num_consumers)
cluster.stop_workers()
@combinations.generate(test_base.eager_only_combinations())
def testRestartWorker(self):
num_workers = 3
# Set a shutdown quiet period to prevent workers from shutting down partway
# through a round.
cluster = data_service_test_base.TestCluster(
num_workers, worker_shutdown_quiet_period_ms=2000)
num_consumers = 5
ds = self.make_coordinated_read_dataset(cluster, num_consumers)
get_next = self.getNext(ds, requires_initialization=True)
results = []
self.read(get_next, results, 20)
cluster.workers[1].stop()
# Check that we can continue to read even with a worker stopped.
self.read(get_next, results, 20)
cluster.workers[1].restart()
# Read until we get results from the restarted worker, then read some more.
while results[-1] != 0:
results.append(self.evaluate(get_next()))
self.read(get_next, results, 20)
self.checkCoordinatedReadGroups(results, num_consumers)
cluster.stop_workers()
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(sharding_policy=[
data_service_ops.ShardingPolicy.OFF,
data_service_ops.ShardingPolicy.DYNAMIC
])))
def testMultiStartStop(self, sharding_policy):
num_workers = 3
# Set a shutdown quiet period to prevent workers from shutting down partway
# through a round.
cluster = data_service_test_base.TestCluster(
num_workers, worker_shutdown_quiet_period_ms=2000)
num_consumers = 5
ds = self.make_coordinated_read_dataset(cluster, num_consumers,
sharding_policy)
get_next = self.getNext(ds, requires_initialization=True)
results = []
self.read(get_next, results, 20)
for i in range(num_workers):
cluster.workers[i].stop()
self.read(get_next, results, 20)
cluster.workers[i].restart()
self.read(get_next, results, 20)
cluster.add_worker()
cluster.restart_dispatcher()
for i in range(num_workers):
cluster.workers[i].stop()
self.read(get_next, results, 20)
self.checkCoordinatedReadGroups(results, num_consumers)
cluster.stop_workers()
if __name__ == "__main__":
test.main()
| CoordinatedReadFTTest |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/observability/usage_telemetry/test_usage.py | {
"start": 4941,
"end": 6357
} | class ____:
def wait_for_init(self):
"""
When this method returns, the actor initialization is guaranteed
to be complete.
This is used for synchronization between multiple replicas,
increasing the chance for get_telemetry_agent() to be called
at the same time.
"""
pass
def get_telemetry_agent(self):
return _retry_get_telemetry_agent()
def test_telemetry_race_condition():
replicas = [Replica.remote() for _ in range(30)]
init_refs = [replica.wait_for_init.remote() for replica in replicas]
ray.get(init_refs)
get_refs = [replica.get_telemetry_agent.remote() for replica in replicas]
telemetry_agents = ray.get(get_refs)
for telemetry_agent in telemetry_agents:
assert telemetry_agent is not None
assert len(set(telemetry_agents)) == 1
def test_infer_gpu_from_hardware():
# Test with a valid GPU type
def fake_get_gpu_type(*args, **kwargs):
return ["Intel Xeon", "A10G"]
result = HardwareUsage(fake_get_gpu_type).infer_gpu_from_hardware()
assert result == "A10G"
# Test with an unsupported GPU type
def fake_get_gpu_type(*args, **kwargs):
return ["Intel Xeon", "G"]
result = HardwareUsage(fake_get_gpu_type).infer_gpu_from_hardware()
assert result == "UNSPECIFIED"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| Replica |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 35548,
"end": 44085
} | class ____(TestCase):
def test_apiview_schema_descriptor(self):
view = APIView()
assert hasattr(view, 'schema')
assert isinstance(view.schema, AutoSchema)
def test_set_custom_inspector_class_on_view(self):
class CustomView(APIView):
schema = CustomViewInspector()
view = CustomView()
assert isinstance(view.schema, CustomViewInspector)
def test_set_custom_inspector_class_via_settings(self):
with override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'tests.schemas.test_coreapi.CustomViewInspector'}):
view = APIView()
assert isinstance(view.schema, CustomViewInspector)
def test_get_link_requires_instance(self):
descriptor = APIView.schema # Accessed from class
with pytest.raises(AssertionError):
descriptor.get_link(None, None, None) # ???: Do the dummy arguments require a tighter assert?
@pytest.mark.skipif(not coreapi, reason='coreapi is not installed')
def test_update_fields(self):
"""
That updating fields by-name helper is correct
Recall: `update_fields(fields, update_with)`
"""
schema = AutoSchema()
fields = []
# Adds a field...
fields = schema.update_fields(fields, [
coreapi.Field(
"my_field",
required=True,
location="path",
schema=coreschema.String()
),
])
assert len(fields) == 1
assert fields[0].name == "my_field"
# Replaces a field...
fields = schema.update_fields(fields, [
coreapi.Field(
"my_field",
required=False,
location="path",
schema=coreschema.String()
),
])
assert len(fields) == 1
assert fields[0].required is False
@pytest.mark.skipif(not coreapi, reason='coreapi is not installed')
def test_get_manual_fields(self):
"""That get_manual_fields is applied during get_link"""
class CustomView(APIView):
schema = AutoSchema(manual_fields=[
coreapi.Field(
"my_extra_field",
required=True,
location="path",
schema=coreschema.String()
),
])
view = CustomView()
link = view.schema.get_link('/a/url/{id}/', 'GET', '')
fields = link.fields
assert len(fields) == 2
assert "my_extra_field" in [f.name for f in fields]
@pytest.mark.skipif(not coreapi, reason='coreapi is not installed')
def test_viewset_action_with_schema(self):
class CustomViewSet(GenericViewSet):
@action(detail=True, schema=AutoSchema(manual_fields=[
coreapi.Field(
"my_extra_field",
required=True,
location="path",
schema=coreschema.String()
),
]))
def extra_action(self, pk, **kwargs):
pass
router = SimpleRouter()
router.register(r'detail', CustomViewSet, basename='detail')
generator = SchemaGenerator()
view = generator.create_view(router.urls[0].callback, 'GET')
link = view.schema.get_link('/a/url/{id}/', 'GET', '')
fields = link.fields
assert len(fields) == 2
assert "my_extra_field" in [f.name for f in fields]
@pytest.mark.skipif(not coreapi, reason='coreapi is not installed')
def test_viewset_action_with_null_schema(self):
class CustomViewSet(GenericViewSet):
@action(detail=True, schema=None)
def extra_action(self, pk, **kwargs):
pass
router = SimpleRouter()
router.register(r'detail', CustomViewSet, basename='detail')
generator = SchemaGenerator()
view = generator.create_view(router.urls[0].callback, 'GET')
assert view.schema is None
@pytest.mark.skipif(not coreapi, reason='coreapi is not installed')
def test_view_with_manual_schema(self):
path = '/example'
method = 'get'
base_url = None
fields = [
coreapi.Field(
"first_field",
required=True,
location="path",
schema=coreschema.String()
),
coreapi.Field(
"second_field",
required=True,
location="path",
schema=coreschema.String()
),
coreapi.Field(
"third_field",
required=True,
location="path",
schema=coreschema.String()
),
]
description = "A test endpoint"
class CustomView(APIView):
"""
ManualSchema takes list of fields for endpoint.
- Provides url and action, which are always dynamic
"""
schema = ManualSchema(fields, description)
expected = coreapi.Link(
url=path,
action=method,
fields=fields,
description=description
)
view = CustomView()
link = view.schema.get_link(path, method, base_url)
assert link == expected
@unittest.skipUnless(coreschema, 'coreschema is not installed')
def test_field_to_schema(self):
label = 'Test label'
help_text = 'This is a helpful test text'
cases = [
# tuples are ([field], [expected schema])
# TODO: Add remaining cases
(
serializers.BooleanField(label=label, help_text=help_text),
coreschema.Boolean(title=label, description=help_text)
),
(
serializers.DecimalField(1000, 1000, label=label, help_text=help_text),
coreschema.Number(title=label, description=help_text)
),
(
serializers.FloatField(label=label, help_text=help_text),
coreschema.Number(title=label, description=help_text)
),
(
serializers.IntegerField(label=label, help_text=help_text),
coreschema.Integer(title=label, description=help_text)
),
(
serializers.DateField(label=label, help_text=help_text),
coreschema.String(title=label, description=help_text, format='date')
),
(
serializers.DateTimeField(label=label, help_text=help_text),
coreschema.String(title=label, description=help_text, format='date-time')
),
(
serializers.JSONField(label=label, help_text=help_text),
coreschema.Object(title=label, description=help_text)
),
]
for case in cases:
self.assertEqual(field_to_schema(case[0]), case[1])
@override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'})
def test_docstring_is_not_stripped_by_get_description():
class ExampleDocstringAPIView(APIView):
"""
=== title
* item a
* item a-a
* item a-b
* item b
- item 1
- item 2
code block begin
code
code
code
code block end
the end
"""
def get(self, *args, **kwargs):
pass
def post(self, request, *args, **kwargs):
pass
view = ExampleDocstringAPIView()
schema = view.schema
descr = schema.get_description('example', 'get')
# the first and last character are '\n' correctly removed by get_description
assert descr == formatting.dedent(ExampleDocstringAPIView.__doc__[1:][:-1])
# Views for SchemaGenerationExclusionTests
with override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'}):
    # Excluded from schema generation via `schema = None` on the class.
    class ExcludedAPIView(APIView):
        schema = None
        def get(self, request, *args, **kwargs):
            pass
    # Excluded via the @schema(None) decorator on a function-based view.
    @api_view(['GET'])
    @schema(None)
    def excluded_fbv(request):
        pass
    # Control case: not excluded, so it should appear in the generated schema.
    @api_view(['GET'])
    def included_fbv(request):
        pass
@unittest.skipUnless(coreapi, 'coreapi is not installed')
@override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'})
| TestAutoSchema |
python | kamyu104__LeetCode-Solutions | Python/linked-list-components.py | {
"start": 180,
"end": 642
} | class ____(object):
def numComponents(self, head, G):
"""
:type head: ListNode
:type G: List[int]
:rtype: int
"""
lookup = set(G)
dummy = ListNode(-1)
dummy.next = head
curr = dummy
result = 0
while curr and curr.next:
if curr.val not in lookup and curr.next.val in lookup:
result += 1
curr = curr.next
return result
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_daemon_set_condition.py | {
"start": 383,
"end": 7295
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each attribute; to_dict() walks these keys.
    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }
    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1DaemonSetCondition - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None
        # Optional fields are only assigned when provided; `status` and `type`
        # are required by the API schema and their setters reject None when
        # client-side validation is enabled.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        self.status = status
        self.type = type
    @property
    def last_transition_time(self):
        """Gets the last_transition_time of this V1DaemonSetCondition.  # noqa: E501
        Last time the condition transitioned from one status to another.  # noqa: E501
        :return: The last_transition_time of this V1DaemonSetCondition.  # noqa: E501
        :rtype: datetime
        """
        return self._last_transition_time
    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Sets the last_transition_time of this V1DaemonSetCondition.
        Last time the condition transitioned from one status to another.  # noqa: E501
        :param last_transition_time: The last_transition_time of this V1DaemonSetCondition.  # noqa: E501
        :type: datetime
        """
        self._last_transition_time = last_transition_time
    @property
    def message(self):
        """Gets the message of this V1DaemonSetCondition.  # noqa: E501
        A human readable message indicating details about the transition.  # noqa: E501
        :return: The message of this V1DaemonSetCondition.  # noqa: E501
        :rtype: str
        """
        return self._message
    @message.setter
    def message(self, message):
        """Sets the message of this V1DaemonSetCondition.
        A human readable message indicating details about the transition.  # noqa: E501
        :param message: The message of this V1DaemonSetCondition.  # noqa: E501
        :type: str
        """
        self._message = message
    @property
    def reason(self):
        """Gets the reason of this V1DaemonSetCondition.  # noqa: E501
        The reason for the condition's last transition.  # noqa: E501
        :return: The reason of this V1DaemonSetCondition.  # noqa: E501
        :rtype: str
        """
        return self._reason
    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1DaemonSetCondition.
        The reason for the condition's last transition.  # noqa: E501
        :param reason: The reason of this V1DaemonSetCondition.  # noqa: E501
        :type: str
        """
        self._reason = reason
    @property
    def status(self):
        """Gets the status of this V1DaemonSetCondition.  # noqa: E501
        Status of the condition, one of True, False, Unknown.  # noqa: E501
        :return: The status of this V1DaemonSetCondition.  # noqa: E501
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this V1DaemonSetCondition.
        Status of the condition, one of True, False, Unknown.  # noqa: E501
        :param status: The status of this V1DaemonSetCondition.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = status
    @property
    def type(self):
        """Gets the type of this V1DaemonSetCondition.  # noqa: E501
        Type of DaemonSet condition.  # noqa: E501
        :return: The type of this V1DaemonSetCondition.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this V1DaemonSetCondition.
        Type of DaemonSet condition.  # noqa: E501
        :param type: The type of this V1DaemonSetCondition.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1DaemonSetCondition):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1DaemonSetCondition):
            return True
        return self.to_dict() != other.to_dict()
python | astropy__astropy | astropy/io/ascii/mrt.py | {
"start": 27317,
"end": 28775
class ____(core.BaseReader):
    """AAS MRT (Machine-Readable Table) format table.
    **Reading**
    ::
        >>> from astropy.io import ascii
        >>> table = ascii.read('data.mrt', format='mrt')
    **Writing**
    Use ``ascii.write(table, 'data.mrt', format='mrt')`` to write tables to
    Machine Readable Table (MRT) format.
    Note that the metadata of the table, apart from units, column names and
    description, will not be written. These have to be filled in by hand later.
    See also: :ref:`cds_mrt_format`.
    Caveats:
    * The Units and Explanations are available in the column ``unit`` and
      ``description`` attributes, respectively.
    * The other metadata defined by this format is not available in the output table.
    """
    # Registration metadata for the unified I/O registry.
    _format_name = "mrt"
    _io_registry_format_aliases = ["mrt"]
    _io_registry_can_write = True
    _description = "MRT format table"
    data_class = MrtData
    header_class = MrtHeader
    def write(self, table=None):
        """Write ``table`` out in MRT format via the base reader machinery."""
        # Construct for writing empty table is not yet done.
        if len(table) == 0:
            raise NotImplementedError
        # Share the header with the data writer, and clear line positions so
        # the header is regenerated rather than reused from a previous read.
        self.data.header = self.header
        self.header.position_line = None
        self.header.start_line = None
        # Create a copy of the ``table``, so that the copy gets modified and
        # written to the file, while the original table remains as it is.
        table = table.copy()
        return super().write(table)
| Mrt |
python | walkccc__LeetCode | solutions/69. Sqrt(x)/69.py | {
"start": 0,
"end": 155
} | class ____:
def mySqrt(self, x: int) -> int:
return bisect.bisect_right(range(x + 1), x,
key=lambda m: m * m) - 1
| Solution |
python | doocs__leetcode | solution/1000-1099/1088.Confusing Number II/Solution.py | {
"start": 0,
"end": 689
} | class ____:
def confusingNumberII(self, n: int) -> int:
def check(x: int) -> bool:
y, t = 0, x
while t:
t, v = divmod(t, 10)
y = y * 10 + d[v]
return x != y
def dfs(pos: int, limit: bool, x: int) -> int:
if pos >= len(s):
return int(check(x))
up = int(s[pos]) if limit else 9
ans = 0
for i in range(up + 1):
if d[i] != -1:
ans += dfs(pos + 1, limit and i == up, x * 10 + i)
return ans
d = [0, 1, -1, -1, -1, -1, 9, -1, 8, 6]
s = str(n)
return dfs(0, True, 0)
| Solution |
python | doocs__leetcode | solution/2400-2499/2471.Minimum Number of Operations to Sort a Binary Tree by Level/Solution.py | {
"start": 192,
"end": 1050
class ____:
    def minimumOperations(self, root: Optional[TreeNode]) -> int:
        """Total number of value swaps needed to sort every BFS level."""

        def min_swaps(values):
            # Minimum swaps to sort a permutation = sum over its cycles of
            # (cycle length - 1); resolve each cycle by repeated placement.
            rank = {v: i for i, v in enumerate(sorted(values))}
            perm = [rank[v] for v in values]
            swaps = 0
            for start in range(len(perm)):
                while perm[start] != start:
                    target = perm[start]
                    perm[start], perm[target] = perm[target], perm[start]
                    swaps += 1
            return swaps

        total = 0
        queue = deque([root])
        while queue:
            # Collect one whole level, enqueueing its children.
            level = []
            for _ in range(len(queue)):
                cur = queue.popleft()
                level.append(cur.val)
                if cur.left:
                    queue.append(cur.left)
                if cur.right:
                    queue.append(cur.right)
            total += min_swaps(level)
        return total
| Solution |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 31306,
"end": 34988
class ____(AssertsCompiledSQL, fixtures.TestBase):
    """Tests for TypeDecorator wrapping types with special indexing and
    operator behavior (ARRAY, JSON); see issue #7249."""
    # NOTE(review): sqlalchemy test suites normally flag backend tests with
    # `__backend__ = True`; confirm `__sparse_driver_backend__` is intended.
    __sparse_driver_backend__ = True
    @testing.requires.array_type
    def test_typedec_of_array_modified(self, metadata, connection):
        """test #7249"""
        class SkipsFirst(TypeDecorator):  # , Indexable):
            impl = ARRAY(Integer, zero_indexes=True)
            cache_ok = True
            def process_bind_param(self, value, dialect):
                # Drop the first element on the way in; proves that bind
                # processing runs for the decorated ARRAY type.
                return value[1:]
            def copy(self, **kw):
                return SkipsFirst(**kw)
            def coerce_compared_value(self, op, value):
                return self.impl.coerce_compared_value(op, value)
        t = Table(
            "t",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", SkipsFirst),
        )
        t.create(connection)
        connection.execute(t.insert(), {"data": [1, 2, 3]})
        # The first element was dropped by process_bind_param on INSERT.
        val = connection.scalar(select(t.c.data))
        eq_(val, [2, 3])
        val = connection.scalar(select(t.c.data[0]))
        eq_(val, 2)
    def test_typedec_of_array_ops(self):
        class ArrayDec(TypeDecorator):
            impl = ARRAY(Integer, zero_indexes=True)
            cache_ok = True
            def coerce_compared_value(self, op, value):
                return self.impl.coerce_compared_value(op, value)
        # Index expressions against the decorated and plain ARRAY should
        # coerce the right-hand side identically.
        expr1 = column("q", ArrayDec)[0]
        expr2 = column("q", ARRAY(Integer, zero_indexes=True))[0]
        eq_(expr1.right.type._type_affinity, Integer)
        eq_(expr2.right.type._type_affinity, Integer)
        self.assert_compile(
            7 < column("q", ArrayDec).any_(),
            "%(param_1)s < ANY (q)",
            dialect="postgresql",
        )
        self.assert_compile(
            column("q", ArrayDec)[5], "q[%(q_1)s]", dialect="postgresql"
        )
    def test_typedec_of_json_ops(self):
        class JsonDec(TypeDecorator):
            impl = JSON()
            cache_ok = True
        # JSON path / casting operators must pass through the decorator.
        self.assert_compile(
            column("q", JsonDec)["q"], "q -> %(q_1)s", dialect="postgresql"
        )
        self.assert_compile(
            column("q", JsonDec)["q"].as_integer(),
            "CAST(q ->> %(q_1)s AS INTEGER)",
            dialect="postgresql",
        )
    @testing.requires.array_type
    def test_typedec_of_array(self, metadata, connection):
        """test #7249"""
        class ArrayDec(TypeDecorator):
            impl = ARRAY(Integer, zero_indexes=True)
            cache_ok = True
            def coerce_compared_value(self, op, value):
                return self.impl.coerce_compared_value(op, value)
        t = Table(
            "t",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", ArrayDec),
        )
        t.create(connection)
        connection.execute(t.insert(), {"data": [1, 2, 3]})
        val = connection.scalar(select(t.c.data))
        eq_(val, [1, 2, 3])
        val = connection.scalar(select(t.c.data[0]))
        eq_(val, 1)
    @testing.requires.json_type
    def test_typedec_of_json(self, metadata, connection):
        """test #7249"""
        class JsonDec(TypeDecorator):
            impl = JSON()
            cache_ok = True
        t = Table(
            "t",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", JsonDec),
        )
        t.create(connection)
        connection.execute(t.insert(), {"data": {"key": "value"}})
        val = connection.scalar(select(t.c.data))
        eq_(val, {"key": "value"})
        val = connection.scalar(select(t.c.data["key"].as_string()))
        eq_(val, "value")
| TypeDecoratorSpecialCasesTest |
python | scipy__scipy | scipy/cluster/tests/test_hierarchy.py | {
"start": 37616,
"end": 50225
class ____:
    """Tests for scipy.cluster.hierarchy.dendrogram: layout coordinates,
    labels, truncation modes and link/leaf coloring."""
    def test_dendrogram_single_linkage_tdist(self, xp):
        # Tests dendrogram calculation on single linkage of the tdist data set.
        Z = xp.asarray(linkage(hierarchy_test_data.ytdist, 'single'))
        R = dendrogram(Z, no_plot=True)
        leaves = R["leaves"]
        assert_equal(leaves, [2, 5, 1, 0, 3, 4])
    def test_valid_orientation(self, xp):
        Z = xp.asarray(linkage(hierarchy_test_data.ytdist, 'single'))
        assert_raises(ValueError, dendrogram, Z, orientation="foo")
    def test_labels_as_array_or_list(self, xp):
        # test for gh-12418
        Z = xp.asarray(linkage(hierarchy_test_data.ytdist, 'single'))
        labels = [1, 3, 2, 6, 4, 5]
        result1 = dendrogram(Z, labels=xp.asarray(labels), no_plot=True)
        result2 = dendrogram(Z, labels=labels, no_plot=True)
        assert result1 == result2
    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_valid_label_size(self, xp):
        # A label list whose length disagrees with Z must be rejected.
        link = xp.asarray([
            [0, 1, 1.0, 4],
            [2, 3, 1.0, 5],
            [4, 5, 2.0, 6],
        ])
        plt.figure()
        with pytest.raises(ValueError) as exc_info:
            dendrogram(link, labels=list(range(100)))
        assert "Dimensions of Z and labels must be consistent."\
            in str(exc_info.value)
        with pytest.raises(
                ValueError,
                match="Dimensions of Z and labels must be consistent."):
            dendrogram(link, labels=[])
        plt.close()
    @skip_xp_backends('torch',
        reason='MPL 3.9.2 & torch DeprecationWarning from __array_wrap__'
        ' and NumPy 2.0'
    )
    @skip_xp_backends('dask.array',
        reason='dask.array has bad interaction with matplotlib'
    )
    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    @pytest.mark.parametrize("orientation", ['top', 'bottom', 'left', 'right'])
    def test_dendrogram_plot(self, orientation, xp):
        # Tests dendrogram plotting.
        Z = xp.asarray(linkage(hierarchy_test_data.ytdist, 'single'))
        expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
                               [0.0, 219.0, 219.0, 0.0],
                               [0.0, 255.0, 255.0, 219.0],
                               [0.0, 268.0, 268.0, 255.0],
                               [138.0, 295.0, 295.0, 268.0]],
                    'icoord': [[5.0, 5.0, 15.0, 15.0],
                               [45.0, 45.0, 55.0, 55.0],
                               [35.0, 35.0, 50.0, 50.0],
                               [25.0, 25.0, 42.5, 42.5],
                               [10.0, 10.0, 33.75, 33.75]],
                    'ivl': ['2', '5', '1', '0', '3', '4'],
                    'leaves': [2, 5, 1, 0, 3, 4],
                    'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
                    }
        fig = plt.figure()
        ax = fig.add_subplot(221)
        # test that dendrogram accepts ax keyword
        R1 = dendrogram(Z, ax=ax, orientation=orientation)
        R1['dcoord'] = np.asarray(R1['dcoord'])
        assert_equal(R1, expected)
        # test that dendrogram accepts and handle the leaf_font_size and
        # leaf_rotation keywords
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_font_size=20, leaf_rotation=90)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        assert_equal(testlabel.get_size(), 20)
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_rotation=90)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_font_size=20)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_size(), 20)
        plt.close()
        # test plotting to gca (will import pylab)
        R2 = dendrogram(Z, orientation=orientation)
        plt.close()
        R2['dcoord'] = np.asarray(R2['dcoord'])
        assert_equal(R2, expected)
    @skip_xp_backends('torch',
        reason='MPL 3.9.2 & torch DeprecationWarning from __array_wrap__'
        ' and NumPy 2.0'
    )
    @skip_xp_backends('dask.array',
        reason='dask.array has bad interaction with matplotlib'
    )
    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_dendrogram_truncate_mode(self, xp):
        Z = xp.asarray(linkage(hierarchy_test_data.ytdist, 'single'))
        # 'lastp' keeps only the last p merged clusters.
        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
        plt.close()
        R['dcoord'] = np.asarray(R['dcoord'])
        assert_equal(R, {'color_list': ['C0'],
                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
                         'ivl': ['(2)', '(4)'],
                         'leaves': [6, 9],
                         'leaves_color_list': ['C0', 'C0'],
                         })
        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
        plt.close()
        R['dcoord'] = np.asarray(R['dcoord'])
        assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
                                    [0.0, 255.0, 255.0, 0.0],
                                    [0.0, 268.0, 268.0, 255.0],
                                    [138.0, 295.0, 295.0, 268.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0],
                                    [35.0, 35.0, 45.0, 45.0],
                                    [25.0, 25.0, 40.0, 40.0],
                                    [10.0, 10.0, 32.5, 32.5]],
                         'ivl': ['2', '5', '1', '0', '(2)'],
                         'leaves': [2, 5, 1, 0, 7],
                         'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
                         })
    @pytest.fixture
    def dendrogram_lock(self):
        # Serializes tests that mutate the process-global link color palette.
        return Lock()
    def test_dendrogram_colors(self, xp, dendrogram_lock):
        # Tests dendrogram plots with alternate colors
        Z = xp.asarray(linkage(hierarchy_test_data.ytdist, 'single'))
        with dendrogram_lock:
            # Global color palette might be changed concurrently
            set_link_color_palette(['c', 'm', 'y', 'k'])
            R = dendrogram(Z, no_plot=True,
                           above_threshold_color='g', color_threshold=250)
            set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
            color_list = R['color_list']
            assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
            # reset color palette (global list)
            set_link_color_palette(None)
    def test_dendrogram_leaf_colors_zero_dist(self, xp):
        # tests that the colors of leafs are correct for tree
        # with two identical points
        X = np.asarray([[1, 0, 0],
                        [0, 0, 1],
                        [0, 2, 0],
                        [0, 0, 1],
                        [0, 1, 0],
                        [0, 1, 0]])
        Z = xp.asarray(linkage(X, "single"))
        d = dendrogram(Z, no_plot=True)
        exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
        colors = d["leaves_color_list"]
        assert_equal(colors, exp_colors)
    def test_dendrogram_leaf_colors(self, xp):
        # tests that the colors are correct for a tree
        # with two near points ((0, 0, 1.1) and (0, 0, 1))
        X = np.asarray([[1, 0, 0],
                        [0, 0, 1.1],
                        [0, 2, 0],
                        [0, 0, 1],
                        [0, 1, 0],
                        [0, 1, 0]])
        Z = xp.asarray(linkage(X, "single"))
        d = dendrogram(Z, no_plot=True)
        exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
        colors = d["leaves_color_list"]
        assert_equal(colors, exp_colors)
def calculate_maximum_distances(Z, xp):
    # Used for testing correctness of maxdists.
    # Reference implementation: B[i] is the maximum linkage distance found in
    # the subtree rooted at the i-th merge of linkage matrix Z.
    n = Z.shape[0] + 1
    B = xp.zeros((n-1,), dtype=Z.dtype)
    for i in range(0, n - 1):
        # q holds: [max of left subtree, max of right subtree, own distance].
        q = xp.zeros((3,))
        left = Z[i, 0]
        right = Z[i, 1]
        if left >= n:
            # Non-leaf child: indices >= n refer to earlier merges.
            b_left = B[xp.asarray(left, dtype=xp.int64) - n]
            q = xpx.at(q, 0).set(b_left)
        if right >= n:
            b_right = B[xp.asarray(right, dtype=xp.int64) - n]
            q = xpx.at(q, 1).set(b_right)
        q = xpx.at(q, 2).set(Z[i, 2])
        B = xpx.at(B, i).set(xp.max(q))
    return B
def calculate_maximum_inconsistencies(Z, R, k=3, xp=np):
    # Used for testing correctness of maxinconsts.
    # B[i] is the maximum of column k of the inconsistency matrix R over the
    # subtree rooted at merge i of linkage matrix Z.
    n = Z.shape[0] + 1
    dtype = xp.result_type(Z, R)
    B = xp.zeros((n-1,), dtype=dtype)
    for i in range(0, n - 1):
        # q holds: [max of left subtree, max of right subtree, own R[i, k]].
        q = xp.zeros((3,))
        left = Z[i, 0]
        right = Z[i, 1]
        if left >= n:
            # Non-leaf child: indices >= n refer to earlier merges.
            b_left = B[xp.asarray(left, dtype=xp.int64) - n]
            q = xpx.at(q, 0).set(b_left)
        if right >= n:
            b_right = B[xp.asarray(right, dtype=xp.int64) - n]
            q = xpx.at(q, 1).set(b_right)
        q = xpx.at(q, 2).set(R[i, k])
        B = xpx.at(B, i).set(xp.max(q))
    return B
@make_xp_test_case(to_tree)
def test_node_compare(xp):
    # ClusterNode rich comparisons: the root compares greater than its
    # children, equal nodes compare equal.
    np.random.seed(23)
    nobs = 50
    X = np.random.randn(nobs, 4)
    Z = xp.asarray(ward(X))
    tree = to_tree(Z)
    assert_(tree > tree.get_left())
    assert_(tree.get_right() > tree.get_left())
    assert_(tree.get_right() == tree.get_right())
    assert_(tree.get_right() != tree.get_left())
@make_xp_test_case(cut_tree)
def test_cut_tree(xp):
    np.random.seed(23)
    nobs = 50
    X = np.random.randn(nobs, 4)
    Z = xp.asarray(ward(X))
    cutree = cut_tree(Z)
    # cutree.dtype varies between int32 and int64 over platforms
    # First column: every point is its own cluster; last: a single cluster.
    xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False)
    xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False)
    assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1))
    # Selecting by n_clusters must pick the corresponding full-cut column(s).
    xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15)
    xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15)
    xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15)
    nodes = _order_cluster_tree(Z)
    heights = xp.asarray([node.dist for node in nodes])
    # Cutting at a height must match cutting at the matching merge column.
    xp_assert_close(cutree[:, np.searchsorted(heights, [5])],
                    cut_tree(Z, height=5), rtol=1e-15)
    xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])],
                    cut_tree(Z, height=[5, 10]), rtol=1e-15)
    xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])],
                    cut_tree(Z, height=[10, 5]), rtol=1e-15)
@make_xp_test_case(optimal_leaf_ordering)
def test_optimal_leaf_ordering(xp):
    # test with the distance vector y
    Z = optimal_leaf_ordering(xp.asarray(linkage(hierarchy_test_data.ytdist)),
                              xp.asarray(hierarchy_test_data.ytdist))
    expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
    xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
    # test with the observation matrix X
    Z = optimal_leaf_ordering(xp.asarray(linkage(hierarchy_test_data.X, 'ward')),
                              xp.asarray(hierarchy_test_data.X))
    expectedZ = hierarchy_test_data.linkage_X_ward_olo
    xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
@skip_xp_backends(np_only=True, reason='`Heap` only supports NumPy backend')
def test_Heap(xp):
    # Heap is keyed by the original index; get_min returns {key, value}.
    values = xp.asarray([2, -1, 0, -1.5, 3])
    heap = Heap(values)
    # Minimum is -1.5, stored at index (key) 3.
    pair = heap.get_min()
    assert_equal(pair['key'], 3)
    assert_equal(pair['value'], -1.5)
    heap.remove_min()
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], -1)
    # Raising key 1's value promotes key 2 (value 0) to the minimum.
    heap.change_value(1, 2.5)
    pair = heap.get_min()
    assert_equal(pair['key'], 2)
    assert_equal(pair['value'], 0)
    heap.remove_min()
    heap.remove_min()
    heap.change_value(1, 10)
    pair = heap.get_min()
    assert_equal(pair['key'], 4)
    assert_equal(pair['value'], 3)
    heap.remove_min()
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], 10)
| TestDendrogram |
python | mozilla__bleach | bleach/_vendor/html5lib/filters/optionaltags.py | {
"start": 89,
"end": 10588
class ____(base.Filter):
    """Removes optional tags from the token stream"""
    def slider(self):
        """Yield ``(previous, current, next)`` triples over the source
        tokens, with ``None`` standing in past either end of the stream."""
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        if previous1 is not None:
            yield previous2, previous1, None
    def __iter__(self):
        """Emit every token except start/end tags that the HTML spec allows
        to be omitted in their current context."""
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag carrying attributes can never be omitted.
                if (token["data"] or
                        not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token
    def is_optional_start(self, tagname, previous, next):
        """Return True if the start tag ``tagname`` may be omitted, given the
        surrounding tokens, per the HTML optional-tag rules."""
        type = next and next["type"] or None
        # NOTE(review): `in 'html'` is a substring test, so e.g. 'ht' would
        # also match; presumably `== 'html'` was intended -- confirm.
        if tagname in 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False
    def is_optional_end(self, tagname, next):
        """Return True if the end tag ``tagname`` may be omitted, given the
        following token, per the HTML optional-tag rules."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
| Filter |
python | kamyu104__LeetCode-Solutions | Python/xor-after-range-multiplication-queries-i.py | {
"start": 144,
"end": 1178
# Python 2 solution (xrange / iteritems / reduce): sqrt decomposition over
# the query strides.
class ____(object):
    def xorAfterQueries(self, nums, queries):
        """
        :type nums: List[int]
        :type queries: List[List[int]]
        :rtype: int
        """
        MOD = 10**9+7
        def inv(x):
            # Modular inverse via Fermat's little theorem (MOD is prime).
            return pow(x, MOD-2, MOD)
        block_size = int(len(nums)**0.5)+1
        # diffs[k][i]: multiplicative difference array for stride-k queries;
        # a factor recorded at l is cancelled by its inverse past the last
        # index the query touches, then swept forward with stride k below.
        diffs = collections.defaultdict(lambda: [1]*len(nums))
        for l, r, k, v in queries:
            if k <= block_size:
                # Small stride: O(1) per query via the difference array.
                diffs[k][l] = (diffs[k][l]*v)%MOD
                r += k-(r-l)%k
                if r < len(nums):
                    diffs[k][r] = (diffs[k][r]*inv(v))%MOD
            else:
                # Large stride: few touched indices, apply directly.
                for i in xrange(l, r+1, k):
                    nums[i] = (nums[i]*v)%MOD
        for k, diff in diffs.iteritems():
            for i in xrange(len(diff)):
                if i-k >= 0:
                    # Propagate the running product along stride k.
                    diff[i] = (diff[i]*diff[i-k])%MOD
                nums[i] = (nums[i]*diff[i])%MOD
        return reduce(lambda accu, x: accu^x, nums, 0)
# Time: O(q * n)
# Space: O(1)
# simulation
| Solution |
python | celery__celery | t/unit/tasks/test_canvas.py | {
"start": 3249,
"end": 3411
class ____(chunks):
    # Canvas test helper: a `chunks` subclass whose only difference is a
    # custom subtask_type, used to check subclass type propagation.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.subtask_type = "chunks_subclass"
python | google__jax | jax/_src/debugging.py | {
"start": 22975,
"end": 35095
} | class ____:
def __init__(self, callback, module_context):
self.callback = callback
self.module_context = module_context
def _inspect_sharding_lowering_rule(ctx: mlir.LoweringRuleContext, value, *,
                                    callback):
  """Lower ``inspect_sharding`` to a custom call whose resolved sharding the
  SPMD partitioner later reports back through ``callback``."""
  mesh = mesh_lib.thread_resources.env.physical_mesh
  axis_context = ctx.module_context.axis_context
  if isinstance(axis_context, sharding_impls.ShardingContext):
    devices = axis_context.device_assignment
    if devices is None:
      raise AssertionError(
          'Please file a bug at https://github.com/jax-ml/jax/issues')
    am = axis_context.abstract_mesh
    if am is not None:
      # Rebuild a concrete Mesh from the abstract mesh's axis layout.
      mesh = mesh_lib.Mesh(np.array(devices).reshape(am.axis_sizes),
                           am.axis_names)
  elif isinstance(axis_context, sharding_impls.SPMDAxisContext):
    mesh = axis_context.mesh
    devices = axis_context.mesh._flat_devices_tuple
  else:
    raise NotImplementedError(type(axis_context))
  assert devices is not None
  # If we have a nontrivial parallel computation, we need to wait until the SPMD
  # partitioner calls back with the `HloSharding.
  def _hlo_sharding_callback(hlo_sharding: xc.HloSharding):
    if mesh.empty:
      return callback(
          sharding_impls.GSPMDSharding(devices, hlo_sharding))
    # Manual shardings carry no partition spec; otherwise translate the
    # HloSharding back into a PartitionSpec on the mesh.
    pspec = (P() if hlo_sharding.is_manual() else
             parse_flatten_op_sharding(hlo_sharding, mesh)[0])
    return callback(NamedSharding(mesh, pspec))
  if len(devices) == 1:
    # If we only have one device in our computation, we can construct a
    # replicated HloSharding and call it right now.
    _hlo_sharding_callback(sharding_impls.replicated_hlo_sharding)
    return []
  key = xc.encode_inspect_sharding_callback(_hlo_sharding_callback)
  # We need to make sure `_hlo_sharding_callback` is still alive when the SPMD
  # partitioner runs so we keep it alive by attaching it to the executable. #
  ctx.module_context.add_keepalive(_hlo_sharding_callback)
  hlo.CustomCallOp([value.type], [value],
                   call_target_name=ir.StringAttr.get(
                       _INSPECT_SHARDING_CALL_NAME),
                   has_side_effect=ir.BoolAttr.get(True),
                   api_version=mlir.i32_attr(1),
                   called_computations=ir.ArrayAttr.get([]),
                   backend_config=ir.StringAttr.get(key),
                   operand_layouts=None,
                   result_layouts=None)
  return []
# Register the rule so jaxprs containing inspect_sharding_p can be lowered.
mlir.register_lowering(inspect_sharding_p, _inspect_sharding_lowering_rule)
def inspect_sharding_prop_user_sharding(sharding, backend_string):
del sharding, backend_string
return []
def inspect_sharding_infer_sharding_from_operands(arg_shapes, arg_shardings,
shape, backend_string):
del arg_shapes, shape, backend_string
return arg_shardings[0]
def _slice_to_chunk_idx(size: int, slc: slice) -> int:
if slc.stop == slc.start == None:
return 0
slice_size = slc.stop - slc.start
assert slc.start % slice_size == 0
assert size % slice_size == 0
return slc.start // slice_size
def _raise_to_slice(slc: slice | int):
if isinstance(slc, int):
return slice(slc, slc + 1)
return slc
Color = Union[tuple[float, float, float], str]
ColorMap = Callable[[float], tuple[float, float, float, float]]
def _canonicalize_color(color: Color) -> str:
if isinstance(color, str):
return color
r, g, b = (int(a * 255) for a in color)
return f"#{r:02X}{g:02X}{b:02X}"
def _get_text_color(color: str) -> str:
r, g, b = map(lambda x: int(x, 16), (color[1:3], color[3:5], color[5:7]))
if (r * 0.299 + g * 0.587 + b * 0.114) > 186:
return "#000000"
return "#ffffff"
def make_color_iter(color_map, num_rows, num_cols):
num_colors = num_rows * num_cols
color_values = np.linspace(0, 1, num_colors)
idx = 0
for _ in range(num_colors):
yield color_map(color_values[idx])
idx = (idx + num_colors // 2 + bool(num_colors % 2 == 0)) % num_colors
def visualize_sharding(shape: Sequence[int], sharding: Sharding, *,
use_color: bool = True, scale: float = 1.,
min_width: int = 9, max_width: int = 80,
color_map: ColorMap | None = None):
"""Visualizes a ``Sharding`` using ``rich``."""
if not importlib.util.find_spec("rich"):
raise ValueError("`visualize_sharding` requires `rich` to be installed.")
# These imports are local so that they don't affect JAX import times.
# pytype: disable=import-error
import rich.align
import rich.console
import rich.box
import rich.padding
import rich.style
import rich.table
# pytype: enable=import-error
if len(shape) > 2 or len(shape) < 1:
raise ValueError(
"`visualize_sharding` only works for shapes with 1 and 2 dimensions.")
console = rich.console.Console(width=max_width)
use_color = use_color and console.color_system is not None
if use_color and not color_map:
try:
import matplotlib as mpl # pytype: disable=import-error
color_map = mpl.colormaps["tab20b"]
except ModuleNotFoundError:
use_color = False
base_height = int(10 * scale)
aspect_ratio = (shape[1] if len(shape) == 2 else 1) / shape[0]
base_width = int(base_height * aspect_ratio)
height_to_width_ratio = 2.5
# Grab the device kind from the first device
device_kind = next(iter(sharding.device_set)).platform.upper()
device_indices_map = sharding.devices_indices_map(tuple(shape))
slices: dict[tuple[int, ...], set[int]] = {}
heights: dict[tuple[int, ...], float | None] = {}
widths: dict[tuple[int, ...], float] = {}
for i, (dev, slcs) in enumerate(device_indices_map.items()):
assert slcs is not None
slcs = tuple(map(_raise_to_slice, slcs))
chunk_idxs = tuple(map(_slice_to_chunk_idx, shape, slcs))
if slcs is None:
raise NotImplementedError
if len(slcs) == 2:
vert, horiz = slcs
vert_size = ((vert.stop - vert.start ) if vert.stop is not None
else shape[0])
horiz_size = ((horiz.stop - horiz.start) if horiz.stop is not None
else shape[1])
chunk_height = vert_size / shape[0]
chunk_width = horiz_size / shape[1]
heights[chunk_idxs] = chunk_height
widths[chunk_idxs] = chunk_width
else:
# In the 1D case, we set the height to 1.
horiz, = slcs
vert = slice(0, 1, None)
horiz_size = (
(horiz.stop - horiz.start) if horiz.stop is not None else shape[0])
chunk_idxs = (0, *chunk_idxs)
heights[chunk_idxs] = None
widths[chunk_idxs] = horiz_size / shape[0]
slices.setdefault(chunk_idxs, set()).add(dev.id)
num_rows = max(a[0] for a in slices.keys()) + 1
if len(list(slices.keys())[0]) == 1:
num_cols = 1
else:
num_cols = max(a[1] for a in slices.keys()) + 1
color_iter = make_color_iter(color_map, num_rows, num_cols)
table = rich.table.Table(show_header=False, show_lines=not use_color,
padding=0,
highlight=not use_color, pad_edge=False,
box=rich.box.SQUARE if not use_color else None)
for i in range(num_rows):
col = []
for j in range(num_cols):
entry = f"{device_kind} "+",".join([str(s) for s in sorted(slices[i, j])])
width, maybe_height = widths[i, j], heights[i, j]
width = int(width * base_width * height_to_width_ratio)
if maybe_height is None:
height = 1
else:
height = int(maybe_height * base_height)
width = min(max(width, min_width), max_width)
left_padding, remainder = divmod(width - len(entry) - 2, 2)
right_padding = left_padding + remainder
top_padding, remainder = divmod(height - 2, 2)
bottom_padding = top_padding + remainder
if use_color:
color = _canonicalize_color(next(color_iter)[:3])
text_color = _get_text_color(color)
top_padding += 1
bottom_padding += 1
left_padding += 1
right_padding += 1
else:
color = None
text_color = None
padding = (
max(top_padding, 0),
max(right_padding, 0),
max(bottom_padding, 0),
max(left_padding, 0),
)
col.append(
rich.padding.Padding(
rich.align.Align(entry, "center", vertical="middle"), padding,
style=rich.style.Style(bgcolor=color,
color=text_color)))
table.add_row(*col)
console.print(table, end='\n\n')
def inspect_array_sharding(value, *, callback: Callable[[Sharding], None]):
"""Enables inspecting array sharding inside JIT-ted functions.
This function, when provided with a Pytree of arrays, calls back with each of
their shardings and works in ``jax.jit``-ted computations, enabling inspecting
the chosen intermediate shardings.
The policy for when ``callback`` is called is *as early as possible* when the
sharding information is available. This means if ``inspect_array_callback`` is
called without any transformations, the callback will happen immediately
since we have the array and its sharding readily available. Inside of a
``jax.jit``, the callback will happen at lowering time, meaning you can
trigger the callback using the AOT API (``jit(f).lower(...)``). When inside of
a ``jax.jit``, the callback happens *at compile time* since the sharding is
determined by XLA. You can trigger the callback by using JAX's AOT API
(``jax.jit(f).lower(...).compile()``). In all cases, the callback will be
triggered by running the function, since running a function entails lowering
and compiling it first. However, once the function is compiled and cached,
the callback will no longer occur.
This function is experimental and its behavior may change in the future.
Args:
value: A Pytree of JAX arrays.
callback: A callable that takes in a ``Sharding`` and doesn't return a value.
In the following example, we print out the sharding of an intermediate value
in a ``jax.jit``-ted computation:
>>> import jax
>>> import jax.numpy as jnp
>>> from jax.sharding import Mesh, PartitionSpec
>>>
>>> x = jnp.arange(8, dtype=jnp.float32)
>>> def f_(x):
... x = jnp.sin(x)
... jax.debug.inspect_array_sharding(x, callback=print)
... return jnp.square(x)
>>> f = jax.jit(f_, in_shardings=PartitionSpec('dev'),
... out_shardings=PartitionSpec('dev'))
>>> with jax.set_mesh(Mesh(jax.devices(), ('dev',))):
... f.lower(x).compile() # doctest: +SKIP
...
NamedSharding(mesh={'dev': 8}, partition_spec=PartitionSpec(('dev',),))
"""
def _inspect(val):
inspect_sharding_p.bind(val, callback=callback)
tree_util.tree_map(_inspect, value)
def visualize_array_sharding(arr, **kwargs):
"""Visualizes an array's sharding."""
def _visualize(sharding):
return visualize_sharding(arr.shape, sharding, **kwargs)
inspect_array_sharding(arr, callback=_visualize)
# TODO(mattjj): working around an apparent XLA or PjRt bug, remove eventually
def _debug_callback_eager_rule(
mesh,
*args,
callback: Callable[..., Any],
effect: DebugEffect,
partitioned: bool,
):
del effect
with core.eval_context():
all_blocks = zip(*map(list, args))
for (idx, device), blocks in zip(np.ndenumerate(mesh.devices), all_blocks):
callback(*blocks)
return []
shard_map.eager_rules[debug_callback_p] = _debug_callback_eager_rule
def _debug_print_eager_rule(
mesh,
*args,
fmt: str,
ordered,
partitioned,
in_tree,
static_args,
np_printoptions,
has_placeholders,
logging_record,
):
del ordered, partitioned
callback = partial(
_format_print_callback, fmt, dict(np_printoptions), has_placeholders,
logging_record,
)
callback = _make_flat_callback(in_tree, callback, static_args)
with core.eval_context():
all_blocks = zip(*map(list, args))
for (idx, device), blocks in zip(np.ndenumerate(mesh.devices), all_blocks):
callback(*blocks)
return []
shard_map.eager_rules[debug_print_p] = _debug_print_eager_rule
| ShardingCallbackInfo |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_wyoming_zip.py | {
"start": 1743,
"end": 4078
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Wyoming zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_wyoming_zip": ["82001", "82701", "83011", "83414"],
"invalid_wyoming_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_wyoming_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_wyoming_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_wyoming_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidWyomingZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidWyomingZip |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/state_backed_component_tests/test_state_backed_component.py | {
"start": 1196,
"end": 21575
} | class ____(StateBackedComponent, dg.Model, dg.Resolvable):
defs_state_key: Optional[str] = None
defs_state: ResolvedDefsStateConfig = DefsStateConfigArgs.versioned_state_storage()
@property
def defs_state_config(self) -> DefsStateConfig:
default_key = self.__class__.__name__
if self.defs_state_key is not None:
default_key = f"{default_key}[{self.defs_state_key}]"
return DefsStateConfig.from_args(self.defs_state, default_key=default_key)
def build_defs_from_state(
self, context: dg.ComponentLoadContext, state_path: Optional[Path]
) -> dg.Definitions:
if state_path is None:
value = "initial"
else:
with open(state_path) as f:
state = json.load(f)
value = state["value"]
@dg.asset(
name=f"the_asset_{context.component_path.file_path.stem}",
metadata={"state_value": value},
)
def the_asset():
return dg.MaterializeResult(metadata={"foo": value})
return dg.Definitions(assets=[the_asset])
async def write_state_to_path(self, state_path: Path):
with open(state_path, "w") as f:
json.dump({"value": f"bar_{random.randint(1000, 9999)}"}, f)
def _get_state_refresh_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
project_root = context.project_root
@dg.op
def refresh_state_op():
asyncio.run(self.refresh_state(project_root))
@dg.job(name=f"state_refresh_job_{context.component_path.file_path.stem}")
def state_refresh_job():
refresh_state_op()
return dg.Definitions(jobs=[state_refresh_job])
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
return dg.Definitions.merge(
super().build_defs(context),
self._get_state_refresh_defs(context),
)
@pytest.mark.parametrize(
"storage_location",
[
DefsStateManagementType.LOCAL_FILESYSTEM,
DefsStateManagementType.VERSIONED_STATE_STORAGE,
],
)
@pytest.mark.parametrize(
"key",
[
None,
"CustomDefsStateKey",
],
)
def test_simple_state_backed_component(
storage_location: DefsStateManagementType, key: Optional[str]
) -> None:
expected_key = key or "MyStateBackedComponent"
with instance_for_test() as instance, create_defs_folder_sandbox() as sandbox:
component_path = sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {
"defs_state": {
"management_type": storage_location.value,
**({"key": key} if key else {}),
}
},
},
defs_path="foo",
)
state_storage = DefsStateStorage.get()
assert state_storage
# add some state versions for other unrelated keys, should not show up in the load context
state_storage.set_latest_version("RandomKey1", LOCAL_STATE_VERSION)
state_storage.set_latest_version("RandomKey2", CODE_SERVER_STATE_VERSION)
state_storage.set_latest_version("RandomKey3", "xyz")
# initial load, no state written, so use "initial"
with (
scoped_definitions_load_context() as load_context,
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
# the state value is set to some random number
original_metadata_value = spec.metadata["state_value"]
assert original_metadata_value == "initial" # hardcoded in the component
# materialize the asset, the random number should be preserved
result = dg.materialize([defs.get_assets_def(spec.key)], instance=instance)
assert result.success
mats = result.asset_materializations_for_node("the_asset_foo")
assert len(mats) == 1
assert mats[0].metadata["foo"] == dg.TextMetadataValue(original_metadata_value)
# should register that the key was accessed (but has no version)
assert load_context.accessed_defs_state_info
assert load_context.accessed_defs_state_info.info_mapping.keys() == {expected_key}
assert load_context.accessed_defs_state_info.get_version(expected_key) is None
# reload the definitions, state should be the same
with (
scoped_definitions_load_context() as load_context,
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
# metadata should remain the same
assert spec.metadata["state_value"] == original_metadata_value
# now execute the job to refresh the state
refresh_job = defs.get_job_def("state_refresh_job_foo")
refresh_job.execute_in_process(instance=instance)
# same as above
assert load_context.accessed_defs_state_info
assert load_context.accessed_defs_state_info.info_mapping.keys() == {expected_key}
assert load_context.accessed_defs_state_info.get_version(expected_key) is None
# now we reload the definitions, state should be updated to something random
with (
scoped_definitions_load_context() as load_context,
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
new_metadata_value = spec.metadata["state_value"]
assert new_metadata_value != original_metadata_value
# should have version information available
assert load_context.accessed_defs_state_info
assert load_context.accessed_defs_state_info.info_mapping.keys() == {expected_key}
assert load_context.accessed_defs_state_info.get_version(expected_key) is not None
@pytest.mark.parametrize(
"instance_available",
[True, False],
)
def test_code_server_state_backed_component(instance_available: bool) -> None:
instance_cm = contextlib.nullcontext() if instance_available else instance_for_test()
with instance_cm, create_defs_folder_sandbox() as sandbox:
component_path = sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {
"defs_state": {
"management_type": DefsStateManagementType.LEGACY_CODE_SERVER_SNAPSHOTS.value
}
},
},
defs_path="foo",
)
with (
scoped_definitions_load_context() as load_context,
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
original_metadata_value = spec.metadata["state_value"]
# should automatically load
assert original_metadata_value != "initial"
repo = defs.get_repository_def()
assert repo.repository_load_data is not None
assert load_context.load_type == DefinitionsLoadType.INITIALIZATION
pending_metadata = load_context.get_pending_reconstruction_metadata()
key = get_code_server_metadata_key("MyStateBackedComponent")
assert pending_metadata.keys() == {"defs-state-[MyStateBackedComponent]"}
# last bit is random
assert '{"value": "bar_' in pending_metadata[key]
assert load_context.accessed_defs_state_info is not None
# now simulate the reconstruction process
with scoped_definitions_load_context(
load_type=DefinitionsLoadType.RECONSTRUCTION,
repository_load_data=RepositoryLoadData(
cacheable_asset_data={},
reconstruction_metadata=pending_metadata,
defs_state_info=load_context.accessed_defs_state_info,
),
):
with sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs):
specs = defs.get_all_asset_specs()
spec = specs[0]
assert spec.metadata["state_value"] == original_metadata_value
assert "bar_" in spec.metadata["state_value"]
@pytest.mark.parametrize(
"instance_available",
[True, False],
)
def test_local_filesystem_state_backed_component(instance_available: bool) -> None:
instance_cm = contextlib.nullcontext() if instance_available else instance_for_test()
with instance_cm, create_defs_folder_sandbox() as sandbox:
component_path = sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {
"defs_state": {
"management_type": DefsStateManagementType.LOCAL_FILESYSTEM.value,
}
},
},
defs_path="foo",
)
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
# will not automatically load
assert spec.metadata["state_value"] == "initial"
# now execute the job to refresh the state
refresh_job = defs.get_job_def("state_refresh_job_foo")
refresh_job.execute_in_process() # note: ephemeral instance
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
new_state_value = spec.metadata["state_value"]
# state should be updated to something random
assert new_state_value != "initial"
assert new_state_value.startswith("bar_")
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
# state should be the same as before
assert spec.metadata["state_value"] == new_state_value
@pytest.mark.parametrize(
"storage_location",
[
DefsStateManagementType.LOCAL_FILESYSTEM,
DefsStateManagementType.VERSIONED_STATE_STORAGE,
DefsStateManagementType.LEGACY_CODE_SERVER_SNAPSHOTS,
],
)
def test_dev_mode_state_backed_component(storage_location: DefsStateManagementType) -> None:
with (
instance_for_test(),
create_defs_folder_sandbox() as sandbox,
# we're in dev mode
environ({"DAGSTER_IS_DEV_CLI": "1"}),
):
component_path = sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {"defs_state": {"management_type": storage_location.value}},
},
defs_path="foo",
)
with scoped_definitions_load_context() as load_context:
# nothing accessed yet
assert load_context.accessed_defs_state_info is None
with sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs):
assert load_context.accessed_defs_state_info is not None
assert load_context.accessed_defs_state_info.info_mapping.keys() == {
"MyStateBackedComponent"
}
specs = defs.get_all_asset_specs()
spec = specs[0]
metadata_value = spec.metadata["state_value"]
# should automatically load
assert metadata_value != "initial"
def test_multiple_components() -> None:
with instance_for_test(), create_defs_folder_sandbox() as sandbox:
sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
},
defs_path="first",
)
second_component_path = sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
},
defs_path="second",
)
# Should emit a warning but not raise an error when components share the same defs state key
with pytest.warns(
DuplicateDefsStateKeyWarning, match="Multiple components have the same defs state key"
):
with sandbox.build_all_defs() as defs:
# Both components should still load successfully
assert len(defs.get_all_asset_specs()) == 2
# now update the defs_state_key
shutil.rmtree(second_component_path)
sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {"defs_state_key": "MyStateBackedComponent_but_different"},
},
defs_path="second",
)
with sandbox.build_all_defs() as defs:
assert len(defs.get_all_asset_specs()) == 2
def test_two_components_sharing_same_state_key() -> None:
"""Test that two components can share the same state key with a warning."""
with instance_for_test(), create_defs_folder_sandbox() as sandbox:
# Create two components with the same state key by explicitly setting it
sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {"defs_state_key": "shared_key"},
},
defs_path="component_a",
)
sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {"defs_state_key": "shared_key"},
},
defs_path="component_b",
)
# Should emit a warning but not raise an error
with pytest.warns(
DuplicateDefsStateKeyWarning,
match="Multiple components have the same defs state key: MyStateBackedComponent\\[shared_key\\]",
):
with sandbox.build_all_defs() as defs:
# Both components should load successfully
specs = defs.get_all_asset_specs()
assert len(specs) == 2
# Verify both assets were created
assert any("component_a" in spec.key.to_user_string() for spec in specs)
assert any("component_b" in spec.key.to_user_string() for spec in specs)
def test_state_backed_component_migration_from_versioned_to_local_storage() -> None:
"""Test migrating a component from versioned state storage to local storage.
This test demonstrates:
1. Start with versioned storage, no state available (initial value)
2. Run update state job to populate versioned storage
3. Reload with versioned storage, should have updated value
4. Switch to local storage, should reset to initial value
5. Load with DAGSTER_IS_DEV_CLI set, should force local refresh
"""
with instance_for_test() as instance, create_defs_folder_sandbox() as sandbox:
# Step 1: Start with versioned storage, no state available
component_path = sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {
"defs_state": {
"management_type": DefsStateManagementType.VERSIONED_STATE_STORAGE.value,
}
},
},
defs_path="migration_test",
)
# Initial load with versioned storage - should be "initial"
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
initial_metadata_value = spec.metadata["state_value"]
assert initial_metadata_value == "initial"
# Step 2: Run the update state job to populate versioned storage
refresh_job = defs.get_job_def("state_refresh_job_migration_test")
refresh_job.execute_in_process(instance=instance)
# Step 3: Reload with versioned storage - should have updated value
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
versioned_metadata_value = spec.metadata["state_value"]
assert versioned_metadata_value != "initial"
assert versioned_metadata_value != initial_metadata_value
# Step 4: Switch to local storage - should reset to initial value
sandbox.scaffold_component(
component_cls=MyStateBackedComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.state_backed_component_tests.test_state_backed_component.MyStateBackedComponent",
"attributes": {
"defs_state": {
"management_type": DefsStateManagementType.LOCAL_FILESYSTEM.value,
}
},
},
defs_path="migration_test",
)
# Load with local storage - should be back to "initial"
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
local_metadata_value = spec.metadata["state_value"]
assert local_metadata_value == "initial"
# Step 5: Load with DAGSTER_IS_DEV_CLI set - should force local refresh
with (
environ({"DAGSTER_IS_DEV_CLI": "1"}),
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=component_path) as (_, defs),
):
specs = defs.get_all_asset_specs()
spec = specs[0]
dev_metadata_value = spec.metadata["state_value"]
# Should have a non-initial value due to forced refresh in dev mode
assert dev_metadata_value != "initial"
# Should be different from the versioned value since it's a new random value
assert dev_metadata_value != versioned_metadata_value
| MyStateBackedComponent |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_itertools.py | {
"start": 101444,
"end": 101723
} | class ____:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
| S |
python | joke2k__faker | faker/providers/lorem/vi_VN/__init__.py | {
"start": 68,
"end": 3455
} | class ____(LoremProvider):
"""Implement lorem provider for ``vi_VN`` locale.
Word list is based on common Vietnamese words and phrases.
# Source : https://vi.wikipedia.org/wiki/Ng%E1%BB%AF_ph%C3%A1p_ti%E1%BA%BFng_Vi%E1%BB%87t
"""
word_list = (
"cái",
"đó",
"là",
"và",
"có",
"như",
"một",
"để",
"cũng",
"với",
"cho",
"trong",
"tôi",
"của",
"người",
"không",
"sẽ",
"đã",
"này",
"theo",
"làm",
"nơi",
"đang",
"nếu",
"bạn",
"được",
"khi",
"thì",
"về",
"mà",
"cũng",
"nào",
"của",
"nhưng",
"vì",
"rất",
"tại",
"tại",
"thế",
"để",
"giữa",
"với",
"cách",
"từ",
"lớn",
"có",
"vài",
"hơn",
"vẫn",
"dưới",
"đi",
"đến",
"vậy",
"điều",
"hoặc",
"chỉ",
"hơn",
"khiến",
"giống",
"sau",
"trong",
"đúng",
"của",
"mỗi",
"như",
"bên",
"để",
"chưa",
"như",
"thay",
"như",
"các",
"tự",
"số",
"từng",
"nhiều",
"gần",
"từ",
)
parts_of_speech: Dict[str, tuple] = {
"verb": (
"là",
"có",
"làm",
"đi",
"nói",
"thấy",
"nghe",
"đọc",
"viết",
"muốn",
"đi",
"ngồi",
"uống",
"ăn",
"học",
"chơi",
"nhìn",
"được",
"tìm",
"đặt",
"giúp",
"hỏi",
"giải",
"mua",
"bán",
"nói",
),
"noun": (
"người",
"sách",
"máy",
"bàn",
"ghế",
"cửa",
"nhà",
"bút",
"xe",
"điện thoại",
"bánh",
"cà phê",
"nước",
"trường",
"chúng tôi",
"học sinh",
"giáo viên",
"bố",
"mẹ",
"em",
"anh",
"chị",
),
"adverb": (
"thực sự",
"rất",
"nhanh",
"chậm",
"tốt",
"xấu",
"đúng",
"sai",
"vui",
"buồn",
"mới",
"cũ",
"dễ",
"khó",
"gần",
"xa",
"hơn",
"vẫn",
"đã",
"mới",
),
"adjective": (
"đẹp",
"xấu",
"tốt",
"xấu",
"to",
"nhỏ",
"ngọt",
"chua",
"mặn",
"nhanh",
"chậm",
"đầu",
"cuối",
"mới",
"cũ",
"dễ",
"khó",
"hơi",
"vui",
"buồn",
"mạnh",
"yếu",
),
}
| Provider |
python | doocs__leetcode | solution/2700-2799/2761.Prime Pairs With Target Sum/Solution.py | {
"start": 0,
"end": 412
} | class ____:
def findPrimePairs(self, n: int) -> List[List[int]]:
primes = [True] * n
for i in range(2, n):
if primes[i]:
for j in range(i + i, n, i):
primes[j] = False
ans = []
for x in range(2, n // 2 + 1):
y = n - x
if primes[x] and primes[y]:
ans.append([x, y])
return ans
| Solution |
python | walkccc__LeetCode | solutions/389. Find the Difference/389.py | {
"start": 0,
"end": 238
} | class ____:
def findTheDifference(self, s: str, t: str) -> str:
sXors = chr(functools.reduce(operator.xor, map(ord, s), 0))
tXors = chr(functools.reduce(operator.xor, map(ord, t), 0))
return chr(ord(sXors) ^ ord(tXors))
| Solution |
python | python__mypy | test-data/unit/plugins/type_anal_hook.py | {
"start": 322,
"end": 1436
} | class ____(Plugin):
def get_type_analyze_hook(self, fullname: str) -> Callable[[AnalyzeTypeContext], Type] | None:
if fullname == "m.Signal":
return signal_type_analyze_callback
return None
def signal_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:
if len(ctx.type.args) != 1 or not isinstance(ctx.type.args[0], TypeList):
ctx.api.fail('Invalid "Signal" type (expected "Signal[[t, ...]]")', ctx.context)
return AnyType(TypeOfAny.from_error)
args = ctx.type.args[0]
assert isinstance(args, TypeList)
analyzed = ctx.api.analyze_callable_args(args)
if analyzed is None:
return AnyType(TypeOfAny.from_error) # Error generated elsewhere
arg_types, arg_kinds, arg_names = analyzed
arg_types = [ctx.api.analyze_type(arg) for arg in arg_types]
type_arg = CallableType(
arg_types, arg_kinds, arg_names, NoneTyp(), ctx.api.named_type("builtins.function", [])
)
return ctx.api.named_type("m.Signal", [type_arg])
def plugin(version: str) -> type[TypeAnalyzePlugin]:
return TypeAnalyzePlugin
| TypeAnalyzePlugin |
python | kamyu104__LeetCode-Solutions | Python/pancake-sorting.py | {
"start": 519,
"end": 1461
} | class ____(object):
def pancakeSort(self, arr):
"""
:type arr: List[int]
:rtype: List[int]
"""
bit = BIT(len(arr))
result = []
for i in xrange(len(arr)):
n = bit.query((arr[i]-1)-1)
bit.add(arr[i]-1, 1)
if n == i: # already sorted
continue
if n == 0: # (0..i-1)i
if i > 1:
result.append(i) # (i-1..0)i
result.append(i+1) # i(0..i-1)
else: # (0..n-1)n(n+1..i-1)i
if n > 1:
result.append(n) # (n-1..0)n(n+1..i-1)i
result.append(i) # (i-1..n+1)n(0..n-1)i
result.append(i+1) # i(n-1..0)n(n+1..i-1)
result.append(n+1) # (0..n-1)in(n+1..i-1)
return result
# Time: O(nlogn)
# Space: O(n)
# merge sort solution
| Solution |
python | python-pillow__Pillow | src/PIL/TiffTags.py | {
"start": 585,
"end": 17251
} | class ____(_TagInfo):
__slots__: list[str] = []
def __new__(
cls,
value: int | None = None,
name: str = "unknown",
type: int | None = None,
length: int | None = None,
enum: dict[str, int] | None = None,
) -> TagInfo:
return super().__new__(cls, value, name, type, length, enum or {})
def cvt_enum(self, value: str) -> int | str:
# Using get will call hash(value), which can be expensive
# for some types (e.g. Fraction). Since self.enum is rarely
# used, it's usually better to test it first.
return self.enum.get(value, value) if self.enum else value
def lookup(tag: int, group: int | None = None) -> TagInfo:
"""
:param tag: Integer tag number
:param group: Which :py:data:`~PIL.TiffTags.TAGS_V2_GROUPS` to look in
.. versionadded:: 8.3.0
:returns: Taginfo namedtuple, From the ``TAGS_V2`` info if possible,
otherwise just populating the value and name from ``TAGS``.
If the tag is not recognized, "unknown" is returned for the name
"""
if group is not None:
info = TAGS_V2_GROUPS[group].get(tag) if group in TAGS_V2_GROUPS else None
else:
info = TAGS_V2.get(tag)
return info or TagInfo(tag, TAGS.get(tag, "unknown"))
##
# Map tag numbers to tag info.
#
# id: (Name, Type, Length[, enum_values])
#
# The length here differs from the length in the tiff spec. For
# numbers, the tiff spec is for the number of fields returned. We
# agree here. For string-like types, the tiff spec uses the length of
# field in bytes. In Pillow, we are using the number of expected
# fields, in general 1 for string-like types.
BYTE = 1
ASCII = 2
SHORT = 3
LONG = 4
RATIONAL = 5
SIGNED_BYTE = 6
UNDEFINED = 7
SIGNED_SHORT = 8
SIGNED_LONG = 9
SIGNED_RATIONAL = 10
FLOAT = 11
DOUBLE = 12
IFD = 13
LONG8 = 16
_tags_v2: dict[int, tuple[str, int, int] | tuple[str, int, int, dict[str, int]]] = {
254: ("NewSubfileType", LONG, 1),
255: ("SubfileType", SHORT, 1),
256: ("ImageWidth", LONG, 1),
257: ("ImageLength", LONG, 1),
258: ("BitsPerSample", SHORT, 0),
259: (
"Compression",
SHORT,
1,
{
"Uncompressed": 1,
"CCITT 1d": 2,
"Group 3 Fax": 3,
"Group 4 Fax": 4,
"LZW": 5,
"JPEG": 6,
"PackBits": 32773,
},
),
262: (
"PhotometricInterpretation",
SHORT,
1,
{
"WhiteIsZero": 0,
"BlackIsZero": 1,
"RGB": 2,
"RGB Palette": 3,
"Transparency Mask": 4,
"CMYK": 5,
"YCbCr": 6,
"CieLAB": 8,
"CFA": 32803, # TIFF/EP, Adobe DNG
"LinearRaw": 32892, # Adobe DNG
},
),
263: ("Threshholding", SHORT, 1),
264: ("CellWidth", SHORT, 1),
265: ("CellLength", SHORT, 1),
266: ("FillOrder", SHORT, 1),
269: ("DocumentName", ASCII, 1),
270: ("ImageDescription", ASCII, 1),
271: ("Make", ASCII, 1),
272: ("Model", ASCII, 1),
273: ("StripOffsets", LONG, 0),
274: ("Orientation", SHORT, 1),
277: ("SamplesPerPixel", SHORT, 1),
278: ("RowsPerStrip", LONG, 1),
279: ("StripByteCounts", LONG, 0),
280: ("MinSampleValue", SHORT, 0),
281: ("MaxSampleValue", SHORT, 0),
282: ("XResolution", RATIONAL, 1),
283: ("YResolution", RATIONAL, 1),
284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}),
285: ("PageName", ASCII, 1),
286: ("XPosition", RATIONAL, 1),
287: ("YPosition", RATIONAL, 1),
288: ("FreeOffsets", LONG, 1),
289: ("FreeByteCounts", LONG, 1),
290: ("GrayResponseUnit", SHORT, 1),
291: ("GrayResponseCurve", SHORT, 0),
292: ("T4Options", LONG, 1),
293: ("T6Options", LONG, 1),
296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}),
297: ("PageNumber", SHORT, 2),
301: ("TransferFunction", SHORT, 0),
305: ("Software", ASCII, 1),
306: ("DateTime", ASCII, 1),
315: ("Artist", ASCII, 1),
316: ("HostComputer", ASCII, 1),
317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}),
318: ("WhitePoint", RATIONAL, 2),
319: ("PrimaryChromaticities", RATIONAL, 6),
320: ("ColorMap", SHORT, 0),
321: ("HalftoneHints", SHORT, 2),
322: ("TileWidth", LONG, 1),
323: ("TileLength", LONG, 1),
324: ("TileOffsets", LONG, 0),
325: ("TileByteCounts", LONG, 0),
330: ("SubIFDs", LONG, 0),
332: ("InkSet", SHORT, 1),
333: ("InkNames", ASCII, 1),
334: ("NumberOfInks", SHORT, 1),
336: ("DotRange", SHORT, 0),
337: ("TargetPrinter", ASCII, 1),
338: ("ExtraSamples", SHORT, 0),
339: ("SampleFormat", SHORT, 0),
340: ("SMinSampleValue", DOUBLE, 0),
341: ("SMaxSampleValue", DOUBLE, 0),
342: ("TransferRange", SHORT, 6),
347: ("JPEGTables", UNDEFINED, 1),
# obsolete JPEG tags
512: ("JPEGProc", SHORT, 1),
513: ("JPEGInterchangeFormat", LONG, 1),
514: ("JPEGInterchangeFormatLength", LONG, 1),
515: ("JPEGRestartInterval", SHORT, 1),
517: ("JPEGLosslessPredictors", SHORT, 0),
518: ("JPEGPointTransforms", SHORT, 0),
519: ("JPEGQTables", LONG, 0),
520: ("JPEGDCTables", LONG, 0),
521: ("JPEGACTables", LONG, 0),
529: ("YCbCrCoefficients", RATIONAL, 3),
530: ("YCbCrSubSampling", SHORT, 2),
531: ("YCbCrPositioning", SHORT, 1),
532: ("ReferenceBlackWhite", RATIONAL, 6),
700: ("XMP", BYTE, 0),
# Four private SGI tags
32995: ("Matteing", SHORT, 1),
32996: ("DataType", SHORT, 0),
32997: ("ImageDepth", LONG, 1),
32998: ("TileDepth", LONG, 1),
33432: ("Copyright", ASCII, 1),
33723: ("IptcNaaInfo", UNDEFINED, 1),
34377: ("PhotoshopInfo", BYTE, 0),
# FIXME add more tags here
34665: ("ExifIFD", LONG, 1),
34675: ("ICCProfile", UNDEFINED, 1),
34853: ("GPSInfoIFD", LONG, 1),
36864: ("ExifVersion", UNDEFINED, 1),
37724: ("ImageSourceData", UNDEFINED, 1),
40965: ("InteroperabilityIFD", LONG, 1),
41730: ("CFAPattern", UNDEFINED, 1),
# MPInfo
45056: ("MPFVersion", UNDEFINED, 1),
45057: ("NumberOfImages", LONG, 1),
45058: ("MPEntry", UNDEFINED, 1),
45059: ("ImageUIDList", UNDEFINED, 0), # UNDONE, check
45060: ("TotalFrames", LONG, 1),
45313: ("MPIndividualNum", LONG, 1),
45569: ("PanOrientation", LONG, 1),
45570: ("PanOverlap_H", RATIONAL, 1),
45571: ("PanOverlap_V", RATIONAL, 1),
45572: ("BaseViewpointNum", LONG, 1),
45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1),
45574: ("BaselineLength", RATIONAL, 1),
45575: ("VerticalDivergence", SIGNED_RATIONAL, 1),
45576: ("AxisDistance_X", SIGNED_RATIONAL, 1),
45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1),
45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1),
45579: ("YawAngle", SIGNED_RATIONAL, 1),
45580: ("PitchAngle", SIGNED_RATIONAL, 1),
45581: ("RollAngle", SIGNED_RATIONAL, 1),
40960: ("FlashPixVersion", UNDEFINED, 1),
50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}),
50780: ("BestQualityScale", RATIONAL, 1),
50838: ("ImageJMetaDataByteCounts", LONG, 0), # Can be more than one
50839: ("ImageJMetaData", UNDEFINED, 1), # see Issue #2006
}
_tags_v2_groups = {
# ExifIFD
34665: {
36864: ("ExifVersion", UNDEFINED, 1),
40960: ("FlashPixVersion", UNDEFINED, 1),
40965: ("InteroperabilityIFD", LONG, 1),
41730: ("CFAPattern", UNDEFINED, 1),
},
# GPSInfoIFD
34853: {
0: ("GPSVersionID", BYTE, 4),
1: ("GPSLatitudeRef", ASCII, 2),
2: ("GPSLatitude", RATIONAL, 3),
3: ("GPSLongitudeRef", ASCII, 2),
4: ("GPSLongitude", RATIONAL, 3),
5: ("GPSAltitudeRef", BYTE, 1),
6: ("GPSAltitude", RATIONAL, 1),
7: ("GPSTimeStamp", RATIONAL, 3),
8: ("GPSSatellites", ASCII, 0),
9: ("GPSStatus", ASCII, 2),
10: ("GPSMeasureMode", ASCII, 2),
11: ("GPSDOP", RATIONAL, 1),
12: ("GPSSpeedRef", ASCII, 2),
13: ("GPSSpeed", RATIONAL, 1),
14: ("GPSTrackRef", ASCII, 2),
15: ("GPSTrack", RATIONAL, 1),
16: ("GPSImgDirectionRef", ASCII, 2),
17: ("GPSImgDirection", RATIONAL, 1),
18: ("GPSMapDatum", ASCII, 0),
19: ("GPSDestLatitudeRef", ASCII, 2),
20: ("GPSDestLatitude", RATIONAL, 3),
21: ("GPSDestLongitudeRef", ASCII, 2),
22: ("GPSDestLongitude", RATIONAL, 3),
23: ("GPSDestBearingRef", ASCII, 2),
24: ("GPSDestBearing", RATIONAL, 1),
25: ("GPSDestDistanceRef", ASCII, 2),
26: ("GPSDestDistance", RATIONAL, 1),
27: ("GPSProcessingMethod", UNDEFINED, 0),
28: ("GPSAreaInformation", UNDEFINED, 0),
29: ("GPSDateStamp", ASCII, 11),
30: ("GPSDifferential", SHORT, 1),
},
# InteroperabilityIFD
40965: {1: ("InteropIndex", ASCII, 1), 2: ("InteropVersion", UNDEFINED, 1)},
}
# Legacy Tags structure
# these tags aren't included above, but were in the previous versions
TAGS: dict[int | tuple[int, int], str] = {
347: "JPEGTables",
700: "XMP",
# Additional Exif Info
32932: "Wang Annotation",
33434: "ExposureTime",
33437: "FNumber",
33445: "MD FileTag",
33446: "MD ScalePixel",
33447: "MD ColorTable",
33448: "MD LabName",
33449: "MD SampleInfo",
33450: "MD PrepDate",
33451: "MD PrepTime",
33452: "MD FileUnits",
33550: "ModelPixelScaleTag",
33723: "IptcNaaInfo",
33918: "INGR Packet Data Tag",
33919: "INGR Flag Registers",
33920: "IrasB Transformation Matrix",
33922: "ModelTiepointTag",
34264: "ModelTransformationTag",
34377: "PhotoshopInfo",
34735: "GeoKeyDirectoryTag",
34736: "GeoDoubleParamsTag",
34737: "GeoAsciiParamsTag",
34850: "ExposureProgram",
34852: "SpectralSensitivity",
34855: "ISOSpeedRatings",
34856: "OECF",
34864: "SensitivityType",
34865: "StandardOutputSensitivity",
34866: "RecommendedExposureIndex",
34867: "ISOSpeed",
34868: "ISOSpeedLatitudeyyy",
34869: "ISOSpeedLatitudezzz",
34908: "HylaFAX FaxRecvParams",
34909: "HylaFAX FaxSubAddress",
34910: "HylaFAX FaxRecvTime",
36864: "ExifVersion",
36867: "DateTimeOriginal",
36868: "DateTimeDigitized",
37121: "ComponentsConfiguration",
37122: "CompressedBitsPerPixel",
37724: "ImageSourceData",
37377: "ShutterSpeedValue",
37378: "ApertureValue",
37379: "BrightnessValue",
37380: "ExposureBiasValue",
37381: "MaxApertureValue",
37382: "SubjectDistance",
37383: "MeteringMode",
37384: "LightSource",
37385: "Flash",
37386: "FocalLength",
37396: "SubjectArea",
37500: "MakerNote",
37510: "UserComment",
37520: "SubSec",
37521: "SubSecTimeOriginal",
37522: "SubsecTimeDigitized",
40960: "FlashPixVersion",
40961: "ColorSpace",
40962: "PixelXDimension",
40963: "PixelYDimension",
40964: "RelatedSoundFile",
40965: "InteroperabilityIFD",
41483: "FlashEnergy",
41484: "SpatialFrequencyResponse",
41486: "FocalPlaneXResolution",
41487: "FocalPlaneYResolution",
41488: "FocalPlaneResolutionUnit",
41492: "SubjectLocation",
41493: "ExposureIndex",
41495: "SensingMethod",
41728: "FileSource",
41729: "SceneType",
41730: "CFAPattern",
41985: "CustomRendered",
41986: "ExposureMode",
41987: "WhiteBalance",
41988: "DigitalZoomRatio",
41989: "FocalLengthIn35mmFilm",
41990: "SceneCaptureType",
41991: "GainControl",
41992: "Contrast",
41993: "Saturation",
41994: "Sharpness",
41995: "DeviceSettingDescription",
41996: "SubjectDistanceRange",
42016: "ImageUniqueID",
42032: "CameraOwnerName",
42033: "BodySerialNumber",
42034: "LensSpecification",
42035: "LensMake",
42036: "LensModel",
42037: "LensSerialNumber",
42112: "GDAL_METADATA",
42113: "GDAL_NODATA",
42240: "Gamma",
50215: "Oce Scanjob Description",
50216: "Oce Application Selector",
50217: "Oce Identification Number",
50218: "Oce ImageLogic Characteristics",
# Adobe DNG
50706: "DNGVersion",
50707: "DNGBackwardVersion",
50708: "UniqueCameraModel",
50709: "LocalizedCameraModel",
50710: "CFAPlaneColor",
50711: "CFALayout",
50712: "LinearizationTable",
50713: "BlackLevelRepeatDim",
50714: "BlackLevel",
50715: "BlackLevelDeltaH",
50716: "BlackLevelDeltaV",
50717: "WhiteLevel",
50718: "DefaultScale",
50719: "DefaultCropOrigin",
50720: "DefaultCropSize",
50721: "ColorMatrix1",
50722: "ColorMatrix2",
50723: "CameraCalibration1",
50724: "CameraCalibration2",
50725: "ReductionMatrix1",
50726: "ReductionMatrix2",
50727: "AnalogBalance",
50728: "AsShotNeutral",
50729: "AsShotWhiteXY",
50730: "BaselineExposure",
50731: "BaselineNoise",
50732: "BaselineSharpness",
50733: "BayerGreenSplit",
50734: "LinearResponseLimit",
50735: "CameraSerialNumber",
50736: "LensInfo",
50737: "ChromaBlurRadius",
50738: "AntiAliasStrength",
50740: "DNGPrivateData",
50778: "CalibrationIlluminant1",
50779: "CalibrationIlluminant2",
50784: "Alias Layer Metadata",
}
TAGS_V2: dict[int, TagInfo] = {}
TAGS_V2_GROUPS: dict[int, dict[int, TagInfo]] = {}
def _populate() -> None:
for k, v in _tags_v2.items():
# Populate legacy structure.
TAGS[k] = v[0]
if len(v) == 4:
for sk, sv in v[3].items():
TAGS[(k, sv)] = sk
TAGS_V2[k] = TagInfo(k, *v)
for group, tags in _tags_v2_groups.items():
TAGS_V2_GROUPS[group] = {k: TagInfo(k, *v) for k, v in tags.items()}
_populate()
##
# Map type numbers to type names -- defined in ImageFileDirectory.
TYPES: dict[int, str] = {}
#
# These tags are handled by default in libtiff, without
# adding to the custom dictionary. From tif_dir.c, searching for
# case TIFFTAG in the _TIFFVSetField function:
# Line: item.
# 148: case TIFFTAG_SUBFILETYPE:
# 151: case TIFFTAG_IMAGEWIDTH:
# 154: case TIFFTAG_IMAGELENGTH:
# 157: case TIFFTAG_BITSPERSAMPLE:
# 181: case TIFFTAG_COMPRESSION:
# 202: case TIFFTAG_PHOTOMETRIC:
# 205: case TIFFTAG_THRESHHOLDING:
# 208: case TIFFTAG_FILLORDER:
# 214: case TIFFTAG_ORIENTATION:
# 221: case TIFFTAG_SAMPLESPERPIXEL:
# 228: case TIFFTAG_ROWSPERSTRIP:
# 238: case TIFFTAG_MINSAMPLEVALUE:
# 241: case TIFFTAG_MAXSAMPLEVALUE:
# 244: case TIFFTAG_SMINSAMPLEVALUE:
# 247: case TIFFTAG_SMAXSAMPLEVALUE:
# 250: case TIFFTAG_XRESOLUTION:
# 256: case TIFFTAG_YRESOLUTION:
# 262: case TIFFTAG_PLANARCONFIG:
# 268: case TIFFTAG_XPOSITION:
# 271: case TIFFTAG_YPOSITION:
# 274: case TIFFTAG_RESOLUTIONUNIT:
# 280: case TIFFTAG_PAGENUMBER:
# 284: case TIFFTAG_HALFTONEHINTS:
# 288: case TIFFTAG_COLORMAP:
# 294: case TIFFTAG_EXTRASAMPLES:
# 298: case TIFFTAG_MATTEING:
# 305: case TIFFTAG_TILEWIDTH:
# 316: case TIFFTAG_TILELENGTH:
# 327: case TIFFTAG_TILEDEPTH:
# 333: case TIFFTAG_DATATYPE:
# 344: case TIFFTAG_SAMPLEFORMAT:
# 361: case TIFFTAG_IMAGEDEPTH:
# 364: case TIFFTAG_SUBIFD:
# 376: case TIFFTAG_YCBCRPOSITIONING:
# 379: case TIFFTAG_YCBCRSUBSAMPLING:
# 383: case TIFFTAG_TRANSFERFUNCTION:
# 389: case TIFFTAG_REFERENCEBLACKWHITE:
# 393: case TIFFTAG_INKNAMES:
# Following pseudo-tags are also handled by default in libtiff:
# TIFFTAG_JPEGQUALITY 65537
# some of these are not in our TAGS_V2 dict and were included from tiff.h
# This list also exists in encode.c
LIBTIFF_CORE = {
255,
256,
257,
258,
259,
262,
263,
266,
274,
277,
278,
280,
281,
340,
341,
282,
283,
284,
286,
287,
296,
297,
321,
320,
338,
32995,
322,
323,
32998,
32996,
339,
32997,
330,
531,
530,
301,
532,
333,
# as above
269, # this has been in our tests forever, and works
65537,
}
LIBTIFF_CORE.remove(255) # We don't have support for subfiletypes
LIBTIFF_CORE.remove(322) # We don't have support for writing tiled images with libtiff
LIBTIFF_CORE.remove(323) # Tiled images
LIBTIFF_CORE.remove(333) # Ink Names either
# Note to advanced users: There may be combinations of these
# parameters and values that when added properly, will work and
# produce valid tiff images that may work in your application.
# It is safe to add and remove tags from this set from Pillow's point
# of view so long as you test against libtiff.
| TagInfo |
python | bokeh__bokeh | src/bokeh/models/graphs.py | {
"start": 5330,
"end": 5790
} | class ____(GraphHitTestPolicy):
'''
With the ``EdgesAndLinkedNodes`` policy, inspection or selection of graph
edges will result in the inspection or selection of the edge and of the
linked graph nodes. There is no direct selection or inspection of graph
nodes.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| EdgesAndLinkedNodes |
python | pytorch__pytorch | torch/testing/_internal/logging_tensor.py | {
"start": 3113,
"end": 3303
} | class ____(LoggingTensor):
context = torch.overrides.enable_reentrant_dispatch
# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list
| LoggingTensorReentrant |
python | apache__airflow | airflow-core/src/airflow/traces/tracer.py | {
"start": 1976,
"end": 2106
} | class ____:
"""If no Tracer is configured, EmptyContext is used as a fallback."""
trace_id = 1
span_id = 1
| EmptyContext |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/depends_on_virtual_with_abi/package.py | {
"start": 217,
"end": 504
} | class ____(Package):
"""
This has a virtual dependency on `virtual-with-abi`, mostly for testing
automatic splicing of providers.
"""
homepage = "https://www.example.com"
has_code = False
version("1.0")
depends_on("virtual-with-abi")
| DependsOnVirtualWithAbi |
python | pytorch__pytorch | torch/distributed/checkpoint/_extension.py | {
"start": 2762,
"end": 6336
} | class ____(StreamTransformExtension):
@staticmethod
def is_available() -> bool:
return zstandard is not None or pyzstd is not None
@staticmethod
# pyrefly: ignore [bad-override]
def from_descriptor(version: str) -> "ZStandard":
if version.partition(".")[0] != "1":
raise ValueError(f"Unknown extension {version=}")
if not ZStandard.is_available():
raise ValueError(
f"Stream with ZStandard compression cannot be processed because "
f"no module named '{zstandard_module_name}' or '{pyzstd_module_name}'"
)
return ZStandard()
@staticmethod
def registry_name() -> str:
return "stream.zstd"
def __init__(self) -> None:
super().__init__()
if not ZStandard.is_available():
raise ValueError(
f"ZStandard extension is unavailable because no module named '{zstandard_module_name}' or '{pyzstd_module_name}'"
)
def get_descriptor(self) -> str:
return f"{self.registry_name()}/1"
def transform_to(self, output: IO[bytes]) -> IO[bytes]:
if zstandard is not None:
compressor = zstandard.ZstdCompressor() # type: ignore[union-attr]
return compressor.stream_writer(output)
class Writer(io.RawIOBase):
def __init__(self, output: IO[bytes]) -> None:
self.output = output
self.compressor = pyzstd.ZstdCompressor() # type: ignore[union-attr]
def writeable(self) -> bool:
return True
def write(self, b: Buffer) -> Optional[int]:
outdata = self.compressor.compress(b)
if outdata:
self.output.write(outdata)
return len(memoryview(b))
def flush(self) -> None:
outdata = self.compressor.flush()
if outdata:
self.output.write(outdata)
self.output.flush()
return cast(IO[bytes], Writer(output))
def transform_from(self, input: IO[bytes]) -> IO[bytes]:
if zstandard is not None:
decompressor = zstandard.ZstdDecompressor() # type: ignore[union-attr]
return decompressor.stream_reader(input)
class Reader(io.RawIOBase):
def __init__(self, input: IO[bytes]) -> None:
self.input = input
self.decompressor = pyzstd.EndlessZstdDecompressor() # type: ignore[union-attr]
def readable(self) -> bool:
return True
def readinto(self, b: Buffer) -> Optional[int]:
# This needs to read enough so it can decompress
# something so the output doesn't look like EOF. This
# means reading at least one block. The max block
# size is 128KB, so we read that plus some
# overhead to be sure.
if self.decompressor.needs_input:
indata = self.input.read((128 + 6) * 1024)
else:
indata = b""
bview = memoryview(b)
blen = len(bview)
outdata = self.decompressor.decompress(indata, blen)
if outdata is None:
return None
count = len(outdata)
bview[:count] = outdata
return count
def seekable(self) -> bool:
return False
return cast(IO[bytes], Reader(input))
| ZStandard |
python | ray-project__ray | python/setup.py | {
"start": 2332,
"end": 28436
} | class ____:
def __init__(
self, type: SetupType, name: str, description: str, build_type: BuildType
):
self.type: SetupType = type
self.name: str = name
version = find_version("ray", "_version.py")
# add .dbg suffix if debug mode is on.
if build_type == BuildType.DEBUG:
self.version: str = f"{version}+dbg"
elif build_type == BuildType.ASAN:
self.version: str = f"{version}+asan"
elif build_type == BuildType.TSAN:
self.version: str = f"{version}+tsan"
elif build_type == BuildType.DEPS_ONLY:
self.version: str = DEPS_ONLY_VERSION
else:
self.version = version
self.description: str = description
self.build_type: BuildType = build_type
self.files_to_include: list = []
self.install_requires: list = []
self.extras: dict = {}
def get_packages(self):
if self.type == SetupType.RAY and self.build_type != BuildType.DEPS_ONLY:
return setuptools.find_packages(exclude=("tests", "*.tests", "*.tests.*"))
else:
return []
build_type = os.getenv("RAY_DEBUG_BUILD")
if build_type == "debug":
BUILD_TYPE = BuildType.DEBUG
elif build_type == "asan":
BUILD_TYPE = BuildType.ASAN
elif build_type == "tsan":
BUILD_TYPE = BuildType.TSAN
elif build_type == "deps-only":
BUILD_TYPE = BuildType.DEPS_ONLY
else:
BUILD_TYPE = BuildType.DEFAULT
if os.getenv("RAY_INSTALL_CPP") == "1":
# "ray-cpp" wheel package.
setup_spec = SetupSpec(
SetupType.RAY_CPP,
"ray-cpp",
"A subpackage of Ray which provides the Ray C++ API.",
BUILD_TYPE,
)
else:
# "ray" primary wheel package.
setup_spec = SetupSpec(
SetupType.RAY,
"ray",
"Ray provides a simple, "
"universal API for building distributed applications.",
BUILD_TYPE,
)
RAY_EXTRA_CPP = True
# Disable extra cpp for the development versions.
if "dev" in setup_spec.version or not BUILD_CPP:
RAY_EXTRA_CPP = False
# Ideally, we could include these files by putting them in a
# MANIFEST.in or using the package_data argument to setup, but the
# MANIFEST.in gets applied at the very beginning when setup.py runs
# before these files have been created, so we have to move the files
# manually.
# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.
ray_files = [
"ray/_raylet" + pyd_suffix,
"ray/core/src/ray/gcs/gcs_server" + exe_suffix,
"ray/core/src/ray/raylet/raylet" + exe_suffix,
]
if sys.platform == "linux":
ray_files.append("ray/core/libjemalloc.so")
if BUILD_JAVA or os.path.exists(os.path.join(ROOT_DIR, "ray/jars/ray_dist.jar")):
ray_files.append("ray/jars/ray_dist.jar")
if setup_spec.type == SetupType.RAY_CPP:
setup_spec.files_to_include += ["ray/cpp/default_worker" + exe_suffix]
# C++ API library and project template files.
setup_spec.files_to_include += [
os.path.join(dirpath, filename)
for dirpath, dirnames, filenames in os.walk("ray/cpp")
for filename in filenames
]
# These are the directories where automatically generated Python protobuf
# bindings are created.
generated_python_directories = [
"ray/core/generated",
"ray/serve/generated",
]
ray_files.append("ray/nightly-wheels.yaml")
# Autoscaler files.
ray_files += [
"ray/autoscaler/aws/defaults.yaml",
"ray/autoscaler/aws/cloudwatch/prometheus.yml",
"ray/autoscaler/aws/cloudwatch/ray_prometheus_waiter.sh",
"ray/autoscaler/azure/defaults.yaml",
"ray/autoscaler/spark/defaults.yaml",
"ray/autoscaler/_private/readonly/defaults.yaml",
"ray/autoscaler/_private/_azure/azure-vm-template.json",
"ray/autoscaler/_private/_azure/azure-config-template.json",
"ray/autoscaler/gcp/defaults.yaml",
"ray/autoscaler/local/defaults.yaml",
"ray/autoscaler/vsphere/defaults.yaml",
"ray/autoscaler/ray-schema.json",
]
# Dashboard files.
ray_files += [
os.path.join(dirpath, filename)
for dirpath, dirnames, filenames in os.walk("ray/dashboard/client/build")
for filename in filenames
]
# Dashboard metrics files.
ray_files += [
os.path.join(dirpath, filename)
for dirpath, dirnames, filenames in os.walk("ray/dashboard/modules/metrics/export")
for filename in filenames
]
ray_files += [
os.path.join(dirpath, filename)
for dirpath, dirnames, filenames in os.walk(
"ray/dashboard/modules/metrics/dashboards"
)
for filename in filenames
if filename.endswith(".json")
]
# html templates for notebook integration
ray_files += [
p.as_posix() for p in pathlib.Path("ray/widgets/templates/").glob("*.html.j2")
]
# If you're adding dependencies for ray extras, please
# also update the matching section of requirements/requirements.txt
# in this directory
if setup_spec.type == SetupType.RAY:
pandas_dep = "pandas >= 1.3"
numpy_dep = "numpy >= 1.20"
pyarrow_deps = [
"pyarrow >= 9.0.0",
]
pydantic_dep = "pydantic!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3"
setup_spec.extras = {
"cgraph": [
"cupy-cuda12x; sys_platform != 'darwin'",
],
"client": [
# The Ray client needs a specific range of gRPC to work:
# Tracking issues: https://github.com/grpc/grpc/issues/33714
"grpcio != 1.56.0; sys_platform == 'darwin'",
"grpcio",
],
"data": [
numpy_dep,
pandas_dep,
*pyarrow_deps,
"fsspec",
],
"default": [
# If adding dependencies necessary to launch the dashboard api server,
# please add it to python/ray/dashboard/optional_deps.py as well.
"aiohttp >= 3.7",
"aiohttp_cors",
"colorful",
"py-spy >= 0.2.0; python_version < '3.12'", # noqa:E501
"py-spy >= 0.4.0; python_version >= '3.12'", # noqa:E501
"requests",
"grpcio >= 1.32.0; python_version < '3.10'", # noqa:E501
"grpcio >= 1.42.0; python_version >= '3.10'", # noqa:E501
"opencensus",
"opentelemetry-sdk >= 1.30.0",
"opentelemetry-exporter-prometheus",
"opentelemetry-proto",
pydantic_dep,
"prometheus_client >= 0.7.1",
"smart_open",
"virtualenv >=20.0.24, !=20.21.1", # For pip runtime env.
],
"observability": [
"memray; sys_platform != 'win32'",
],
"serve": [
"uvicorn[standard]",
"requests",
"starlette",
"fastapi",
"watchfiles",
],
"tune": [
"pandas",
# TODO: Remove pydantic dependency from tune once tune doesn't import train
pydantic_dep,
"tensorboardX>=1.9",
"requests",
*pyarrow_deps,
"fsspec",
],
}
# Both "adag" and "cgraph" are for Compiled Graphs.
# "adag" is deprecated and will be removed in the future.
setup_spec.extras["adag"] = list(setup_spec.extras["cgraph"])
# Ray Serve depends on the Ray dashboard components.
setup_spec.extras["serve"] = list(
set(setup_spec.extras["serve"] + setup_spec.extras["default"])
)
# Ensure gRPC library exists for Ray Serve gRPC support.
setup_spec.extras["serve-grpc"] = list(
set(
setup_spec.extras["serve"]
+ [
"grpcio >= 1.32.0; python_version < '3.10'", # noqa:E501
"grpcio >= 1.42.0; python_version >= '3.10'", # noqa:E501
"pyOpenSSL",
]
)
)
# This is required for supporting the asynchronous inference, allowing the ray serve applications to
# allow asynchronously execute their code, via the use of celery task processor.
setup_spec.extras["serve-async-inference"] = list(
set(
setup_spec.extras["serve"]
+ [
"celery",
]
)
)
if RAY_EXTRA_CPP:
setup_spec.extras["cpp"] = ["ray-cpp==" + setup_spec.version]
setup_spec.extras["rllib"] = setup_spec.extras["tune"] + [
"dm_tree",
"gymnasium==1.1.1",
"lz4",
"ormsgpack==1.7.0",
"pyyaml",
"scipy",
]
setup_spec.extras["train"] = setup_spec.extras["tune"] + [pydantic_dep]
# Ray AI Runtime should encompass Data, Tune, and Serve.
setup_spec.extras["air"] = list(
set(
setup_spec.extras["tune"]
+ setup_spec.extras["data"]
+ setup_spec.extras["train"]
+ setup_spec.extras["serve"]
)
)
# NOTE: While we keep ray[all] for compatibility, you probably
# shouldn't use it because it contains too many dependencies
# and no deployment needs all of them. Instead you should list
# the extras you actually need, see
# https://docs.ray.io/en/latest/ray-overview/installation.html#from-wheels
#
# "all" will not include "cpp" anymore. It is a big depedendency
# that most people do not need.
#
# Instead, when cpp is supported, we add a "all-cpp".
setup_spec.extras["all"] = list(
set(
chain.from_iterable([v for k, v in setup_spec.extras.items() if k != "cpp"])
)
)
if RAY_EXTRA_CPP:
setup_spec.extras["all-cpp"] = list(
set(setup_spec.extras["all"] + setup_spec.extras["cpp"])
)
# "llm" is not included in all, by design. vllm's dependency set is very
# large and specific, will likely run into dependency conflicts with other
# ML libraries. As a result, it is an "extra-extra" that is not part
# ray[all].
#
# ray[llm] depends on ray[data].
#
# Keep this in sync with python/requirements/llm/llm-requirements.txt
#
setup_spec.extras["llm"] = list(
set(
[
"vllm[audio]>=0.11.0",
"nixl>=0.6.1",
# TODO(llm): remove after next vLLM version bump
"transformers>=4.57.3",
"jsonref>=1.1.0",
"jsonschema",
"ninja",
# async-timeout is a backport of asyncio.timeout for python < 3.11
"async-timeout; python_version < '3.11'",
"typer",
"meson",
"pybind11",
"hf_transfer",
]
+ setup_spec.extras["data"]
+ setup_spec.extras["serve"]
)
)
# These are the main dependencies for users of ray. This list
# should be carefully curated. If you change it, please reflect
# the change in the matching section of requirements/requirements.txt
#
# NOTE: if you add any unbounded dependency, please also update
# install-core-prerelease-dependencies.sh so we can test
# new releases candidates.
if setup_spec.type == SetupType.RAY:
setup_spec.install_requires = [
"click>=7.0",
"filelock",
"jsonschema",
"msgpack >= 1.0.0, < 2.0.0",
"packaging",
"protobuf>=3.20.3",
"pyyaml",
"requests",
]
def is_native_windows_or_msys():
"""Check to see if we are running on native Windows,
but NOT WSL (which is seen as Linux)."""
return sys.platform == "msys" or sys.platform == "win32"
def is_invalid_windows_platform():
# 'GCC' check is how you detect MinGW:
# https://github.com/msys2/MINGW-packages/blob/abd06ca92d876b9db05dd65f27d71c4ebe2673a9/mingw-w64-python2/0410-MINGW-build-extensions-with-GCC.patch#L53
platform = sys.platform
ver = sys.version
return platform == "msys" or (platform == "win32" and ver and "GCC" in ver)
def _find_bazel_bin():
candidates = []
# User specified bazel location.
bazel_path = os.getenv("BAZEL_PATH")
if bazel_path:
candidates.append(bazel_path)
# Default bazel locations; prefers bazelisk.
candidates.extend(["bazelisk", "bazel"])
if sys.platform == "win32":
mingw_dir = os.getenv("MINGW_DIR")
if mingw_dir:
candidates.append(os.path.join(mingw_dir, "bin", "bazel.exe"))
else:
home_dir = os.path.expanduser("~")
candidates.append(os.path.join(home_dir, "bin", "bazel"))
for bazel in candidates:
bazel_bin = shutil.which(bazel)
if bazel_bin:
return bazel_bin
raise RuntimeError("Cannot find bazel in PATH")
def patch_isdir():
"""
Python on Windows is having hard times at telling if a symlink is
a directory - it can "guess" wrong at times, which bites when
finding packages. Replace with a fixed version which unwraps links first.
"""
orig_isdir = os.path.isdir
def fixed_isdir(path):
while os.path.islink(path):
try:
link = os.readlink(path)
except OSError:
break
path = os.path.abspath(os.path.join(os.path.dirname(path), link))
return orig_isdir(path)
os.path.isdir = fixed_isdir
def replace_symlinks_with_junctions():
"""
Per default Windows requires admin access to create symlinks, while
junctions (which behave similarly) can be created by users.
This function replaces symlinks (which might be broken when checked
out without admin rights) with junctions so Ray can be built both
with and without admin access.
"""
assert is_native_windows_or_msys()
# Update this list if new symlinks are introduced to the source tree
_LINKS = {
r"ray\rllib": "../../rllib",
}
root_dir = os.path.dirname(__file__)
for link, default in _LINKS.items():
path = os.path.join(root_dir, link)
try:
out = subprocess.check_output(
"DIR /A:LD /B", shell=True, cwd=os.path.dirname(path)
)
except subprocess.CalledProcessError:
out = b""
if os.path.basename(path) in out.decode("utf8").splitlines():
logger.info(f"'{link}' is already converted to junction point")
else:
logger.info(f"Converting '{link}' to junction point...")
if os.path.isfile(path):
with open(path) as inp:
target = inp.read()
os.unlink(path)
elif os.path.isdir(path):
target = default
try:
# unlink() works on links as well as on regular files,
# and links to directories are considered directories now
os.unlink(path)
except OSError as err:
# On Windows attempt to unlink a regular directory results
# in a PermissionError with errno set to errno.EACCES.
if err.errno != errno.EACCES:
raise
# For regular directories deletion is done with rmdir call.
os.rmdir(path)
else:
raise ValueError(f"Unexpected type of entry: '{path}'")
target = os.path.abspath(os.path.join(os.path.dirname(path), target))
logger.info("Setting {} -> {}".format(link, target))
subprocess.check_call(
f'MKLINK /J "{os.path.basename(link)}" "{target}"',
shell=True,
cwd=os.path.dirname(path),
)
if is_conda_forge_build and is_native_windows_or_msys():
# Automated replacements should only happen in automatic build
# contexts for now
patch_isdir()
replace_symlinks_with_junctions()
def build(build_python, build_java, build_cpp, build_redis):
if tuple(sys.version_info[:2]) not in SUPPORTED_PYTHONS:
msg = (
"Detected Python version {}, which is not supported. "
"Only Python {} are supported."
).format(
".".join(map(str, sys.version_info[:2])),
", ".join(".".join(map(str, v)) for v in SUPPORTED_PYTHONS),
)
raise RuntimeError(msg)
if is_invalid_windows_platform():
msg = (
"Please use official native CPython on Windows,"
" not Cygwin/MSYS/MSYS2/MinGW/etc.\n"
+ "Detected: {}\n at: {!r}".format(sys.version, sys.executable)
)
raise OSError(msg)
# Vendor thirdparty packages.
#
# TODO(ray-core, ray-ci): the version of these vendored packages should be
# pinned, so that the build is reproducible.
if not os.getenv("SKIP_THIRDPARTY_INSTALL_CONDA_FORGE"):
pip_packages = ["psutil", "colorama"]
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
"-q",
"--target=" + os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR),
]
+ pip_packages,
env=dict(os.environ, CC="gcc"),
)
# runtime env agent dependenceis
runtime_env_agent_pip_packages = ["aiohttp"]
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
"-q",
"--target="
+ os.path.join(ROOT_DIR, RUNTIME_ENV_AGENT_THIRDPARTY_SUBDIR),
]
+ runtime_env_agent_pip_packages
)
bazel_targets = []
if build_python:
bazel_targets.append("//:gen_ray_pkg")
if build_cpp:
bazel_targets.append("//cpp:gen_ray_cpp_pkg")
if build_java:
bazel_targets.append("//java:gen_ray_java_pkg")
if build_redis:
bazel_targets.append("//:gen_redis_pkg")
if not bazel_targets:
return
bazel_env = os.environ.copy()
bazel_env["PYTHON3_BIN_PATH"] = sys.executable
if is_native_windows_or_msys():
SHELL = bazel_env.get("SHELL")
if SHELL:
bazel_env.setdefault("BAZEL_SH", os.path.normpath(SHELL))
BAZEL_SH = bazel_env.get("BAZEL_SH", "")
SYSTEMROOT = os.getenv("SystemRoot")
wsl_bash = os.path.join(SYSTEMROOT, "System32", "bash.exe")
if (not BAZEL_SH) and SYSTEMROOT and os.path.isfile(wsl_bash):
msg = (
"You appear to have Bash from WSL,"
" which Bazel may invoke unexpectedly. "
"To avoid potential problems,"
" please explicitly set the {name!r}"
" environment variable for Bazel."
).format(name="BAZEL_SH")
raise RuntimeError(msg)
bazel_flags = ["--verbose_failures"]
if BAZEL_ARGS:
bazel_flags.extend(shlex.split(BAZEL_ARGS))
if BAZEL_LIMIT_CPUS:
n = int(BAZEL_LIMIT_CPUS) # the value must be an int
bazel_flags.append(f"--local_cpu_resources={n}")
warnings.warn(
"Setting BAZEL_LIMIT_CPUS is deprecated and will be removed in a future"
" version. Please use BAZEL_ARGS instead.",
FutureWarning,
)
if is_conda_forge_build:
src_dir = os.environ.get("SRC_DIR", False) or os.getcwd()
src_dir = os.path.abspath(src_dir)
if is_native_windows_or_msys():
drive = os.path.splitdrive(src_dir)[0] + "\\"
root_dir = os.path.join(drive, "bazel-root")
out_dir = os.path.join(drive, "b-o")
bazel_flags.append("--enable_runfiles=false")
else:
root_dir = os.path.join(src_dir, "..", "bazel-root")
out_dir = os.path.join(src_dir, "..", "b-o")
for d in (root_dir, out_dir):
if not os.path.exists(d):
os.makedirs(d)
bazel_precmd_flags = [
"--output_user_root=" + root_dir,
"--output_base=" + out_dir,
]
else:
bazel_precmd_flags = []
if sys.platform == "win32":
bazel_precmd_flags = ["--output_user_root=C:/tmp"]
if setup_spec.build_type == BuildType.DEBUG:
bazel_flags.append("--config=debug")
if setup_spec.build_type == BuildType.ASAN:
bazel_flags.append("--config=asan-build")
if setup_spec.build_type == BuildType.TSAN:
bazel_flags.append("--config=tsan")
bazel_bin = _find_bazel_bin()
# Build all things first.
subprocess.check_call(
[bazel_bin]
+ bazel_precmd_flags
+ ["build"]
+ bazel_flags
+ ["--"]
+ bazel_targets,
env=bazel_env,
)
# Then run the actions.
for action in bazel_targets:
subprocess.check_call(
[bazel_bin] + bazel_precmd_flags + ["run"] + bazel_flags + [action],
env=bazel_env,
)
def _walk_thirdparty_dir(directory):
file_list = []
for root, dirs, filenames in os.walk(directory):
# Exclude generated bytecode cache directories and tests directories
# from vendored packages.
for exclude_dir in ["__pycache__", "tests"]:
if exclude_dir in dirs:
dirs.remove(exclude_dir)
for name in filenames:
file_list.append(os.path.join(root, name))
return file_list
def copy_file(target_dir, filename, rootdir):
# TODO(rkn): This feels very brittle. It may not handle all cases. See
# https://github.com/apache/arrow/blob/master/python/setup.py for an
# example.
# File names can be absolute paths, e.g. from _walk_thirdparty_dir().
source = os.path.relpath(filename, rootdir)
destination = os.path.join(target_dir, source)
# Create the target directory if it doesn't already exist.
os.makedirs(os.path.dirname(destination), exist_ok=True)
if not os.path.exists(destination):
if sys.platform == "win32":
# Does not preserve file mode (needed to avoid read-only bit)
shutil.copyfile(source, destination, follow_symlinks=True)
else:
# Preserves file mode (needed to copy executable bit)
shutil.copy(source, destination, follow_symlinks=True)
return 1
return 0
def pip_run(build_ext):
if SKIP_BAZEL_BUILD or setup_spec.build_type == BuildType.DEPS_ONLY:
build(False, False, False, False)
else:
build(BUILD_CORE, BUILD_JAVA, BUILD_CPP, BUILD_REDIS)
if setup_spec.type == SetupType.RAY:
if setup_spec.build_type == BuildType.DEPS_ONLY:
setup_spec.files_to_include = []
return
setup_spec.files_to_include += ray_files
thirdparty_dir = os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR)
setup_spec.files_to_include += _walk_thirdparty_dir(thirdparty_dir)
runtime_env_agent_thirdparty_dir = os.path.join(
ROOT_DIR, RUNTIME_ENV_AGENT_THIRDPARTY_SUBDIR
)
setup_spec.files_to_include += _walk_thirdparty_dir(
runtime_env_agent_thirdparty_dir
)
# Copy over the autogenerated protobuf Python bindings.
for directory in generated_python_directories:
for filename in os.listdir(directory):
if filename[-3:] == ".py":
setup_spec.files_to_include.append(
os.path.join(directory, filename)
)
copied_files = 0
for filename in setup_spec.files_to_include:
copied_files += copy_file(build_ext.build_lib, filename, ROOT_DIR)
print("# of files copied to {}: {}".format(build_ext.build_lib, copied_files))
if __name__ == "__main__":
import setuptools
import setuptools.command.build_ext
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
return pip_run(self)
class BinaryDistribution(setuptools.Distribution):
def has_ext_modules(self):
return True
# Ensure no remaining lib files.
build_dir = os.path.join(ROOT_DIR, "build")
if os.path.isdir(build_dir):
shutil.rmtree(build_dir)
setuptools.setup(
name=setup_spec.name,
version=setup_spec.version,
author="Ray Team",
author_email="ray-dev@googlegroups.com",
description=(setup_spec.description),
long_description=io.open(
os.path.join(ROOT_DIR, os.path.pardir, "README.rst"), "r", encoding="utf-8"
).read(),
url="https://github.com/ray-project/ray",
keywords=(
"ray distributed parallel machine-learning hyperparameter-tuning"
"reinforcement-learning deep-learning serving python"
),
python_requires=">=3.9",
classifiers=[
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
],
packages=setup_spec.get_packages(),
cmdclass={"build_ext": build_ext},
distclass=( # Avoid building extensions for deps-only builds.
BinaryDistribution if setup_spec.build_type != BuildType.DEPS_ONLY else None
),
install_requires=setup_spec.install_requires,
setup_requires=["cython >= 3.0.12", "pip", "wheel"],
extras_require=setup_spec.extras,
entry_points={
"console_scripts": [
"ray=ray.scripts.scripts:main",
"tune=ray.tune.cli.scripts:cli",
"serve=ray.serve.scripts:cli",
]
},
package_data={
"ray": [
"includes/*.pxd",
"*.pxd",
"llm/_internal/serve/config_generator/base_configs/templates/*.yaml",
],
},
include_package_data=True,
exclude_package_data={
# Empty string means "any package".
# Therefore, exclude BUILD from every package:
"": ["BUILD", "BUILD.bazel"],
},
zip_safe=False,
license="Apache 2.0",
)
| SetupSpec |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 48523,
"end": 49644
} | class ____(BuiltinFunctionT):
_inputs = [("a", UINT256_T), ("b", UINT256_T), ("c", UINT256_T)]
_return_type = UINT256_T
def _try_fold(self, node):
validate_call_args(node, 3)
args = [i.get_folded_value() for i in node.args]
if isinstance(args[2], vy_ast.Int) and args[2].value == 0:
raise ZeroDivisionException("Modulo by 0", node.args[2])
for arg in args:
if not isinstance(arg, vy_ast.Int):
raise UnfoldableNode
value = self._eval_fn(args[0].value, args[1].value) % args[2].value
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
x, y, z = args
with x.cache_when_complex("x") as (b1, x):
with y.cache_when_complex("y") as (b2, y):
with z.cache_when_complex("z") as (b3, z):
ret = IRnode.from_list(
["seq", ["assert", z], [self._opcode, x, y, z]], typ=UINT256_T
)
return b1.resolve(b2.resolve(b3.resolve(ret)))
| _AddMulMod |
python | getsentry__sentry | src/sentry/utils/arroyo.py | {
"start": 867,
"end": 4359
} | class ____(Metrics):
"""
Metrics adapter for use with the Arroyo library. This allows consumer
metrics instrumented via the Arroyo library to be automatically recorded
and sent to Sentry's configured metrics backend.
"""
def __init__(
self,
backend: MetricsBackend,
name: str | None = None,
tags: Tags | None = None,
) -> None:
self.__backend = backend
self.__name = name
self.__tags = tags
def __merge_name(self, name: str) -> str:
if self.__name is None:
return name
else:
return f"{self.__name}.{name}"
def __merge_tags(self, tags: Tags | None) -> Tags | None:
if self.__tags is None:
return tags
elif tags is None:
return self.__tags
else:
return {**self.__tags, **tags}
def increment(
self,
name: str,
value: int | float = 1,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
# sentry metrics backend uses `incr` instead of `increment`
self.__backend.incr(
key=self.__merge_name(name),
amount=value,
tags=self.__merge_tags(tags),
stacklevel=stacklevel + 1,
sample_rate=1,
)
def gauge(
self,
name: str,
value: int | float,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
self.__backend.gauge(
key=self.__merge_name(name),
value=value,
tags=self.__merge_tags(tags),
stacklevel=stacklevel + 1,
sample_rate=1,
)
def timing(
self,
name: str,
value: int | float,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
self.__backend.timing(
key=self.__merge_name(name),
value=value,
tags=self.__merge_tags(tags),
stacklevel=stacklevel + 1,
sample_rate=1,
)
def _get_arroyo_subprocess_initializer(
initializer: Callable[[], None] | None,
) -> Callable[[], None]:
from sentry.metrics.middleware import get_current_global_tags
# One can add integer tags and other invalid types today. Filter out any
# tags that may not be pickleable. Because those tags are getting pickled
# as part of the constructed partial()
tags: Tags = {k: v for k, v in get_current_global_tags().items() if isinstance(v, str)}
return partial(_initialize_arroyo_subprocess, initializer=initializer, tags=tags)
def _initialize_arroyo_subprocess(initializer: Callable[[], None] | None, tags: Tags) -> None:
from sentry.runner import configure
configure()
if initializer:
initializer()
from sentry.metrics.middleware import add_global_tags
# Inherit global tags from the parent process
add_global_tags(all_threads=True, tags=tags)
def initialize_arroyo_main() -> None:
from arroyo import configure_metrics
from sentry.utils.metrics import backend
# XXX: we initially called this function only to initialize sentry consumer
# metrics, and namespaced arroyo metrics under sentry.consumer. Now we
# initialize arroyo metrics in every sentry process, and so even producer
# metrics are namespaced under sentry.consumer.
metrics_wrapper = MetricsWrapper(backend, name="consumer")
configure_metrics(metrics_wrapper)
| MetricsWrapper |
python | numpy__numpy | numpy/_core/tests/test_getlimits.py | {
"start": 5219,
"end": 5453
} | class ____:
def test_finfo_generic(self):
assert isinstance(np.finfo[np.float64], types.GenericAlias)
def test_iinfo_generic(self):
assert isinstance(np.iinfo[np.int_], types.GenericAlias)
| TestRuntimeSubscriptable |
python | kamyu104__LeetCode-Solutions | Python/last-moment-before-all-ants-fall-out-of-a-plank.py | {
"start": 29,
"end": 282
} | class ____(object):
def getLastMoment(self, n, left, right):
"""
:type n: int
:type left: List[int]
:type right: List[int]
:rtype: int
"""
return max(max(left or [0]), n-min(right or [n]))
| Solution |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 2355,
"end": 2466
} | class ____(Channel[T]):
async def get(self) -> T:
"""An implementation of the generic."""
| DirectChannel |
python | django__django | tests/admin_default_site/tests.py | {
"start": 450,
"end": 906
} | class ____(SimpleTestCase):
def setUp(self):
# Reset admin.site since it may have already been instantiated by
# another test app.
self._old_site = admin.site
admin.site = sites.site = sites.DefaultAdminSite()
def tearDown(self):
admin.site = sites.site = self._old_site
def test_use_custom_admin_site(self):
self.assertEqual(admin.site.__class__.__name__, "CustomAdminSite")
| CustomAdminSiteTests |
python | aimacode__aima-python | probability.py | {
"start": 6556,
"end": 7649
} | class ____(BayesNet):
"""An abstract class for a decision network as a wrapper for a BayesNet.
Represents an agent's current state, its possible actions, reachable states
and utilities of those states."""
def __init__(self, action, infer):
"""action: a single action node
infer: the preferred method to carry out inference on the given BayesNet"""
super(DecisionNetwork, self).__init__()
self.action = action
self.infer = infer
def best_action(self):
"""Return the best action in the network"""
return self.action
def get_utility(self, action, state):
"""Return the utility for a particular action and state in the network"""
raise NotImplementedError
def get_expected_utility(self, action, evidence):
"""Compute the expected utility given an action and evidence"""
u = 0.0
prob_dist = self.infer(action, evidence, self).prob
for item, _ in prob_dist.items():
u += prob_dist[item] * self.get_utility(action, item)
return u
| DecisionNetwork |
python | run-llama__llama_index | llama-index-core/llama_index/core/llms/llm.py | {
"start": 2493,
"end": 4186
} | class ____(Protocol):
def __call__(self, prompt: str) -> str:
pass
def stream_completion_response_to_tokens(
completion_response_gen: CompletionResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in completion_response_gen:
yield response.delta or ""
return gen()
def stream_chat_response_to_tokens(
chat_response_gen: ChatResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in chat_response_gen:
yield response.delta or ""
return gen()
async def astream_completion_response_to_tokens(
completion_response_gen: CompletionResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in completion_response_gen:
yield response.delta or ""
return gen()
async def astream_chat_response_to_tokens(
chat_response_gen: ChatResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in chat_response_gen:
yield response.delta or ""
return gen()
def default_completion_to_prompt(prompt: str) -> str:
return prompt
MessagesToPromptCallable = Annotated[
Optional[MessagesToPromptType],
WithJsonSchema({"type": "string"}),
]
CompletionToPromptCallable = Annotated[
Optional[CompletionToPromptType],
WithJsonSchema({"type": "string"}),
]
| CompletionToPromptType |
python | fluentpython__example-code | 21-class-metaprog/bulkfood/bulkfood_v7.py | {
"start": 1736,
"end": 2098
} | class ____(model.Entity): # <1>
description = model.NonBlank()
weight = model.Quantity()
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# END LINEITEM_V7
| LineItem |
python | openai__openai-python | src/openai/resources/fine_tuning/alpha/alpha.py | {
"start": 2264,
"end": 2498
} | class ____:
def __init__(self, alpha: Alpha) -> None:
self._alpha = alpha
@cached_property
def graders(self) -> GradersWithRawResponse:
return GradersWithRawResponse(self._alpha.graders)
| AlphaWithRawResponse |
python | sqlalchemy__sqlalchemy | test/orm/test_cycles.py | {
"start": 36503,
"end": 42390
} | class ____(fixtures.MappedTest):
"""Post_update on a single self-referential mapper."""
@classmethod
def define_tables(cls, metadata):
Table(
"node",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("path", String(50), nullable=False),
Column("parent_id", Integer, ForeignKey("node.id"), nullable=True),
Column(
"prev_sibling_id",
Integer,
ForeignKey("node.id"),
nullable=True,
),
Column(
"next_sibling_id",
Integer,
ForeignKey("node.id"),
nullable=True,
),
)
@classmethod
def setup_classes(cls):
class Node(cls.Basic):
def __init__(self, path=""):
self.path = path
def test_one(self):
"""Post_update only fires off when needed.
This test case used to produce many superfluous update statements,
particularly upon delete
"""
node, Node = self.tables.node, self.classes.Node
self.mapper_registry.map_imperatively(
Node,
node,
properties={
"children": relationship(
Node,
primaryjoin=node.c.id == node.c.parent_id,
cascade="all",
backref=backref("parent", remote_side=node.c.id),
),
"prev_sibling": relationship(
Node,
primaryjoin=node.c.prev_sibling_id == node.c.id,
remote_side=node.c.id,
uselist=False,
),
"next_sibling": relationship(
Node,
primaryjoin=node.c.next_sibling_id == node.c.id,
remote_side=node.c.id,
uselist=False,
post_update=True,
),
},
)
session = fixture_session(autoflush=False)
def append_child(parent, child):
if parent.children:
parent.children[-1].next_sibling = child
child.prev_sibling = parent.children[-1]
parent.children.append(child)
def remove_child(parent, child):
child.parent = None
node = child.next_sibling
node.prev_sibling = child.prev_sibling
child.prev_sibling.next_sibling = node
session.delete(child)
root = Node("root")
about = Node("about")
cats = Node("cats")
stories = Node("stories")
bruce = Node("bruce")
append_child(root, about)
assert about.prev_sibling is None
append_child(root, cats)
assert cats.prev_sibling is about
assert cats.next_sibling is None
assert about.next_sibling is cats
assert about.prev_sibling is None
append_child(root, stories)
append_child(root, bruce)
session.add(root)
session.flush()
remove_child(root, cats)
# pre-trigger lazy loader on 'cats' to make the test easier
cats.children
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
CompiledSQL(
"UPDATE node SET prev_sibling_id=:prev_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: {
"prev_sibling_id": about.id,
"node_id": stories.id,
},
),
CompiledSQL(
"UPDATE node SET next_sibling_id=:next_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: {
"next_sibling_id": stories.id,
"node_id": about.id,
},
),
CompiledSQL(
"UPDATE node SET next_sibling_id=:next_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: {"next_sibling_id": None, "node_id": cats.id},
),
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: [{"id": cats.id}],
),
)
session.delete(root)
self.assert_sql_execution(
testing.db,
session.flush,
CompiledSQL(
"UPDATE node SET next_sibling_id=:next_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: [
{"node_id": about.id, "next_sibling_id": None},
{"node_id": stories.id, "next_sibling_id": None},
],
),
AllOf(
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": about.id},
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": stories.id},
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": bruce.id},
),
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": root.id},
),
)
about = Node("about")
cats = Node("cats")
about.next_sibling = cats
cats.prev_sibling = about
session.add(about)
session.flush()
session.delete(about)
cats.prev_sibling = None
session.flush()
| SelfReferentialPostUpdateTest |
python | sympy__sympy | sympy/vector/operators.py | {
"start": 1194,
"end": 1737
} | class ____(Expr):
"""
Represents unevaluated Divergence.
Examples
========
>>> from sympy.vector import CoordSys3D, Divergence
>>> R = CoordSys3D('R')
>>> v = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> Divergence(v)
Divergence(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
"""
def __new__(cls, expr):
expr = sympify(expr)
obj = Expr.__new__(cls, expr)
obj._expr = expr
return obj
def doit(self, **hints):
return divergence(self._expr, doit=True)
| Divergence |
python | django__django | django/contrib/postgres/fields/array.py | {
"start": 12047,
"end": 12303
} | class ____:
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
| IndexTransformFactory |
python | ray-project__ray | release/ray_release/exception.py | {
"start": 2407,
"end": 2508
} | class ____(ClusterManagerError):
exit_code = ExitCode.CLUSTER_STARTUP_TIMEOUT
| ClusterStartupTimeout |
python | PyCQA__pylint | tests/pyreverse/functional/class_diagrams/inheritance/simple_inheritance.py | {
"start": 39,
"end": 82
} | class ____(Parent):
"""child class"""
| Child |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 76825,
"end": 78481
} | class ____(SingleContinuousDistribution):
_argnames = ('a',)
set = Interval(0, oo)
@staticmethod
def check(a):
_value_check(a > 0, "Parameter a must be positive.")
def pdf(self, x):
a = self.a
return sqrt(2/pi)*x**2*exp(-x**2/(2*a**2))/a**3
def _cdf(self, x):
a = self.a
return erf(sqrt(2)*x/(2*a)) - sqrt(2)*x*exp(-x**2/(2*a**2))/(sqrt(pi)*a)
def Maxwell(name, a):
r"""
Create a continuous random variable with a Maxwell distribution.
Explanation
===========
The density of the Maxwell distribution is given by
.. math::
f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3}
with :math:`x \geq 0`.
.. TODO - what does the parameter mean?
Parameters
==========
a : Real number, `a > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Maxwell, density, E, variance
>>> from sympy import Symbol, simplify
>>> a = Symbol("a", positive=True)
>>> z = Symbol("z")
>>> X = Maxwell("x", a)
>>> density(X)(z)
sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3)
>>> E(X)
2*sqrt(2)*a/sqrt(pi)
>>> simplify(variance(X))
a**2*(-8 + 3*pi)/pi
References
==========
.. [1] https://en.wikipedia.org/wiki/Maxwell_distribution
.. [2] https://mathworld.wolfram.com/MaxwellDistribution.html
"""
return rv(name, MaxwellDistribution, (a, ))
#-------------------------------------------------------------------------------
# Moyal Distribution -----------------------------------------------------------
| MaxwellDistribution |
python | Textualize__textual | src/textual/_extrema.py | {
"start": 134,
"end": 1662
} | class ____(NamedTuple):
"""Specifies minimum and maximum dimensions."""
min_width: Fraction | None = None
max_width: Fraction | None = None
min_height: Fraction | None = None
max_height: Fraction | None = None
def apply_width(self, width: Fraction) -> Fraction:
"""Apply width extrema.
Args:
width: Width value.
Returns:
Width, clamped between minimum and maximum.
"""
min_width, max_width = self[:2]
if min_width is not None:
width = max(width, min_width)
if max_width is not None:
width = min(width, max_width)
return width
def apply_height(self, height: Fraction) -> Fraction:
"""Apply height extrema.
Args:
height: Height value.
Returns:
Height, clamped between minimum and maximum.
"""
min_height, max_height = self[2:]
if min_height is not None:
height = max(height, min_height)
if max_height is not None:
height = min(height, max_height)
return height
def apply_dimensions(self, width: int, height: int) -> Size:
"""Apply extrema to integer dimensions.
Args:
width: Integer width.
height: Integer height.
Returns:
Size with extrema applied.
"""
return Size(
int(self.apply_width(Fraction(width))),
int(self.apply_height(Fraction(height))),
)
| Extrema |
python | python-excel__xlwt | xlwt/Column.py | {
"start": 66,
"end": 1493
} | class ____(object):
def __init__(self, colx, parent_sheet):
if not(isinstance(colx, int) and 0 <= colx <= 255):
raise ValueError("column index (%r) not an int in range(256)" % colx)
self._index = colx
self._parent = parent_sheet
self._parent_wb = parent_sheet.get_parent()
self._xf_index = 0x0F
self.width = 0x0B92
self.hidden = 0
self.level = 0
self.collapse = 0
self.user_set = 0
self.best_fit = 0
self.unused = 0
def set_width(self, width):
if not(isinstance(width, int) and 0 <= width <= 65535):
raise ValueError("column width (%r) not an int in range(65536)" % width)
self._width = width
def get_width(self):
return self._width
width = property(get_width, set_width)
def set_style(self, style):
self._xf_index = self._parent_wb.add_style(style)
def width_in_pixels(self):
# *** Approximation ****
return int(round(self.width * 0.0272 + 0.446, 0))
def get_biff_record(self):
options = (self.hidden & 0x01) << 0
options |= (self.user_set & 0x01) << 1
options |= (self.best_fit & 0x01) << 2
options |= (self.level & 0x07) << 8
options |= (self.collapse & 0x01) << 12
return ColInfoRecord(self._index, self._index, self.width, self._xf_index, options, self.unused).get()
| Column |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/text_area_alternate_screen.py | {
"start": 215,
"end": 362
} | class ____(App[None]):
def on_mount(self):
self.push_screen(AltScreen())
if __name__ == "__main__":
app = TABug()
app.run()
| TABug |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/__init__.py | {
"start": 7351,
"end": 8821
} | class ____(TargetsNamespaceParser, Parser):
"""Composite argument parser for a Python target."""
def __init__(self, allow_venv: bool) -> None:
super().__init__()
self.allow_venv = allow_venv
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target-python'
def get_value(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result, without storing the result in the namespace."""
versions = list(SUPPORTED_PYTHON_VERSIONS)
for target in state.root_namespace.targets or []: # type: PosixConfig
versions.remove(target.python.version)
parser = PythonParser(versions, allow_venv=self.allow_venv, allow_default=True)
python = parser.parse(state)
value = ControllerConfig(python=python)
return value
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section = f'{self.option_name} options (choose one):'
state.sections[section] = '\n'.join([
f' {PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=True).document(state)} # non-origin controller',
f' {PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=True, allow_default=True).document(state)} # origin controller',
])
return None
| PythonTargetParser |
python | etianen__django-reversion | tests/test_app/models.py | {
"start": 465,
"end": 925
} | class ____(models.Model):
name = models.CharField(
max_length=191,
default="v1",
)
related = models.ManyToManyField(
"TestModelRelated",
blank=True,
related_name="+",
)
related_through = models.ManyToManyField(
"TestModelRelated",
blank=True,
through="TestModelThrough",
related_name="+",
)
generic_inlines = GenericRelation(TestModelGenericInline)
| TestModel |
python | sympy__sympy | sympy/polys/domains/domainelement.py | {
"start": 93,
"end": 860
} | class ____:
"""
Represents an element of a domain.
Mix in this trait into a class whose instances should be recognized as
elements of a domain. Method ``parent()`` gives that domain.
"""
__slots__ = ()
def parent(self):
"""Get the domain associated with ``self``
Examples
========
>>> from sympy import ZZ, symbols
>>> x, y = symbols('x, y')
>>> K = ZZ[x,y]
>>> p = K(x)**2 + K(y)**2
>>> p
x**2 + y**2
>>> p.parent()
ZZ[x,y]
Notes
=====
This is used by :py:meth:`~.Domain.convert` to identify the domain
associated with a domain element.
"""
raise NotImplementedError("abstract method")
| DomainElement |
python | sphinx-doc__sphinx | tests/test_ext_intersphinx/test_ext_intersphinx_cache.py | {
"start": 4324,
"end": 4786
} | class ____(FakeInventory):
protocol_version = 2
def _write_headers(self, buffer: BinaryIO) -> None:
super()._write_headers(buffer)
buffer.write(b'# The remainder of this file is compressed using zlib.\n')
def _write_body(self, buffer: BinaryIO, lines: Iterable[bytes]) -> None:
compressor = zlib.compressobj(9)
buffer.writelines(map(compressor.compress, lines))
buffer.write(compressor.flush())
| FakeInventoryV2 |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 12345,
"end": 12640
} | class ____(TestCase):
"""Tests for ``powerset()``"""
def test_combinatorics(self):
"""Ensure a proper enumeration"""
p = mi.powerset([1, 2, 3])
self.assertEqual(
list(p), [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
)
| PowersetTests |
python | pypa__pipenv | pipenv/vendor/colorama/ansi.py | {
"start": 489,
"end": 938
} | class ____(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
| AnsiCodes |
python | Textualize__textual | docs/examples/how-to/containers04.py | {
"start": 272,
"end": 769
} | class ____(App):
"""Simple app to play with containers."""
CSS = """
.with-border {
border: heavy green;
}
"""
def compose(self) -> ComposeResult:
with Horizontal(classes="with-border"):
yield Box()
yield Box()
yield Box()
with Horizontal(classes="with-border"):
yield Box()
yield Box()
yield Box()
if __name__ == "__main__":
app = ContainerApp()
app.run()
| ContainerApp |
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 49892,
"end": 50106
} | class ____(BatchPointwiseMathOpsPostGradFusion):
def __init__(self, **kwargs) -> None:
super().__init__(aten.sub.Tensor, **kwargs)
@register_fusion("batch_aten_div", pre_grad=False)
| BatchSubPostGradFusion |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 20371,
"end": 20493
} | class ____(dict[type[_TAnsibleDatatagBase], _TAnsibleDatatagBase]):
__slots__ = _NO_INSTANCE_STORAGE
| _AnsibleTagsMapping |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 58253,
"end": 59939
} | class ____(Action):
"""Base class for Actions that operate on Automations and need to infer them from
events"""
source: Literal["selected", "inferred"] = Field(
"selected",
description=(
"Whether this Action applies to a specific selected "
"automation (given by `automation_id`), or to an automation that is "
"inferred from the triggering event. If the source is 'inferred', "
"the `automation_id` may not be set. If the source is 'selected', the "
"`automation_id` must be set."
),
)
automation_id: Optional[UUID] = Field(
None, description="The identifier of the automation to act on"
)
@model_validator(mode="after")
def selected_automation_requires_id(self) -> Self:
wants_selected_automation = self.source == "selected"
has_automation_id = bool(self.automation_id)
if wants_selected_automation != has_automation_id:
raise ValueError(
"automation_id is "
+ ("not allowed" if has_automation_id else "required")
)
return self
async def automation_id_to_use(self, triggered_action: "TriggeredAction") -> UUID:
if self.source == "selected":
assert self.automation_id
return self.automation_id
event = triggered_action.triggering_event
if not event:
raise ActionFailed("No event to infer the automation")
assert event
if id := _id_of_first_resource_of_kind(event, "prefect.automation"):
return id
raise ActionFailed("No automation could be inferred")
| AutomationAction |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 86619,
"end": 108676
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3]"):
l_x_ = L_x_
wrap_body_0 = self.wrap_body_0
wrap = torch.ops.higher_order.wrap(wrap_body_0, l_x_); wrap_body_0 = l_x_ = None
getitem: "f32[3]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_0(torch.nn.Module):
def forward(self, l_x_: "f32[3]"):
neg: "f32[3]" = -l_x_; l_x_ = None
return (neg,)
""",
)
def test_access_module_attr(self):
counters.clear()
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
mod = torch.nn.Linear(3, 3)
x = torch.randn(3, 3)
@torch.compile(backend=cnt, fullgraph=True)
def f(x):
y = mod(x)
return wrap(lambda y: y - mod.bias, y)
result = f(x)
self.assertEqual(result, mod(x) - mod.bias)
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(len(backend.graphs), 1)
wrap_node = find_first_node(backend.graphs[0], wrap)
self.assertTrue(len(wrap_node.args), 3)
# Check that the linear bias and weight are getattr in the outer graph
if not torch._dynamo.config.inline_inbuilt_nn_modules:
self.assertTrue(len(dict(backend.graphs[0].named_parameters())) == 2)
# Check that the inner function has one op and its a linear op
body_function = getattr(backend.graphs[0], wrap_node.args[0].name)
self.assertEqual(op_count(body_function), 1)
# Check that the innermost graph does not have any params
self.assertTrue(len(dict(body_function.named_parameters())) == 0)
self.assertTrue(len(dict(body_function.named_children())) == 0)
def test_make_closure(self):
def f(x, y):
def g(x):
return x + y
return g(x)
def h(x, y):
return wrap(f, x, y)
x = torch.randn(3, 3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(h, default_args_generator((x, y)), arg_count)
def test_internal_nonlocal(self):
def f(x, y):
w = 1
def g(x):
nonlocal w
w = x
return x
def h(x):
nonlocal w
w = w + 1
return x
g(x)
h(x)
return w + y
def h(x, y):
return wrap(f, x, y)
x = torch.randn(3, 3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(h, default_args_generator((x, y)), arg_count)
def test_capture_numpy_number(self):
import numpy as np
y = np.float32(1.0)
def f(x):
return wrap(lambda x: x + y, x)
x = torch.randn(3)
# np.number are lifted to graph inputs
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(f, default_args_generator((x,)), arg_count)
def test_freevars_as_inputs_to_wrap(self):
y = torch.randn(3)
def f(x):
return wrap(lambda x, y: x + y, x, y)
x = torch.randn(3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(f, default_args_generator((x,)), arg_count)
def test_lift_tensor_constant(self):
def f(x):
y = torch.tensor(1.0)
return wrap(lambda x: x + y, x)
x = torch.randn(3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(
f, default_args_generator((x,)), arg_count, expected_opcount=3
)
def test_nested_wrap(self):
class MockModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x):
return self.linear(x)
mod = MockModule()
# Two levels of wrap ops
def gn(x):
return torch.cos(x) + wrap(mod, x)
def fn(x):
return wrap(gn, x)
arg_count = ifdynstaticdefault(4, 5)
self._test_wrap_simple(
fn, default_args_generator((torch.randn(10, 10),)), arg_count
)
def test_fn_with_kwargs_in_torch_ops(self):
def fn(x):
return wrap(lambda z: torch.cos(input=z), x)
x = torch.randn(3)
arg_count = ifdynstaticdefault(2, 3)
self._test_wrap_simple(fn, default_args_generator((x,)), arg_count)
def test_hooks(self):
class ToyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.net = torch.nn.Linear(10, 10)
def forward(self, x):
return self.net(x)
model = ToyModel()
forward_handles = {}
activations = {}
def save_activations(mod, inp, out):
activations[name] = inp
for name, module in model.named_children():
forward_handles[name] = module.register_forward_hook(save_activations)
@torch.compile(backend="eager")
def fn(x):
return wrap(lambda x: model(x), x)
for _ in range(2):
# second iteration is key, hooks would have fired during aot trace
# on first iter
activations.clear()
x = torch.randn((10, 10))
pred = fn(x)
loss = pred.sum()
loss.backward()
self.assertTrue(activations.keys() == forward_handles.keys())
def _get_source_fn_stack(self, gm, node_names):
ret = {}
for mod in gm.modules():
for node in mod.graph.nodes:
if node.name in node_names:
actual_stack = [
name for name, _ in node.meta.get("source_fn_stack", [])
]
ret[node.name] = actual_stack
return ret
def test_wrap_source_fn_stack(self):
class MockModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, x):
return self.linear(x)
mod = MockModule()
def gn(x):
return torch.cos(x) + wrap(mod, x)
def fn(x):
return wrap(gn, x)
backend = EagerAndRecordGraphs()
inp = torch.randn((4, 4))
torch.compile(fn, backend=backend, fullgraph=True)(inp)
gm = backend.graphs[0]
actual_stack = self._get_source_fn_stack(gm, {"cos", "add", "linear"})
self.assertExpectedInline(
pprint.pformat(actual_stack),
"""\
{'add': ['wrap', 'add'],
'cos': ['wrap', 'cos'],
'linear': ['wrap', 'wrap', 'linear']}""",
)
def test_cond_source_fn_stack(self):
backend = EagerAndRecordGraphs()
@torch.compile(backend=backend, fullgraph=True)
def cond_f(pred, pred2, x, y):
def true_fn(pred2, x, y):
return x + y
def false_fn(pred2, x, y):
def true_fn2(x, y):
return x.sin() - y.cos()
def false_fn2(x, y):
return x.cos() - y.sin()
return control_flow.cond(pred2, true_fn2, false_fn2, [x, y])
return control_flow.cond(pred, true_fn, false_fn, [pred2, x, y])
pred = torch.tensor(True)
pred2 = torch.tensor(False)
xs = torch.randn(2, 3, 3)
y = torch.randn(3, 3)
cond_f(pred, pred2, xs, y)
gm = backend.graphs[0]
actual_stack = self._get_source_fn_stack(gm, {"cos", "add", "sin", "sub"})
self.assertExpectedInline(
pprint.pformat(actual_stack),
"""\
{'add': ['cond', 'add'],
'cos': ['cond', 'cond', 'cos'],
'sin': ['cond', 'cond', 'sin'],
'sub': ['cond', 'cond', 'sub']}""",
)
def test_map_source_fn_stack(self):
backend = EagerAndRecordGraphs()
xs = torch.randn(2, 3, 3)
y = torch.randn(3)
@torch.compile(backend=backend, fullgraph=True)
def map_f(xs, y):
def inner(x, y):
def inner2(x, y):
return x + y
return control_flow.map(inner2, x, y) * y.cos()
return control_flow.map(inner, xs, y).sin()
map_f(xs, y)
gm = backend.graphs[0]
actual_stack = self._get_source_fn_stack(gm, {"cos", "add", "sin"})
self.assertExpectedInline(
pprint.pformat(actual_stack),
"""\
{'add': ['map_impl', 'map_impl', 'add'],
'cos': ['map_impl', 'cos'],
'sin': ['sin']}""",
)
def test_grad_source_fn_stack(self):
backend = EagerAndRecordGraphs()
def fn(x):
return x.sin().sum()
@torch.compile(backend=backend, fullgraph=False)
def wrapper_fn(x):
return torch.func.grad(torch.func.grad(fn))(x)
x = torch.randn(())
wrapper_fn(x)
gm = backend.graphs[0]
actual_stack = self._get_source_fn_stack(gm, {"sum_1", "sin"})
self.assertExpectedInline(
pprint.pformat(actual_stack),
"""{'sin': ['sin']}""",
)
def test_vmap_multiply_scalar(self):
@torch.compile(backend="inductor", fullgraph=True)
def g(x):
return torch.vmap(torch.mul, in_dims=(0, None))(x, 3.14)
x = torch.randn(3)
y = g(x)
self.assertEqual(y, x * 3.14)
@torch.compile(backend="inductor", fullgraph=True)
def f(x):
return torch.vmap(torch.mul, in_dims=(0, None))(x, 314)
x = torch.randn(3)
y = f(x)
self.assertEqual(y, x * 314)
def test_vmap_source_fn_stack(self):
backend = EagerAndRecordGraphs()
def inner_fn(x):
return torch.func.vmap(lambda x: x.sum(0) + x.sum(1))(x)
@torch.compile(backend=backend, fullgraph=True)
def fn(x):
return torch.func.vmap(lambda x: inner_fn(x.cos()))(x)
x = torch.randn(3, 3, 3, 3)
fn(x)
gm = backend.graphs[0]
actual_stack = self._get_source_fn_stack(
gm, {"sum_1", "sum_2", "batched_output"}
)
self.assertExpectedInline(
pprint.pformat(actual_stack),
"""{'sum_1': ['sum_1'], 'sum_2': ['sum_2']}""",
)
# https://github.com/pytorch/pytorch/issues/137061
def test_dynamic_shapes_over_vmap_batch_size(self):
def gn(a, b, c, d):
return a + b + c + d
def fn(func, a, b, c, d):
a = torch.arange(a)
b = torch.arange(b)
c = torch.arange(c)
d = torch.arange(d)
func = torch.vmap(func, in_dims=(0, None, None, None))
func = torch.vmap(func, in_dims=(None, 0, None, None))
func = torch.vmap(func, in_dims=(None, None, 0, None))
func = torch.vmap(func, in_dims=(None, None, None, 0))
return func(a, b, c, d)
cnt = CompileCounterWithBackend("eager")
# We generate corresponding dynamic shapes test case at
# `test/dynamo/test_dynamic_shapes.py` automatically.
compiled_fn = torch.compile(fn, backend=cnt)
a, b, c, d = 2, 4, 8, 8
self.assertEqual(fn(gn, a, b, c, d), compiled_fn(gn, a, b, c, d))
self.assertEqual(cnt.frame_count, 1)
a, b, c, d = 4, 8, 16, 16
self.assertEqual(fn(gn, a, b, c, d), compiled_fn(gn, a, b, c, d))
# Ensure no recompile if dynamic shapes enabled.
self.assertEqual(cnt.frame_count, ifdynstaticdefault(2, 1))
graph = cnt.graphs[0]
# Check dynamic shapes generates correct graph.
if check_dynamic_shape_capture():
self.assertExpectedInline(
graph.code.strip(),
"""\
def forward(self, L_a_ : torch.SymInt, L_b_ : torch.SymInt, L_c_ : torch.SymInt, L_d_ : torch.SymInt):
l_a_ = L_a_
l_b_ = L_b_
l_c_ = L_c_
l_d_ = L_d_
a = torch.arange(l_a_)
b = torch.arange(l_b_)
c = torch.arange(l_c_)
d = torch.arange(l_d_)
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(l_d_, 'error'); _vmap_increment_nesting = None
child = torch._functorch.predispatch._add_batch_dim(d, 0, 1); d = None
lazy_load_decompositions_1 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_1 = None
_vmap_increment_nesting_1 = torch._functorch.predispatch._vmap_increment_nesting(l_c_, 'error'); _vmap_increment_nesting_1 = None
child_1 = torch._functorch.predispatch._add_batch_dim(c, 0, 2); c = None
lazy_load_decompositions_2 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_2 = None
_vmap_increment_nesting_2 = torch._functorch.predispatch._vmap_increment_nesting(l_b_, 'error'); _vmap_increment_nesting_2 = None
child_2 = torch._functorch.predispatch._add_batch_dim(b, 0, 3); b = None
lazy_load_decompositions_3 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_3 = None
_vmap_increment_nesting_3 = torch._functorch.predispatch._vmap_increment_nesting(l_a_, 'error'); _vmap_increment_nesting_3 = None
_add_batch_dim_3 = torch._functorch.predispatch._add_batch_dim(a, 0, 4); a = None
add = _add_batch_dim_3 + child_2; _add_batch_dim_3 = child_2 = None
add_1 = add + child_1; add = child_1 = None
batched_outputs = add_1 + child; add_1 = child = None
batched_outputs_1 = torch._functorch.predispatch._remove_batch_dim(batched_outputs, 4, l_a_, 0); batched_outputs = l_a_ = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
batched_outputs_2 = torch._functorch.predispatch._remove_batch_dim(batched_outputs_1, 3, l_b_, 0); batched_outputs_1 = l_b_ = None
_vmap_decrement_nesting_1 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_1 = None
batched_outputs_3 = torch._functorch.predispatch._remove_batch_dim(batched_outputs_2, 2, l_c_, 0); batched_outputs_2 = l_c_ = None
_vmap_decrement_nesting_2 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_2 = None
_remove_batch_dim_3 = torch._functorch.predispatch._remove_batch_dim(batched_outputs_3, 1, l_d_, 0); batched_outputs_3 = l_d_ = None
_vmap_decrement_nesting_3 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_3 = None
return (_remove_batch_dim_3,)""", # noqa: B950
)
def test_cond_pytree_operands(self):
def _construct_pytree():
a = torch.randn(3, 3)
b = torch.randn(3, 3)
c = torch.randn(3, 3)
d = torch.randn(3, 3)
e = torch.randn(3, 3)
f = torch.randn(3, 3)
g = torch.randn(3, 3)
return (a, [[[b]]], c, (d, (e,), f), {"g": g})
pred = torch.tensor(True)
inp = _construct_pytree()
def _reduce_sum(flattened):
init = 0
for val in flattened:
init += val
return init
def _reduce_max(flattened):
init = flattened[0]
for val in flattened:
init = max(val, init)
return init
def true_fn(pytree_in):
flattened, spec = pytree.tree_flatten(pytree_in)
return _reduce_sum(flattened)
def false_fn(pytree_in):
flattened, spec = pytree.tree_flatten(pytree_in)
return _reduce_max(flattened)
def fn(pred, pytree_in):
return torch.cond(pred, true_fn, false_fn, [pytree_in])
backend = EagerAndRecordGraphs()
compiled_res = torch.compile(fn, backend=backend)(pred, inp)
eager_res = fn(pred, inp)
self.assertEqual(compiled_res, eager_res)
graph = backend.graphs[0]
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
self.assertExpectedInline(
graph.code.strip(),
"""\
def forward(self, L_pred_ : torch.Tensor, L_pytree_in_0_ : torch.Tensor, L_pytree_in_1_0_0_0_ : torch.Tensor, L_pytree_in_2_ : torch.Tensor, L_pytree_in_3_0_ : torch.Tensor, L_pytree_in_3_1_0_ : torch.Tensor, L_pytree_in_3_2_ : torch.Tensor, L_pytree_in_4_g_ : torch.Tensor):
l_pred_ = L_pred_
l_pytree_in_0_ = L_pytree_in_0_
l_pytree_in_1_0_0_0_ = L_pytree_in_1_0_0_0_
l_pytree_in_2_ = L_pytree_in_2_
l_pytree_in_3_0_ = L_pytree_in_3_0_
l_pytree_in_3_1_0_ = L_pytree_in_3_1_0_
l_pytree_in_3_2_ = L_pytree_in_3_2_
l_pytree_in_4_g_ = L_pytree_in_4_g_
cond_true_0 = self.cond_true_0
cond_false_0 = self.cond_false_0
cond = torch.ops.higher_order.cond(l_pred_, cond_true_0, cond_false_0, (l_pytree_in_0_, l_pytree_in_1_0_0_0_, l_pytree_in_2_, l_pytree_in_3_0_, l_pytree_in_3_1_0_, l_pytree_in_3_2_, l_pytree_in_4_g_)); l_pred_ = cond_true_0 = cond_false_0 = l_pytree_in_0_ = l_pytree_in_1_0_0_0_ = l_pytree_in_2_ = l_pytree_in_3_0_ = l_pytree_in_3_1_0_ = l_pytree_in_3_2_ = l_pytree_in_4_g_ = None
getitem = cond[0]; cond = None
return (getitem,)""", # noqa: B950
)
def test_cond_pytree_operands_with_non_tensor_leaves(self):
def fn(pred, pytree_in):
return torch.cond(
pred, lambda x: x[0] + 1, lambda x: x[0] * 2, (pytree_in,)
)
pred = torch.tensor(True)
for pytree_in in [("string",), (1.0,)]:
with self.assertRaisesRegex(
RuntimeError,
r"Expect operands to be a tuple of possibly nested dict/list/tuple",
):
fn(pred, pytree_in)
for pytree_in in [("string",), (1.0,)]:
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
r"Cond doesn't work unless it is captured completely with torch.compile",
):
torch.compile(fn, backend="eager")(pred, pytree_in)
def test_cond_with_empty_operands(self):
@torch.compile(fullgraph=True)
def fn(x, y, z):
def true_fn():
return y + 2
def false_fn():
return z + 1
return torch.cond(x, true_fn, false_fn)
zeros = torch.zeros(1)
ones = torch.ones(1)
self.assertEqual(fn(zeros, ones, ones), torch.tensor([2.0]))
self.assertEqual(fn(ones, ones, ones), torch.tensor([3.0]))
def test_hopify_generic_wrap(self):
from torch._higher_order_ops.wrap import dynamo_bypassing_wrapper
def my_hop_fn_impl(fn, *args, k=1, **kwargs):
def wrapper(*args, **kwargs):
out = fn(*args, **kwargs)
if isinstance(out, tuple):
return (out[0] + k,)
return out + k
return wrapper
def my_hop_fn(fn, *args, k=1, **kwargs):
return dynamo_bypassing_wrapper(
functools.partial(my_hop_fn_impl, k=k), fn, *args, **kwargs
)
def my_hop_fn_2_impl(fn, *args, g=None):
def wrapper(*args, **kwargs):
assert g is not None
out = fn(*args)
if isinstance(out, tuple):
return (g(out[0]),)
return g(out)
return wrapper
def my_hop_fn_2(fn, *args, g=None, **kwargs):
return dynamo_bypassing_wrapper(
functools.partial(my_hop_fn_2_impl, g=g), fn, *args, **kwargs
)
def gn(x, h=1):
return x.sin() + h
def fn(x, b):
out = my_hop_fn(gn, x, h=b, k=2)
return out
a = torch.rand((4, 4), requires_grad=True)
b = torch.rand((4, 4))
compiled_fn = torch.compile(
fn, backend="aot_eager_decomp_partition", fullgraph=True
)
self.assertEqual(compiled_fn(a, b), fn(a, b))
def g(x):
return x.cos()
def fn_2(x, b):
out = my_hop_fn_2(fn, x, b, g=g)
return out
a = torch.rand((4, 4), requires_grad=True)
compiled_fn_2 = torch.compile(
fn_2, backend="aot_eager_decomp_partition", fullgraph=True
)
self.assertEqual(compiled_fn_2(a, b), fn_2(a, b))
def test_hints_wrapper(self):
def ref_fn(x, y):
x = x + y
x = torch.relu(x)
x = x + y
return torch.abs(x)
def fn_with_hints(x, y):
x = x + y
def inner_body_fn(x, y):
x = torch.relu(x)
x = x + y
return x
def outer_body_fn(x, y):
x = hints_wrapper(inner_body_fn, (x, y), {}, hints={"inner_body": True})
x = torch.abs(x)
return x
res = hints_wrapper(outer_body_fn, (x, y), {}, hints={"outer_body": True})
return res
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
x = torch.randn(2, 4)
y = torch.ones(4)
eager_res = fn_with_hints(x, y)
compiled_res = torch.compile(fn_with_hints, backend=cnt)(x, y)
ref_res = ref_fn(x, y)
self.assertEqual(eager_res, ref_res)
self.assertEqual(compiled_res, ref_res)
self.assertEqual(len(cnt.graphs), 1)
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
graph = backend.graphs[0]
self.assertExpectedInline(
normalize_gm(graph.print_readable(print_output=False)),
"""\
| GraphModule |
python | doocs__leetcode | solution/2200-2299/2224.Minimum Number of Operations to Convert Time/Solution.py | {
"start": 0,
"end": 307
} | class ____:
def convertTime(self, current: str, correct: str) -> int:
a = int(current[:2]) * 60 + int(current[3:])
b = int(correct[:2]) * 60 + int(correct[3:])
ans, d = 0, b - a
for i in [60, 15, 5, 1]:
ans += d // i
d %= i
return ans
| Solution |
python | FactoryBoy__factory_boy | tests/test_fuzzy.py | {
"start": 20215,
"end": 21305
} | class ____(unittest.TestCase):
def test_seeding(self):
fuzz = fuzzy.FuzzyInteger(1, 1000)
random.reseed_random(42)
value = utils.evaluate_declaration(fuzz)
random.reseed_random(42)
value2 = utils.evaluate_declaration(fuzz)
self.assertEqual(value, value2)
def test_seeding_warning(self):
with warnings.catch_warnings(record=True) as w:
# Do not turn expected warning into an error.
warnings.filterwarnings("default", category=UserWarning, module=r"tests\.test_fuzzy")
fuzz = fuzzy.FuzzyDate(datetime.date(2013, 1, 1))
utils.evaluate_declaration(fuzz)
self.assertEqual(1, len(w))
self.assertIn('factory_boy/issues/331', str(w[-1].message))
def test_reset_state(self):
fuzz = fuzzy.FuzzyInteger(1, 1000)
state = random.get_random_state()
value = utils.evaluate_declaration(fuzz)
random.set_random_state(state)
value2 = utils.evaluate_declaration(fuzz)
self.assertEqual(value, value2)
| FuzzyRandomTestCase |
python | dabeaz-course__practical-python | Solutions/6_15/portfolio.py | {
"start": 16,
"end": 718
} | class ____:
def __init__(self, holdings):
self._holdings = holdings
def __iter__(self):
return self._holdings.__iter__()
def __len__(self):
return len(self._holdings)
def __getitem__(self, index):
return self._holdings[index]
def __contains__(self, name):
return any(s.name == name for s in self._holdings)
@property
def total_cost(self):
return sum(s.shares * s.price for s in self._holdings)
def tabulate_shares(self):
from collections import Counter
total_shares = Counter()
for s in self._holdings:
total_shares[s.name] += s.shares
return total_shares
| Portfolio |
python | getsentry__sentry | tests/sentry/utils/test_event_frames.py | {
"start": 1336,
"end": 2396
} | class ____(unittest.TestCase):
def test_return_none(self) -> None:
assert not get_crashing_thread([])
assert not get_crashing_thread(None)
assert not get_crashing_thread([{}, {}, {}])
assert not get_crashing_thread([{}])
def test_single_crashed_thread(self) -> None:
thread_frames = [{"id": 1, "crashed": True}, {"id": 2, "crashed": False}]
assert get_crashing_thread(thread_frames) == thread_frames[0]
def test_multiple_crashed_threads(self) -> None:
thread_frames = [{"id": 1, "crashed": True}, {"id": 2, "crashed": True}]
assert not get_crashing_thread(thread_frames)
def test_single_current_thread(self) -> None:
thread_frames = [{"id": 1, "current": True}, {"id": 2, "crashed": False}]
assert get_crashing_thread(thread_frames) == thread_frames[0]
def test_multiple_current_thread(self) -> None:
thread_frames = [{"id": 1, "current": True}, {"id": 2, "current": True}]
assert not get_crashing_thread(thread_frames)
| CrashingThreadTestCase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/attributes.py | {
"start": 17004,
"end": 18916
} | class ____(QueryableAttribute[_T_co]):
"""Class bound instrumented attribute which adds basic
:term:`descriptor` methods.
See :class:`.QueryableAttribute` for a description of most features.
"""
__slots__ = ()
inherit_cache = True
""":meta private:"""
# hack to make __doc__ writeable on instances of
# InstrumentedAttribute, while still keeping classlevel
# __doc__ correct
@util.rw_hybridproperty
def __doc__(self) -> Optional[str]:
return self._doc
@__doc__.setter # type: ignore
def __doc__(self, value: Optional[str]) -> None:
self._doc = value
@__doc__.classlevel # type: ignore
def __doc__(cls) -> Optional[str]:
return super().__doc__
def __set__(self, instance: object, value: Any) -> None:
self.impl.set(
instance_state(instance), instance_dict(instance), value, None
)
def __delete__(self, instance: object) -> None:
self.impl.delete(instance_state(instance), instance_dict(instance))
@overload
def __get__(
self, instance: None, owner: Any
) -> InstrumentedAttribute[_T_co]: ...
@overload
def __get__(self, instance: object, owner: Any) -> _T_co: ...
def __get__(
self, instance: Optional[object], owner: Any
) -> Union[InstrumentedAttribute[_T_co], _T_co]:
if instance is None:
return self
dict_ = instance_dict(instance)
if self.impl.supports_population and self.key in dict_:
return dict_[self.key] # type: ignore[no-any-return]
else:
try:
state = instance_state(instance)
except AttributeError as err:
raise orm_exc.UnmappedInstanceError(instance) from err
return self.impl.get(state, dict_) # type: ignore[no-any-return]
@dataclasses.dataclass(frozen=True)
| InstrumentedAttribute |
python | django__django | django/db/models/fields/json.py | {
"start": 24432,
"end": 25261
} | class ____(KeyTransformNumericLookupMixin, lookups.GreaterThanOrEqual):
pass
KeyTransform.register_lookup(KeyTransformIn)
KeyTransform.register_lookup(KeyTransformExact)
KeyTransform.register_lookup(KeyTransformIExact)
KeyTransform.register_lookup(KeyTransformIsNull)
KeyTransform.register_lookup(KeyTransformIContains)
KeyTransform.register_lookup(KeyTransformStartsWith)
KeyTransform.register_lookup(KeyTransformIStartsWith)
KeyTransform.register_lookup(KeyTransformEndsWith)
KeyTransform.register_lookup(KeyTransformIEndsWith)
KeyTransform.register_lookup(KeyTransformRegex)
KeyTransform.register_lookup(KeyTransformIRegex)
KeyTransform.register_lookup(KeyTransformLt)
KeyTransform.register_lookup(KeyTransformLte)
KeyTransform.register_lookup(KeyTransformGt)
KeyTransform.register_lookup(KeyTransformGte)
| KeyTransformGte |
python | django__django | django/urls/resolvers.py | {
"start": 6366,
"end": 9941
} | class ____(CheckURLMixin):
regex = LocaleRegexDescriptor()
def __init__(self, regex, name=None, is_endpoint=False):
self._regex = regex
self._regex_dict = {}
self._is_endpoint = is_endpoint
self.name = name
self.converters = {}
def match(self, path):
match = (
self.regex.fullmatch(path)
if self._is_endpoint and self.regex.pattern.endswith("$")
else self.regex.search(path)
)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
args = () if kwargs else match.groups()
kwargs = {k: v for k, v in kwargs.items() if v is not None}
return path[match.end() :], args, kwargs
return None
def check(self):
warnings = []
warnings.extend(self._check_pattern_startswith_slash())
if not self._is_endpoint:
warnings.extend(self._check_include_trailing_dollar())
return warnings
def _check_include_trailing_dollar(self):
if self._regex.endswith("$") and not self._regex.endswith(r"\$"):
return [
Warning(
"Your URL pattern {} uses include with a route ending with a '$'. "
"Remove the dollar from the route to avoid problems including "
"URLs.".format(self.describe()),
id="urls.W001",
)
]
else:
return []
def __str__(self):
return str(self._regex)
_PATH_PARAMETER_COMPONENT_RE = _lazy_re_compile(
r"<(?:(?P<converter>[^>:]+):)?(?P<parameter>[^>]+)>"
)
whitespace_set = frozenset(string.whitespace)
@functools.lru_cache
def _route_to_regex(route, is_endpoint):
"""
Convert a path pattern into a regular expression. Return the regular
expression and a dictionary mapping the capture names to the converters.
For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)'
and {'pk': <django.urls.converters.IntConverter>}.
"""
parts = ["^"]
all_converters = get_converters()
converters = {}
previous_end = 0
for match_ in _PATH_PARAMETER_COMPONENT_RE.finditer(route):
if not whitespace_set.isdisjoint(match_[0]):
raise ImproperlyConfigured(
f"URL route {route!r} cannot contain whitespace in angle brackets <…>."
)
# Default to make converter "str" if unspecified (parameter always
# matches something).
raw_converter, parameter = match_.groups(default="str")
if not parameter.isidentifier():
raise ImproperlyConfigured(
f"URL route {route!r} uses parameter name {parameter!r} which "
"isn't a valid Python identifier."
)
try:
converter = all_converters[raw_converter]
except KeyError as e:
raise ImproperlyConfigured(
f"URL route {route!r} uses invalid converter {raw_converter!r}."
) from e
converters[parameter] = converter
start, end = match_.span()
parts.append(re.escape(route[previous_end:start]))
previous_end = end
parts.append(f"(?P<{parameter}>{converter.regex})")
parts.append(re.escape(route[previous_end:]))
if is_endpoint:
parts.append(r"\Z")
return "".join(parts), converters
| RegexPattern |
python | scikit-learn__scikit-learn | sklearn/linear_model/_bayes.py | {
"start": 696,
"end": 16243
} | class ____(RegressorMixin, LinearModel):
"""Bayesian ridge regression.
Fit a Bayesian ridge model. See the Notes section for details on this
implementation and the optimization of the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
For an intuitive visualization of how the sinusoid is approximated by
a polynomial using different pairs of initial values, see
:ref:`sphx_glr_auto_examples_linear_model_plot_bayesian_ridge_curvefit.py`.
Parameters
----------
max_iter : int, default=300
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion.
.. versionchanged:: 1.3
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
alpha_init : float, default=None
Initial value for alpha (precision of the noise).
If not set, alpha_init is 1/Var(y).
.. versionadded:: 0.22
lambda_init : float, default=None
Initial value for lambda (precision of the weights).
If not set, lambda_init is 1.
.. versionadded:: 0.22
compute_score : bool, default=False
If True, compute the log marginal likelihood at each iteration of the
optimization.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model.
The intercept is not treated as a probabilistic parameter
and thus has no associated variance. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
intercept_ : float
Independent term in decision function. Set to 0.0 if
`fit_intercept = False`.
alpha_ : float
Estimated precision of the noise.
lambda_ : float
Estimated precision of the weights.
sigma_ : array-like of shape (n_features, n_features)
Estimated variance-covariance matrix of the weights
scores_ : array-like of shape (n_iter_+1,)
If computed_score is True, value of the log marginal likelihood (to be
maximized) at each iteration of the optimization. The array starts
with the value of the log marginal likelihood obtained for the initial
values of alpha and lambda and ends with the value obtained for the
estimated alpha and lambda.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
X_offset_ : ndarray of shape (n_features,)
If `fit_intercept=True`, offset subtracted for centering data to a
zero mean. Set to np.zeros(n_features) otherwise.
X_scale_ : ndarray of shape (n_features,)
Set to np.ones(n_features).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
ARDRegression : Bayesian ARD regression.
Notes
-----
There exist several strategies to perform Bayesian ridge regression. This
implementation is based on the algorithm described in Appendix A of
(Tipping, 2001) where updates of the regularization parameters are done as
suggested in (MacKay, 1992). Note that according to A New
View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
update rules do not guarantee that the marginal likelihood is increasing
between two consecutive iterations of the optimization.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
Journal of Machine Learning Research, Vol. 1, 2001.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
BayesianRidge()
>>> clf.predict([[1, 1]])
array([1.])
"""
_parameter_constraints: dict = {
"max_iter": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0, None, closed="neither")],
"alpha_1": [Interval(Real, 0, None, closed="left")],
"alpha_2": [Interval(Real, 0, None, closed="left")],
"lambda_1": [Interval(Real, 0, None, closed="left")],
"lambda_2": [Interval(Real, 0, None, closed="left")],
"alpha_init": [None, Interval(Real, 0, None, closed="left")],
"lambda_init": [None, Interval(Real, 0, None, closed="left")],
"compute_score": ["boolean"],
"fit_intercept": ["boolean"],
"copy_X": ["boolean"],
"verbose": ["verbose"],
}
def __init__(
self,
*,
max_iter=300,
tol=1.0e-3,
alpha_1=1.0e-6,
alpha_2=1.0e-6,
lambda_1=1.0e-6,
lambda_2=1.0e-6,
alpha_init=None,
lambda_init=None,
compute_score=False,
fit_intercept=True,
copy_X=True,
verbose=False,
):
self.max_iter = max_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.alpha_init = alpha_init
self.lambda_init = lambda_init
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.verbose = verbose
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
X, y = validate_data(
self,
X,
y,
dtype=[np.float64, np.float32],
force_writeable=True,
y_numeric=True,
)
dtype = X.dtype
n_samples, n_features = X.shape
sw_sum = n_samples
y_var = y.var()
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype)
sw_sum = sample_weight.sum()
y_mean = np.average(y, weights=sample_weight)
y_var = np.average((y - y_mean) ** 2, weights=sample_weight)
X, y, X_offset_, y_offset_, X_scale_, _ = _preprocess_data(
X,
y,
fit_intercept=self.fit_intercept,
copy=self.copy_X,
sample_weight=sample_weight,
# Sample weight can be implemented via a simple rescaling.
rescale_with_sw=True,
)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to omit division by zero
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1.0 / (y_var + eps)
if lambda_ is None:
lambda_ = 1.0
# Avoid unintended type promotion to float64 with numpy 2
alpha_ = np.asarray(alpha_, dtype=dtype)
lambda_ = np.asarray(lambda_, dtype=dtype)
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
# Let M, N = n_samples, n_features and K = min(M, N).
# The posterior covariance matrix needs Vh_full: (N, N).
# The full SVD is only required when n_samples < n_features.
# When n_samples < n_features, K=M and full_matrices=True
# U: (M, M), S: M, Vh_full: (N, N), Vh: (M, N)
# When n_samples > n_features, K=N and full_matrices=False
# U: (M, N), S: N, Vh_full: (N, N), Vh: (N, N)
U, S, Vh_full = linalg.svd(X, full_matrices=(n_samples < n_features))
K = len(S)
eigen_vals_ = S**2
eigen_vals_full = np.zeros(n_features, dtype=dtype)
eigen_vals_full[0:K] = eigen_vals_
Vh = Vh_full[0:K, :]
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.max_iter):
# update posterior mean coef_ based on alpha_ and lambda_ and
# compute corresponding sse (sum of squared errors)
coef_, sse_ = self._update_coef_(
X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(
n_samples,
n_features,
sw_sum,
eigen_vals_,
alpha_,
lambda_,
coef_,
sse_,
)
self.scores_.append(s)
# Update alpha and lambda according to (MacKay, 1992)
gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))
lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2)
alpha_ = (sw_sum - gamma_ + 2 * alpha_1) / (sse_ + 2 * alpha_2)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
# return regularization parameters and corresponding posterior mean,
# log marginal likelihood and posterior covariance
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_, sse_ = self._update_coef_(
X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(
n_samples,
n_features,
sw_sum,
eigen_vals_,
alpha_,
lambda_,
coef_,
sse_,
)
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
# posterior covariance
self.sigma_ = np.dot(
Vh_full.T, Vh_full / (alpha_ * eigen_vals_full + lambda_)[:, np.newaxis]
)
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array-like of shape (n_samples,)
Mean of predictive distribution of query points.
y_std : array-like of shape (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if not return_std:
return y_mean
else:
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
return y_mean, y_std
def _update_coef_(
self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
):
"""Update posterior mean and compute corresponding sse (sum of squared errors).
Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
+ np.dot(X.T, X))^-1
"""
if n_samples > n_features:
coef_ = np.linalg.multi_dot(
[Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y]
)
else:
coef_ = np.linalg.multi_dot(
[X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y]
)
# Note: we do not need to explicitly use the weights in this sum because
# y and X were preprocessed by _rescale_data to handle the weights.
sse_ = np.sum((y - np.dot(X, coef_)) ** 2)
return coef_, sse_
def _log_marginal_likelihood(
self, n_samples, n_features, sw_sum, eigen_vals, alpha_, lambda_, coef, sse
):
"""Log marginal likelihood."""
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
# compute the log of the determinant of the posterior covariance.
# posterior covariance is given by
# sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
if n_samples > n_features:
logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals))
else:
logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals
logdet_sigma = -np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (
n_features * log(lambda_)
+ sw_sum * log(alpha_)
- alpha_ * sse
- lambda_ * np.sum(coef**2)
+ logdet_sigma
- sw_sum * log(2 * np.pi)
)
return score
###############################################################################
# ARD (Automatic Relevance Determination) regression
| BayesianRidge |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 27367,
"end": 28392
} | class ____(FlowRunOrchestrationRule):
"""
Removes a deployment concurrency lease if the client version is less than the minimum version for leasing.
"""
FROM_STATES = {states.StateType.PENDING}
TO_STATES = {states.StateType.RUNNING, states.StateType.CANCELLING}
async def after_transition(
self,
initial_state: states.State[Any] | None,
validated_state: states.State[Any] | None,
context: OrchestrationContext[orm_models.FlowRun, core.FlowRunPolicy],
) -> None:
if not initial_state or (
context.client_version
and Version(context.client_version)
>= MIN_CLIENT_VERSION_FOR_CONCURRENCY_LIMIT_LEASING
):
return
if lease_id := initial_state.state_details.deployment_concurrency_lease_id:
lease_storage = get_concurrency_lease_storage()
await lease_storage.revoke_lease(
lease_id=lease_id,
)
| RemoveDeploymentConcurrencyLeaseForOldClientVersions |
python | ray-project__ray | doc/source/ray-core/doc_code/actor-sync.py | {
"start": 129,
"end": 925
} | class ____:
def __init__(self):
self.ready_event = asyncio.Event()
def send(self, clear=False):
self.ready_event.set()
if clear:
self.ready_event.clear()
async def wait(self, should_wait=True):
if should_wait:
await self.ready_event.wait()
@ray.remote
def wait_and_go(signal):
ray.get(signal.wait.remote())
print("go!")
signal = SignalActor.remote()
tasks = [wait_and_go.remote(signal) for _ in range(4)]
print("ready...")
# Tasks will all be waiting for the signals.
print("set..")
ray.get(signal.send.remote())
# Tasks are unblocked.
ray.get(tasks)
# Output is:
# ready...
# set..
# (wait_and_go pid=77366) go!
# (wait_and_go pid=77372) go!
# (wait_and_go pid=77367) go!
# (wait_and_go pid=77358) go!
| SignalActor |
python | tornadoweb__tornado | tornado/httpclient.py | {
"start": 2189,
"end": 5047
} | class ____:
"""A blocking HTTP client.
This interface is provided to make it easier to share code between
synchronous and asynchronous applications. Applications that are
running an `.IOLoop` must use `AsyncHTTPClient` instead.
Typical usage looks like this::
http_client = httpclient.HTTPClient()
try:
response = http_client.fetch("http://www.google.com/")
print(response.body)
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
print("Error: " + str(e))
except Exception as e:
# Other errors are possible, such as IOError.
print("Error: " + str(e))
http_client.close()
.. versionchanged:: 5.0
Due to limitations in `asyncio`, it is no longer possible to
use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
Use `AsyncHTTPClient` instead.
"""
def __init__(
self,
async_client_class: "Optional[Type[AsyncHTTPClient]]" = None,
**kwargs: Any,
) -> None:
# Initialize self._closed at the beginning of the constructor
# so that an exception raised here doesn't lead to confusing
# failures in __del__.
self._closed = True
self._io_loop = IOLoop(make_current=False)
if async_client_class is None:
async_client_class = AsyncHTTPClient
# Create the client while our IOLoop is "current", without
# clobbering the thread's real current IOLoop (if any).
async def make_client() -> "AsyncHTTPClient":
await gen.sleep(0)
assert async_client_class is not None
return async_client_class(**kwargs)
self._async_client = self._io_loop.run_sync(make_client)
self._closed = False
def __del__(self) -> None:
self.close()
def close(self) -> None:
"""Closes the HTTPClient, freeing any resources used."""
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True
def fetch(
self, request: Union["HTTPRequest", str], **kwargs: Any
) -> "HTTPResponse":
"""Executes a request, returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
If an error occurs during the fetch, we raise an `HTTPError` unless
the ``raise_error`` keyword argument is set to False.
"""
response = self._io_loop.run_sync(
functools.partial(self._async_client.fetch, request, **kwargs)
)
return response
| HTTPClient |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/random.py | {
"start": 3889,
"end": 4394
} | class ____:
next_states: dict = field(default_factory=dict)
state_id: Any = None
def state_for_seed(data, seed):
if data.seeds_to_states is None:
data.seeds_to_states = {}
seeds_to_states = data.seeds_to_states
try:
state = seeds_to_states[seed]
except KeyError:
state = RandomState()
seeds_to_states[seed] = state
return state
def normalize_zero(f: float) -> float:
if f == 0.0:
return 0.0
else:
return f
| RandomState |
python | rapidsai__cudf | python/cudf/cudf/core/udf/strings_typing.py | {
"start": 2363,
"end": 3023
} | class ____(models.StructModel):
_members = (("meminfo", types.voidptr), ("udf_string", udf_string))
def __init__(self, dmm, fe_type):
super().__init__(dmm, fe_type, self._members)
def has_nrt_meminfo(self):
return True
def get_nrt_meminfo(self, builder, value):
udf_str_and_meminfo = cgutils.create_struct_proxy(managed_udf_string)(
cuda_target.target_context, builder, value=value
)
return udf_str_and_meminfo.meminfo
managed_udf_string = ManagedUDFString()
any_string_ty = (StringView, UDFString, ManagedUDFString, types.StringLiteral)
string_view = StringView()
| managed_udf_string_model |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.