language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/2200-2299/2220.Minimum Bit Flips to Convert Number/Solution.py | {
"start": 0,
"end": 115
} | class ____:
def minBitFlips(self, start: int, goal: int) -> int:
return (start ^ goal).bit_count()
| Solution |
python | streamlit__streamlit | lib/streamlit/components/v1/component_registry.py | {
"start": 5532,
"end": 5741
} | class ____:
@classmethod
def instance(cls) -> BaseComponentRegistry:
"""Returns the ComponentRegistry of the runtime instance."""
return get_instance().component_registry
| ComponentRegistry |
python | realpython__materials | asterioids-pygame-project/source_code_step_6/space_rocks/game.py | {
"start": 106,
"end": 2371
} | class ____:
MIN_ASTEROID_DISTANCE = 250
def __init__(self):
self._init_pygame()
self.screen = pygame.display.set_mode((800, 600))
self.background = load_sprite("space", False)
self.clock = pygame.time.Clock()
self.asteroids = []
self.spaceship = Spaceship((400, 300))
for _ in range(6):
while True:
position = get_random_position(self.screen)
if (
position.distance_to(self.spaceship.position)
> self.MIN_ASTEROID_DISTANCE
):
break
self.asteroids.append(Asteroid(position))
def main_loop(self):
while True:
self._handle_input()
self._process_game_logic()
self._draw()
def _init_pygame(self):
pygame.init()
pygame.display.set_caption("Space Rocks")
def _handle_input(self):
for event in pygame.event.get():
if event.type == pygame.QUIT or (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
):
quit()
is_key_pressed = pygame.key.get_pressed()
if self.spaceship:
if is_key_pressed[pygame.K_RIGHT]:
self.spaceship.rotate(clockwise=True)
elif is_key_pressed[pygame.K_LEFT]:
self.spaceship.rotate(clockwise=False)
if is_key_pressed[pygame.K_UP]:
self.spaceship.accelerate()
def _process_game_logic(self):
for game_object in self._get_game_objects():
game_object.move(self.screen)
if self.spaceship:
for asteroid in self.asteroids:
if asteroid.collides_with(self.spaceship):
self.spaceship = None
break
def _draw(self):
self.screen.blit(self.background, (0, 0))
for game_object in self._get_game_objects():
game_object.draw(self.screen)
pygame.display.flip()
self.clock.tick(60)
def _get_game_objects(self):
game_objects = [*self.asteroids]
if self.spaceship:
game_objects.append(self.spaceship)
return game_objects
| SpaceRocks |
python | pandas-dev__pandas | pandas/tests/libs/test_hashtable.py | {
"start": 8930,
"end": 13849
} | class ____:
# TODO: moved from test_algos; may be redundancies with other tests
def test_string_hashtable_set_item_signature(self):
# GH#30419 fix typing in StringHashTable.set_item to prevent segfault
tbl = ht.StringHashTable()
tbl.set_item("key", 1)
assert tbl.get_item("key") == 1
with pytest.raises(TypeError, match="'key' has incorrect type"):
# key arg typed as string, not object
tbl.set_item(4, 6)
with pytest.raises(TypeError, match="'val' has incorrect type"):
tbl.get_item(4)
def test_lookup_nan(self, writable):
# GH#21688 ensure we can deal with readonly memory views
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
xs.setflags(write=writable)
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
def test_add_signed_zeros(self):
# GH#21866 inconsistent hash-function for float64
# default hash-function would lead to different hash-buckets
# for 0.0 and -0.0 if there are more than 2^30 hash-buckets
# but this would mean 16GB
N = 4 # 12 * 10**8 would trigger the error, if you have enough memory
m = ht.Float64HashTable(N)
m.set_item(0.0, 0)
m.set_item(-0.0, 0)
assert len(m) == 1 # 0.0 and -0.0 are equivalent
def test_add_different_nans(self):
# GH#21866 inconsistent hash-function for float64
# create different nans from bit-patterns:
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# default hash function would lead to different hash-buckets
# for NAN1 and NAN2 even if there are only 4 buckets:
m = ht.Float64HashTable()
m.set_item(NAN1, 0)
m.set_item(NAN2, 0)
assert len(m) == 1 # NAN1 and NAN2 are equivalent
def test_lookup_overflow(self, writable):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
@pytest.mark.parametrize("nvals", [0, 10]) # resizing to 0 is special case
@pytest.mark.parametrize(
"htable, uniques, dtype, safely_resizes",
[
(ht.PyObjectHashTable, ht.ObjectVector, "object", False),
(ht.StringHashTable, ht.ObjectVector, "object", True),
(ht.Float64HashTable, ht.Float64Vector, "float64", False),
(ht.Int64HashTable, ht.Int64Vector, "int64", False),
(ht.Int32HashTable, ht.Int32Vector, "int32", False),
(ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
],
)
def test_vector_resize(
self, writable, htable, uniques, dtype, safely_resizes, nvals
):
# Test for memory errors after internal vector
# reallocations (GH 7157)
# Changed from using np.random.default_rng(2).rand to range
# which could cause flaky CI failures when safely_resizes=False
vals = np.array(range(1000), dtype=dtype)
# GH 21688 ensures we can deal with read-only memory views
vals.setflags(write=writable)
# initialise instances; cannot initialise in parametrization,
# as otherwise external views would be held on the array (which is
# one of the things this test is checking)
htable = htable()
uniques = uniques()
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() sets an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (except for StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError, match="external reference.*"):
htable.get_labels(vals, uniques, 0, -1)
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
@pytest.mark.parametrize(
"hashtable",
[
ht.PyObjectHashTable,
ht.StringHashTable,
ht.Float64HashTable,
ht.Int64HashTable,
ht.Int32HashTable,
ht.UInt64HashTable,
],
)
def test_hashtable_large_sizehint(self, hashtable):
# GH#22729 smoketest for not raising when passing a large size_hint
size_hint = np.iinfo(np.uint32).max + 1
hashtable(size_hint=size_hint)
| TestHashTableUnsorted |
python | ZoranPandovski__al-go-rithms | sort/python/external-sort.py | {
"start": 2842,
"end": 4338
} | class ____(object):
def __init__(self, block_size):
self.block_size = block_size
def sort(self, filename, sort_key=None):
num_blocks = self.get_number_blocks(filename, self.block_size)
splitter = FileSplitter(filename)
splitter.split(self.block_size, sort_key)
merger = FileMerger(NWayMerge())
buffer_size = self.block_size / (num_blocks + 1)
merger.merge(splitter.get_block_filenames(), filename + '.out', buffer_size)
splitter.cleanup()
def get_number_blocks(self, filename, block_size):
return (os.stat(filename).st_size / block_size) + 1
def parse_memory(string):
if string[-1].lower() == 'k':
return int(string[:-1]) * 1024
elif string[-1].lower() == 'm':
return int(string[:-1]) * 1024 * 1024
elif string[-1].lower() == 'g':
return int(string[:-1]) * 1024 * 1024 * 1024
else:
return int(string)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m',
'--mem',
help='amount of memory to use for sorting',
default='100M')
parser.add_argument('filename',
metavar='<filename>',
nargs=1,
help='name of file to sort')
args = parser.parse_args()
sorter = ExternalSort(parse_memory(args.mem))
sorter.sort(args.filename[0])
if __name__ == '__main__':
main()
| ExternalSort |
python | pytorch__pytorch | torch/csrc/lazy/test_mnist.py | {
"start": 329,
"end": 2721
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(log_interval, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad(set_to_none=True)
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
torch._lazy.mark_step()
if batch_idx % log_interval == 0:
print(
f"Train Epoch: {epoch} "
f"[{batch_idx * len(data)}/{len(train_loader.dataset)} ({100.0 * batch_idx / len(train_loader):.0f}%)]"
f"\tLoss: {loss.item():.6f}"
)
if __name__ == "__main__":
bsz = 64
device = "lazy"
epochs = 14
log_interval = 10
lr = 1
gamma = 0.7
train_kwargs = {"batch_size": bsz}
# if we want to use CUDA
if "LTC_TS_CUDA" in os.environ:
cuda_kwargs = {
"num_workers": 1,
"pin_memory": True,
"shuffle": True,
"batch_size": bsz,
}
train_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
dataset1 = datasets.MNIST("./data", train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=lr)
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
for epoch in range(1, epochs + 1):
train(log_interval, model, device, train_loader, optimizer, epoch)
scheduler.step()
| Net |
python | joke2k__faker | faker/providers/company/vi_VN/__init__.py | {
"start": 45,
"end": 733
} | class ____(CompanyProvider):
# Source: https://vi.wikipedia.org/wiki/Danh_s%C3%A1ch_c%C3%B4ng_ty_Vi%E1%BB%87t_Nam
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{last_name}} và {{last_name}} {{company_suffix}}",
"{{last_name}} và đối tác {{company_suffix}}",
)
company_suffixes = (
"Công ty TNHH",
"Công ty Cổ phần",
"Doanh nghiệp tư nhân",
"Công ty TNHH MTV",
"Công ty Hợp danh",
"Công ty Trách nhiệm hữu hạn",
"Tập Đoàn",
)
def company_suffix(self) -> str:
return self.random_element(self.company_suffixes)
| Provider |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/json_schema_helper.py | {
"start": 1895,
"end": 2212
} | class ____(Enum):
NULL = 0
BOOLEAN = 1
INTEGER = 2
NUMBER = 3
STRING = 4
OBJECT = 5
def __lt__(self, other: Any) -> bool:
if self.__class__ is other.__class__:
return self.value < other.value # type: ignore
else:
return NotImplemented
| ComparableType |
python | wandb__wandb | wandb/sdk/wandb_require.py | {
"start": 424,
"end": 2713
} | class ____:
"""Internal feature class."""
_features: tuple[str, ...]
def __init__(self, features: str | Iterable[str]) -> None:
self._features = (
tuple([features]) if isinstance(features, str) else tuple(features)
)
def require_require(self) -> None:
pass
def require_service(self) -> None:
# Legacy no-op kept solely for backward compatibility:
# some integrations (e.g. PyTorch Lightning) still call
# `wandb.require('service')`, which routes here.
wandb.termwarn(
"`wandb.require('service')` is a no-op as it is now the default behavior."
)
def require_core(self) -> None:
# Legacy no-op kept solely for backward compatibility:
# many public codebases still call `wandb.require('core')`.
wandb.termwarn(
"`wandb.require('core')` is a no-op as it is now the default behavior."
)
def apply(self) -> None:
"""Call require_* method for supported features."""
last_message: str = ""
for feature_item in self._features:
full_feature = feature_item.split("@", 2)[0]
feature = full_feature.split(":", 2)[0]
func_str = "require_{}".format(feature.replace("-", "_"))
func = getattr(self, func_str, None)
if not func:
last_message = f"require() unsupported requirement: {feature}"
wandb.termwarn(last_message)
continue
func()
if last_message:
raise UnsupportedError(last_message)
def require(
requirement: str | Iterable[str] | None = None,
experiment: str | Iterable[str] | None = None,
) -> None:
"""Indicate which experimental features are used by the script.
This should be called before any other `wandb` functions, ideally right
after importing `wandb`.
Args:
requirement: The name of a feature to require or an iterable of
feature names.
experiment: An alias for `requirement`.
Raises:
wandb.errors.UnsupportedError: If a feature name is unknown.
"""
features = requirement or experiment
if not features:
return
f = _Requires(features=features)
f.apply()
| _Requires |
python | kamyu104__LeetCode-Solutions | Python/maximum-length-of-pair-chain.py | {
"start": 33,
"end": 394
} | class ____(object):
def findLongestChain(self, pairs):
"""
:type pairs: List[List[int]]
:rtype: int
"""
pairs.sort(key=lambda x: x[1])
cnt, i = 0, 0
for j in xrange(len(pairs)):
if j == 0 or pairs[i][1] < pairs[j][0]:
cnt += 1
i = j
return cnt
| Solution |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/combine_documents/stuff.py | {
"start": 4342,
"end": 11579
} | class ____(BaseCombineDocumentsChain):
"""Chain that combines documents by stuffing into context.
This chain takes a list of documents and first combines them into a single string.
It does this by formatting each document into a string with the `document_prompt`
and then joining them together with `document_separator`. It then adds that new
string to the inputs with the variable name set by `document_variable_name`.
Those inputs are then passed to the `llm_chain`.
Example:
```python
from langchain_classic.chains import StuffDocumentsChain, LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}"
)
document_variable_name = "context"
model = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template("Summarize this content: {context}")
llm_chain = LLMChain(llm=model, prompt=prompt)
chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name,
)
```
"""
llm_chain: LLMChain
"""LLM chain which is called with the formatted document string,
along with any other inputs."""
document_prompt: BasePromptTemplate = Field(
default_factory=lambda: DEFAULT_DOCUMENT_PROMPT,
)
"""Prompt to use to format each document, gets passed to `format_document`."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
document_separator: str = "\n\n"
"""The string with which to join the formatted documents"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def get_default_document_variable_name(cls, values: dict) -> Any:
"""Get default document variable name, if not provided.
If only one variable is present in the llm_chain.prompt,
we can infer that the formatted documents should be passed in
with this variable name.
"""
llm_chain_variables = values["llm_chain"].prompt.input_variables
if "document_variable_name" not in values:
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
msg = (
"document_variable_name must be provided if there are "
"multiple llm_chain_variables"
)
raise ValueError(msg)
elif values["document_variable_name"] not in llm_chain_variables:
msg = (
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
raise ValueError(msg)
return values
@property
@override
def input_keys(self) -> list[str]:
extra_keys = [
k for k in self.llm_chain.input_keys if k != self.document_variable_name
]
return super().input_keys + extra_keys
def _get_inputs(self, docs: list[Document], **kwargs: Any) -> dict:
"""Construct inputs from kwargs and docs.
Format and then join all the documents together into one input with name
`self.document_variable_name`. Also pluck any additional variables
from **kwargs.
Args:
docs: List of documents to format and then join into single input
**kwargs: additional inputs to chain, will pluck any other required
arguments from here.
Returns:
dictionary of inputs to LLMChain
"""
# Format each document according to the prompt
doc_strings = [format_document(doc, self.document_prompt) for doc in docs]
# Join the documents together to put them in the prompt.
inputs = {
k: v
for k, v in kwargs.items()
if k in self.llm_chain.prompt.input_variables
}
inputs[self.document_variable_name] = self.document_separator.join(doc_strings)
return inputs
def prompt_length(self, docs: list[Document], **kwargs: Any) -> int | None:
"""Return the prompt length given the documents passed in.
This can be used by a caller to determine whether passing in a list
of documents would exceed a certain prompt length. This useful when
trying to ensure that the size of a prompt remains below a certain
context limit.
Args:
docs: a list of documents to use to calculate the total prompt length.
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
Returns None if the method does not depend on the prompt length,
otherwise the length of the prompt in tokens.
"""
inputs = self._get_inputs(docs, **kwargs)
prompt = self.llm_chain.prompt.format(**inputs)
return self.llm_chain._get_num_tokens(prompt) # noqa: SLF001
def combine_docs(
self,
docs: list[Document],
callbacks: Callbacks = None,
**kwargs: Any,
) -> tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM.
Args:
docs: List of documents to join together into one variable
callbacks: Optional callbacks to pass along
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
async def acombine_docs(
self,
docs: list[Document],
callbacks: Callbacks = None,
**kwargs: Any,
) -> tuple[str, dict]:
"""Async stuff all documents into one prompt and pass to LLM.
Args:
docs: List of documents to join together into one variable
callbacks: Optional callbacks to pass along
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return await self.llm_chain.apredict(callbacks=callbacks, **inputs), {}
@property
def _chain_type(self) -> str:
return "stuff_documents_chain"
| StuffDocumentsChain |
python | django__django | tests/model_fields/test_slugfield.py | {
"start": 79,
"end": 636
} | class ____(TestCase):
def test_slugfield_max_length(self):
"""
SlugField honors max_length.
"""
bs = BigS.objects.create(s="slug" * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, "slug" * 50)
def test_slugfield_unicode_max_length(self):
"""
SlugField with allow_unicode=True honors max_length.
"""
bs = UnicodeSlugField.objects.create(s="你好你好" * 50)
bs = UnicodeSlugField.objects.get(pk=bs.pk)
self.assertEqual(bs.s, "你好你好" * 50)
| SlugFieldTests |
python | getsentry__sentry | tests/sentry/grouping/seer_similarity/test_seer_eligibility.py | {
"start": 12344,
"end": 14705
} | class ____(TestCase):
def get_eligible_event_data(self) -> dict[str, Any]:
return {
"title": "FailedToFetchError('Charlie didn't bring the ball back')",
"exception": {
"values": [
{
"type": "FailedToFetchError",
"value": "Charlie didn't bring the ball back",
"stacktrace": {
"frames": [
{
"function": "play_fetch",
"filename": "dogpark.py",
"context_line": "raise FailedToFetchError('Charlie didn't bring the ball back')",
}
]
},
}
]
},
"platform": "python",
}
def test_no_stacktrace(self) -> None:
good_event_data = self.get_eligible_event_data()
good_event = Event(
project_id=self.project.id,
event_id=uuid1().hex,
data=good_event_data,
)
bad_event_data = self.get_eligible_event_data()
del bad_event_data["exception"]
bad_event = Event(
project_id=self.project.id,
event_id=uuid1().hex,
data=bad_event_data,
)
assert _event_content_is_seer_eligible(good_event) is True
assert _event_content_is_seer_eligible(bad_event) is False
def test_platform_filter(self) -> None:
good_event_data = self.get_eligible_event_data()
good_event = Event(
project_id=self.project.id,
event_id=uuid1().hex,
data=good_event_data,
)
bad_event_data = self.get_eligible_event_data()
bad_event_data["platform"] = "other"
bad_event = Event(
project_id=self.project.id,
event_id=uuid1().hex,
data=bad_event_data,
)
assert good_event_data["platform"] not in SEER_INELIGIBLE_EVENT_PLATFORMS
assert bad_event_data["platform"] in SEER_INELIGIBLE_EVENT_PLATFORMS
assert _event_content_is_seer_eligible(good_event) is True
assert _event_content_is_seer_eligible(bad_event) is False
| EventContentIsSeerEligibleTest |
python | tensorflow__tensorflow | tensorflow/python/autograph/utils/tensor_list.py | {
"start": 1563,
"end": 2336
} | class ____(object):
"""Tensor list wrapper API-compatible with Python built-in list."""
def __init__(self, shape, dtype):
self.dtype = dtype
self.shape = shape
self.clear()
def append(self, value):
self.list_ = list_ops.tensor_list_push_back(self.list_, value)
def pop(self):
self.list_, value = list_ops.tensor_list_pop_back(self.list_, self.dtype)
return value
def clear(self):
self.list_ = list_ops.empty_tensor_list(self.shape, self.dtype)
def count(self):
return list_ops.tensor_list_length(self.list_)
def __getitem__(self, key):
return list_ops.tensor_list_get_item(self.list_, key, self.dtype)
def __setitem__(self, key, value):
self.list_ = list_ops.tensor_list_set_item(self.list_, key, value)
| TensorList |
python | python__mypy | test-data/unit/plugins/badreturn2.py | {
"start": 37,
"end": 128
} | class ____:
pass
def plugin(version: str) -> type[MyPlugin]:
return MyPlugin
| MyPlugin |
python | jupyterlab__jupyterlab | packages/services/examples/browser-require/main.py | {
"start": 516,
"end": 1375
} | class ____(ExtensionHandlerJinjaMixin, ExtensionHandlerMixin, JupyterHandler):
"""Handle requests between the main app page and notebook server."""
def get(self):
"""Get the main page for the application's interface."""
config_data = {
# Use camelCase here, since that's what the lab components expect
"baseUrl": self.base_url,
"token": self.settings["token"],
"fullStaticUrl": ujoin(self.base_url, "static", self.name),
"frontendUrl": ujoin(self.base_url, "example/"),
}
return self.write(
self.render_template(
"index.html",
static=self.static_url,
base_url=self.base_url,
token=self.settings["token"],
page_config=config_data,
)
)
| ExampleHandler |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 7620,
"end": 8290
} | class ____(FunctionPass):
_name = "generic_rewrites"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
"""
Perform any intermediate representation rewrites before type
inference.
"""
assert state.func_ir
msg = ('Internal error in pre-inference rewriting '
'pass encountered during compilation of '
'function "%s"' % (state.func_id.func_name,))
with fallback_context(state, msg):
rewrites.rewrite_registry.apply('before-inference', state)
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| GenericRewrites |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-ollama/tests/test_llms_ollama.py | {
"start": 886,
"end": 13656
} | class ____(BaseModel):
"""A song with name and artist."""
artist_name: str = Field(description="The name of the artist")
song_name: str = Field(description="The name of the song")
def generate_song(
artist_name: Annotated[str, "The name of the artist"],
song_name: Annotated[str, "The name of the song"],
) -> Song:
"""Generates a song with provided name and artist."""
return Song(artist_name=artist_name, song_name=song_name)
tool = FunctionTool.from_defaults(fn=generate_song)
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Ollama.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_ollama_chat() -> None:
llm = Ollama(model=test_model)
response = llm.chat([ChatMessage(role="user", content="Hello!")])
assert response is not None
assert str(response).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_ollama_complete() -> None:
llm = Ollama(model=test_model)
response = llm.complete("Hello!")
assert response is not None
assert str(response).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_ollama_stream_chat() -> None:
llm = Ollama(model=test_model)
response = llm.stream_chat([ChatMessage(role="user", content="Hello!")])
for r in response:
assert r is not None
assert r.delta is not None
assert str(r).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_ollama_stream_complete() -> None:
llm = Ollama(model=test_model)
response = llm.stream_complete("Hello!")
for r in response:
assert r is not None
assert r.delta is not None
assert str(r).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_ollama_async_chat() -> None:
llm = Ollama(model=test_model)
response = await llm.achat([ChatMessage(role="user", content="Hello!")])
assert response is not None
assert str(response).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_ollama_async_complete() -> None:
llm = Ollama(model=test_model)
response = await llm.acomplete("Hello!")
assert response is not None
assert str(response).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_ollama_async_stream_chat() -> None:
llm = Ollama(model=test_model)
response = await llm.astream_chat([ChatMessage(role="user", content="Hello!")])
async for r in response:
assert r is not None
assert r.delta is not None
assert str(r).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_ollama_async_stream_complete() -> None:
llm = Ollama(model=test_model)
response = await llm.astream_complete("Hello!")
async for r in response:
assert r is not None
assert r.delta is not None
assert str(r).strip() != ""
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_chat_with_tools() -> None:
llm = Ollama(model=test_model, context_window=8000)
response = llm.chat_with_tools(
[tool], user_msg="Hello! Generate a random artist and song."
)
tool_calls = llm.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == tool.metadata.name
tool_result = tool(**tool_calls[0].tool_kwargs)
assert tool_result.raw_output is not None
assert isinstance(tool_result.raw_output, Song)
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_stream_chat_with_tools() -> None:
"""Makes sure that stream chat with tools returns tool call message without any errors"""
llm = Ollama(model=test_model, context_window=8000)
response = llm.stream_chat_with_tools(
[tool], user_msg="Hello! Generate a random artist and song."
)
for r in response:
tool_calls = llm.get_tool_calls_from_response(r)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == tool.metadata.name
tool_result = tool(**tool_calls[0].tool_kwargs)
assert tool_result.raw_output is not None
assert isinstance(tool_result.raw_output, Song)
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_async_chat_with_tools() -> None:
llm = Ollama(model=test_model, context_window=8000)
response = await llm.achat_with_tools(
[tool], user_msg="Hello! Generate a random artist and song."
)
tool_calls = llm.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == tool.metadata.name
tool_result = tool(**tool_calls[0].tool_kwargs)
assert tool_result.raw_output is not None
assert isinstance(tool_result.raw_output, Song)
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_chat_with_think() -> None:
llm = Ollama(model=thinking_test_model, thinking=True, request_timeout=360)
response = llm.chat(
[ChatMessage(role="user", content="Hello! What is 32 * 4?")], think=False
)
assert response is not None
assert str(response).strip() != ""
assert (
len(
[
block
for block in response.message.blocks
if isinstance(block, ThinkingBlock)
]
)
> 0
)
assert (
"".join(
[
block.content or ""
for block in response.message.blocks
if isinstance(block, ThinkingBlock)
]
)
!= ""
)
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_chat_with_thinking_input() -> None:
llm = Ollama(model=thinking_test_model, thinking=True, request_timeout=360)
response = llm.chat(
[
ChatMessage(role="user", content="Hello! What is 32 * 4?"),
ChatMessage(
role="assistant",
blocks=[
ThinkingBlock(
content="The user is asking me to multiply two numbers, so I should reply concisely"
),
TextBlock(text="128"),
],
),
ChatMessage(
role="user",
content="Based on your previous reasoning, can you now tell me the result of 50*200?",
),
],
think=False,
)
assert response is not None
assert str(response).strip() != ""
assert (
len(
[
block
for block in response.message.blocks
if isinstance(block, ThinkingBlock)
]
)
> 0
)
assert (
"".join(
[
block.content or ""
for block in response.message.blocks
if isinstance(block, ThinkingBlock)
]
)
!= ""
)
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_async_chat_with_think() -> None:
llm = Ollama(model=thinking_test_model, thinking=True)
response = await llm.achat(
[ChatMessage(role="user", content="Hello! What is 32 * 4?")], think=False
)
assert response is not None
assert str(response).strip() != ""
assert (
len(
[
block
for block in response.message.blocks
if isinstance(block, ThinkingBlock)
]
)
> 0
)
assert (
"".join(
[
block.content or ""
for block in response.message.blocks
if isinstance(block, ThinkingBlock)
]
)
!= ""
)
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
def test_chat_with_tools_returns_empty_array_if_no_tools_were_called() -> None:
"""Make sure get_tool_calls_from_response can gracefully handle no tools in response"""
llm = Ollama(model=test_model, context_window=1000)
response = llm.chat(
tools=[],
messages=[
ChatMessage(
role="system",
content="You are a useful tool calling agent.",
),
ChatMessage(role="user", content="Hello, how are you?"),
],
)
assert response.message.additional_kwargs.get("tool_calls", []) == []
tool_calls = llm.get_tool_calls_from_response(response, error_on_no_tool_call=False)
assert len(tool_calls) == 0
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_async_chat_with_tools_returns_empty_array_if_no_tools_were_called() -> (
None
):
"""
Test that achat returns [] for no tool calls since subsequent processes expect []
instead of None
"""
llm = Ollama(model=test_model, context_window=1000)
response = await llm.achat(
tools=[],
messages=[
ChatMessage(
role="system",
content="You are a useful tool calling agent.",
),
ChatMessage(role="user", content="Hello, how are you?"),
],
)
assert (
len(
[
block
for block in response.message.blocks
if isinstance(block, ToolCallBlock)
]
)
== 0
)
@pytest.mark.skipif(
client is None, reason="Ollama client is not available or test model is missing"
)
@pytest.mark.asyncio
async def test_chat_methods_with_tool_input() -> None:
llm = Ollama(model=thinking_test_model)
input_messages = [
ChatMessage(
role="user",
content="Hello, can you tell me what is the weather today in London?",
),
ChatMessage(
role="assistant",
blocks=[
ThinkingBlock(
content="The user is asking for the weather in London, so I should use the get_weather tool"
),
ToolCallBlock(
tool_name="get_weather_tool", tool_kwargs={"location": "London"}
),
TextBlock(
text="The weather in London is rainy with a temperature of 15°C."
),
],
),
ChatMessage(
role="user",
content="Can you tell me what input did you give to the 'get_weather' tool? (do not call any other tool)",
),
]
response = llm.chat(messages=input_messages)
assert response.message.content is not None
assert (
len(
[
block
for block in response.message.blocks
if isinstance(block, ToolCallBlock)
]
)
== 0
)
aresponse = await llm.achat(messages=input_messages)
assert aresponse.message.content is not None
assert (
len(
[
block
for block in aresponse.message.blocks
if isinstance(block, ToolCallBlock)
]
)
== 0
)
response_stream = llm.stream_chat(messages=input_messages)
blocks = []
for r in response_stream:
blocks.extend(r.message.blocks)
assert len([block for block in blocks if isinstance(block, TextBlock)]) > 0
assert len([block for block in blocks if isinstance(block, ToolCallBlock)]) == 0
aresponse_stream = await llm.astream_chat(messages=input_messages)
ablocks = []
async for r in aresponse_stream:
ablocks.extend(r.message.blocks)
assert len([block for block in ablocks if isinstance(block, TextBlock)]) > 0
assert len([block for block in ablocks if isinstance(block, ToolCallBlock)]) == 0
| Song |
python | apache__avro | lang/py/avro/test/test_protocol.py | {
"start": 1374,
"end": 1515
} | class ____(TestProtocol):
"""A proxy for a valid protocol string that provides useful test metadata."""
valid = True
| ValidTestProtocol |
python | pypa__setuptools | setuptools/_distutils/command/build_py.py | {
"start": 318,
"end": 16696
} | class ____(Command):
description = "\"build\" pure Python modules (copy to build directory)"
user_options = [
('build-lib=', 'd', "directory to \"build\" (copy) to"),
('compile', 'c', "compile .py to .pyc"),
('no-compile', None, "don't compile .py files [default]"),
(
'optimize=',
'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]",
),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
boolean_options: ClassVar[list[str]] = ['compile', 'force']
negative_opt: ClassVar[dict[str, str]] = {'no-compile': 'compile'}
def initialize_options(self):
self.build_lib = None
self.py_modules = None
self.package = None
self.package_data = None
self.package_dir = None
self.compile = False
self.optimize = 0
self.force = None
def finalize_options(self) -> None:
self.set_undefined_options(
'build', ('build_lib', 'build_lib'), ('force', 'force')
)
# Get the distribution options that are aliases for build_py
# options -- list of packages and list of modules.
self.packages = self.distribution.packages
self.py_modules = self.distribution.py_modules
self.package_data = self.distribution.package_data
self.package_dir = {}
if self.distribution.package_dir:
for name, path in self.distribution.package_dir.items():
self.package_dir[name] = convert_path(path)
self.data_files = self.get_data_files()
# Ick, copied straight from install_lib.py (fancy_getopt needs a
# type system! Hell, *everything* needs a type system!!!)
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self) -> None:
# XXX copy_file by default preserves atime and mtime. IMHO this is
# the right thing to do, but perhaps it should be an option -- in
# particular, a site administrator might want installed files to
# reflect the time of installation rather than the last
# modification time before the installed release.
# XXX copy_file by default preserves mode, which appears to be the
# wrong thing to do: if a file is read-only in the working
# directory, we want it to be installed read/write so that the next
# installation of the same module distribution can overwrite it
# without problems. (This might be a Unix-specific issue.) Thus
# we turn off 'preserve_mode' when copying to the build directory,
# since the build directory is supposed to be exactly what the
# installation will look like (ie. we preserve mode when
# installing).
# Two options control which modules will be installed: 'packages'
# and 'py_modules'. The former lets us work with whole packages, not
# specifying individual modules at all; the latter is for
# specifying modules one-at-a-time.
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.byte_compile(self.get_outputs(include_bytecode=False))
def get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
data = []
if not self.packages:
return data
for package in self.packages:
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = 0
if src_dir:
plen = len(src_dir) + 1
# Strip directory from globbed filenames
filenames = [file[plen:] for file in self.find_data_files(package, src_dir)]
data.append((package, src_dir, build_dir, filenames))
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = self.package_data.get('', []) + self.package_data.get(package, [])
files = []
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
filelist = glob.glob(
os.path.join(glob.escape(src_dir), convert_path(pattern))
)
# Files that match more than one pattern are only added once
files.extend([
fn for fn in filelist if fn not in files and os.path.isfile(fn)
])
return files
def build_package_data(self) -> None:
"""Copy data files into build directory"""
for _package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(
os.path.join(src_dir, filename), target, preserve_mode=False
)
def get_package_dir(self, package):
"""Return the directory, relative to the top of the source
distribution, where package 'package' should be found
(at least according to the 'package_dir' option, if any)."""
path = package.split('.')
if not self.package_dir:
if path:
return os.path.join(*path)
else:
return ''
else:
tail = []
while path:
try:
pdir = self.package_dir['.'.join(path)]
except KeyError:
tail.insert(0, path[-1])
del path[-1]
else:
tail.insert(0, pdir)
return os.path.join(*tail)
else:
# Oops, got all the way through 'path' without finding a
# match in package_dir. If package_dir defines a directory
# for the root (nameless) package, then fallback on it;
# otherwise, we might as well have not consulted
# package_dir at all, as we just use the directory implied
# by 'tail' (which should be the same as the original value
# of 'path' at this point).
pdir = self.package_dir.get('')
if pdir is not None:
tail.insert(0, pdir)
if tail:
return os.path.join(*tail)
else:
return ''
def check_package(self, package, package_dir):
# Empty dir name means current directory, which we can probably
# assume exists. Also, os.path.exists and isdir don't know about
# my "empty string means current dir" convention, so we have to
# circumvent them.
if package_dir != "":
if not os.path.exists(package_dir):
raise DistutilsFileError(
f"package directory '{package_dir}' does not exist"
)
if not os.path.isdir(package_dir):
raise DistutilsFileError(
f"supposed package directory '{package_dir}' exists, "
"but is not a directory"
)
# Directories without __init__.py are namespace packages (PEP 420).
if package:
init_py = os.path.join(package_dir, "__init__.py")
if os.path.isfile(init_py):
return init_py
# Either not in a package at all (__init__.py not expected), or
# __init__.py doesn't exist -- so don't return the filename.
return None
def check_module(self, module, module_file):
if not os.path.isfile(module_file):
log.warning("file %s (for module %s) not found", module_file, module)
return False
else:
return True
def find_package_modules(self, package, package_dir):
self.check_package(package, package_dir)
module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py"))
modules = []
setup_script = os.path.abspath(self.distribution.script_name)
for f in module_files:
abs_f = os.path.abspath(f)
if abs_f != setup_script:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
else:
self.debug_print(f"excluding {setup_script}")
return modules
def find_modules(self):
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages = {}
# List of (package, module, filename) tuples to return
modules = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = module.split('.')
package = '.'.join(path[0:-1])
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package)
checked = False
if not checked:
init_py = self.check_package(package, package_dir)
packages[package] = (package_dir, 1)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
if not self.check_module(module, module_file):
continue
modules.append((package, module_base, module_file))
return modules
def find_all_modules(self):
"""Compute the list of all modules that will be built, whether
they are specified one-module-at-a-time ('self.py_modules') or
by whole packages ('self.packages'). Return a list of tuples
(package, module, module_file), just like 'find_modules()' and
'find_package_modules()' do."""
modules = []
if self.py_modules:
modules.extend(self.find_modules())
if self.packages:
for package in self.packages:
package_dir = self.get_package_dir(package)
m = self.find_package_modules(package, package_dir)
modules.extend(m)
return modules
def get_source_files(self):
return [module[-1] for module in self.find_all_modules()]
def get_module_outfile(self, build_dir, package, module):
outfile_path = [build_dir] + list(package) + [module + ".py"]
return os.path.join(*outfile_path)
def get_outputs(self, include_bytecode: bool = True) -> list[str]:
modules = self.find_all_modules()
outputs = []
for package, module, _module_file in modules:
package = package.split('.')
filename = self.get_module_outfile(self.build_lib, package, module)
outputs.append(filename)
if include_bytecode:
if self.compile:
outputs.append(
importlib.util.cache_from_source(filename, optimization='')
)
if self.optimize > 0:
outputs.append(
importlib.util.cache_from_source(
filename, optimization=self.optimize
)
)
outputs += [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
return outputs
def build_module(self, module, module_file, package):
if isinstance(package, str):
package = package.split('.')
elif not isinstance(package, (list, tuple)):
raise TypeError(
"'package' must be a string (dot-separated), list, or tuple"
)
# Now put the module source file into the "build" area -- this is
# easy, we just copy it somewhere under self.build_lib (the build
# directory for Python source).
outfile = self.get_module_outfile(self.build_lib, package, module)
dir = os.path.dirname(outfile)
self.mkpath(dir)
return self.copy_file(module_file, outfile, preserve_mode=False)
def build_modules(self) -> None:
modules = self.find_modules()
for package, module, module_file in modules:
# Now "build" the module -- ie. copy the source file to
# self.build_lib (the build directory for Python source).
# (Actually, it gets copied to the directory for this package
# under self.build_lib.)
self.build_module(module, module_file, package)
def build_packages(self) -> None:
for package in self.packages:
# Get list of (package, module, module_file) tuples based on
# scanning the package directory. 'package' is only included
# in the tuple so that 'find_modules()' and
# 'find_package_tuples()' have a consistent interface; it's
# ignored here (apart from a sanity check). Also, 'module' is
# the *unqualified* module name (ie. no dots, no package -- we
# already know its package!), and 'module_file' is the path to
# the .py file, relative to the current directory
# (ie. including 'package_dir').
package_dir = self.get_package_dir(package)
modules = self.find_package_modules(package, package_dir)
# Now loop over the modules we found, "building" each one (just
# copy it to self.build_lib).
for package_, module, module_file in modules:
assert package == package_
self.build_module(module, module_file, package)
def byte_compile(self, files) -> None:
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from ..util import byte_compile
prefix = self.build_lib
if prefix[-1] != os.sep:
prefix = prefix + os.sep
# XXX this code is essentially the same as the 'byte_compile()
# method of the "install_lib" command, except for the determination
# of the 'prefix' string. Hmmm.
if self.compile:
byte_compile(
files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run
)
if self.optimize > 0:
byte_compile(
files,
optimize=self.optimize,
force=self.force,
prefix=prefix,
dry_run=self.dry_run,
)
| build_py |
python | django__django | django/contrib/gis/gdal/geometries.py | {
"start": 26802,
"end": 28848
} | class ____(GeometryCollection):
geos_support = False
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
GEO_CLASSES = {
1: Point,
2: LineString,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
8: CircularString,
9: CompoundCurve,
10: CurvePolygon,
11: MultiCurve,
12: MultiSurface,
101: LinearRing,
1008: CircularString, # CIRCULARSTRING Z
1009: CompoundCurve, # COMPOUNDCURVE Z
1010: CurvePolygon, # CURVEPOLYGON Z
1011: MultiCurve, # MULTICURVE Z
1012: MultiSurface, # MULTICURVE Z
2001: Point, # POINT M
2002: LineString, # LINESTRING M
2003: Polygon, # POLYGON M
2004: MultiPoint, # MULTIPOINT M
2005: MultiLineString, # MULTILINESTRING M
2006: MultiPolygon, # MULTIPOLYGON M
2007: GeometryCollection, # GEOMETRYCOLLECTION M
2008: CircularString, # CIRCULARSTRING M
2009: CompoundCurve, # COMPOUNDCURVE M
2010: CurvePolygon, # CURVEPOLYGON M
2011: MultiCurve, # MULTICURVE M
2012: MultiSurface, # MULTICURVE M
3001: Point, # POINT ZM
3002: LineString, # LINESTRING ZM
3003: Polygon, # POLYGON ZM
3004: MultiPoint, # MULTIPOINT ZM
3005: MultiLineString, # MULTILINESTRING ZM
3006: MultiPolygon, # MULTIPOLYGON ZM
3007: GeometryCollection, # GEOMETRYCOLLECTION ZM
3008: CircularString, # CIRCULARSTRING ZM
3009: CompoundCurve, # COMPOUNDCURVE ZM
3010: CurvePolygon, # CURVEPOLYGON ZM
3011: MultiCurve, # MULTICURVE ZM
3012: MultiSurface, # MULTISURFACE ZM
1 + OGRGeomType.wkb25bit: Point, # POINT Z
2 + OGRGeomType.wkb25bit: LineString, # LINESTRING Z
3 + OGRGeomType.wkb25bit: Polygon, # POLYGON Z
4 + OGRGeomType.wkb25bit: MultiPoint, # MULTIPOINT Z
5 + OGRGeomType.wkb25bit: MultiLineString, # MULTILINESTRING Z
6 + OGRGeomType.wkb25bit: MultiPolygon, # MULTIPOLYGON Z
7 + OGRGeomType.wkb25bit: GeometryCollection, # GEOMETRYCOLLECTION Z
}
| MultiCurve |
python | langchain-ai__langchain | libs/partners/openai/tests/integration_tests/chat_models/test_base.py | {
"start": 37904,
"end": 40584
} | class ____(BaseModel):
response: str
def test_stream_response_format() -> None:
full: BaseMessageChunk | None = None
chunks = []
for chunk in ChatOpenAI(model="gpt-5-nano").stream(
"how are ya", response_format=Foo
):
chunks.append(chunk)
full = chunk if full is None else full + chunk
assert len(chunks) > 1
assert isinstance(full, AIMessageChunk)
parsed = full.additional_kwargs["parsed"]
assert isinstance(parsed, Foo)
assert isinstance(full.content, str)
parsed_content = json.loads(full.content)
assert parsed.response == parsed_content["response"]
async def test_astream_response_format() -> None:
full: BaseMessageChunk | None = None
chunks = []
async for chunk in ChatOpenAI(model="gpt-5-nano").astream(
"how are ya", response_format=Foo
):
chunks.append(chunk)
full = chunk if full is None else full + chunk
assert len(chunks) > 1
assert isinstance(full, AIMessageChunk)
parsed = full.additional_kwargs["parsed"]
assert isinstance(parsed, Foo)
assert isinstance(full.content, str)
parsed_content = json.loads(full.content)
assert parsed.response == parsed_content["response"]
@pytest.mark.parametrize("use_responses_api", [False, True])
@pytest.mark.parametrize("use_max_completion_tokens", [True, False])
def test_o1(use_max_completion_tokens: bool, use_responses_api: bool) -> None:
# o1 models need higher token limits for reasoning
o1_token_limit = 1000
if use_max_completion_tokens:
kwargs: dict = {"max_completion_tokens": o1_token_limit}
else:
kwargs = {"max_tokens": o1_token_limit}
response = ChatOpenAI(
model="o1",
reasoning_effort="low",
use_responses_api=use_responses_api,
**kwargs,
).invoke(
[
{"role": "developer", "content": "respond in all caps"},
{"role": "user", "content": "HOW ARE YOU"},
]
)
assert isinstance(response, AIMessage)
assert isinstance(response.text, str)
assert response.text.upper() == response.text
@pytest.mark.scheduled
def test_o1_stream_default_works() -> None:
result = list(ChatOpenAI(model="o1").stream("say 'hi'"))
assert len(result) > 0
@pytest.mark.flaky(retries=3, delay=1)
def test_multi_party_conversation() -> None:
llm = ChatOpenAI(model="gpt-5-nano")
messages = [
HumanMessage("Hi, I have black hair.", name="Alice"),
HumanMessage("Hi, I have brown hair.", name="Bob"),
HumanMessage("Who just spoke?", name="Charlie"),
]
response = llm.invoke(messages)
assert "Bob" in response.content
| Foo |
python | huggingface__transformers | src/transformers/models/nystromformer/modeling_nystromformer.py | {
"start": 36277,
"end": 39718
} | class ____(NystromformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.nystromformer = NystromformerModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.nystromformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"NystromformerForMaskedLM",
"NystromformerForMultipleChoice",
"NystromformerForQuestionAnswering",
"NystromformerForSequenceClassification",
"NystromformerForTokenClassification",
"NystromformerLayer",
"NystromformerModel",
"NystromformerPreTrainedModel",
]
| NystromformerForQuestionAnswering |
python | ray-project__ray | python/ray/data/tests/unit/test_datatype.py | {
"start": 26672,
"end": 29988
} | class ____:
"""Test that pattern-matching DataTypes can be used as arguments to factory methods."""
@pytest.mark.parametrize(
"factory_call,expected_logical_dtype",
[
# list with pattern-matching element type
(lambda: DataType.list(DataType.list()), "list"),
(lambda: DataType.list(DataType.struct()), "list"),
(lambda: DataType.list(DataType.map()), "list"),
# large_list with pattern-matching element type
(lambda: DataType.large_list(DataType.list()), "large_list"),
(lambda: DataType.large_list(DataType.tensor()), "large_list"),
# struct with pattern-matching field types
(
lambda: DataType.struct(
[("a", DataType.list()), ("b", DataType.int64())]
),
"struct",
),
(
lambda: DataType.struct(
[("x", DataType.tensor()), ("y", DataType.map())]
),
"struct",
),
# map with pattern-matching key/value types
(lambda: DataType.map(DataType.list(), DataType.int64()), "map"),
(lambda: DataType.map(DataType.string(), DataType.struct()), "map"),
(lambda: DataType.map(DataType.list(), DataType.map()), "map"),
# tensor with pattern-matching dtype
(lambda: DataType.tensor((3, 4), DataType.list()), "tensor"),
(lambda: DataType.tensor((2, 2), DataType.struct()), "tensor"),
# variable_shaped_tensor with pattern-matching dtype
(
lambda: DataType.variable_shaped_tensor(DataType.list(), ndim=2),
"tensor",
),
(lambda: DataType.variable_shaped_tensor(DataType.map(), ndim=3), "tensor"),
],
)
def test_nested_pattern_matching_types(self, factory_call, expected_logical_dtype):
"""Test that pattern-matching DataTypes work as arguments to factory methods.
When a pattern-matching DataType (one with _physical_dtype=None) is passed
as an argument to another factory method, it should result in a pattern-matching
type, not try to call to_arrow_dtype() on it.
"""
from ray.data.datatype import _LogicalDataType
dt = factory_call()
# Should create a pattern-matching type, not a concrete type
assert dt._physical_dtype is None
assert dt._logical_dtype == _LogicalDataType(expected_logical_dtype)
def test_list_with_nested_pattern(self):
"""Test DataType.list(DataType.list()) returns pattern-matching LIST."""
from ray.data.datatype import _LogicalDataType
dt = DataType.list(DataType.list())
assert dt._physical_dtype is None
assert dt._logical_dtype == _LogicalDataType.LIST
assert repr(dt) == "DataType(logical_dtype:LIST)"
def test_struct_with_pattern_fields(self):
"""Test DataType.struct with pattern-matching field types."""
from ray.data.datatype import _LogicalDataType
dt = DataType.struct([("a", DataType.list()), ("b", DataType.tensor())])
assert dt._physical_dtype is None
assert dt._logical_dtype == _LogicalDataType.STRUCT
| TestNestedPatternMatching |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_sort_values.py | {
"start": 22211,
"end": 29246
} | class ____: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x)
expected = df.iloc[[3, 1, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 1, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key_by_name(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
def key(col):
if col.name == "a":
return -col
else:
return col
result = df.sort_values(by="a", key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by="b", key=key)
expected = df.iloc[[0, 1, 4, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_string(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
result = df.sort_values(1)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values([0, 1], key=lambda col: col.str.lower())
tm.assert_frame_equal(result, df)
result = df.sort_values(
[0, 1], key=lambda col: col.str.lower(), ascending=False
)
expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
tm.assert_frame_equal(result, expected)
def test_sort_values_key_empty(self, sort_by_key):
df = DataFrame(np.array([]))
df.sort_values(0, key=sort_by_key)
df.sort_index(key=sort_by_key)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_values("A", key=lambda x: x[:1])
def test_sort_values_key_axes(self):
df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]})
result = df.sort_values(0, key=lambda col: col.str.lower())
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_dict_axis(self):
df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]})
result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col, axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_sort_values_key_casts_to_categorical(self, ordered):
# https://github.com/pandas-dev/pandas/issues/36383
categories = ["c", "b", "a"]
df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
def sorter(key):
if key.name == "y":
return pd.Series(
Categorical(key, categories=categories, ordered=ordered)
)
return key
result = df.sort_values(by=["x", "y"], key=sorter)
expected = DataFrame(
{"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_none():
return DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 2, 2, 1, 1],
"A": np.arange(6, 0, -1),
("B", 5): ["one", "one", "two", "two", "one", "one"],
}
)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
levels = request.param
return df_none.set_index(levels)
@pytest.fixture(
params=[
"inner", # index level
["outer"], # list of index level
"A", # column
[("B", 5)], # list of column
["inner", "outer"], # two index levels
[("B", 5), "outer"], # index level and column
["A", ("B", 5)], # Two columns
["inner", "outer"], # two index levels and column
]
)
def sort_names(request):
return request.param
| TestDataFrameSortKey |
python | squidfunk__mkdocs-material | includes/debug/cairo-lookup-linux.py | {
"start": 83,
"end": 3491
} | class ____(subprocess.Popen):
def __init__(self, *args, **kwargs):
print(f"Subprocess command:\n {' '.join(args[0])}")
super().__init__(*args, **kwargs)
def communicate(self, *args, **kwargs):
out, _ = super().communicate(*args, **kwargs)
out = out.rstrip()
print("Subprocess output:")
if out:
print(f" {os.fsdecode(out)}")
else:
print(f" Output is empty")
return out, _
def __getattribute__(self, name_):
att = super().__getattribute__(name_)
if name_ == "stdout" and att is not None:
att.read = self.read_wrapper(att.read)
return att
@staticmethod
def read_wrapper(func):
if func.__name__ == "wrapper":
return func
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
print("Subprocess output:")
for line_ in os.fsdecode(output).split("\n"):
print(line_)
return output
return wrapper
subprocess.Popen = CustomPopen
print("ctypes.util script with the find_library:")
print(inspect.getsourcefile(util.find_library), end="\n\n")
print("find_library function:")
func_lines = list(map(str.rstrip, inspect.getsourcelines(util.find_library)[0]))
indent = len(func_lines[0]) - len(func_lines[0].lstrip())
for line in func_lines:
print(line.replace(" " * indent, "", 1))
library_names = ("cairo-2", "cairo", "libcairo-2")
filenames = ("libcairo.so.2", "libcairo.2.dylib", "libcairo-2.dll")
c_compiler = shutil.which("gcc") or shutil.which("cc")
ld_env = os.environ.get("LD_LIBRARY_PATH")
first_found = ""
print("\nLD_LIBRARY_PATH =", ld_env, end="\n\n")
for name in library_names:
if hasattr(util, "_findSoname_ldconfig"):
result = util._findSoname_ldconfig(name)
print(f"_findSoname_ldconfig({name}) ->", result)
if result:
print(f"Found {result}")
if not first_found:
first_found = result
print("---")
if c_compiler and hasattr(util, "_findLib_gcc"):
result = util._findLib_gcc(name)
print(f"_findLib_gcc({name}) ->", result)
if result and hasattr(util, "_get_soname"):
result = util._get_soname(result)
if result:
print(f"Found {result}")
if not first_found:
first_found = result
print("---")
if hasattr(util, "_findLib_ld"):
result = util._findLib_ld(name)
print(f"_findLib_ld({name}) ->", result)
if result and hasattr(util, "_get_soname"):
result = util._get_soname(result)
if result:
print(f"Found {result}")
if not first_found:
first_found = result
print("---")
if hasattr(util, "_findLib_crle"):
result = util._findLib_crle(name, False)
print(f"_findLib_crle({name}) ->", result)
if result and hasattr(util, "_get_soname"):
result = util._get_soname(result)
if result:
print(f"Found {result}")
if not first_found:
first_found = result
print("---")
if first_found:
filenames = (first_found,) + filenames
print(f"The path is {first_found or 'not found'}")
print("List of files that FFI will try to load:")
for filename in filenames:
print("-", filename)
| CustomPopen |
python | pytorch__pytorch | test/test_custom_ops.py | {
"start": 158988,
"end": 159830
} | class ____(CustomOpTestCaseBase):
test_ns = "mini_op_test"
def test_nonzero_again(self):
x = torch.tensor([0, 1, 2, 0, 0])
y = torch.ops.aten.nonzero.default(x)
self.assertEqual(y, torch.tensor([[1], [2]]))
optests.generate_opcheck_tests(
MiniOpTest,
["aten", "mini_op_test"],
get_file_path_2(os.path.dirname(__file__), "minioptest_failures_dict.json"),
additional_decorators={
"test_pt2_compliant_tag_mini_op_test_no_abstract": [unittest.expectedFailure]
},
test_utils=optests.generate_tests.DEPRECATED_DEFAULT_TEST_UTILS,
)
optests.generate_opcheck_tests(
MiniOpTestOther,
["aten", "mini_op_test"],
get_file_path_2(os.path.dirname(__file__), "minioptest_failures_dict.json"),
test_utils=optests.generate_tests.DEPRECATED_DEFAULT_TEST_UTILS,
)
| MiniOpTestOther |
python | kamyu104__LeetCode-Solutions | Python/shuffle-string.py | {
"start": 600,
"end": 898
} | class ____(object):
def restoreString(self, s, indices):
"""
:type s: str
:type indices: List[int]
:rtype: str
"""
result = ['']*len(s)
for i, c in itertools.izip(indices, s):
result[i] = c
return "".join(result)
| Solution2 |
python | modin-project__modin | modin/config/envvars.py | {
"start": 41492,
"end": 41874
} | class ____(EnvironmentVariable, type=int):
"""
Targeted max number of dataframe rows which should be transferred between engines.
This is often the same value as MODIN_NATIVE_MAX_ROWS but it can be independently
set to change how transfer costs are considered.
"""
varname = "MODIN_NATIVE_MAX_XFER_ROWS"
default = 10_000_000
| NativePandasTransferThreshold |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 495175,
"end": 496074
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("has_pinned_items", "items")
has_pinned_items = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="hasPinnedItems"
)
items = sgqlc.types.Field(
sgqlc.types.non_null(PinnableItemConnection),
graphql_name="items",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
| ProfileItemShowcase |
python | crytic__slither | slither/detectors/statements/chronicle_unchecked_price.py | {
"start": 425,
"end": 6172
} | class ____(AbstractDetector):
"""
Documentation: This detector finds calls to Chronicle oracle where the returned price is not checked
https://docs.chroniclelabs.org/Resources/FAQ/Oracles#how-do-i-check-if-an-oracle-becomes-inactive-gets-deprecated
"""
ARGUMENT = "chronicle-unchecked-price"
HELP = "Detect when Chronicle price is not checked."
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#chronicle-unchecked-price"
WIKI_TITLE = "Chronicle unchecked price"
WIKI_DESCRIPTION = "Chronicle oracle is used and the price returned is not checked to be valid. For more information https://docs.chroniclelabs.org/Resources/FAQ/Oracles#how-do-i-check-if-an-oracle-becomes-inactive-gets-deprecated."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract C {
IChronicle chronicle;
constructor(address a) {
chronicle = IChronicle(a);
}
function bad() public {
uint256 price = chronicle.read();
}
```
The `bad` function gets the price from Chronicle by calling the read function however it does not check if the price is valid."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Validate that the price returned by the oracle is valid."
def _var_is_checked(self, nodes: List[Node], var_to_check: Variable) -> bool:
visited = set()
checked = False
while nodes:
if checked:
break
next_node = nodes[0]
nodes = nodes[1:]
for node_ir in next_node.all_slithir_operations():
if isinstance(node_ir, Binary) and var_to_check in node_ir.read:
checked = True
break
# This case is for tryRead and tryReadWithAge
# if the isValid boolean is checked inside a require(isValid)
if (
isinstance(node_ir, SolidityCall)
and node_ir.function
in (
SolidityFunction("require(bool)"),
SolidityFunction("require(bool,string)"),
SolidityFunction("require(bool,error)"),
)
and var_to_check in node_ir.read
):
checked = True
break
if next_node not in visited:
visited.add(next_node)
for son in next_node.sons:
if son not in visited:
nodes.append(son)
return checked
# pylint: disable=too-many-nested-blocks,too-many-branches
def _detect(self) -> List[Output]:
results: List[Output] = []
for contract in self.compilation_unit.contracts_derived:
for target_contract, ir in sorted(
contract.all_high_level_calls,
key=lambda x: (x[1].node.node_id, x[1].node.function.full_name),
):
if target_contract.name in ("IScribe", "IChronicle") and ir.function_name in (
"read",
"tryRead",
"readWithAge",
"tryReadWithAge",
"latestAnswer",
"latestRoundData",
):
found = False
if ir.function_name in ("read", "latestAnswer"):
# We need to iterate the IRs as we are not always sure that the following IR is the assignment
# for example in case of type conversion it isn't
for node_ir in ir.node.irs:
if isinstance(node_ir, Assignment):
possible_unchecked_variable_ir = node_ir.lvalue
found = True
break
elif ir.function_name in ("readWithAge", "tryRead", "tryReadWithAge"):
# We are interested in the first item of the tuple
# readWithAge : value
# tryRead/tryReadWithAge : isValid
for node_ir in ir.node.irs:
if isinstance(node_ir, Unpack) and node_ir.index == 0:
possible_unchecked_variable_ir = node_ir.lvalue
found = True
break
elif ir.function_name == "latestRoundData":
found = False
for node_ir in ir.node.irs:
if isinstance(node_ir, Unpack) and node_ir.index == 1:
possible_unchecked_variable_ir = node_ir.lvalue
found = True
break
# If we did not find the variable assignment we know it's not checked
checked = (
self._var_is_checked(ir.node.sons, possible_unchecked_variable_ir)
if found
else False
)
if not checked:
info: DETECTOR_INFO = [
"Chronicle price is not checked to be valid in ",
ir.node.function,
"\n\t- ",
ir.node,
"\n",
]
res = self.generate_result(info)
results.append(res)
return results
| ChronicleUncheckedPrice |
python | walkccc__LeetCode | solutions/1996. The Number of Weak Characters in the Game/1996.py | {
"start": 0,
"end": 398
} | class ____:
def numberOfWeakCharacters(self, properties: list[list[int]]) -> int:
ans = 0
maxDefense = 0
# Sort properties by `attack` in descending order, then by `defense` in
# ascending order.
for _, defense in sorted(properties, key=lambda x: (-x[0], x[1])):
if defense < maxDefense:
ans += 1
maxDefense = max(maxDefense, defense)
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/xlm/modeling_xlm.py | {
"start": 57628,
"end": 62985
} | class ____(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.qa_outputs = XLMSQuADHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[dict[str, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
is_impossible: Optional[torch.Tensor] = None,
cls_index: Optional[torch.Tensor] = None,
p_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, XLMForQuestionAnsweringOutput]:
r"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the classification token to use as input for computing plausibility of the
answer.
p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
masked. 0.0 mean token is not masked.
Example:
```python
>>> from transformers import AutoTokenizer, XLMForQuestionAnswering
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> model = XLMForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
... 0
... ) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
outputs = self.qa_outputs(
output,
start_positions=start_positions,
end_positions=end_positions,
cls_index=cls_index,
is_impossible=is_impossible,
p_mask=p_mask,
return_dict=return_dict,
)
if not return_dict:
return outputs + transformer_outputs[1:]
return XLMForQuestionAnsweringOutput(
loss=outputs.loss,
start_top_log_probs=outputs.start_top_log_probs,
start_top_index=outputs.start_top_index,
end_top_log_probs=outputs.end_top_log_probs,
end_top_index=outputs.end_top_index,
cls_logits=outputs.cls_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
| XLMForQuestionAnswering |
python | huggingface__transformers | src/transformers/models/hubert/modular_hubert.py | {
"start": 11573,
"end": 11760
} | class ____(Wav2Vec2ForSequenceClassification):
pass
__all__ = ["HubertForCTC", "HubertForSequenceClassification", "HubertModel", "HubertPreTrainedModel"]
| HubertForSequenceClassification |
python | pytorch__pytorch | torch/distributed/_composable/replicate_with_fsdp.py | {
"start": 10193,
"end": 14288
} | class ____(FSDPModule):
def __new__(cls, *args, **kwargs):
"""
Override ``__new__`` to remove the FSDP class and directly construct
the original class for cases like indexing into a container module.
"""
# Use index 2 since 0 is the dynamically constructed `FSDP<...>` class
# and index 1 is the `FSDPModule` class itself
orig_cls = cls.__mro__[3]
self = orig_cls.__new__(orig_cls, *args, **kwargs)
self.__init__(*args, **kwargs)
return self
def _get_managed_modules(
root_modules: tuple[nn.Module, ...],
ignored_params: Optional[set[nn.Parameter]] = None,
) -> list[nn.Module]:
modules: list[nn.Module] = []
root_modules_set = set(root_modules)
# Track visisted modules to avoid visiting shared modules multiple times
visited_modules: set[nn.Module] = set()
def dfs(module: nn.Module) -> None:
"""
Runs a DFS to collect managed modules, not recursing into modules with
a non-composable API or ``replicate`` already applied.
"""
if not is_composable_with_replicate(module):
return
elif (
module not in root_modules_set
and _get_module_replicate_state(module) is not None
):
return # nested `fully_shard` module
visited_modules.add(module)
for submodule in module.children():
if submodule not in visited_modules:
dfs(submodule)
modules.append(module)
for root_module in root_modules:
dfs(root_module)
if ignored_params is None:
return modules
adjusted_modules = _adjust_managed_modules(modules, ignored_params)
return adjusted_modules
def is_composable_with_replicate(module: nn.Module) -> bool:
"""Checks if replicate can be applied with module"""
registry = _get_registry(module)
if registry is None:
return True
# Registry keys by function name
return "fully_shard" not in registry
def replicate_mesh():
"""Creates a device mesh for replicate if the user doesn't provide one"""
if not dist.distributed_c10d.is_initialized():
dist.distributed_c10d.init_process_group()
default_pg = dist.distributed_c10d._get_default_group()
device = torch._C._get_accelerator()
mesh = init_device_mesh(
device.type,
mesh_shape=(default_pg.size(),),
mesh_dim_names=("replicate",),
)
return mesh
def _adjust_managed_modules(
modules: list[nn.Module], ignored_params: set[nn.Parameter]
) -> list[nn.Module]:
"""
Adjust the given list of managed modules by removing those with all parameters ignored.
"""
ignore_decision: dict[nn.Module, bool] = {}
new_modules = []
for module in modules:
ignored = _ignore_module(module, ignored_params, ignore_decision)
if not ignored:
new_modules.append(module)
return new_modules
def _ignore_module(
module: nn.Module,
ignored_params: set[nn.Parameter],
ignore_decision: dict[nn.Module, bool],
) -> bool:
"""
Decide if it is safe to ignore a module for applying replicate.
"""
if module in ignore_decision:
return ignore_decision[module]
if len(list(module.buffers(recurse=False))) > 0:
# Cannot ignore a module with any buffer
ignore_decision[module] = False
return False
for _, param in module.named_parameters(recurse=False):
if param not in ignored_params:
# at least one param is not ignored. So this module shouldn't be.
ignore_decision[module] = False
return False
# Need to consider descendants of module
for child in list(module.children()):
ignore_child = _ignore_module(child, ignored_params, ignore_decision)
if not ignore_child:
# Cannot ignore module if one of its children is not ignored
ignore_decision[module] = False
return False
# Safe to ignore module
ignore_decision[module] = True
return True
| ReplicateModule |
python | getsentry__sentry | tests/sentry/integrations/jira_server/test_utils.py | {
"start": 244,
"end": 539
} | class ____(TestCase):
def test_jira_server(self) -> None:
user_response = StubService.get_stub_data("jira", "jira_server_user.json")
assert build_user_choice(user_response, "name") == (
"bob",
"Bobby - bob@example.org (bob)",
)
| BuildUserChoiceTest |
python | PyCQA__pylint | tests/functional/s/super/super_init_not_called.py | {
"start": 501,
"end": 776
} | class ____(UninferableParent): # [undefined-variable]
"""An implementation that test if we don't crash on uninferable parents."""
def __init__(self):
...
# Tests for not calling the init of a parent that does not define one
# but inherits it.
| UninferableChild |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_range_return_values.py | {
"start": 277,
"end": 3296
} | class ____(unittest.TestCase):
"""
Test the return value for various functions that handle 1 or 2D ranges.
"""
def test_range_return_values(self):
"""Test writing a worksheet with data out of bounds."""
worksheet = Worksheet()
max_row = 1048576
max_col = 16384
bound_error = -1
# Test some out of bound values.
got = worksheet.write_string(max_row, 0, "Foo")
self.assertEqual(got, bound_error)
got = worksheet.write_string(0, max_col, "Foo")
self.assertEqual(got, bound_error)
got = worksheet.write_string(max_row, max_col, "Foo")
self.assertEqual(got, bound_error)
got = worksheet.write_number(max_row, 0, 123)
self.assertEqual(got, bound_error)
got = worksheet.write_number(0, max_col, 123)
self.assertEqual(got, bound_error)
got = worksheet.write_number(max_row, max_col, 123)
self.assertEqual(got, bound_error)
got = worksheet.write_blank(max_row, 0, None, "format")
self.assertEqual(got, bound_error)
got = worksheet.write_blank(0, max_col, None, "format")
self.assertEqual(got, bound_error)
got = worksheet.write_blank(max_row, max_col, None, "format")
self.assertEqual(got, bound_error)
got = worksheet.write_formula(max_row, 0, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.write_formula(0, max_col, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.write_formula(max_row, max_col, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.write_array_formula(0, 0, 0, max_col, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.write_array_formula(0, 0, max_row, 0, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.write_array_formula(0, max_col, 0, 0, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.write_array_formula(max_row, 0, 0, 0, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.write_array_formula(max_row, max_col, max_row, max_col, "=A1")
self.assertEqual(got, bound_error)
got = worksheet.merge_range(0, 0, 0, max_col, "Foo")
self.assertEqual(got, bound_error)
got = worksheet.merge_range(0, 0, max_row, 0, "Foo")
self.assertEqual(got, bound_error)
got = worksheet.merge_range(0, max_col, 0, 0, "Foo")
self.assertEqual(got, bound_error)
got = worksheet.merge_range(max_row, 0, 0, 0, "Foo")
self.assertEqual(got, bound_error)
# Column out of bounds.
got = worksheet.set_column(6, max_col, 17)
self.assertEqual(got, bound_error)
got = worksheet.set_column(max_col, 6, 17)
self.assertEqual(got, bound_error)
# Row out of bounds.
worksheet.set_row(max_row, 30)
# Reverse man and min column numbers
worksheet.set_column(0, 3, 17)
| TestRangeReturnValues |
python | apache__airflow | providers/docker/tests/unit/docker/decorators/test_docker.py | {
"start": 1939,
"end": 15680
} | class ____:
@pytest.mark.db_test
def test_basic_docker_operator(self, dag_maker, session):
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
import random
return [random.random() for _ in range(100)]
with dag_maker(session=session):
f()
session.commit()
dr = dag_maker.create_dagrun(session=session)
session.expunge_all()
dag_maker.run_ti("f", dr)
ti = dr.get_task_instances(session=session)[0]
assert len(ti.xcom_pull()) == 100
@pytest.mark.db_test
def test_basic_docker_operator_with_param(self, dag_maker, session):
@task.docker(image="python:3.9-slim", auto_remove="force")
def f(num_results):
import random
return [random.random() for _ in range(num_results)]
with dag_maker(session=session):
f(50)
dr = dag_maker.create_dagrun(session=session)
session.expunge_all()
dag_maker.run_ti("f", dr)
ti = dr.get_task_instances(session=session)[0]
result = ti.xcom_pull(session=session)
assert isinstance(result, list)
assert len(result) == 50
@pytest.mark.db_test
def test_basic_docker_operator_with_template_fields(self, dag_maker):
from docker.types import Mount
@task.docker(
image="python:3.9-slim",
container_name="python_{{dag_run.dag_id}}",
auto_remove="force",
mounts=[Mount(source="workspace", target="/{{task_instance.run_id}}")],
)
def f():
raise RuntimeError("Should not executed")
with dag_maker():
ret = f()
dr = dag_maker.create_dagrun()
if AIRFLOW_V_3_0_PLUS:
ti = TaskInstance(task=ret.operator, run_id=dr.run_id, dag_version_id=dr.created_dag_version_id)
else:
ti = TaskInstance(task=ret.operator, run_id=dr.run_id)
rendered = ti.render_templates()
assert rendered.container_name == f"python_{dr.dag_id}"
assert rendered.mounts[0]["Target"] == f"/{ti.run_id}"
@pytest.mark.db_test
def test_basic_docker_operator_multiple_output(self, dag_maker, session):
@task.docker(image="python:3.9-slim", multiple_outputs=True, auto_remove="force")
def return_dict(number: int):
return {"number": number + 1, "43": 43}
test_number = 10
with dag_maker(session=session):
return_dict(test_number)
dr = dag_maker.create_dagrun(session=session)
session.expunge_all()
dag_maker.run_ti("return_dict", dr)
ti = dr.get_task_instances(session=session)[0]
assert ti.xcom_pull(key="number", session=session) == test_number + 1
assert ti.xcom_pull(key="43", session=session) == 43
assert ti.xcom_pull(session=session) == {"number": test_number + 1, "43": 43}
@pytest.mark.db_test
def test_no_return(self, dag_maker, session):
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
pass
with dag_maker(session=session):
f()
dr = dag_maker.create_dagrun(session=session)
session.expunge_all()
dag_maker.run_ti("f", dr)
ti = dr.get_task_instances(session=session)[0]
assert ti.xcom_pull(session=session) is None
def test_call_decorated_multiple_times(self):
"""Test calling decorated function 21 times in a DAG"""
@task.docker(image="python:3.9-slim", network_mode="bridge", api_version="auto", auto_remove="force")
def do_run():
return 4
with DAG("test", schedule=None, start_date=DEFAULT_DATE) as dag:
do_run()
for _ in range(20):
do_run()
assert len(dag.task_ids) == 21
assert dag.task_ids[-1] == "do_run__20"
@pytest.mark.db_test
@pytest.mark.parametrize(
("kwargs", "actual_exit_code", "expected_state"),
[
({}, 0, TaskInstanceState.SUCCESS),
({}, 100, TaskInstanceState.FAILED),
({}, 101, TaskInstanceState.FAILED),
({"skip_on_exit_code": None}, 0, TaskInstanceState.SUCCESS),
({"skip_on_exit_code": None}, 100, TaskInstanceState.FAILED),
({"skip_on_exit_code": None}, 101, TaskInstanceState.FAILED),
({"skip_on_exit_code": 100}, 0, TaskInstanceState.SUCCESS),
({"skip_on_exit_code": 100}, 100, TaskInstanceState.SKIPPED),
({"skip_on_exit_code": 100}, 101, TaskInstanceState.FAILED),
({"skip_on_exit_code": 0}, 0, TaskInstanceState.SKIPPED),
({"skip_on_exit_code": [100]}, 0, TaskInstanceState.SUCCESS),
({"skip_on_exit_code": [100]}, 100, TaskInstanceState.SKIPPED),
({"skip_on_exit_code": [100]}, 101, TaskInstanceState.FAILED),
({"skip_on_exit_code": [100, 102]}, 101, TaskInstanceState.FAILED),
({"skip_on_exit_code": (100,)}, 0, TaskInstanceState.SUCCESS),
({"skip_on_exit_code": (100,)}, 100, TaskInstanceState.SKIPPED),
({"skip_on_exit_code": (100,)}, 101, TaskInstanceState.FAILED),
],
)
def test_skip_docker_operator(self, kwargs, actual_exit_code, expected_state, dag_maker, session):
@task.docker(image="python:3.9-slim", auto_remove="force", **kwargs)
def f(exit_code):
raise SystemExit(exit_code)
with dag_maker(session=session):
f(actual_exit_code)
dr = dag_maker.create_dagrun(session=session)
session.expunge_all()
if expected_state == TaskInstanceState.FAILED:
with pytest.raises(AirflowException):
dag_maker.run_ti("f", dr)
else:
dag_maker.run_ti("f", dr)
ti = dr.get_task_instances(session=session)[0]
assert ti.state == expected_state
@pytest.mark.db_test
def test_setup_decorator_with_decorated_docker_task(self, dag_maker):
@setup
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
pass
with dag_maker() as dag:
f()
assert len(dag.task_group.children) == 1
setup_task = dag.task_group.children["f"]
assert setup_task.is_setup
@pytest.mark.db_test
def test_teardown_decorator_with_decorated_docker_task(self, dag_maker):
@teardown
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
pass
with dag_maker() as dag:
f()
assert len(dag.task_group.children) == 1
teardown_task = dag.task_group.children["f"]
assert teardown_task.is_teardown
@pytest.mark.db_test
@pytest.mark.parametrize("on_failure_fail_dagrun", [True, False])
def test_teardown_decorator_with_decorated_docker_task_and_on_failure_fail_arg(
self, dag_maker, on_failure_fail_dagrun
):
@teardown(on_failure_fail_dagrun=on_failure_fail_dagrun)
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
pass
with dag_maker() as dag:
f()
assert len(dag.task_group.children) == 1
teardown_task = dag.task_group.children["f"]
assert teardown_task.is_teardown
assert teardown_task.on_failure_fail_dagrun is on_failure_fail_dagrun
@pytest.mark.db_test
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_deepcopy_with_python_operator(self, dag_maker, serializer):
import copy
from airflow.providers.docker.decorators.docker import _DockerDecoratedOperator
@task.docker(image="python:3.9-slim", auto_remove="force", serializer=serializer)
def f():
import logging
logger = logging.getLogger("airflow.task")
logger.info("info log in docker")
@task.python()
def g():
import logging
logger = logging.getLogger("airflow.task")
logger.info("info log in python")
with dag_maker() as dag:
docker_task = f()
python_task = g()
_ = python_task >> docker_task
docker_operator = getattr(docker_task, "operator", None)
assert isinstance(docker_operator, _DockerDecoratedOperator)
task_id = docker_operator.task_id
assert isinstance(dag, DAG)
assert hasattr(dag, "task_dict")
assert isinstance(dag.task_dict, dict)
assert task_id in dag.task_dict
some_task = dag.task_dict[task_id]
clone_of_docker_operator = copy.deepcopy(docker_operator)
assert isinstance(some_task, _DockerDecoratedOperator)
assert isinstance(clone_of_docker_operator, _DockerDecoratedOperator)
assert some_task.command == clone_of_docker_operator.command
assert some_task.expect_airflow == clone_of_docker_operator.expect_airflow
assert some_task.serializer == clone_of_docker_operator.serializer
assert some_task.pickling_library is clone_of_docker_operator.pickling_library
@pytest.mark.db_test
def test_respect_docker_host_env(self, monkeypatch, dag_maker):
monkeypatch.setenv("DOCKER_HOST", "tcp://docker-host-from-env:2375")
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
pass
with dag_maker():
ret = f()
assert ret.operator.docker_url == "tcp://docker-host-from-env:2375"
@pytest.mark.db_test
def test_docker_host_env_empty(self, monkeypatch, dag_maker):
monkeypatch.setenv("DOCKER_HOST", "")
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
pass
with dag_maker():
ret = f()
# The docker CLI ignores the empty string and defaults to unix://var/run/docker.sock
# We want to ensure the same behavior.
assert ret.operator.docker_url == "unix://var/run/docker.sock"
@pytest.mark.db_test
def test_docker_host_env_unset(self, monkeypatch, dag_maker):
monkeypatch.delenv("DOCKER_HOST", raising=False)
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
pass
with dag_maker():
ret = f()
assert ret.operator.docker_url == "unix://var/run/docker.sock"
@pytest.mark.db_test
def test_failing_task(self, dag_maker, session, caplog):
"""Test regression #39319
Check the log content of the DockerOperator when the task fails.
"""
@task.docker(image="python:3.9-slim", auto_remove="force")
def f():
raise ValueError("This task is expected to fail")
with dag_maker(session=session):
f()
dr = dag_maker.create_dagrun(session=session)
session.expunge_all()
with pytest.raises(AirflowException):
dag_maker.run_ti("f", dr)
ti = dr.get_task_instances(session=session)[0]
assert ti.state == TaskInstanceState.FAILED
assert 'with open(sys.argv[4], "w") as file:' not in caplog.text
assert "ValueError: This task is expected to fail" in caplog.messages
@pytest.mark.db_test
def test_invalid_serializer(self, dag_maker):
@task.docker(image="python:3.9-slim", auto_remove="force", serializer="airflow")
def f():
"""Ensure dill is correctly installed."""
import dill # noqa: F401
with dag_maker():
with pytest.raises(AirflowException, match="Unsupported serializer 'airflow'"):
f()
@skip_if_force_lowest_dependencies_marker
@pytest.mark.parametrize(
"serializer",
[
pytest.param(
"dill",
marks=pytest.mark.skipif(
DILL_INSTALLED, reason="For this test case `dill` shouldn't be installed"
),
id="dill",
),
pytest.param(
"cloudpickle",
marks=pytest.mark.skipif(
CLOUDPICKLE_INSTALLED, reason="For this test case `cloudpickle` shouldn't be installed"
),
id="cloudpickle",
),
],
)
def test_advanced_serializer_not_installed(self, dag_maker, serializer, caplog):
"""Test case for check raising an error if dill/cloudpickle is not installed."""
@task.docker(image="python:3.9-slim", auto_remove="force", serializer=serializer)
def f(): ...
with dag_maker():
with pytest.raises(ModuleNotFoundError):
f()
assert f"Unable to import `{serializer}` module." in caplog.text
@pytest.mark.db_test
@CLOUDPICKLE_MARKER
def test_add_cloudpickle(self, dag_maker):
@task.docker(image="python:3.9-slim", auto_remove="force", serializer="cloudpickle")
def f():
"""Ensure cloudpickle is correctly installed."""
import cloudpickle # noqa: F401
with dag_maker():
f()
@pytest.mark.db_test
@DILL_MARKER
def test_add_dill(self, dag_maker):
@task.docker(image="python:3.9-slim", auto_remove="force", serializer="dill")
def f():
"""Ensure dill is correctly installed."""
import dill # noqa: F401
with dag_maker():
f()
| TestDockerDecorator |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 18202,
"end": 18855
} | class ____:
document: bool = False
embedding: bool = False
metadata: bool = False
rank: bool = False
uri: bool = False
@property
def included(self) -> Include:
includes = list()
if self.document:
includes.append("documents")
if self.embedding:
includes.append("embeddings")
if self.metadata:
includes.append("metadatas")
if self.rank:
includes.append("distances")
if self.uri:
includes.append("uris")
return includes # type: ignore[return-value]
# Rank expression types for hybrid search
@dataclass
| Projection |
python | huggingface__transformers | src/transformers/models/m2m_100/modeling_m2m_100.py | {
"start": 45875,
"end": 51380
} | class ____(M2M100PreTrainedModel, GenerationMixin):
base_model_prefix = "model"
_tied_weights_keys = {"lm_head.weight": "model.shared.weight"}
def __init__(self, config: M2M100Config):
super().__init__(config)
self.model = M2M100Model(config)
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], Seq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Translation:
```python
>>> from transformers import AutoTokenizer, M2M100ForConditionalGeneration
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")
>>> text_to_translate = "Life is like a box of chocolates"
>>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")
>>> # translate to French
>>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
>>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(outputs[0])
masked_lm_loss = None
if labels is not None:
# move labels to the correct device to enable PP
labels = labels.to(lm_logits.device)
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
__all__ = ["M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel"]
| M2M100ForConditionalGeneration |
python | sdispater__pendulum | src/pendulum/_helpers.py | {
"start": 931,
"end": 8796
} | class ____(NamedTuple):
years: int
months: int
days: int
hours: int
minutes: int
seconds: int
microseconds: int
total_days: int
def __repr__(self) -> str:
return (
f"{self.years} years "
f"{self.months} months "
f"{self.days} days "
f"{self.hours} hours "
f"{self.minutes} minutes "
f"{self.seconds} seconds "
f"{self.microseconds} microseconds"
)
def is_leap(year: int) -> bool:
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def is_long_year(year: int) -> bool:
def p(y: int) -> int:
return y + y // 4 - y // 100 + y // 400
return p(year) % 7 == 4 or p(year - 1) % 7 == 3
def week_day(year: int, month: int, day: int) -> int:
if month < 3:
year -= 1
w = (
year
+ year // 4
- year // 100
+ year // 400
+ DAY_OF_WEEK_TABLE[month - 1]
+ day
) % 7
if not w:
w = 7
return w
def days_in_year(year: int) -> int:
if is_leap(year):
return DAYS_PER_L_YEAR
return DAYS_PER_N_YEAR
def local_time(
unix_time: int, utc_offset: int, microseconds: int
) -> tuple[int, int, int, int, int, int, int]:
"""
Returns a UNIX time as a broken-down time
for a particular transition type.
"""
year = EPOCH_YEAR
seconds = math.floor(unix_time)
# Shift to a base year that is 400-year aligned.
if seconds >= 0:
seconds -= 10957 * SECS_PER_DAY
year += 30 # == 2000
else:
seconds += (146097 - 10957) * SECS_PER_DAY
year -= 370 # == 1600
seconds += utc_offset
# Handle years in chunks of 400/100/4/1
year += 400 * (seconds // SECS_PER_400_YEARS)
seconds %= SECS_PER_400_YEARS
if seconds < 0:
seconds += SECS_PER_400_YEARS
year -= 400
leap_year = 1 # 4-century aligned
sec_per_100years = SECS_PER_100_YEARS[leap_year]
while seconds >= sec_per_100years:
seconds -= sec_per_100years
year += 100
leap_year = 0 # 1-century, non 4-century aligned
sec_per_100years = SECS_PER_100_YEARS[leap_year]
sec_per_4years = SECS_PER_4_YEARS[leap_year]
while seconds >= sec_per_4years:
seconds -= sec_per_4years
year += 4
leap_year = 1 # 4-year, non century aligned
sec_per_4years = SECS_PER_4_YEARS[leap_year]
sec_per_year = SECS_PER_YEAR[leap_year]
while seconds >= sec_per_year:
seconds -= sec_per_year
year += 1
leap_year = 0 # non 4-year aligned
sec_per_year = SECS_PER_YEAR[leap_year]
# Handle months and days
month = TM_DECEMBER + 1
day = seconds // SECS_PER_DAY + 1
seconds %= SECS_PER_DAY
while month != TM_JANUARY + 1:
month_offset = MONTHS_OFFSETS[leap_year][month]
if day > month_offset:
day -= month_offset
break
month -= 1
# Handle hours, minutes, seconds and microseconds
hour, seconds = divmod(seconds, SECS_PER_HOUR)
minute, second = divmod(seconds, SECS_PER_MIN)
return year, month, day, hour, minute, second, microseconds
def precise_diff(
d1: datetime.datetime | datetime.date, d2: datetime.datetime | datetime.date
) -> PreciseDiff:
"""
Calculate a precise difference between two datetimes.
:param d1: The first datetime
:param d2: The second datetime
"""
sign = 1
if d1 == d2:
return PreciseDiff(0, 0, 0, 0, 0, 0, 0, 0)
tzinfo1: datetime.tzinfo | None = (
d1.tzinfo if isinstance(d1, datetime.datetime) else None
)
tzinfo2: datetime.tzinfo | None = (
d2.tzinfo if isinstance(d2, datetime.datetime) else None
)
if (tzinfo1 is None and tzinfo2 is not None) or (
tzinfo2 is None and tzinfo1 is not None
):
raise ValueError(
"Comparison between naive and aware datetimes is not supported"
)
if d1 > d2:
d1, d2 = d2, d1
sign = -1
d_diff = 0
hour_diff = 0
min_diff = 0
sec_diff = 0
mic_diff = 0
total_days = _day_number(d2.year, d2.month, d2.day) - _day_number(
d1.year, d1.month, d1.day
)
in_same_tz = False
tz1 = None
tz2 = None
# Trying to figure out the timezone names
# If we can't find them, we assume different timezones
if tzinfo1 and tzinfo2:
tz1 = _get_tzinfo_name(tzinfo1)
tz2 = _get_tzinfo_name(tzinfo2)
in_same_tz = tz1 == tz2 and tz1 is not None
if isinstance(d2, datetime.datetime):
if isinstance(d1, datetime.datetime):
# If we are not in the same timezone
# we need to adjust
#
# We also need to adjust if we do not
# have variable-length units
if not in_same_tz or total_days == 0:
offset1 = d1.utcoffset()
offset2 = d2.utcoffset()
if offset1:
d1 = d1 - offset1
if offset2:
d2 = d2 - offset2
hour_diff = d2.hour - d1.hour
min_diff = d2.minute - d1.minute
sec_diff = d2.second - d1.second
mic_diff = d2.microsecond - d1.microsecond
else:
hour_diff = d2.hour
min_diff = d2.minute
sec_diff = d2.second
mic_diff = d2.microsecond
if mic_diff < 0:
mic_diff += 1000000
sec_diff -= 1
if sec_diff < 0:
sec_diff += 60
min_diff -= 1
if min_diff < 0:
min_diff += 60
hour_diff -= 1
if hour_diff < 0:
hour_diff += 24
d_diff -= 1
y_diff = d2.year - d1.year
m_diff = d2.month - d1.month
d_diff += d2.day - d1.day
if d_diff < 0:
year = d2.year
month = d2.month
if month == 1:
month = 12
year -= 1
else:
month -= 1
leap = int(is_leap(year))
days_in_last_month = DAYS_PER_MONTHS[leap][month]
days_in_month = DAYS_PER_MONTHS[int(is_leap(d2.year))][d2.month]
if d_diff < days_in_month - days_in_last_month:
# We don't have a full month, we calculate days
if days_in_last_month < d1.day:
d_diff += d1.day
else:
d_diff += days_in_last_month
elif d_diff == days_in_month - days_in_last_month:
# We have exactly a full month
# We remove the days difference
# and add one to the months difference
d_diff = 0
m_diff += 1
else:
# We have a full month
d_diff += days_in_last_month
m_diff -= 1
if m_diff < 0:
m_diff += 12
y_diff -= 1
return PreciseDiff(
sign * y_diff,
sign * m_diff,
sign * d_diff,
sign * hour_diff,
sign * min_diff,
sign * sec_diff,
sign * mic_diff,
sign * total_days,
)
def _day_number(year: int, month: int, day: int) -> int:
month = (month + 9) % 12
year = year - month // 10
return (
365 * year
+ year // 4
- year // 100
+ year // 400
+ (month * 306 + 5) // 10
+ (day - 1)
)
def _get_tzinfo_name(tzinfo: datetime.tzinfo | None) -> str | None:
if tzinfo is None:
return None
if hasattr(tzinfo, "key"):
# zoneinfo timezone
return cast("zoneinfo.ZoneInfo", tzinfo).key
elif hasattr(tzinfo, "name"):
# Pendulum timezone
return cast("Timezone", tzinfo).name
elif hasattr(tzinfo, "zone"):
# pytz timezone
return tzinfo.zone # type: ignore[no-any-return]
return None
| PreciseDiff |
python | ray-project__ray | doc/source/serve/doc_code/batching_guide.py | {
"start": 1896,
"end": 2857
} | class ____:
async def generate_numbers(self, max: str) -> AsyncGenerator[str, None]:
for i in range(max):
yield str(i)
await asyncio.sleep(0.1)
def __call__(self, request: Request) -> StreamingResponse:
max = int(request.query_params.get("max", "25"))
gen = self.generate_numbers(max)
return StreamingResponse(gen, status_code=200, media_type="text/plain")
# __single_stream_end__
import requests
serve.run(StreamingResponder.bind())
r = requests.get("http://localhost:8000/", stream=True)
chunks = []
for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
chunks.append(chunk)
assert ",".join(list(map(str, range(25)))) == ",".join(chunks)
# __batch_stream_begin__
import asyncio
from typing import List, AsyncGenerator, Union
from starlette.requests import Request
from starlette.responses import StreamingResponse
from ray import serve
@serve.deployment
| StreamingResponder |
python | django__django | tests/migrations/migrations_test_apps/lookuperror_b/migrations/0002_b2.py | {
"start": 43,
"end": 690
} | class ____(migrations.Migration):
dependencies = [
("lookuperror_a", "0002_a2"),
("lookuperror_b", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="B2",
fields=[
(
"id",
models.AutoField(
primary_key=True,
verbose_name="ID",
auto_created=True,
serialize=False,
),
),
("a1", models.ForeignKey("lookuperror_a.A1", models.CASCADE)),
],
),
]
| Migration |
python | joerick__pyinstrument | pyinstrument/stack_sampler.py | {
"start": 11597,
"end": 12550
} | class ____(NamedTuple):
state: LiteralStr["in_context", "out_of_context_awaited", "out_of_context_unknown"]
"""
Definitions:
``in_context``: indicates that the sample comes from the subscriber's
context.
``out_of_context_awaited``: the sample comes from outside the
subscriber's context, but we tracked the await that happened before the
context exited. :attr:`info` contains the call stack of the await.
``out_of_context_unknown``: the sample comes from outside the
subscriber's context, but the change of context didn't look like an
await. :attr:`info` contains the call stack when the context changed.
"""
info: Any = None
_timing_overhead: dict[TimerType, float] | None = None
def timing_overhead() -> dict[TimerType, float]:
global _timing_overhead
if _timing_overhead is None:
_timing_overhead = measure_timing_overhead()
return _timing_overhead
| AsyncState |
python | PrefectHQ__prefect | src/prefect/utilities/schema_tools/validation.py | {
"start": 668,
"end": 10743
} | class ____(Exception):
pass
PLACEHOLDERS_VALIDATOR_NAME = "_placeholders"
def _build_validator() -> type["_Validator"]:
def _applicable_validators(schema: Schema) -> Iterable[tuple[str, Any]]:
# the default implementation returns `schema.items()`
assert not isinstance(schema, bool)
schema = {**schema, PLACEHOLDERS_VALIDATOR_NAME: None}
return schema.items()
def _placeholders(
_validator: "_Validator", _property: object, instance: Any, _schema: Schema
) -> Iterator[JSONSchemaValidationError]:
if isinstance(instance, HydrationError):
yield JSONSchemaValidationError(instance.message)
validators = dict(Draft202012Validator.VALIDATORS)
validators.update({PLACEHOLDERS_VALIDATOR_NAME: _placeholders})
# It is necessary to `create` a new validator instead of using `extend` because
# the `extend` method does not accept an `application_validators` parameter.
# We want `_placeholders` to be applicable always, without needing to modify
# the schema itself.
return create(
meta_schema=Draft202012Validator.META_SCHEMA,
validators=validators,
version="prefect",
type_checker=Draft202012Validator.TYPE_CHECKER,
format_checker=Draft202012Validator.FORMAT_CHECKER,
id_of=cast( # the stub for create() is wrong here; id_of accepts (Schema) -> str | None
Callable[[Schema], str], Draft202012Validator.ID_OF
),
applicable_validators=_applicable_validators,
)
_VALIDATOR = _build_validator()
def is_valid_schema(schema: ObjectSchema, preprocess: bool = True) -> None:
if preprocess:
schema = preprocess_schema(schema)
try:
_VALIDATOR.check_schema(schema, format_checker=_VALIDATOR.FORMAT_CHECKER)
except jsonschema.SchemaError as exc:
raise ValueError(f"Invalid schema: {exc.message}") from exc
def validate(
obj: dict[str, Any],
schema: ObjectSchema,
raise_on_error: bool = False,
preprocess: bool = True,
ignore_required: bool = False,
allow_none_with_default: bool = False,
) -> list[JSONSchemaValidationError]:
if preprocess:
schema = preprocess_schema(schema, allow_none_with_default)
if ignore_required:
schema = remove_nested_keys(["required"], schema)
if raise_on_error:
try:
jsonschema.validate(obj, schema, _VALIDATOR)
except RecursionError:
raise CircularSchemaRefError
except JSONSchemaValidationError as exc:
if exc.json_path == "$":
error_message = "Validation failed."
else:
error_message = (
f"Validation failed for field {exc.json_path.replace('$.', '')!r}."
)
error_message += f" Failure reason: {exc.message}"
raise ValidationError(error_message) from exc
return []
else:
try:
validator = _VALIDATOR(schema, format_checker=_VALIDATOR.FORMAT_CHECKER)
errors = list(validator.iter_errors(obj)) # type: ignore
except RecursionError:
raise CircularSchemaRefError
return errors
def is_valid(obj: dict[str, Any], schema: ObjectSchema) -> bool:
errors = validate(obj, schema)
return not errors
def prioritize_placeholder_errors(
errors: list[JSONSchemaValidationError],
) -> list[JSONSchemaValidationError]:
errors_by_path: dict[str, list[JSONSchemaValidationError]] = defaultdict(list)
for error in errors:
path_str = "->".join(str(p) for p in error.relative_path)
errors_by_path[path_str].append(error)
filtered_errors: list[JSONSchemaValidationError] = []
for grouped_errors in errors_by_path.values():
placeholders_errors = [
error
for error in grouped_errors
if error.validator == PLACEHOLDERS_VALIDATOR_NAME # type: ignore # typing stubs are incomplete
]
if placeholders_errors:
filtered_errors.extend(placeholders_errors)
else:
filtered_errors.extend(grouped_errors)
return filtered_errors
def build_error_obj(errors: list[JSONSchemaValidationError]) -> dict[str, Any]:
error_response: dict[str, Any] = {"errors": []}
# If multiple errors are present for the same path and one of them
# is a placeholder error, we want only want to use the placeholder error.
errors = prioritize_placeholder_errors(errors)
for error in errors:
# If the Placeholder is not representing an error, we can skip it
if isinstance(error.instance, Placeholder) and not error.instance.is_error:
continue
path = deque(error.relative_path)
# Required errors should be moved one level down to the property
# they're associated with, so we add an extra level to the path.
if error.validator == "required": # type: ignore
required_field = error.message.partition(" ")[0].strip("'")
path.append(required_field)
current: list[Any] = error_response["errors"]
# error at the root, just append the error message
if not path:
current.append(error.message)
while path:
part = path.popleft()
if isinstance(part, int):
if not path:
current.append({"index": part, "errors": [error.message]})
else:
for entry in current:
if entry.get("index") == part:
current = cast(list[Any], entry["errors"])
break
else:
new_entry: dict[str, Any] = {"index": part, "errors": []}
current.append(new_entry)
current = new_entry["errors"]
else:
if not path:
current.append({"property": part, "errors": [error.message]})
else:
for entry in current:
if entry.get("property") == part:
current = entry.get("errors", [])
break
else:
new_entry = {"property": part, "errors": []}
current.append(new_entry)
current = new_entry["errors"]
valid = not bool(error_response["errors"])
error_response["valid"] = valid
return error_response
def _fix_null_typing(
key: str,
schema: dict[str, Any],
required_fields: list[str],
allow_none_with_default: bool = False,
) -> None:
"""
Pydantic V1 does not generate a valid Draft2020-12 schema for null types.
"""
if (
key not in required_fields
and "type" in schema
and schema.get("type") != "null"
and ("default" not in schema or allow_none_with_default)
):
schema["anyOf"] = [{"type": schema["type"]}, {"type": "null"}]
del schema["type"]
def _fix_tuple_items(schema: dict[str, Any]) -> None:
"""
Pydantic V1 does not generate a valid Draft2020-12 schema for tuples.
"""
if (
schema.get("items")
and isinstance(schema["items"], list)
and not schema.get("prefixItems")
):
schema["prefixItems"] = deepcopy(cast(list[Any], schema["items"]))
del schema["items"]
def process_properties(
properties: dict[str, dict[str, Any]],
required_fields: list[str],
allow_none_with_default: bool = False,
) -> None:
for key, schema in properties.items():
_fix_null_typing(key, schema, required_fields, allow_none_with_default)
_fix_tuple_items(schema)
if "properties" in schema:
required_fields = schema.get("required", [])
process_properties(schema["properties"], required_fields)
def preprocess_schema(
schema: ObjectSchema,
allow_none_with_default: bool = False,
) -> ObjectSchema:
schema = deepcopy(schema)
if "properties" in schema:
required_fields = schema.get("required", [])
process_properties(
schema["properties"], required_fields, allow_none_with_default
)
if "definitions" in schema: # Also process definitions for reused models
definitions = cast(dict[str, Any], schema["definitions"])
for definition in definitions.values():
if "properties" in definition:
required_fields = definition.get("required", [])
process_properties(
definition["properties"], required_fields, allow_none_with_default
)
# Allow block types to be referenced by their id
if "block_type_slug" in definition:
schema["definitions"][definition["title"]] = {
"oneOf": [
definition,
{
"type": "object",
"properties": {
"$ref": {
"oneOf": [
{
"type": "string",
"format": "uuid",
},
{
"type": "object",
"additionalProperties": {
"type": "string",
},
"minProperties": 1,
},
]
}
},
"required": [
"$ref",
],
},
]
}
return schema
| ValidationError |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v3_checkpoint_adapter.py | {
"start": 9403,
"end": 14590
} | class ____(checkpoint_adapter.ReshardCallback):
"""Reshard callback for embeddings."""
def __init__(
self,
object_local_name: str,
from_shard_layouts: Mapping[
str, Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]
],
to_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout],
):
"""Initializes Reshard callback.
Args:
object_local_name: The local name of the object being restored.
from_shard_layouts: A dictionary in stacked table name to a list of its
consituent table layouts. The layouts are coming from the checkpoint
being restored.
to_shard_layouts: a list of target layouts that will be resharded to.
"""
logging.info("Creating EmbeddingReshardCallback for %s", object_local_name)
self._object_local_name = object_local_name
self._from_shard_layouts = from_shard_layouts
self._to_shard_layouts = to_shard_layouts
def object_name(self) -> str:
return self._object_local_name
def update_restore_inputs(
self, checkpoint_key: str, shape_and_slice_spec: str
) -> tuple[Sequence[str], Sequence[str]]:
"""Return the full shape of the stacked that is passed into restore_v2.
This shape information is required by the restore_v2 process to ensure it
loads the complete tensor from the checkpoint. The full tensor is required
to perform resharding operations.
Args:
checkpoint_key: The input checkpoint key to be read.
shape_and_slice_spec: The shape and slice spec of the checkpoint key to be
read.
Returns:
A tuple of (keys, slices) that should be passed to restore_v2 in order to
reshard according to the resharding plan. The restored tensors from
restore_v2 op will usually be passed to reshard method of this class to
get the final resharded value.
"""
keys = []
slices = []
for stacked_name, table_layouts in self._from_shard_layouts.items():
key = checkpoint_key.replace(self._object_local_name, stacked_name)
keys.append(key)
# use the first layout get the full shape of the stacked table
first_layout = table_layouts[0]
full_vocab_size = (
first_layout.total_rows_per_sparse_core_shard
* first_layout.num_sparse_cores
)
stack_dim = first_layout.unsharded_padded_shape[1]
full_shape = [full_vocab_size, stack_dim]
slices.append(
_shard_info_str(
full_shape,
trackable_base.ShardInfo(offset=[0, 0], shape=full_shape),
)
)
logging.info(
"Updating restore v2 inputs for %s[%s]:%s to stacked_tables: [%s],"
" slices: [%s]",
checkpoint_key,
self._object_local_name,
shape_and_slice_spec,
", ".join(keys),
", ".join(slices),
)
return (keys, slices)
def reshard(
self,
checkpoint_values: Sequence[tensor.Tensor],
shape_and_slice: str,
) -> tensor.Tensor:
# unshard
stime = time.time()
logging.info(
"EmbeddingReshardCallback: starting to reshard [%s],"
" from checkpoint_value with shapes: %s",
self._object_local_name,
", ".join([str(t.shape) for t in checkpoint_values]),
)
unsharded_tables = dict()
for stacked_table, layouts in zip(
checkpoint_values,
list(self._from_shard_layouts.values()),
):
logging.info(
"Unshard sc_to_cpu stacked_table: %s, shape: %s, no. of constituent"
" tables: %d",
layouts[0].stacked_table_name,
stacked_table.shape,
len(layouts),
)
unsharded_tensors = _unshard_from_sc_to_cpu(stacked_table, layouts)
for unshared_tensor, layout in zip(unsharded_tensors, layouts):
unsharded_tables[layout.table_name] = unshared_tensor
required_tables = [
unsharded_tables[layout.table_name] for layout in self._to_shard_layouts
]
ret = _shard_from_cpu_to_sc(
required_tables, shape_and_slice, self._to_shard_layouts
)
etime = time.time()
logging.info(
"EmbeddingReshardCallback: reshard [%s] took %s",
self._object_local_name,
etime - stime,
)
return ret
def _reorg_layouts(
layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout],
) -> Mapping[str, Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]:
"""Reorg the layouts to be in the order of the logical table.
Building a Dict[StackedTableName, SortedList[TableLayout]]
Args:
layouts: The layouts to be reorged.
Returns:
A dict of stacked table name to sorted list of table layouts.
"""
stacked_name_to_table_names = collections.defaultdict(list)
for layout in layouts:
stacked_name_to_table_names[layout.stacked_table_name].append(layout)
for stacked_name in stacked_name_to_table_names.keys():
sorted_layouts = sorted(
stacked_name_to_table_names[stacked_name],
key=lambda layout: layout.sparse_core_shard_row_offset,
)
stacked_name_to_table_names[stacked_name] = sorted_layouts
return stacked_name_to_table_names
| EmbeddingReshardCallback |
python | Textualize__textual | docs/examples/guide/reactivity/refresh01.py | {
"start": 151,
"end": 300
} | class ____(Widget):
"""Generates a greeting."""
who = reactive("name")
def render(self) -> str:
return f"Hello, {self.who}!"
| Name |
python | matplotlib__matplotlib | lib/matplotlib/hatch.py | {
"start": 5257,
"end": 5438
} | class ____(Circles):
size = 0.2
def __init__(self, hatch, density):
self.num_rows = (hatch.count('o')) * density
super().__init__(hatch, density)
| SmallCircles |
python | huggingface__transformers | tests/models/clip/test_modeling_clip.py | {
"start": 24176,
"end": 26148
} | class ____(CLIPModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CLIPForImageClassification,) if is_torch_available() else ()
pipeline_model_mapping = {"image-classification": CLIPForImageClassification} if is_torch_available() else {}
test_resize_embeddings = False
test_attention_outputs = False
_is_composite = True
def setUp(self):
self.model_tester = CLIPForImageClassificationModelTester(self)
@unittest.skip(reason="CLIPForImageClassification does not support inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="CLIPForImageClassification does not support inputs_embeds")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="CLIPForImageClassification does not support gradient checkpointing yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="CLIPForImageClassification does not support gradient checkpointing yet")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="CLIPForImageClassification does not support gradient checkpointing yet")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
@slow
@is_flaky()
def test_eager_matches_sdpa_inference(self, *args):
# adding only flaky decorator here and call the parent test method
return getattr(ModelTesterMixin, self._testMethodName)(self)
def test_sdpa_can_dispatch_composite_models(self):
super().test_sdpa_can_dispatch_composite_models()
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_torch
| CLIPForImageClassificationModelTest |
python | Pylons__pyramid | tests/test_authorization.py | {
"start": 18802,
"end": 19207
} | class ____:
def __init__(self, *arg, **kw):
self.__dict__.update(kw)
VIEW = 'view'
EDIT = 'edit'
CREATE = 'create'
DELETE = 'delete'
MODERATE = 'moderate'
ADMINISTER = 'administer'
COMMENT = 'comment'
GUEST_PERMS = (VIEW, COMMENT)
MEMBER_PERMS = GUEST_PERMS + (EDIT, CREATE, DELETE)
MODERATOR_PERMS = MEMBER_PERMS + (MODERATE,)
ADMINISTRATOR_PERMS = MODERATOR_PERMS + (ADMINISTER,)
| DummyContext |
python | Textualize__textual | docs/examples/styles/links.py | {
"start": 155,
"end": 376
} | class ____(App):
CSS_PATH = "links.tcss"
def compose(self) -> ComposeResult:
yield Static(TEXT)
yield Static(TEXT, id="custom")
if __name__ == "__main__":
app = LinksApp()
app.run()
| LinksApp |
python | tensorflow__tensorflow | tensorflow/python/debug/wrappers/dumping_wrapper.py | {
"start": 995,
"end": 5166
} | class ____(framework.NonInteractiveDebugWrapperSession):
"""Debug Session wrapper that dumps debug data to filesystem."""
def __init__(self,
sess,
session_root,
watch_fn=None,
thread_name_filter=None,
pass_through_operrors=None):
"""Constructor of DumpingDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
session_root: (`str`) Path to the session root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`tf.Session.run`
calls.
As the `run()` calls occur, subdirectories will be added to
`session_root`. The subdirectories' names has the following pattern:
run_<epoch_time_stamp>_<zero_based_run_counter>
E.g., run_1480734393835964_ad4c953a85444900ae79fc1b652fb324
watch_fn: (`Callable`) A Callable that can be used to define per-run
debug ops and watched tensors. See the doc of
`NonInteractiveDebugWrapperSession.__init__()` for details.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
pass_through_operrors: If true, all captured OpErrors will be
propagated. By default this captures all OpErrors.
Raises:
ValueError: If `session_root` is an existing and non-empty directory or
if `session_root` is a file.
"""
framework.NonInteractiveDebugWrapperSession.__init__(
self, sess, watch_fn=watch_fn, thread_name_filter=thread_name_filter,
pass_through_operrors=pass_through_operrors)
session_root = os.path.expanduser(session_root)
if gfile.Exists(session_root):
if not gfile.IsDirectory(session_root):
raise ValueError(
"session_root path points to a file: %s" % session_root)
elif gfile.ListDirectory(session_root):
raise ValueError(
"session_root path points to a non-empty directory: %s" %
session_root)
else:
gfile.MakeDirs(session_root)
self._session_root = session_root
self._run_counter = 0
self._run_counter_lock = threading.Lock()
def prepare_run_debug_urls(self, fetches, feed_dict):
"""Implementation of abstract method in superclass.
See doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`
for details. This implementation creates a run-specific subdirectory under
self._session_root and stores information regarding run `fetches` and
`feed_dict.keys()` in the subdirectory.
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
debug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in
this `Session.run()` call.
"""
# Add a UUID to accommodate the possibility of concurrent run() calls.
self._run_counter_lock.acquire()
run_dir = os.path.join(self._session_root, "run_%d_%d" %
(int(time.time() * 1e6), self._run_counter))
self._run_counter += 1
self._run_counter_lock.release()
gfile.MkDir(run_dir)
fetches_event = event_pb2.Event()
fetches_event.log_message.message = repr(fetches)
fetches_path = os.path.join(
run_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)
with gfile.Open(os.path.join(fetches_path), "wb") as f:
f.write(fetches_event.SerializeToString())
feed_keys_event = event_pb2.Event()
feed_keys_event.log_message.message = (repr(feed_dict.keys()) if feed_dict
else repr(feed_dict))
feed_keys_path = os.path.join(
run_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.FEED_KEYS_INFO_FILE_TAG)
with gfile.Open(os.path.join(feed_keys_path), "wb") as f:
f.write(feed_keys_event.SerializeToString())
return ["file://" + run_dir]
| DumpingDebugWrapperSession |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 49208,
"end": 60038
} | class ____(GroupViTPreTrainedModel):
config: GroupViTConfig
def __init__(self, config: GroupViTConfig):
super().__init__(config)
if not isinstance(config.text_config, GroupViTTextConfig):
raise TypeError(
"config.text_config is expected to be of type GroupViTTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, GroupViTVisionConfig):
raise TypeError(
"config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.projection_intermediate_dim = config.projection_intermediate_dim
self.text_embed_dim = text_config.hidden_size
self.vision_embed_dim = vision_config.hidden_size
self.text_model = GroupViTTextTransformer(text_config)
self.vision_model = GroupViTVisionTransformer(vision_config)
self.visual_projection = nn.Sequential(
nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True),
nn.BatchNorm1d(self.projection_intermediate_dim),
nn.ReLU(inplace=True),
nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
)
self.text_projection = nn.Sequential(
nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True),
nn.BatchNorm1d(self.projection_intermediate_dim),
nn.ReLU(inplace=True),
nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
# Initialize weights and apply final processing
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
) -> torch.FloatTensor:
r"""
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`GroupViTTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import CLIPTokenizer, GroupViTModel
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
text_outputs: BaseModelOutputWithPooling = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
)
text_features = self.text_projection(text_outputs.pooler_output)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.Tensor) -> torch.FloatTensor:
r"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`GroupViTVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, GroupViTModel
>>> from transformers.image_utils import load_image
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```"""
vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values)
image_features = self.visual_projection(vision_outputs.pooler_output)
return image_features
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_segmentation: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, GroupViTModelOutput]:
r"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_segmentation (`bool`, *optional*):
Whether or not to return the segmentation logits.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GroupViTModel
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
# Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_segmentation = (
output_segmentation if output_segmentation is not None else self.config.output_segmentation
)
if output_segmentation:
output_attentions = True
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[1]
image_embeds = self.visual_projection(image_embeds)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
# normalized features
image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.t()
seg_logits = None
if output_segmentation:
# grouped features
# [batch_size_image, num_group, hidden_size]
image_group_embeds = vision_outputs[0]
# [batch_size_image*num_group, hidden_size]
image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1]))
if output_hidden_states:
attentions = vision_outputs[3]
else:
attentions = vision_outputs[2]
# [batch_size_image, num_group, height, width]
grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
# normalized features
image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True)
# [batch_size_image x num_group, batch_size_text]
logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale
# [batch_size_image, batch_size_text, num_group]
logits_per_image_group = logits_per_image_group.reshape(
image_embeds.shape[0], -1, text_embeds.shape[0]
).permute(0, 2, 1)
# [batch_size_image, batch_size_text, height x width]
flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1)
# [batch_size_image, batch_size_text, height, width]
seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale
seg_logits = seg_logits.reshape(
seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]
)
loss = None
if return_loss:
loss = groupvit_loss(logits_per_text)
if not return_dict:
if seg_logits is not None:
output = (
logits_per_image,
logits_per_text,
seg_logits,
text_embeds,
image_embeds,
text_outputs,
vision_outputs,
)
else:
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
return ((loss,) + output) if loss is not None else output
return GroupViTModelOutput(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
segmentation_logits=seg_logits,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
__all__ = ["GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel"]
| GroupViTModel |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 4224,
"end": 5049
} | class ____(BaseIO):
fname = "__test__.csv"
params = ([1000, 10000], ["D", "h"])
param_names = ["nobs", "freq"]
def setup(self, nobs, freq):
rng = period_range(start="2000-01-01", periods=nobs, freq=freq)
self.data = DataFrame({"a": 1}, index=rng)
if freq == "D":
self.default_fmt = "%Y-%m-%d"
elif freq == "h":
self.default_fmt = "%Y-%m-%d %H:00"
def time_frame_period_formatting_index(self, nobs, freq):
self.data.to_csv(self.fname, date_format="%Y-%m-%d___%H:%M:%S")
def time_frame_period_formatting_index_default(self, nobs, freq):
self.data.to_csv(self.fname)
def time_frame_period_formatting_index_default_explicit(self, nobs, freq):
self.data.to_csv(self.fname, date_format=self.default_fmt)
| ToCSVPeriodIndex |
python | pytorch__pytorch | torch/fx/experimental/migrate_gradual_types/constraint.py | {
"start": 3365,
"end": 4150
} | class ____(Constraint):
"""
Greatest Upper bound for tensors with dynamic type
"""
def __init__(self, res, rhs1, rhs2):
"""
:param res: tensor variable that stores the result of the output
:param rhs1: tensor or tensor variable
:param rhs2: tensor or tensor variabke
"""
self.res = res
self.rhs1 = rhs1
self.rhs2 = rhs2
def __repr__(self):
return f"{self.res} = {self.rhs1}\u2294*{self.rhs2}"
def __eq__(self, other):
if isinstance(other, TGreatestUpperBound):
return (
self.res == other.res
and self.rhs1 == other.rhs1
and self.rhs2 == other.rhs2
)
else:
return False
| TGreatestUpperBound |
python | pypa__setuptools | setuptools/config/_validate_pyproject/formats.py | {
"start": 5106,
"end": 13564
} | class ____:
"""The ``trove_classifiers`` package is the official way of validating classifiers,
however this package might not be always available.
As a workaround we can still download a list from PyPI.
We also don't want to be over strict about it, so simply skipping silently is an
option (classifiers will be validated anyway during the upload to PyPI).
"""
downloaded: typing.Union[None, "Literal[False]", typing.Set[str]]
"""
None => not cached yet
False => unavailable
set => cached values
"""
def __init__(self) -> None:
self.downloaded = None
self._skip_download = False
self.__name__ = "trove_classifier" # Emulate a public function
def _disable_download(self) -> None:
# This is a private API. Only setuptools has the consent of using it.
self._skip_download = True
def __call__(self, value: str) -> bool:
if self.downloaded is False or self._skip_download is True:
return True
if os.getenv("NO_NETWORK") or os.getenv("VALIDATE_PYPROJECT_NO_NETWORK"):
self.downloaded = False
msg = (
"Install ``trove-classifiers`` to ensure proper validation. "
"Skipping download of classifiers list from PyPI (NO_NETWORK)."
)
_logger.debug(msg)
return True
if self.downloaded is None:
msg = (
"Install ``trove-classifiers`` to ensure proper validation. "
"Meanwhile a list of classifiers will be downloaded from PyPI."
)
_logger.debug(msg)
try:
self.downloaded = set(_download_classifiers().splitlines())
except Exception:
self.downloaded = False
_logger.debug("Problem with download, skipping validation")
return True
return value in self.downloaded or value.lower().startswith("private ::")
try:
from trove_classifiers import classifiers as _trove_classifiers
def trove_classifier(value: str) -> bool:
"""See https://pypi.org/classifiers/"""
return value in _trove_classifiers or value.lower().startswith("private ::")
except ImportError: # pragma: no cover
trove_classifier = _TroveClassifier()
# -------------------------------------------------------------------------------------
# Stub packages - PEP 561
def pep561_stub_name(value: str) -> bool:
"""Name of a directory containing type stubs.
It must follow the name scheme ``<package>-stubs`` as defined in
:pep:`561#stub-only-packages`.
"""
top, *children = value.split(".")
if not top.endswith("-stubs"):
return False
return python_module_name(".".join([top[: -len("-stubs")], *children]))
# -------------------------------------------------------------------------------------
# Non-PEP related
def url(value: str) -> bool:
"""Valid URL (validation uses :obj:`urllib.parse`).
For maximum compatibility please make sure to include a ``scheme`` prefix
in your URL (e.g. ``http://``).
"""
from urllib.parse import urlparse
try:
parts = urlparse(value)
if not parts.scheme:
_logger.warning(
"For maximum compatibility please make sure to include a "
"`scheme` prefix in your URL (e.g. 'http://'). "
f"Given value: {value}"
)
if not (value.startswith("/") or value.startswith("\\") or "@" in value):
parts = urlparse(f"http://{value}")
return bool(parts.scheme and parts.netloc)
except Exception:
return False
# https://packaging.python.org/specifications/entry-points/
ENTRYPOINT_PATTERN = r"[^\[\s=]([^=]*[^\s=])?"
ENTRYPOINT_REGEX = re.compile(f"^{ENTRYPOINT_PATTERN}$", re.I)
RECOMMEDED_ENTRYPOINT_PATTERN = r"[\w.-]+"
RECOMMEDED_ENTRYPOINT_REGEX = re.compile(f"^{RECOMMEDED_ENTRYPOINT_PATTERN}$", re.I)
ENTRYPOINT_GROUP_PATTERN = r"\w+(\.\w+)*"
ENTRYPOINT_GROUP_REGEX = re.compile(f"^{ENTRYPOINT_GROUP_PATTERN}$", re.I)
def python_identifier(value: str) -> bool:
"""Can be used as identifier in Python.
(Validation uses :obj:`str.isidentifier`).
"""
return value.isidentifier()
def python_qualified_identifier(value: str) -> bool:
"""
Python "dotted identifier", i.e. a sequence of :obj:`python_identifier`
concatenated with ``"."`` (e.g.: ``package.module.submodule``).
"""
if value.startswith(".") or value.endswith("."):
return False
return all(python_identifier(m) for m in value.split("."))
def python_module_name(value: str) -> bool:
"""Module name that can be used in an ``import``-statement in Python.
See :obj:`python_qualified_identifier`.
"""
return python_qualified_identifier(value)
def python_module_name_relaxed(value: str) -> bool:
"""Similar to :obj:`python_module_name`, but relaxed to also accept
dash characters (``-``) and cover special cases like ``pip-run``.
It is recommended, however, that beginners avoid dash characters,
as they require advanced knowledge about Python internals.
The following are disallowed:
* names starting/ending in dashes,
* names ending in ``-stubs`` (potentially collide with :obj:`pep561_stub_name`).
"""
if value.startswith("-") or value.endswith("-"):
return False
if value.endswith("-stubs"):
return False # Avoid collision with PEP 561
return python_module_name(value.replace("-", "_"))
def python_entrypoint_group(value: str) -> bool:
"""See ``Data model > group`` in the :ref:`PyPA's entry-points specification
<pypa:entry-points>`.
"""
return ENTRYPOINT_GROUP_REGEX.match(value) is not None
def python_entrypoint_name(value: str) -> bool:
"""See ``Data model > name`` in the :ref:`PyPA's entry-points specification
<pypa:entry-points>`.
"""
if not ENTRYPOINT_REGEX.match(value):
return False
if not RECOMMEDED_ENTRYPOINT_REGEX.match(value):
msg = f"Entry point `{value}` does not follow recommended pattern: "
msg += RECOMMEDED_ENTRYPOINT_PATTERN
_logger.warning(msg)
return True
def python_entrypoint_reference(value: str) -> bool:
"""Reference to a Python object using in the format::
importable.module:object.attr
See ``Data model >object reference`` in the :ref:`PyPA's entry-points specification
<pypa:entry-points>`.
"""
module, _, rest = value.partition(":")
if "[" in rest:
obj, _, extras_ = rest.partition("[")
if extras_.strip()[-1] != "]":
return False
extras = (x.strip() for x in extras_.strip(string.whitespace + "[]").split(","))
if not all(pep508_identifier(e) for e in extras):
return False
_logger.warning(f"`{value}` - using extras for entry points is not recommended")
else:
obj = rest
module_parts = module.split(".")
identifiers = _chain(module_parts, obj.split(".")) if rest else iter(module_parts)
return all(python_identifier(i.strip()) for i in identifiers)
def uint8(value: builtins.int) -> bool:
r"""Unsigned 8-bit integer (:math:`0 \leq x < 2^8`)"""
return 0 <= value < 2**8
def uint16(value: builtins.int) -> bool:
r"""Unsigned 16-bit integer (:math:`0 \leq x < 2^{16}`)"""
return 0 <= value < 2**16
def uint(value: builtins.int) -> bool:
r"""Unsigned 64-bit integer (:math:`0 \leq x < 2^{64}`)"""
return 0 <= value < 2**64
def int(value: builtins.int) -> bool:
r"""Signed 64-bit integer (:math:`-2^{63} \leq x < 2^{63}`)"""
return -(2**63) <= value < 2**63
try:
from packaging import licenses as _licenses
def SPDX(value: str) -> bool:
"""See :ref:`PyPA's License-Expression specification
<pypa:core-metadata-license-expression>` (added in :pep:`639`).
"""
try:
_licenses.canonicalize_license_expression(value)
return True
except _licenses.InvalidLicenseExpression:
return False
except ImportError: # pragma: no cover
_logger.warning(
"Could not find an up-to-date installation of `packaging`. "
"License expressions might not be validated. "
"To enforce validation, please install `packaging>=24.2`."
)
def SPDX(value: str) -> bool:
return True
| _TroveClassifier |
python | allegroai__clearml | clearml/automation/job.py | {
"start": 602,
"end": 19432
} | class ____(object):
_job_hash_description = "job_hash={}"
_job_hash_property = "pipeline_job_hash"
_hashing_callback = None
_last_batch_status_update_ts = 0
def __init__(self) -> None:
"""
Base Job is an abstract CLearML Job
"""
self._is_cached_task = False
self._worker = None
self.task_parameter_override = None
self.task = None
self.task_started = False
self._last_status_ts = 0
self._last_status = None
def get_metric(self, title: str, series: str) -> (float, float, float):
"""
Retrieve a specific scalar metric from the running Task.
:param str title: Graph title (metric)
:param str series: Series on the specific graph (variant)
:return: A tuple of min value, max value, last value
"""
metrics, title, series, values = self.get_metric_req_params(title, series)
res = self.task.send(
tasks_service.GetAllRequest(
id=[self.task.id],
page=0,
page_size=1,
only_fields=[
"id",
]
+ metrics,
)
)
response = res.wait()
return tuple(response.response_data["tasks"][0]["last_metrics"][title][series][v] for v in values)
@staticmethod
def get_metric_req_params(title: str, series: str) -> Tuple[List[str], str, str, List[str]]:
title = hashlib.md5(str(title).encode("utf-8")).hexdigest()
series = hashlib.md5(str(series).encode("utf-8")).hexdigest()
metric = "last_metrics.{}.{}.".format(title, series)
values = ["min_value", "max_value", "value"]
metrics = [metric + v for v in values]
return metrics, title, series, values
def launch(self, queue_name: str = None) -> bool:
"""
Send Job for execution on the requested execution queue
:param str queue_name:
:return False if Task is not in "created" status (i.e. cannot be enqueued) or cannot be enqueued
"""
if self._is_cached_task:
return False
try:
Task.enqueue(task=self.task, queue_name=queue_name)
return True
except Exception as ex:
logger.warning("Error enqueuing Task {} to {}: {}".format(self.task, queue_name, ex))
return False
def abort(self) -> None:
"""
Abort currently running job (can be called multiple times)
"""
if not self.task or self._is_cached_task:
return
if self.task.status == Task.TaskStatusEnum.queued:
Task.dequeue(self.task)
elif self.task.status == Task.TaskStatusEnum.in_progress:
try:
self.task.stopped()
except Exception as ex:
logger.warning(ex)
def elapsed(self) -> float:
"""
Return the time in seconds since job started. Return -1 if job is still pending
:return: Seconds from start.
"""
if not self.task_started and str(self.task.status) != Task.TaskStatusEnum.in_progress:
return -1
self.task_started = True
if not self.task.data.started:
self.task.reload()
if not self.task.data.started:
return -1
return (datetime.now(tz=self.task.data.started.tzinfo) - self.task.data.started).total_seconds()
def iterations(self) -> int:
"""
Return the last iteration value of the current job. -1 if job has not started yet
:return: Task last iteration.
"""
if not self.task_started and self.task.status != Task.TaskStatusEnum.in_progress:
return -1
self.task_started = True
return self.task.get_last_iteration()
def task_id(self) -> str:
"""
Return the Task id.
:return: The Task ID.
"""
return self.task.id
def status(self, force: bool = False) -> str:
"""
Return the Job Task current status. Options are: "created", "queued", "in_progress", "stopped", "published",
"publishing", "closed", "failed", "completed", "unknown".
:param force: Force status update, otherwise, only refresh state every 1 sec
:return: Task status Task.TaskStatusEnum in string.
"""
if self._last_status and not force and time() - self._last_status_ts < 1.0:
return self._last_status
self._last_status = self.task.status
# update timestamp after api call status()
self._last_status_ts = time()
return self._last_status
def status_message(self) -> str:
"""
Gets the status message of the task. Note that the message is updated only after `BaseJob.status()`
is called
:return: The status message of the corresponding task as a string
"""
return str(self.task.data.status_message)
@classmethod
def update_status_batch(cls, jobs: Sequence["BaseJob"]) -> None:
"""
Update the status of jobs, in batch_size
:param jobs: The jobs to update the status of
"""
have_job_with_no_status = False
id_map = {}
for job in jobs:
if not job.task:
continue
id_map[job.task.id] = job
# noinspection PyProtectedMember
if not job._last_status:
have_job_with_no_status = True
if not id_map or (time() - cls._last_batch_status_update_ts < 1 and not have_job_with_no_status):
return
# noinspection PyProtectedMember
batch_status = Task._get_tasks_status(list(id_map.keys()))
last_batch_update_ts = time()
cls._last_batch_status_update_ts = last_batch_update_ts
for status, message, task_id in batch_status:
if not status or not task_id:
continue
# noinspection PyProtectedMember
id_map[task_id]._last_status = status
# noinspection PyProtectedMember
id_map[task_id]._last_status_ts = last_batch_update_ts
def wait(
self,
timeout: Optional[float] = None,
pool_period: float = 30.0,
aborted_nonresponsive_as_running: bool = False,
) -> bool:
"""
Wait until the task is fully executed (i.e., aborted/completed/failed)
:param timeout: maximum time (minutes) to wait for Task to finish
:param pool_period: check task status every pool_period seconds
:param aborted_nonresponsive_as_running: (default: False) If True, ignore the stopped state if the backend
non-responsive watchdog sets this Task to stopped. This scenario could happen if
an instance running the job is killed without warning (e.g. spot instances)
:return: True, if Task finished.
"""
tic = time()
while timeout is None or time() - tic < timeout * 60.0:
if self.is_stopped(aborted_nonresponsive_as_running=aborted_nonresponsive_as_running):
return True
sleep(pool_period)
return self.is_stopped(aborted_nonresponsive_as_running=aborted_nonresponsive_as_running)
def get_console_output(self, number_of_reports: int = 1) -> Sequence[str]:
"""
Return a list of console outputs reported by the Task.
Returned console outputs are retrieved from the most updated console outputs.
:param int number_of_reports: number of reports to return, default 1, the last (most updated) console output
:return: List of strings each entry corresponds to one report.
"""
return self.task.get_reported_console_output(number_of_reports=number_of_reports)
def worker(self) -> Optional[str]:
"""
Return the current worker ID executing this Job. If job is pending, returns None
:return: ID of the worker executing / executed the job, or None if job is still pending.
"""
if self.is_pending():
return self._worker
if self._worker is None:
self.task.reload()
self._worker = self.task.last_worker
return self._worker
def is_running(self) -> bool:
"""
Return True, if job is currently running (pending is considered False)
:return: True, if the task is currently in progress.
"""
return self.status() == Task.TaskStatusEnum.in_progress
def is_stopped(self, aborted_nonresponsive_as_running: bool = False) -> bool:
"""
Return True, if job finished executing (for any reason)
:param aborted_nonresponsive_as_running: (default: False) If True, ignore the stopped state if the backend
non-responsive watchdog sets this Task to stopped. This scenario could happen if
an instance running the job is killed without warning (e.g. spot instances)
:return: True the task is currently one of these states, stopped / completed / failed / published.
"""
task_status = self.status()
# check if we are Not in any of the non-running states
if task_status not in (
Task.TaskStatusEnum.stopped,
Task.TaskStatusEnum.completed,
Task.TaskStatusEnum.failed,
Task.TaskStatusEnum.published,
):
return False
# notice the status update also refresh the "status_message" field on the Task
# if we are stopped but the message says "non-responsive" it means for some reason the
# Task's instance was killed, we should ignore it if requested because we assume someone will bring it back
if (
aborted_nonresponsive_as_running
and task_status == Task.TaskStatusEnum.stopped
and str(self.task.data.status_message).lower() == "forced stop (non-responsive)"
):
# if we are here it means the state is "stopped" but we should ignore it
# because the non-responsive watchdog set it. We assume someone (autoscaler) will relaunch it.
return False
else:
# if we do not need to ignore the nonactive state, it means this Task stopped
return True
def is_failed(self) -> bool:
"""
Return True, if job is has executed and failed
:return: True the task is currently in failed state
"""
return self.status() in (Task.TaskStatusEnum.failed,)
def is_completed(self) -> bool:
"""
Return True, if job was executed and completed successfully
:return: True the task is currently in completed or published state
"""
return self.status() in (
Task.TaskStatusEnum.completed,
Task.TaskStatusEnum.published,
)
def is_aborted(self) -> bool:
"""
Return True, if job was executed and aborted
:return: True the task is currently in aborted state
"""
return self.status() in (Task.TaskStatusEnum.stopped,)
def is_pending(self) -> bool:
"""
Return True, if job is waiting for execution
:return: True if the task is currently queued.
"""
return self.status() in (
Task.TaskStatusEnum.queued,
Task.TaskStatusEnum.created,
)
def started(self) -> bool:
"""
Return True, if job already started, or ended. False, if created/pending.
:return: False, if the task is currently in draft mode or pending.
"""
if not self.task_started and self.task.status in (
Task.TaskStatusEnum.in_progress,
Task.TaskStatusEnum.created,
):
return False
self.task_started = True
return True
def delete(self) -> bool:
"""
Delete the current temporary job (before launching)
Return False if the Job/Task could not deleted
"""
if not self.task or self._is_cached_task:
return False
if self.task.delete():
self.task = None
return True
return False
def is_cached_task(self) -> bool:
"""
:return: True if the internal Task is a cached one, False otherwise.
"""
return self._is_cached_task
@classmethod
def register_hashing_callback(cls, a_function: Callable[[dict], dict]) -> None:
"""
Allow to customize the dict used for hashing the Task.
Provided function will be called with a dict representing a Task,
allowing to return a modified version of the representation dict.
:param a_function: Function manipulating the representation dict of a function
"""
assert callable(a_function)
cls._hashing_callback = a_function
@classmethod
def _create_task_hash(
cls,
task: Task,
section_overrides: Optional[dict] = None,
params_override: Optional[dict] = None,
configurations_override: Optional[dict] = None,
explicit_docker_image: Optional[str] = None,
) -> Optional[str]:
"""
Create Hash (str) representing the state of the Task
:param task: A Task to hash
:param section_overrides: optional dict (keys are Task's section names) with task overrides.
:param params_override: Alternative to the entire Task's hyper parameters section
(notice this should not be a nested dict but a flat key/value)
:param configurations_override: dictionary of configuration override objects (tasks.ConfigurationItem)
:param explicit_docker_image: The explicit docker image. Used to invalidate the hash when the docker image
was explicitly changed
:return: str hash of the Task configuration
"""
if not task:
return None
if section_overrides and section_overrides.get("script"):
script = section_overrides["script"]
if not isinstance(script, dict):
script = script.to_dict()
else:
script = task.data.script.to_dict() if task.data.script else {}
# if we have a repository, we must make sure we have a specific version_num to ensure consistency
if script.get("repository") and not script.get("version_num") and not script.get("tag"):
return None
# we need to ignore `requirements` section because ir might be changing from run to run
script = deepcopy(script)
script.pop("requirements", None)
hyper_params = deepcopy(task.get_parameters() if params_override is None else params_override)
hyper_params_to_change = {}
task_cache = {}
for key, value in hyper_params.items():
if key.startswith("kwargs_artifacts/"):
# noinspection PyBroadException
try:
# key format is <task_id>.<artifact_name>
task_id, artifact = value.split(".", 1)
task_ = task_cache.setdefault(task_id, Task.get_task(task_id))
# set the value of the hyper parameter to the hash of the artifact
# because the task ID might differ, but the artifact might be the same
hyper_params_to_change[key] = task_.artifacts[artifact].hash
except Exception:
pass
hyper_params.update(hyper_params_to_change)
configs = task.get_configuration_objects() if configurations_override is None else configurations_override
# currently we do not add the docker image to the hash (only args and setup script),
# because default docker image will cause the step to change
docker = None
if hasattr(task.data, "container"):
docker = dict(**(task.data.container or dict()))
docker.pop("image", None)
if explicit_docker_image:
docker["image"] = explicit_docker_image
hash_func = "md5" if Session.check_min_api_version("2.13") else "crc32"
# make sure that if we only have docker args/bash,
# we use encode it, otherwise we revert to the original encoding (excluding docker altogether)
repr_dict = dict(script=script, hyper_params=hyper_params, configs=configs)
if docker:
repr_dict["docker"] = docker
# callback for modifying the representation dict
if cls._hashing_callback:
repr_dict = cls._hashing_callback(deepcopy(repr_dict))
return hash_dict(repr_dict, hash_func=hash_func)
@classmethod
def _get_cached_task(cls, task_hash: str) -> Optional[Task]:
"""
:param task_hash:
:return: A task matching the requested task hash
"""
if not task_hash:
return None
if Session.check_min_api_version("2.13"):
# noinspection PyProtectedMember
potential_tasks = Task._query_tasks(
status=["completed", "published"],
system_tags=["-{}".format(Task.archived_tag)],
_all_=dict(
fields=["runtime.{}".format(cls._job_hash_property)],
pattern=exact_match_regex(task_hash),
),
only_fields=["id"],
)
else:
# noinspection PyProtectedMember
potential_tasks = Task._query_tasks(
status=["completed", "published"],
system_tags=["-{}".format(Task.archived_tag)],
_all_=dict(
fields=["comment"],
pattern=cls._job_hash_description.format(task_hash),
),
only_fields=["id"],
)
for obj in potential_tasks:
task = Task.get_task(task_id=obj.id)
return task
return None
@classmethod
def _set_task_cache_hash(cls, task: Task, task_hash: Optional[str] = None) -> None:
"""
Store the task state hash for later querying
:param task: The Task object that was created
:param task_hash: The Task Hash (string) to store, if None generate a new task_hash from the Task
"""
if not task:
return
if not task_hash:
task_hash = cls._create_task_hash(task=task)
if Session.check_min_api_version("2.13"):
# noinspection PyProtectedMember
task._set_runtime_properties(runtime_properties={cls._job_hash_property: str(task_hash)})
else:
hash_comment = cls._job_hash_description.format(task_hash) + "\n"
task.set_comment(task.comment + "\n" + hash_comment if task.comment else hash_comment)
| BaseJob |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/ai.py | {
"start": 3244,
"end": 6658
} | class ____(InputType):
"""Matches GitHub issue URLs and instructs AI tools to fetch issue details."""
@classmethod
def matches(cls, user_input: str) -> bool:
return user_input.startswith("https://github.com/")
@classmethod
def get_context(cls, user_input: str) -> str:
return (
"The user would like to create a branch to address the following "
f"GitHub issue, which might describe a bug or a feature request: {user_input}."
"Use the `gh issue view --repo OWNER/REPO` tool to fetch the issue details."
)
@classmethod
def additional_allowed_tools(cls) -> list[str]:
return ["Bash(gh issue view:*)"]
INPUT_TYPES = [GithubIssueInputType]
def get_branch_name(plan: str, diagnostics: ClaudeDiagnostics) -> str:
"""Extract branch name from plan using regex based on planning_prompt.md format.
Expected format: **Proposed Branch Name:** `branch-name`
"""
match = re.search(r"\*\*Proposed Branch Name:\*\* `([^`]+)`", plan)
if match:
branch_name = match.group(1)
diagnostics.info(
category="branch_name_extracted",
message="Successfully extracted branch name from plan",
data={"branch_name": branch_name},
)
return branch_name
diagnostics.error(
category="branch_name_extraction_failed",
message="Could not extract branch name from plan",
data={"plan_snippet": plan[:200]},
)
raise click.ClickException("Could not extract branch name from plan")
def get_pr_title(plan: str, diagnostics: ClaudeDiagnostics) -> str:
"""Extract PR title from plan using regex based on planning_prompt.md format.
Expected format: **Proposed PR Title:** "Title text"
"""
match = re.search(r'\*\*Proposed PR Title:\*\* "([^"]+)"', plan)
if match:
pr_title = match.group(1)
diagnostics.info(
category="pr_title_extracted",
message="Successfully extracted PR title from plan",
data={"pr_title": pr_title},
)
return pr_title
diagnostics.error(
category="pr_title_extraction_failed",
message="Could not extract PR title from plan",
data={"plan_snippet": plan[:200]},
)
raise click.ClickException("Could not extract PR title from plan")
def get_branch_name_and_pr_title_from_plan(
plan: str,
diagnostics: ClaudeDiagnostics,
) -> ExtractedNames:
"""Extracts branch name and PR title from the plan using regex based on
the format defined in planning_prompt.md.
"""
diagnostics.info(
category="branch_name_and_title_generation_start",
message="Starting branch name and PR title generation",
data={
"context_length": len(plan),
},
)
start_time = perf_counter()
branch_name = get_branch_name(plan, diagnostics)
pr_title = get_pr_title(plan, diagnostics)
duration_ms = (perf_counter() - start_time) * 1000
diagnostics.info(
category="branch_name_and_title_generated",
message="Successfully generated branch name and PR title",
data={
"branch_name": branch_name,
"pr_title": pr_title,
"duration_ms": duration_ms,
},
)
return ExtractedNames(
branch_name=branch_name,
pr_title=pr_title,
)
| GithubIssueInputType |
python | django__django | tests/test_runner_apps/tagged/tests_inheritance.py | {
"start": 109,
"end": 251
} | class ____(FooBase):
def test_no_new_tags(self):
pass
@tag("baz")
def test_new_func_tag(self):
pass
@tag("bar")
| Foo |
python | apache__airflow | airflow-core/tests/unit/dag_processing/test_collection.py | {
"start": 41068,
"end": 42204
} | class ____:
@pytest.fixture(autouse=True)
def setup_teardown(self, session):
yield
session.query(DagModel).filter(DagModel.dag_id == "test_dag").delete()
session.commit()
@pytest.mark.parametrize(
("initial_tags", "new_tags", "expected_tags"),
[
(["dangerous"], {"DANGEROUS"}, {"DANGEROUS"}),
(["existing"], {"existing", "new"}, {"existing", "new"}),
(["tag1", "tag2"], {"tag1"}, {"tag1"}),
(["keep", "remove", "lowercase"], {"keep", "LOWERCASE", "new"}, {"keep", "LOWERCASE", "new"}),
(["tag1", "tag2"], set(), set()),
],
)
def test_update_dag_tags(self, testing_dag_bundle, session, initial_tags, new_tags, expected_tags):
dag_model = DagModel(dag_id="test_dag", bundle_name="testing")
dag_model.tags = [DagTag(name=tag, dag_id="test_dag") for tag in initial_tags]
session.add(dag_model)
session.commit()
_update_dag_tags(new_tags, dag_model, session=session)
session.commit()
assert {t.name for t in dag_model.tags} == expected_tags
| TestUpdateDagTags |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_compute.py | {
"start": 4191,
"end": 9741
} | class ____:
@mock.patch(COMPUTE_ENGINE_HOOK_PATH)
def test_insert_instance_should_execute_successfully(self, mock_hook):
get_instance_obj_mock = mock.MagicMock()
get_instance_obj_mock.__class__ = Instance
mock_hook.return_value.get_instance.side_effect = [
NotFound("Error message"),
get_instance_obj_mock,
]
op = ComputeEngineInsertInstanceOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_RESOURCE_ID,
body=GCE_INSTANCE_BODY_API_CALL,
zone=GCE_ZONE,
task_id=TASK_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
api_version=API_VERSION,
)
mock_hook.return_value.insert_instance.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=GCE_INSTANCE_BODY_API_CALL,
zone=GCE_ZONE,
request_id=None,
)
def test_insert_instance_should_throw_ex_when_missing_project_id(self):
with pytest.raises(AirflowException, match=r"The required parameter 'project_id' is missing"):
ComputeEngineInsertInstanceOperator(
project_id="",
body=GCE_INSTANCE_BODY_API_CALL,
zone=GCE_ZONE,
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
@mock.patch(COMPUTE_ENGINE_HOOK_PATH)
def test_insert_instance_should_not_throw_ex_when_project_id_none(self, mock_hook):
get_instance_obj_mock = mock.MagicMock()
get_instance_obj_mock.__class__ = Instance
mock_hook.return_value.get_instance.side_effect = [
NotFound("Error message"),
get_instance_obj_mock,
]
op = ComputeEngineInsertInstanceOperator(
resource_id=GCE_RESOURCE_ID,
body=GCE_INSTANCE_BODY_API_CALL,
zone=GCE_ZONE,
task_id=TASK_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.insert_instance.assert_called_once_with(
body=GCE_INSTANCE_BODY_API_CALL,
zone=GCE_ZONE,
request_id=None,
project_id=None,
)
def test_insert_instance_should_throw_ex_when_missing_zone(self):
with pytest.raises(AirflowException, match=r"The required parameter 'zone' is missing"):
ComputeEngineInsertInstanceOperator(
resource_id=GCE_RESOURCE_ID,
body=GCE_INSTANCE_BODY_API_CALL,
zone="",
task_id=TASK_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
def test_insert_instance_should_throw_ex_when_missing_resource_id(self):
with pytest.raises(
AirflowException,
match=r"The required parameters 'resource_id' and "
r"body\['name'\] are missing\. Please, provide "
r"at least one of them",
):
ComputeEngineInsertInstanceOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
body=GCE_INSTANCE_BODY_WITHOUT_NAME_API_CALL,
task_id=TASK_ID,
resource_id="",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
@mock.patch(COMPUTE_ENGINE_HOOK_PATH)
def test_insert_instance_should_not_throw_ex_when_name_is_templated(self, mock_hook):
get_instance_obj_mock = mock.MagicMock()
get_instance_obj_mock.__class__ = Instance
mock_hook.return_value.get_instance.side_effect = [
NotFound("Error message"),
get_instance_obj_mock,
]
body_with_templated_name = deepcopy(GCE_INSTANCE_BODY_API_CALL)
body_with_templated_name["name"] = "{{ logical_date }}"
op = ComputeEngineInsertInstanceOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_RESOURCE_ID,
body=body_with_templated_name,
zone=GCE_ZONE,
task_id=TASK_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.insert_instance.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=body_with_templated_name,
zone=GCE_ZONE,
request_id=None,
)
| TestGceInstanceInsert |
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 16443,
"end": 16634
} | class ____(BaseGer):
blas_func = fblas.dger
dtype = float64
"""
##################################################
# Test blas ?gerc
# This will be a mess to test all cases.
"""
| TestDger |
python | ray-project__ray | python/ray/data/_internal/planner/plan_expression/expression_visitors.py | {
"start": 767,
"end": 2207
} | class ____(_ExprVisitor[None]):
"""Base visitor that provides automatic recursive traversal.
This class extends _ExprVisitor and provides default implementations
for composite nodes that automatically traverse child expressions.
"""
def visit_binary(self, expr: "BinaryExpr") -> None:
"""Default implementation: recursively visit both operands."""
super().visit(expr.left)
super().visit(expr.right)
def visit_unary(self, expr: "UnaryExpr") -> None:
"""Default implementation: recursively visit the operand."""
super().visit(expr.operand)
def visit_alias(self, expr: "AliasExpr") -> None:
"""Default implementation: recursively visit the inner expression."""
super().visit(expr.expr)
def visit_udf(self, expr: "UDFExpr") -> None:
"""Default implementation: recursively visit all arguments."""
for arg in expr.args:
super().visit(arg)
for value in expr.kwargs.values():
super().visit(value)
def visit_literal(self, expr: LiteralExpr) -> None:
"""Visit a literal expression (no columns to collect)."""
pass
def visit_star(self, expr: StarExpr) -> None:
"""Visit a star expression (no columns to collect)."""
pass
def visit_download(self, expr: "Expr") -> None:
"""Visit a download expression (no columns to collect)."""
pass
| _ExprVisitorBase |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/75_classderef_no.py | {
"start": 0,
"end": 73
} | class ____:
def foo(self, bar):
def inner():
bar
| Foo |
python | kamyu104__LeetCode-Solutions | Python/find-substring-with-given-hash-value.py | {
"start": 44,
"end": 622
} | class ____(object):
def subStrHash(self, s, power, modulo, k, hashValue):
"""
:type s: str
:type power: int
:type modulo: int
:type k: int
:type hashValue: int
:rtype: str
"""
h, idx = 0, -1
pw = pow(power, k-1, modulo)
for i in reversed(xrange(len(s))):
if i+k < len(s):
h = (h-(ord(s[i+k])-ord('a')+1)*pw)%modulo
h = (h*power+(ord(s[i])-ord('a')+1))%modulo
if h == hashValue:
idx = i
return s[idx:idx+k]
| Solution |
python | doocs__leetcode | lcof2/剑指 Offer II 080. 含有 k 个元素的组合/Solution.py | {
"start": 0,
"end": 380
} | class ____:
def combine(self, n: int, k: int) -> List[List[int]]:
res = []
def dfs(i, n, k, t):
if len(t) == k:
res.append(t.copy())
return
for j in range(i, n + 1):
t.append(j)
dfs(j + 1, n, k, t)
t.pop()
dfs(1, n, k, [])
return res
| Solution |
python | celery__celery | celery/exceptions.py | {
"start": 4056,
"end": 4161
} | class ____(CeleryWarning):
"""Multiple workers are using the same nodename."""
| DuplicateNodenameWarning |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 831347,
"end": 831739
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("PackageVersion", graphql_name="node")
"""The item at the end of the edge."""
| PackageVersionEdge |
python | readthedocs__readthedocs.org | readthedocs/oauth/migrations/0005_add_account_relation.py | {
"start": 100,
"end": 1163
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("socialaccount", "0002_token_max_lengths"),
("oauth", "0004_drop_github_and_bitbucket_models"),
]
operations = [
migrations.AddField(
model_name="remoteorganization",
name="account",
field=models.ForeignKey(
related_name="remote_organizations",
verbose_name="Connected account",
blank=True,
to="socialaccount.SocialAccount",
null=True,
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name="remoterepository",
name="account",
field=models.ForeignKey(
related_name="remote_repositories",
verbose_name="Connected account",
blank=True,
to="socialaccount.SocialAccount",
null=True,
on_delete=models.CASCADE,
),
),
]
| Migration |
python | walkccc__LeetCode | solutions/62. Unique Paths/62.py | {
"start": 0,
"end": 293
} | class ____:
def uniquePaths(self, m: int, n: int) -> int:
# dp[i][j] := the number of unique paths from (0, 0) to (i, j)
dp = [[1] * n for _ in range(m)]
for i in range(1, m):
for j in range(1, n):
dp[i][j] = dp[i - 1][j] + dp[i][j - 1]
return dp[-1][-1]
| Solution |
python | qdrant__qdrant-client | tools/async_client_generator/transformers/constant_transformer.py | {
"start": 41,
"end": 573
} | class ____(ast.NodeTransformer):
def __init__(self, constant_replace_map: Optional[dict[str, str]]):
self.constant_replace_map = (
constant_replace_map if constant_replace_map is not None else {}
)
def visit_Constant(self, node: ast.Constant) -> ast.AST:
for old_value, new_value in self.constant_replace_map.items():
if isinstance(node.value, str):
node.value = node.value.replace(old_value, new_value)
return self.generic_visit(node)
| ConstantTransformer |
python | pypa__hatch | src/hatch/venv/core.py | {
"start": 3701,
"end": 4348
} | class ____(VirtualEnv):
def __init__(self, parent_python, platform, verbosity=0):
self.parent_python = parent_python
self.parent_dir = TemporaryDirectory()
directory = Path(self.parent_dir.name).resolve() / get_random_venv_name()
super().__init__(directory, platform, verbosity)
def remove(self):
super().remove()
self.parent_dir.cleanup()
def __enter__(self):
self.create(self.parent_python)
return super().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
self.remove()
| TempVirtualEnv |
python | pennersr__django-allauth | allauth/socialaccount/providers/baidu/provider.py | {
"start": 217,
"end": 671
} | class ____(ProviderAccount):
def get_profile_url(self):
return "https://www.baidu.com/p/" + self.account.extra_data.get("uname")
def get_avatar_url(self):
return (
"https://tb.himg.baidu.com/sys/portraitn/item/"
+ self.account.extra_data.get("portrait")
)
def to_str(self):
dflt = super(BaiduAccount, self).to_str()
return self.account.extra_data.get("uname", dflt)
| BaiduAccount |
python | ansible__ansible | test/units/module_utils/json_utils/test_filter_non_json_lines.py | {
"start": 844,
"end": 2916
} | class ____(unittest.TestCase):
single_line_json_dict = u"""{"key": "value", "olá": "mundo"}"""
single_line_json_array = u"""["a","b","c"]"""
multi_line_json_dict = u"""{
"key":"value"
}"""
multi_line_json_array = u"""[
"a",
"b",
"c"]"""
all_inputs = [
single_line_json_dict,
single_line_json_array,
multi_line_json_dict,
multi_line_json_array
]
junk = [u"single line of junk", u"line 1/2 of junk\nline 2/2 of junk"]
unparsable_cases = (
u'No json here',
u'"olá": "mundo"',
u'{"No json": "ending"',
u'{"wrong": "ending"]',
u'["wrong": "ending"}',
)
def test_just_json(self):
for i in self.all_inputs:
filtered, warnings = _filter_non_json_lines(i)
self.assertEqual(filtered, i)
self.assertEqual(warnings, [])
def test_leading_junk(self):
for i in self.all_inputs:
for j in self.junk:
filtered, warnings = _filter_non_json_lines(j + "\n" + i)
self.assertEqual(filtered, i)
self.assertEqual(warnings, [])
def test_trailing_junk(self):
for i in self.all_inputs:
for j in self.junk:
filtered, warnings = _filter_non_json_lines(i + "\n" + j)
self.assertEqual(filtered, i)
self.assertEqual(warnings, [u"Module invocation had junk after the JSON data: %s" % j.strip()])
def test_leading_and_trailing_junk(self):
for i in self.all_inputs:
for j in self.junk:
filtered, warnings = _filter_non_json_lines("\n".join([j, i, j]))
self.assertEqual(filtered, i)
self.assertEqual(warnings, [u"Module invocation had junk after the JSON data: %s" % j.strip()])
def test_unparsable_filter_non_json_lines(self):
for i in self.unparsable_cases:
self.assertRaises(
ValueError,
_filter_non_json_lines,
data=i
)
| TestAnsibleModuleExitJson |
python | kamyu104__LeetCode-Solutions | Python/network-recovery-pathways.py | {
"start": 114,
"end": 2069
} | class ____(object):
def findMaxPathScore(self, edges, online, k):
"""
:type edges: List[List[int]]
:type online: List[bool]
:type k: int
:rtype: int
"""
INF = float("inf")
def binary_search_right(left, right, check):
while left <= right:
mid = left+(right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
def topological_sort():
in_degree = [0]*len(adj)
for u in xrange(len(adj)):
for v, _ in adj[u]:
in_degree[v] += 1
result = []
q = [u for u in xrange(len(adj)) if not in_degree[u]]
while q:
new_q = []
for u in q:
result.append(u)
for v, _ in adj[u]:
in_degree[v] -= 1
if in_degree[v]:
continue
new_q.append(v)
q = new_q
return result
def check(x):
dist = [INF]*len(adj)
dist[0] = 0
for u in order:
if dist[u] == INF:
continue
for v, c in adj[u]:
if not (c >= x and online[v]):
continue
dist[v] = min(dist[v], dist[u]+c)
return dist[-1] <= k
adj = [[] for _ in xrange(len(online))]
for u, v, c in edges:
adj[u].append((v, c))
order = topological_sort()
left, right = INF, 0
for u in xrange(len(adj)):
for _, c in adj[u]:
left = min(left, c)
right = max(right, c)
result = binary_search_right(left, right, check)
return result if result >= left else -1
| Solution |
python | django__django | tests/db_functions/math/test_atan.py | {
"start": 269,
"end": 2351
} | class ____(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_atan=ATan("normal")).first()
self.assertIsNone(obj.null_atan)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("-12.9"), n2=Decimal("0.6"))
obj = DecimalModel.objects.annotate(
n1_atan=ATan("n1"), n2_atan=ATan("n2")
).first()
self.assertIsInstance(obj.n1_atan, Decimal)
self.assertIsInstance(obj.n2_atan, Decimal)
self.assertAlmostEqual(obj.n1_atan, Decimal(math.atan(obj.n1)))
self.assertAlmostEqual(obj.n2_atan, Decimal(math.atan(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(
f1_atan=ATan("f1"), f2_atan=ATan("f2")
).first()
self.assertIsInstance(obj.f1_atan, float)
self.assertIsInstance(obj.f2_atan, float)
self.assertAlmostEqual(obj.f1_atan, math.atan(obj.f1))
self.assertAlmostEqual(obj.f2_atan, math.atan(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_atan=ATan("small"),
normal_atan=ATan("normal"),
big_atan=ATan("big"),
).first()
self.assertIsInstance(obj.small_atan, float)
self.assertIsInstance(obj.normal_atan, float)
self.assertIsInstance(obj.big_atan, float)
self.assertAlmostEqual(obj.small_atan, math.atan(obj.small))
self.assertAlmostEqual(obj.normal_atan, math.atan(obj.normal))
self.assertAlmostEqual(obj.big_atan, math.atan(obj.big))
def test_transform(self):
with register_lookup(DecimalField, ATan):
DecimalModel.objects.create(n1=Decimal("3.12"), n2=Decimal("0"))
DecimalModel.objects.create(n1=Decimal("-5"), n2=Decimal("0"))
obj = DecimalModel.objects.filter(n1__atan__gt=0).get()
self.assertEqual(obj.n1, Decimal("3.12"))
| ATanTests |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc.py | {
"start": 11589,
"end": 12601
} | class ____(Benchmark):
def setup(self):
self.c = np.ones(500000, dtype=np.int8)
self.i = np.ones(150000, dtype=np.int32)
self.f = np.zeros(150000, dtype=np.float32)
self.d = np.zeros(75000, dtype=np.float64)
# fault memory
self.f *= 1.
self.d *= 1.
def time_char_or(self):
np.bitwise_or(self.c, 0, out=self.c)
np.bitwise_or(0, self.c, out=self.c)
def time_char_or_temp(self):
0 | self.c | 0
def time_int_or(self):
np.bitwise_or(self.i, 0, out=self.i)
np.bitwise_or(0, self.i, out=self.i)
def time_int_or_temp(self):
0 | self.i | 0
def time_float_add(self):
np.add(self.f, 1., out=self.f)
np.add(1., self.f, out=self.f)
def time_float_add_temp(self):
1. + self.f + 1.
def time_double_add(self):
np.add(self.d, 1., out=self.d)
np.add(1., self.d, out=self.d)
def time_double_add_temp(self):
1. + self.d + 1.
| CustomInplace |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass4.py | {
"start": 200,
"end": 403
} | class ____(type):
def do_something(self, p1: str, p2: int):
pass
MyCustomClass = MyMeta("MyCustomClass", (object,), {})
reveal_type(MyCustomClass, expected_text="type[MyCustomClass]")
| MyMeta |
python | sympy__sympy | sympy/physics/mechanics/actuator.py | {
"start": 37397,
"end": 43633
} | class ____(ForceActuator):
r"""Coulomb kinetic friction with Stribeck and viscous effects.
Explanation
===========
This represents a Coulomb kinetic friction with the Stribeck and viscous effect,
described by the function:
.. math::
F = (\mu_k f_n + (\mu_s - \mu_k) f_n e^{-(\frac{v}{v_s})^2}) \text{sign}(v) + \sigma v
where :math:`\mu_k` is the coefficient of kinetic friction, :math:`\mu_s` is the
coefficient of static friction, :math:`f_n` is the normal force, :math:`v` is the
relative velocity, :math:`v_s` is the Stribeck friction coefficient, and
:math:`\sigma` is the viscous friction constant.
The default friction force is :math:`F = \mu_k f_n`.
When specified, the actuator includes:
- Stribeck effect: :math:`(\mu_s - \mu_k) f_n e^{-(\frac{v}{v_s})^2}`
- Viscous effect: :math:`\sigma v`
Notes
=====
The actuator makes the following assumptions:
- The actuator assumes relative motion is non-zero.
- The normal force is assumed to be a non-negative scalar.
- The resultant friction force is opposite to the velocity direction.
- Each point in the pathway is fixed within separate objects that are sliding relative to each other. In other words, these two points are fixed in the mutually sliding objects.
This actuator has been tested for straightforward motions, like a block sliding
on a surface.
The friction force is defined to always oppose the direction of relative velocity :math:`v`.
Specifically:
- The default Coulomb friction force :math:`\mu_k f_n \text{sign}(v)` is opposite to :math:`v`.
- The Stribeck effect :math:`(\mu_s - \mu_k) f_n e^{-(\frac{v}{v_s})^2} \text{sign}(v)` is also opposite to :math:`v`.
- The viscous friction term :math:`\sigma v` is opposite to :math:`v`.
Examples
========
The below example shows how to generate the loads produced by a Coulomb kinetic
friction actuator in a mass-spring system with friction.
>>> import sympy as sm
>>> from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Point,
... LinearPathway, CoulombKineticFriction, LinearSpring, KanesMethod, Particle)
>>> x, v = dynamicsymbols('x, v', real=True)
>>> m, g, k, mu_k, mu_s, v_s, sigma = sm.symbols('m, g, k, mu_k, mu_s, v_s, sigma')
>>> N = ReferenceFrame('N')
>>> O, P = Point('O'), Point('P')
>>> O.set_vel(N, 0)
>>> P.set_pos(O, x*N.x)
>>> pathway = LinearPathway(O, P)
>>> friction = CoulombKineticFriction(mu_k, m*g, pathway, v_s=v_s, sigma=sigma, mu_s=mu_k)
>>> spring = LinearSpring(k, pathway)
>>> block = Particle('block', point=P, mass=m)
>>> kane = KanesMethod(N, (x,), (v,), kd_eqs=(x.diff() - v,))
>>> friction.to_loads()
[(O, (g*m*mu_k*sign(sign(x(t))*Derivative(x(t), t)) + sigma*sign(x(t))*Derivative(x(t), t))*x(t)/Abs(x(t))*N.x), (P, (-g*m*mu_k*sign(sign(x(t))*Derivative(x(t), t)) - sigma*sign(x(t))*Derivative(x(t), t))*x(t)/Abs(x(t))*N.x)]
>>> loads = friction.to_loads() + spring.to_loads()
>>> fr, frstar = kane.kanes_equations([block], loads)
>>> eom = fr + frstar
>>> eom
Matrix([[-k*x(t) - m*Derivative(v(t), t) + (-g*m*mu_k*sign(v(t)*sign(x(t))) - sigma*v(t)*sign(x(t)))*x(t)/Abs(x(t))]])
Parameters
==========
f_n : sympifiable
The normal force between the surfaces. It should always be a non-negative scalar.
mu_k : sympifiable
The coefficient of kinetic friction.
pathway : PathwayBase
The pathway that the actuator follows.
v_s : sympifiable, optional
The Stribeck friction coefficient.
sigma : sympifiable, optional
The viscous friction coefficient.
mu_s : sympifiable, optional
The coefficient of static friction. Defaults to mu_k, meaning the Stribeck effect evaluates to 0 by default.
References
==========
.. [Moore2022] https://moorepants.github.io/learn-multibody-dynamics/loads.html#friction.
.. [Flores2023] Paulo Flores, Jorge Ambrosio, Hamid M. Lankarani,
"Contact-impact events with friction in multibody dynamics: Back to basics",
Mechanism and Machine Theory, vol. 184, 2023. https://doi.org/10.1016/j.mechmachtheory.2023.105305.
.. [Rogner2017] I. Rogner, "Friction modelling for robotic applications with planar motion",
Chalmers University of Technology, Department of Electrical Engineering, 2017.
"""
def __init__(self, mu_k, f_n, pathway, *, v_s=None, sigma=None, mu_s=None):
self._mu_k = sympify(mu_k, strict=True) if mu_k is not None else 1
self._mu_s = sympify(mu_s, strict=True) if mu_s is not None else self._mu_k
self._f_n = sympify(f_n, strict=True)
self._sigma = sympify(sigma, strict=True) if sigma is not None else 0
self._v_s = sympify(v_s, strict=True) if v_s is not None or v_s == 0 else 0.01
self.pathway = pathway
@property
def mu_k(self):
"""The coefficient of kinetic friction."""
return self._mu_k
@property
def mu_s(self):
"""The coefficient of static friction."""
return self._mu_s
@property
def f_n(self):
"""The normal force between the surfaces."""
return self._f_n
@property
def sigma(self):
"""The viscous friction coefficient."""
return self._sigma
@property
def v_s(self):
"""The Stribeck friction coefficient."""
return self._v_s
@property
def force(self):
v = self.pathway.extension_velocity
f_c = self.mu_k * self.f_n
f_max = self.mu_s * self.f_n
stribeck_term = (f_max - f_c) * exp(-(v / self.v_s)**2) if self.v_s is not None else 0
viscous_term = self.sigma * v if self.sigma is not None else 0
return (f_c + stribeck_term) * -sign(v) - viscous_term
@force.setter
def force(self, force):
raise AttributeError('Can\'t set computed attribute `force`.')
def __repr__(self):
return (f'{self.__class__.__name__}({self._mu_k}, {self._mu_s} '
f'{self._f_n}, {self.pathway}, {self._v_s}, '
f'{self._sigma})')
| CoulombKineticFriction |
python | huggingface__transformers | src/transformers/models/idefics/vision.py | {
"start": 1302,
"end": 3177
} | class ____(ModelOutput):
"""
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
Args:
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
# Adapted from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings
| IdeficsVisionModelOutput |
python | keras-team__keras | keras/src/layers/normalization/group_normalization_test.py | {
"start": 164,
"end": 5919
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_groupnorm(self):
self.run_layer_test(
layers.GroupNormalization,
init_kwargs={
"gamma_regularizer": regularizers.L2(0.01),
"beta_regularizer": regularizers.L2(0.01),
},
input_shape=(3, 4, 32),
expected_output_shape=(3, 4, 32),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=2,
supports_masking=True,
)
self.run_layer_test(
layers.GroupNormalization,
init_kwargs={
"groups": 4,
"gamma_constraint": constraints.UnitNorm(),
"beta_constraint": constraints.UnitNorm(),
},
input_shape=(3, 4, 4),
expected_output_shape=(3, 4, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_undefined_dim_error(self):
inputs = layers.Input(shape=(2, 2, 2, None))
layer = layers.GroupNormalization()
with self.assertRaisesRegex(
ValueError,
(
"input tensor should have a defined dimension but the layer "
"received an input with shape"
),
):
_ = layer(inputs)
def test_groups_bigger_than_dim_error(self):
inputs = np.ones(shape=(2, 2, 2, 4))
layer = layers.GroupNormalization(groups=5)
with self.assertRaisesRegex(
ValueError,
"cannot be more than the number of channels",
):
_ = layer(inputs)
def test_groups_not_a_multiple_of_dim_error(self):
inputs = np.ones(shape=(2, 2, 2, 4))
layer = layers.GroupNormalization(groups=3)
with self.assertRaisesRegex(
ValueError,
"must be a multiple of the number of channels",
):
_ = layer(inputs)
def test_groups_instance_norm(self):
# GroupNormalization with groups=-1 will become InstanceNormalization
instance_norm_layer_1 = layers.GroupNormalization(
groups=-1, axis=-1, scale=False, center=False
)
instance_norm_layer_2 = layers.GroupNormalization(
groups=4, axis=-1, scale=False, center=False
)
inputs = np.array([[[-1.0, 1.0, 0, 2.0], [1.0, 3.0, -4, -2.0]]])
outputs_1 = instance_norm_layer_1(inputs)
outputs_2 = instance_norm_layer_2(inputs)
self.assertAllClose(outputs_1, outputs_2)
def test_correctness_instance_norm(self):
instance_norm_layer = layers.GroupNormalization(
groups=4, axis=-1, scale=False, center=False
)
inputs = np.array([[[-1.0, 1.0, 0, 2.0], [1.0, 3.0, -4, -2.0]]])
expected_instance_norm_output = np.array(
[[[-1.0, -1.0, 1.0, 1.0], [1.0, 1.0, -1.0, -1.0]]]
)
self.assertAllClose(
instance_norm_layer(inputs),
expected_instance_norm_output,
atol=1e-3,
)
def test_correctness_1d(self):
layer_with_1_group = layers.GroupNormalization(
groups=1, axis=-1, scale=False, center=False
)
layer_with_2_groups = layers.GroupNormalization(
groups=2, axis=1, scale=False, center=False
)
inputs = np.array([[-1.0, -1.0, 1.0, 1.0, 2.0, 2.0, 0, -2.0]])
expected_output_1_group = np.array(
[[-0.898, -0.898, 0.539, 0.539, 1.257, 1.257, -0.180, -1.616]],
)
self.assertAllClose(
layer_with_1_group(inputs),
expected_output_1_group,
atol=1e-3,
)
expected_output_2_groups = np.array(
[[-1.0, -1.0, 1.0, 1.0, 0.904, 0.904, -0.301, -1.507]]
)
self.assertAllClose(
layer_with_2_groups(inputs),
expected_output_2_groups,
atol=1e-3,
)
def test_correctness_2d(self):
layer_with_1_group = layers.GroupNormalization(
groups=1, axis=-1, scale=False, center=False
)
layer_with_2_groups = layers.GroupNormalization(
groups=2, axis=2, scale=False, center=False
)
inputs = np.array([[[-1.0, -1.0, 2.0, 2.0], [1.0, 1.0, 0, -2.0]]])
expected_output_1_group = np.array(
[[[-0.898, -0.898, 1.257, 1.257], [0.539, 0.539, -0.180, -1.616]]]
)
self.assertAllClose(
layer_with_1_group(inputs),
expected_output_1_group,
atol=1e-3,
)
expected_output_2_groups = np.array(
[[[-1.0, -1.0, 0.904, 0.904], [1.0, 1.0, -0.301, -1.507]]]
)
self.assertAllClose(
layer_with_2_groups(inputs),
expected_output_2_groups,
atol=1e-3,
)
def test_broadcasting_2d_channels_first(self):
x = np.arange(16).reshape((1, 4, 2, 2)).astype("float32")
x = layers.GroupNormalization(groups=2, axis=1)(x)
self.assertAllClose(
x,
np.array(
[
[
[[-1.5274, -1.0910], [-0.6546, -0.2182]],
[[0.2182, 0.6546], [1.0910, 1.5274]],
[[-1.5274, -1.0910], [-0.6546, -0.2182]],
[[0.2182, 0.6546], [1.0910, 1.5274]],
]
]
),
atol=1e-3,
)
| GroupNormalizationTest |
python | doocs__leetcode | solution/1400-1499/1411.Number of Ways to Paint N × 3 Grid/Solution.py | {
"start": 0,
"end": 272
} | class ____:
def numOfWays(self, n: int) -> int:
mod = 10**9 + 7
f0 = f1 = 6
for _ in range(n - 1):
g0 = (3 * f0 + 2 * f1) % mod
g1 = (2 * f0 + 2 * f1) % mod
f0, f1 = g0, g1
return (f0 + f1) % mod
| Solution |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_E.py | {
"start": 4887,
"end": 6324
} | class ____(Benchmark):
r"""
Egg Holder [1]_ objective function.
This class defines the Egg Holder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{EggHolder}}=\sum_{1}^{n - 1}\left[-\left(x_{i + 1}
+ 47 \right ) \sin\sqrt{\lvert x_{i+1} + x_i/2 + 47 \rvert}
- x_i \sin\sqrt{\lvert x_i - (x_{i + 1} + 47)\rvert}\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-512, 512]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -959.640662711` for
:math:`{x} = [512, 404.2319]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: Jamil is missing a minus sign on the fglob value
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-512.1] * self.N,
[512.0] * self.N))
self.global_optimum = [[512.0, 404.2319]]
self.fglob = -959.640662711
def fun(self, x, *args):
self.nfev += 1
vec = (-(x[1:] + 47) * sin(sqrt(abs(x[1:] + x[:-1] / 2. + 47)))
- x[:-1] * sin(sqrt(abs(x[:-1] - (x[1:] + 47)))))
return sum(vec)
| EggHolder |
python | pyqtgraph__pyqtgraph | pyqtgraph/dockarea/Container.py | {
"start": 5017,
"end": 5881
} | class ____(SplitContainer):
def __init__(self, area):
SplitContainer.__init__(self, area, QtCore.Qt.Orientation.Horizontal)
def type(self):
return 'horizontal'
def updateStretch(self):
##Set the stretch values for this container to reflect its contents
#print "updateStretch", self
x = 0
y = 0
sizes = []
for i in range(self.count()):
wx, wy = self.widget(i).stretch()
x += wx
y = max(y, wy)
sizes.append(wx)
#print " child", self.widget(i), wx, wy
self.setStretch(x, y)
#print sizes
tot = float(sum(sizes))
if tot == 0:
scale = 1.0
else:
scale = self.width() / tot
self.setSizes([int(s*scale) for s in sizes])
| HContainer |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 9129,
"end": 9466
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 10, bias=False)
self.c = nn.Linear(5, 5, bias=False)
def forward(self, x):
a = self.a(x)
b = self.b(x)
return (a, b)
| UnusedParamTwoLinLayerNet |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 13238,
"end": 14792
} | class ____(dict[str, Any]):
"""Dictionary that can be added to another dictionary."""
def __add__(self, other: AddableDict) -> AddableDict:
"""Add a dictionary to this dictionary.
Args:
other: The other dictionary to add.
Returns:
A dictionary that is the result of adding the two dictionaries.
"""
chunk = AddableDict(self)
for key in other:
if key not in chunk or chunk[key] is None:
chunk[key] = other[key]
elif other[key] is not None:
try:
added = chunk[key] + other[key]
except TypeError:
added = other[key]
chunk[key] = added
return chunk
def __radd__(self, other: AddableDict) -> AddableDict:
"""Add this dictionary to another dictionary.
Args:
other: The other dictionary to be added to.
Returns:
A dictionary that is the result of adding the two dictionaries.
"""
chunk = AddableDict(other)
for key in self:
if key not in chunk or chunk[key] is None:
chunk[key] = self[key]
elif self[key] is not None:
try:
added = chunk[key] + self[key]
except TypeError:
added = self[key]
chunk[key] = added
return chunk
_T_co = TypeVar("_T_co", covariant=True)
_T_contra = TypeVar("_T_contra", contravariant=True)
| AddableDict |
python | Netflix__metaflow | metaflow/user_configs/config_options.py | {
"start": 18602,
"end": 22435
} | class ____(click.Path):
# Small wrapper around click.Path to set the value from which to read configuration
# values. This is set immediately upon processing the --local-config-file
# option and will therefore then be available when processing any of the other
# --config options (which will call ConfigInput.process_configs)
name = "LocalFileInput"
def convert(self, value, param, ctx):
v = super().convert(value, param, ctx)
ConfigInput.set_config_file(value)
return v
def __str__(self):
return repr(self)
def __repr__(self):
return "LocalFileInput"
def config_options_with_config_input(cmd):
help_strs = []
required_names = []
defaults = {}
config_seen = set()
parsers = {}
flow_cls = getattr(current_flow, "flow_cls", None)
if flow_cls is None:
return cmd, None
parameters = [p for _, p in flow_cls._get_parameters() if p.IS_CONFIG_PARAMETER]
# List all the configuration options
for arg in parameters[::-1]:
kwargs = arg.option_kwargs(False)
if arg.name in config_seen:
msg = (
"Multiple configurations use the same name '%s'. Please change the "
"names of some of your configurations" % arg.name
)
raise MetaflowException(msg)
config_seen.add(arg.name)
if kwargs["required"]:
required_names.append(arg.name)
defaults[arg.name] = (
arg.kwargs.get("default", None),
arg._default_is_file,
)
help_strs.append(" - %s: %s" % (arg.name, kwargs.get("help", "")))
parsers[arg.name] = (arg.parser, arg.kwargs["plain"])
if not config_seen:
# No configurations -- don't add anything; we set it to False so that it
# can be checked whether or not we called this.
return cmd, False
help_str = (
"Configuration options for the flow. "
"Multiple configurations can be specified. Cannot be used with resume."
)
help_str = "\n\n".join([help_str] + help_strs)
config_input = ConfigInput(required_names, defaults, parsers)
cb_func = config_input.process_configs_click
cmd.params.insert(
0,
click.Option(
["--config-value", "config_value"],
nargs=2,
multiple=True,
type=MultipleTuple([click.Choice(config_seen), ConvertDictOrStr()]),
callback=cb_func,
help=help_str,
envvar="METAFLOW_FLOW_CONFIG_VALUE",
show_default=False,
default=[
(
k,
(
ConvertDictOrStr.mark_as_default(v[0])
if not callable(v[0]) and not v[1]
else None
),
)
for k, v in defaults.items()
],
required=False,
),
)
cmd.params.insert(
0,
click.Option(
["--config", "config"],
nargs=2,
multiple=True,
type=MultipleTuple([click.Choice(config_seen), ConvertPath()]),
callback=cb_func,
help=help_str,
envvar="METAFLOW_FLOW_CONFIG",
show_default=False,
default=[
(
k,
(
ConvertPath.mark_as_default(v[0])
if not callable(v[0]) and v[1]
else None
),
)
for k, v in defaults.items()
],
required=False,
),
)
return cmd, config_input
def config_options(cmd):
cmd, _ = config_options_with_config_input(cmd)
return cmd
| LocalFileInput |
python | apache__airflow | providers/mysql/tests/unit/mysql/transfers/test_presto_to_mysql.py | {
"start": 1044,
"end": 2733
} | class ____:
def setup_method(self):
self.kwargs = dict(
sql="sql",
mysql_table="mysql_table",
task_id="test_presto_to_mysql_transfer",
)
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_presto_to_mysql_transfer", schedule=None, default_args=args)
@patch("airflow.providers.mysql.transfers.presto_to_mysql.MySqlHook")
@patch("airflow.providers.mysql.transfers.presto_to_mysql.PrestoHook")
def test_execute(self, mock_presto_hook, mock_mysql_hook):
PrestoToMySqlOperator(**self.kwargs).execute(context={})
mock_presto_hook.return_value.get_records.assert_called_once_with(self.kwargs["sql"])
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table=self.kwargs["mysql_table"], rows=mock_presto_hook.return_value.get_records.return_value
)
@patch("airflow.providers.mysql.transfers.presto_to_mysql.MySqlHook")
@patch("airflow.providers.mysql.transfers.presto_to_mysql.PrestoHook")
def test_execute_with_mysql_preoperator(self, mock_presto_hook, mock_mysql_hook):
self.kwargs.update(dict(mysql_preoperator="mysql_preoperator"))
PrestoToMySqlOperator(**self.kwargs).execute(context={})
mock_presto_hook.return_value.get_records.assert_called_once_with(self.kwargs["sql"])
mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs["mysql_preoperator"])
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table=self.kwargs["mysql_table"], rows=mock_presto_hook.return_value.get_records.return_value
)
| TestPrestoToMySqlTransfer |
python | ray-project__ray | rllib/algorithms/dqn/dqn_learner.py | {
"start": 1428,
"end": 4654
} | class ____(Learner):
@OverrideToImplementCustomLogic_CallToSuperRecommended
@override(Learner)
def build(self) -> None:
super().build()
# Make target networks.
self.module.foreach_module(
lambda mid, mod: (
mod.make_target_networks()
if isinstance(mod, TargetNetworkAPI)
else None
)
)
# Prepend the "add-NEXT_OBS-from-episodes-to-train-batch" connector piece (right
# after the corresponding "add-OBS-..." default piece).
self._learner_connector.insert_after(
AddObservationsFromEpisodesToBatch,
AddNextObservationsFromEpisodesToTrainBatch(),
)
@override(Learner)
def add_module(
self,
*,
module_id: ModuleID,
module_spec: RLModuleSpec,
config_overrides: Optional[Dict] = None,
new_should_module_be_updated: Optional[ShouldModuleBeUpdatedFn] = None,
) -> MultiRLModuleSpec:
marl_spec = super().add_module(
module_id=module_id,
module_spec=module_spec,
config_overrides=config_overrides,
new_should_module_be_updated=new_should_module_be_updated,
)
# Create target networks for added Module, if applicable.
if isinstance(self.module[module_id].unwrapped(), TargetNetworkAPI):
self.module[module_id].unwrapped().make_target_networks()
return marl_spec
@override(Learner)
def after_gradient_based_update(self, *, timesteps: Dict[str, Any]) -> None:
"""Updates the target Q Networks."""
super().after_gradient_based_update(timesteps=timesteps)
timestep = timesteps.get(NUM_ENV_STEPS_SAMPLED_LIFETIME, 0)
# TODO (sven): Maybe we should have a `after_gradient_based_update`
# method per module?
for module_id, module in self.module._rl_modules.items():
config = self.config.get_config_for_module(module_id)
last_update_ts_key = (module_id, LAST_TARGET_UPDATE_TS)
if timestep - self.metrics.peek(
last_update_ts_key, default=0
) >= config.target_network_update_freq and isinstance(
module.unwrapped(), TargetNetworkAPI
):
for (
main_net,
target_net,
) in module.unwrapped().get_target_network_pairs():
update_target_network(
main_net=main_net,
target_net=target_net,
tau=config.tau,
)
# Increase lifetime target network update counter by one.
self.metrics.log_value((module_id, NUM_TARGET_UPDATES), 1, reduce="sum")
# Update the (single-value -> window=1) last updated timestep metric.
self.metrics.log_value(last_update_ts_key, timestep, window=1)
@classmethod
@override(Learner)
def rl_module_required_apis(cls) -> list[type]:
# In order for a PPOLearner to update an RLModule, it must implement the
# following APIs:
return [QNetAPI, TargetNetworkAPI]
| DQNLearner |
python | astropy__astropy | astropy/utils/masked/tests/test_functions.py | {
"start": 14497,
"end": 14855
} | class ____(MaskedUfuncTests, LongitudeSetup):
def test_ufunc_inplace_quantity_initial(self):
out = Masked(np.zeros(self.ma.shape) << u.m)
result = np.add(self.ma, self.mb, out=out)
assert result is out
expected = np.add(self.ma, self.mb).view(Quantity)
assert_masked_equal(result, expected)
| TestMaskedLongitudeUfuncs |
python | pytorch__pytorch | torch/ao/quantization/experimental/observer.py | {
"start": 475,
"end": 5944
} | class ____(ObserverBase):
b: int
k: int
n: int
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(self, b, k, dtype=torch.quint8) -> None:
super().__init__(dtype)
self.b = b
self.k = k
self.min_val = torch.tensor([])
self.max_val = torch.tensor([])
# min_val and max_val are optional args to override
# the min_val and max_val observed by forward
def calculate_qparams(self, signed): # type:ignore[override]
return self._calculate_qparams(signed, self.min_val, self.max_val)
r""" Calculates nonuniform quantization parameters according to APoT paper:
https://arxiv.org/pdf/1909.13144.pdf.
Arg:
signed: specifies whether to include signed values in quantization level calculations
min_val: optional arg that can override min_val internal attribute
max_val: optional arg that can override max_val internal attribute
Returns:
alpha: alpha quantization parameter, max of abs value of observed values
gamma: gamma quantization parameter, defined to ensure that alpha is the maximum of the range
quantization_levels: non-uniform quantization levels (fp representation)
level_indices: int representation of quantization_levels indices
"""
def _calculate_qparams(self, signed: bool, min_val=None, max_val=None):
if min_val is not None:
self.min_val = min_val
if max_val is not None:
self.max_val = max_val
# compute alpha
alpha = torch.max(-self.min_val, self.max_val)
# check for valid inputs of b, k
if not self.k or self.k == 0:
raise AssertionError(f"k must be a non-zero integer, got k={self.k}")
if self.b % self.k != 0:
raise AssertionError(
f"b must be divisible by k, got b={self.b}, k={self.k}"
)
# compute n and store as member variable
self.n = self.b // self.k
# store a tensor of subtensors (all levels)
p_all = []
# create levels
for i in range(self.n):
p_curr = torch.tensor([0])
for j in range((2**self.k - 2) + 1):
curr_ele = 2 ** (-(i + j * self.n))
p_append = torch.tensor([curr_ele])
p_curr = torch.cat((p_curr, p_append))
# introduce signed numbers
if signed:
p_curr = torch.cat((p_curr, torch.tensor([-curr_ele])))
if signed:
# sort tensor in reverse order before adding to list if signed
sorted, _indices = torch.sort(p_curr, descending=True)
p_all.append(sorted)
else:
p_all.append(p_curr)
# gamma calculation:
# loop through all tensors
# if signed, add element at index 0 for each tensor
# else, add element at index 1 for each tensor
# gamma defined to ensure alpha is at max of range
p_sum = 0.0
for tens in p_all:
if signed:
p_sum += float(tens[0])
else:
p_sum += float(tens[1])
# assign gamma
gamma = alpha / p_sum
# calculate cartesian product
cartesian_product = list(itertools.product(*p_all))
quantization_levels_list = []
# calculate sum of each row
for row in cartesian_product:
sum = 0.0
for ele in row:
sum += ele
quantization_levels_list.append(sum)
quantization_levels_gamma = [
float(gamma) * ele for ele in quantization_levels_list
]
quantization_levels = torch.tensor(quantization_levels_gamma)
level_indices = torch.tensor([])
quantization_levels, level_indices = quantization_levels.sort()
return (alpha, gamma, quantization_levels, level_indices)
r"""Records the running minimum and maximum of ``x``.
Args:
x_orig: Tensor to be observed for min and max val"""
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach()
min_val, max_val = torch.aminmax(x)
if self.min_val.numel():
min_val = torch.min(min_val, self.min_val)
if self.max_val.numel():
max_val = torch.max(max_val, self.max_val)
self.min_val = min_val
self.max_val = max_val
return x_orig
r"""Displays visualization of APoT quantization levels
Args:
observer: APoTObserver to calculate qparams
signed: bool to indicate if qparams should be signed/unsigned
"""
def quant_levels_visualization(self, signed=False):
# matplotlib is optional dep
import matplotlib.pyplot as plt
alpha, _gamma, quantization_levels, level_indices = self.calculate_qparams(
signed
)
xs = [float(x) / 1000.0 for x in range(1000)]
ys = [
apot_to_float(
float_to_apot(x, quantization_levels, level_indices, alpha),
quantization_levels,
level_indices,
).item()
for x in xs
]
plt.figure(figsize=(15, 10))
plt.plot(xs, ys)
plt.title("APoT Quantization Plot")
plt.xlabel("Full Precision")
plt.ylabel("Quantized")
plt.show()
| APoTObserver |
python | django-extensions__django-extensions | django_extensions/db/fields/__init__.py | {
"start": 16864,
"end": 20123
} | class ____:
"""
UUIDFieldMixin
By default uses UUID version 4 (randomly generated UUID).
The field support all uuid versions which are natively supported by the uuid python module, except version 2.
For more information see: https://docs.python.org/lib/module-uuid.html
""" # noqa: E501
DEFAULT_MAX_LENGTH = 36
def __init__(
self,
verbose_name=None,
name=None,
auto=True,
version=4,
node=None,
clock_seq=None,
namespace=None,
uuid_name=None,
*args,
**kwargs,
):
if not HAS_UUID:
raise ImproperlyConfigured(
"'uuid' module is required for UUIDField. "
"(Do you have Python 2.5 or higher installed ?)"
)
kwargs.setdefault("max_length", self.DEFAULT_MAX_LENGTH)
if auto:
self.empty_strings_allowed = False
kwargs["blank"] = True
kwargs.setdefault("editable", False)
self.auto = auto
self.version = version
self.node = node
self.clock_seq = clock_seq
self.namespace = namespace
self.uuid_name = uuid_name or name
super().__init__(verbose_name=verbose_name, *args, **kwargs)
def create_uuid(self):
if not self.version or self.version == 4:
return uuid.uuid4()
elif self.version == 1:
return uuid.uuid1(self.node, self.clock_seq)
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
return uuid.uuid3(self.namespace, self.uuid_name)
elif self.version == 5:
return uuid.uuid5(self.namespace, self.uuid_name)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
def pre_save(self, model_instance, add):
value = super().pre_save(model_instance, add)
if self.auto and add and value is None:
value = force_str(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
else:
if self.auto and not value:
value = force_str(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
if self.auto:
return None
return super().formfield(form_class, choices_form_class, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length", None) == self.DEFAULT_MAX_LENGTH:
del kwargs["max_length"]
if self.auto is not True:
kwargs["auto"] = self.auto
if self.version != 4:
kwargs["version"] = self.version
if self.node is not None:
kwargs["node"] = self.node
if self.clock_seq is not None:
kwargs["clock_seq"] = self.clock_seq
if self.namespace is not None:
kwargs["namespace"] = self.namespace
if self.uuid_name is not None:
kwargs["uuid_name"] = self.name
return name, path, args, kwargs
| UUIDFieldMixin |
python | ray-project__ray | rllib/evaluation/episode_v2.py | {
"start": 794,
"end": 14949
} | class ____:
"""Tracks the current state of a (possibly multi-agent) episode."""
def __init__(
self,
env_id: EnvID,
policies: PolicyMap,
policy_mapping_fn: Callable[[AgentID, "EpisodeV2", "RolloutWorker"], PolicyID],
*,
worker: Optional["RolloutWorker"] = None,
callbacks: Optional["RLlibCallback"] = None,
):
"""Initializes an Episode instance.
Args:
env_id: The environment's ID in which this episode runs.
policies: The PolicyMap object (mapping PolicyIDs to Policy
objects) to use for determining, which policy is used for
which agent.
policy_mapping_fn: The mapping function mapping AgentIDs to
PolicyIDs.
worker: The RolloutWorker instance, in which this episode runs.
"""
# Unique id identifying this trajectory.
self.episode_id: int = random.randrange(int(1e18))
# ID of the environment this episode is tracking.
self.env_id = env_id
# Summed reward across all agents in this episode.
self.total_reward: float = 0.0
# Active (uncollected) # of env steps taken by this episode.
# Start from -1. After add_init_obs(), we will be at 0 step.
self.active_env_steps: int = -1
# Total # of env steps taken by this episode.
# Start from -1, After add_init_obs(), we will be at 0 step.
self.total_env_steps: int = -1
# Active (uncollected) agent steps.
self.active_agent_steps: int = 0
# Total # of steps take by all agents in this env.
self.total_agent_steps: int = 0
# Dict for user to add custom metrics.
# TODO (sven): We should probably unify custom_metrics, user_data,
# and hist_data into a single data container for user to track per-step.
# metrics and states.
self.custom_metrics: Dict[str, float] = {}
# Temporary storage. E.g. storing data in between two custom
# callbacks referring to the same episode.
self.user_data: Dict[str, Any] = {}
# Dict mapping str keys to List[float] for storage of
# per-timestep float data throughout the episode.
self.hist_data: Dict[str, List[float]] = {}
self.media: Dict[str, Any] = {}
self.worker = worker
self.callbacks = callbacks
self.policy_map: PolicyMap = policies
self.policy_mapping_fn: Callable[
[AgentID, "EpisodeV2", "RolloutWorker"], PolicyID
] = policy_mapping_fn
# Per-agent data collectors.
self._agent_to_policy: Dict[AgentID, PolicyID] = {}
self._agent_collectors: Dict[AgentID, AgentCollector] = {}
self._next_agent_index: int = 0
self._agent_to_index: Dict[AgentID, int] = {}
# Summed rewards broken down by agent.
self.agent_rewards: Dict[Tuple[AgentID, PolicyID], float] = defaultdict(float)
self._agent_reward_history: Dict[AgentID, List[int]] = defaultdict(list)
self._has_init_obs: Dict[AgentID, bool] = {}
self._last_terminateds: Dict[AgentID, bool] = {}
self._last_truncateds: Dict[AgentID, bool] = {}
# Keep last info dict around, in case an environment tries to signal
# us something.
self._last_infos: Dict[AgentID, Dict] = {}
def policy_for(
self, agent_id: AgentID = _DUMMY_AGENT_ID, refresh: bool = False
) -> PolicyID:
"""Returns and stores the policy ID for the specified agent.
If the agent is new, the policy mapping fn will be called to bind the
agent to a policy for the duration of the entire episode (even if the
policy_mapping_fn is changed in the meantime!).
Args:
agent_id: The agent ID to lookup the policy ID for.
Returns:
The policy ID for the specified agent.
"""
# Perform a new policy_mapping_fn lookup and bind AgentID for the
# duration of this episode to the returned PolicyID.
if agent_id not in self._agent_to_policy or refresh:
policy_id = self._agent_to_policy[agent_id] = self.policy_mapping_fn(
agent_id, # agent_id
self, # episode
worker=self.worker,
)
# Use already determined PolicyID.
else:
policy_id = self._agent_to_policy[agent_id]
# PolicyID not found in policy map -> Error.
if policy_id not in self.policy_map:
raise KeyError(
"policy_mapping_fn returned invalid policy id " f"'{policy_id}'!"
)
return policy_id
def get_agents(self) -> List[AgentID]:
"""Returns list of agent IDs that have appeared in this episode.
Returns:
The list of all agent IDs that have appeared so far in this
episode.
"""
return list(self._agent_to_index.keys())
def agent_index(self, agent_id: AgentID) -> int:
"""Get the index of an agent among its environment.
A new index will be created if an agent is seen for the first time.
Args:
agent_id: ID of an agent.
Returns:
The index of this agent.
"""
if agent_id not in self._agent_to_index:
self._agent_to_index[agent_id] = self._next_agent_index
self._next_agent_index += 1
return self._agent_to_index[agent_id]
def step(self) -> None:
"""Advance the episode forward by one step."""
self.active_env_steps += 1
self.total_env_steps += 1
def add_init_obs(
self,
*,
agent_id: AgentID,
init_obs: TensorType,
init_infos: Dict[str, TensorType],
t: int = -1,
) -> None:
"""Add initial env obs at the start of a new episode
Args:
agent_id: Agent ID.
init_obs: Initial observations.
init_infos: Initial infos dicts.
t: timestamp.
"""
policy = self.policy_map[self.policy_for(agent_id)]
# Add initial obs to Trajectory.
assert agent_id not in self._agent_collectors
self._agent_collectors[agent_id] = AgentCollector(
policy.view_requirements,
max_seq_len=policy.config["model"]["max_seq_len"],
disable_action_flattening=policy.config.get(
"_disable_action_flattening", False
),
is_policy_recurrent=policy.is_recurrent(),
intial_states=policy.get_initial_state(),
_enable_new_api_stack=False,
)
self._agent_collectors[agent_id].add_init_obs(
episode_id=self.episode_id,
agent_index=self.agent_index(agent_id),
env_id=self.env_id,
init_obs=init_obs,
init_infos=init_infos,
t=t,
)
self._has_init_obs[agent_id] = True
def add_action_reward_done_next_obs(
self,
agent_id: AgentID,
values: Dict[str, TensorType],
) -> None:
"""Add action, reward, info, and next_obs as a new step.
Args:
agent_id: Agent ID.
values: Dict of action, reward, info, and next_obs.
"""
# Make sure, agent already has some (at least init) data.
assert agent_id in self._agent_collectors
self.active_agent_steps += 1
self.total_agent_steps += 1
# Include the current agent id for multi-agent algorithms.
if agent_id != _DUMMY_AGENT_ID:
values["agent_id"] = agent_id
# Add action/reward/next-obs (and other data) to Trajectory.
self._agent_collectors[agent_id].add_action_reward_next_obs(values)
# Keep track of agent reward history.
reward = values[SampleBatch.REWARDS]
self.total_reward += reward
self.agent_rewards[(agent_id, self.policy_for(agent_id))] += reward
self._agent_reward_history[agent_id].append(reward)
# Keep track of last terminated info for agent.
if SampleBatch.TERMINATEDS in values:
self._last_terminateds[agent_id] = values[SampleBatch.TERMINATEDS]
# Keep track of last truncated info for agent.
if SampleBatch.TRUNCATEDS in values:
self._last_truncateds[agent_id] = values[SampleBatch.TRUNCATEDS]
# Keep track of last info dict if available.
if SampleBatch.INFOS in values:
self.set_last_info(agent_id, values[SampleBatch.INFOS])
def postprocess_episode(
self,
batch_builder: _PolicyCollectorGroup,
is_done: bool = False,
check_dones: bool = False,
) -> None:
"""Build and return currently collected training samples by policies.
Clear agent collector states if this episode is done.
Args:
batch_builder: _PolicyCollectorGroup for saving the collected per-agent
sample batches.
is_done: If this episode is done (terminated or truncated).
check_dones: Whether to make sure per-agent trajectories are actually done.
"""
# TODO: (sven) Once we implement multi-agent communication channels,
# we have to resolve the restriction of only sending other agent
# batches from the same policy to the postprocess methods.
# Build SampleBatches for the given episode.
pre_batches = {}
for agent_id, collector in self._agent_collectors.items():
# Build only if there is data and agent is part of given episode.
if collector.agent_steps == 0:
continue
pid = self.policy_for(agent_id)
policy = self.policy_map[pid]
pre_batch = collector.build_for_training(policy.view_requirements)
pre_batches[agent_id] = (pid, policy, pre_batch)
for agent_id, (pid, policy, pre_batch) in pre_batches.items():
# Entire episode is said to be done.
# Error if no DONE at end of this agent's trajectory.
if is_done and check_dones and not pre_batch.is_terminated_or_truncated():
raise ValueError(
"Episode {} terminated for all agents, but we still "
"don't have a last observation for agent {} (policy "
"{}). ".format(self.episode_id, agent_id, self.policy_for(agent_id))
+ "Please ensure that you include the last observations "
"of all live agents when setting done[__all__] to "
"True."
)
# Skip a trajectory's postprocessing (and thus using it for training),
# if its agent's info exists and contains the training_enabled=False
# setting (used by our PolicyClients).
if not self._last_infos.get(agent_id, {}).get("training_enabled", True):
continue
if (
not pre_batch.is_single_trajectory()
or len(np.unique(pre_batch[SampleBatch.EPS_ID])) > 1
):
raise ValueError(
"Batches sent to postprocessing must only contain steps "
"from a single trajectory.",
pre_batch,
)
if len(pre_batches) > 1:
other_batches = pre_batches.copy()
del other_batches[agent_id]
else:
other_batches = {}
# Call the Policy's Exploration's postprocess method.
post_batch = pre_batch
if getattr(policy, "exploration", None) is not None:
policy.exploration.postprocess_trajectory(
policy, post_batch, policy.get_session()
)
post_batch.set_get_interceptor(None)
post_batch = policy.postprocess_trajectory(post_batch, other_batches, self)
from ray.rllib.evaluation.rollout_worker import get_global_worker
self.callbacks.on_postprocess_trajectory(
worker=get_global_worker(),
episode=self,
agent_id=agent_id,
policy_id=pid,
policies=self.policy_map,
postprocessed_batch=post_batch,
original_batches=pre_batches,
)
# Append post_batch for return.
if pid not in batch_builder.policy_collectors:
batch_builder.policy_collectors[pid] = _PolicyCollector(policy)
batch_builder.policy_collectors[pid].add_postprocessed_batch_for_training(
post_batch, policy.view_requirements
)
batch_builder.agent_steps += self.active_agent_steps
batch_builder.env_steps += self.active_env_steps
# AgentCollector cleared.
self.active_agent_steps = 0
self.active_env_steps = 0
def has_init_obs(self, agent_id: AgentID = None) -> bool:
"""Returns whether this episode has initial obs for an agent.
If agent_id is None, return whether we have received any initial obs,
in other words, whether this episode is completely fresh.
"""
if agent_id is not None:
return agent_id in self._has_init_obs and self._has_init_obs[agent_id]
else:
return any(list(self._has_init_obs.values()))
def is_done(self, agent_id: AgentID) -> bool:
return self.is_terminated(agent_id) or self.is_truncated(agent_id)
def is_terminated(self, agent_id: AgentID) -> bool:
return self._last_terminateds.get(agent_id, False)
def is_truncated(self, agent_id: AgentID) -> bool:
return self._last_truncateds.get(agent_id, False)
def set_last_info(self, agent_id: AgentID, info: Dict):
self._last_infos[agent_id] = info
def last_info_for(
self, agent_id: AgentID = _DUMMY_AGENT_ID
) -> Optional[EnvInfoDict]:
return self._last_infos.get(agent_id)
@property
def length(self):
return self.total_env_steps
| EpisodeV2 |
python | apache__airflow | airflow-core/src/airflow/secrets/environment_variables.py | {
"start": 1026,
"end": 1542
} | class ____(BaseSecretsBackend):
"""Retrieves Connection object and Variable from environment variable."""
def get_conn_value(self, conn_id: str) -> str | None:
return os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
def get_variable(self, key: str) -> str | None:
"""
Get Airflow Variable from Environment Variable.
:param key: Variable Key
:return: Variable Value
"""
return os.environ.get(VAR_ENV_PREFIX + key.upper())
| EnvironmentVariablesBackend |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 122602,
"end": 128648
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
@test_util.run_deprecated_v1
def testBasicColocationMetadata(self):
const_two = constant_op.constant([2.0], name="two")
with ops.colocate_with(const_two.op):
const_three = constant_op.constant(3.0, name="three")
locations_dict = const_three.op._colocation_dict
self.assertIn("two", locations_dict)
metadata = locations_dict["two"]
self.assertIsNone(metadata.obj)
# Check that this test's filename is recorded as the file containing the
# colocation statement.
self.assertEqual("ops_test.py", os.path.basename(metadata.filename))
@test_util.run_deprecated_v1
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
@test_util.run_deprecated_v1
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
@test_util.run_deprecated_v1
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
@test_util.run_deprecated_v1
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateResourceVariablesInFunction(self):
with ops.device("/device:CPU:0"):
a = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
with ops.colocate_with(a):
b = array_ops.ones([], name="output")
self.assertEqual("/device:CPU:0", b.op.device)
f()
def testColocateWithVariableInFunction(self):
v = variables.Variable(1.)
@def_function.function
def f():
with ops.colocate_with(v):
return array_ops.ones([], name="output")
f()
graph_def = f.get_concrete_function().graph.as_graph_def()
wrap_function.function_from_graph_def(graph_def, [], ["output"])
| ColocationGroupTest |
python | kubernetes-client__python | kubernetes/client/models/v1_csi_storage_capacity_list.py | {
"start": 383,
"end": 7115
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1CSIStorageCapacity]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1CSIStorageCapacityList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1CSIStorageCapacityList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1CSIStorageCapacityList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1CSIStorageCapacityList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1CSIStorageCapacityList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1CSIStorageCapacityList. # noqa: E501
items is the list of CSIStorageCapacity objects. # noqa: E501
:return: The items of this V1CSIStorageCapacityList. # noqa: E501
:rtype: list[V1CSIStorageCapacity]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1CSIStorageCapacityList.
items is the list of CSIStorageCapacity objects. # noqa: E501
:param items: The items of this V1CSIStorageCapacityList. # noqa: E501
:type: list[V1CSIStorageCapacity]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1CSIStorageCapacityList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1CSIStorageCapacityList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1CSIStorageCapacityList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1CSIStorageCapacityList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1CSIStorageCapacityList. # noqa: E501
:return: The metadata of this V1CSIStorageCapacityList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1CSIStorageCapacityList.
:param metadata: The metadata of this V1CSIStorageCapacityList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CSIStorageCapacityList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CSIStorageCapacityList):
return True
return self.to_dict() != other.to_dict()
| V1CSIStorageCapacityList |
python | pandas-dev__pandas | asv_bench/benchmarks/tslibs/tz_convert.py | {
"start": 522,
"end": 1509
} | class ____:
params = [
_sizes,
[x for x in _tzs if x is not None],
]
param_names = ["size", "tz"]
def setup(self, size, tz):
if size == 10**6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
arr = np.random.randint(0, 10, size=size, dtype="i8")
self.i8data = arr
def time_tz_convert_from_utc(self, size, tz):
# effectively:
# dti = DatetimeIndex(self.i8data, tz=tz)
# dti.tz_localize(None)
if old_sig:
tz_convert_from_utc(self.i8data, timezone.utc, tz)
else:
tz_convert_from_utc(self.i8data, tz)
def time_tz_localize_to_utc(self, size, tz):
# effectively:
# dti = DatetimeIndex(self.i8data)
# dti.tz_localize(tz, ambiguous="NaT", nonexistent="NaT")
tz_localize_to_utc(self.i8data, tz, ambiguous="NaT", nonexistent="NaT")
| TimeTZConvert |
python | allegroai__clearml | clearml/hyperdatasets/data_view.py | {
"start": 6288,
"end": 47227
} | class ____:
_MAX_BATCH_SIZE = 10000
_DEFAULT_LOCAL_BATCH_SIZE = 500
def __init__(
self,
name=None,
description=None,
tags=None,
iteration_order="sequential",
iteration_infinite=False,
iteration_random_seed=None,
iteration_limit=None,
auto_connect_with_task=True,
):
"""
Instantiate a `DataView` wrapper around backend dataview resources.
The dataview aggregates query rules and iteration parameters. When running under a ClearML task it
can optionally auto-connect and restore previously attached definitions.
:param name: Optional dataview name
:param description: Optional descriptive text
:param tags: Optional list of tag strings
:param iteration_order: Iteration order (`sequential` or `random`)
:param iteration_infinite: Whether to iterate indefinitely
:param iteration_random_seed: Seed used for random iteration
:param iteration_limit: Explicit maximum number of frames to iterate (None means unlimited)
:param auto_connect_with_task: Auto-attach to the current ClearML task when True
"""
self._iteration_order = iteration_order
self._iteration_infinite = iteration_infinite
self._iteration_limit = iteration_limit if iteration_limit is None else int(iteration_limit)
# TODO: connect with task in remote execution
self._auto_connect_with_task = auto_connect_with_task
self._iteration_random_seed = iteration_random_seed
self._name = name
self._description = description
self._tags = tags
self._id = None
self._filter_rules: List[Any] = []
self._queries: List[HyperDatasetQuery] = []
self._count_cache = None
self._synthetic_epoch_limit = None
self._private_metadata = {}
self._force_remote_store = False
# If running remotely under a Task, try to attach using Task helpers
# Only do this when auto_connect_with_task is enabled to avoid recursion
try:
if running_remotely() and self._auto_connect_with_task:
task = Task.current_task()
if not task:
tid = get_remote_task_id()
if tid:
task = Task.get_task(task_id=tid)
if task:
self._connected_task = task
dv_map = task.get_dataviews() or {}
# If a name was provided, prefer a matching dataview by name
picked = None
if isinstance(dv_map, dict):
if self._name:
picked = dv_map.get(self._name)
else:
for _dv in dv_map.values():
picked = _dv
break
if picked:
try:
self._copy_from_other_dataview(picked)
self._force_remote_store = False
except Exception:
self._force_remote_store = True
else:
self._force_remote_store = True
except Exception:
pass
@property
def id(self):
"""
Return the backend identifier of the materialised DataView.
:return: DataView ID string or None when not yet created
"""
return self._id
@property
def name(self):
"""
Return the human-readable name assigned to this DataView.
:return: DataView name string or None
"""
return self._name
@name.setter
def name(self, value):
"""
Update the human-readable name associated with this DataView.
:param value: New DataView name string or None
"""
self._name = value
def get_queries(self):
"""Return current HyperDatasetQuery objects attached to this dataview."""
return list(self._queries)
def _mutation_allowed(self) -> bool:
try:
if running_remotely() and getattr(self, "_auto_connect_with_task", False):
dv_id = getattr(self, "_id", None)
if not dv_id:
return True
if not self._queries and not self._filter_rules:
return True
return False
except Exception:
pass
return True
def _build_filter_rule_from_query(self, query: "HyperDatasetQuery") -> Any:
return DataViewManagementBackend.create_filter_rule(
dataset=query.dataset_id,
label_rules=query.label_rules,
filter_by_roi=query.filter_by_roi,
frame_query=query.frame_query,
sources_query=query.source_query,
version=query.version_id,
weight=query.weight,
)
def _append_queries(self, queries: Sequence["HyperDatasetQuery"]) -> None:
if not queries:
return
filter_rules: List[Any] = []
for query in queries:
if not isinstance(query, HyperDatasetQuery):
raise ValueError("DataView expects HyperDatasetQuery instances")
filter_rules.append(self._build_filter_rule_from_query(query))
self._filter_rules.extend(filter_rules)
self._queries.extend(queries)
self._count_cache = None
self._synthetic_epoch_limit = None
self._resync_task_attachment()
if self._id:
result = DataViewManagementBackend.update_filter_rules(
dataview_id=self._id, filter_rules=self._filter_rules
)
if not result:
raise ValueError("Failed updating DataView {}".format(self._id))
self._resync_task_attachment()
def set_queries(self, queries: Optional[Iterable["HyperDatasetQuery"]]) -> None:
"""
Replace all existing queries with the supplied collection.
:param queries: Iterable of `HyperDatasetQuery` objects; pass None or an empty iterable to clear
"""
if not self._mutation_allowed():
return
normalized = list(queries) if queries is not None else []
self._filter_rules = []
self._queries = []
self._count_cache = None
self._synthetic_epoch_limit = None
if not normalized:
if self._id:
DataViewManagementBackend.update_filter_rules(
dataview_id=self._id, filter_rules=[]
)
self._resync_task_attachment()
return
self._append_queries(normalized)
def add_query(
self,
*,
project_id: str = "*",
dataset_id: str = "*",
version_id: str = "*",
source_query=None,
frame_query=None,
weight: Optional[float] = 1.0,
filter_by_roi=None,
label_rules=None,
) -> "HyperDatasetQuery":
"""
Construct and append a single `HyperDatasetQuery` without instantiating it externally.
:param project_id: Dataset collection identifier or wildcard
:param dataset_id: Dataset identifier or wildcard
:param version_id: Dataset version identifier
:param source_query: Lucene query applied to frame sources
:param frame_query: Lucene query applied to frame metadata
:param weight: Sampling weight when combining multiple queries
:param filter_by_roi: ROI filtering strategy name
:param label_rules: Optional label rule definitions for ROI filtering
:return: The created `HyperDatasetQuery` instance
"""
query = HyperDatasetQuery(
project_id=project_id,
dataset_id=dataset_id,
version_id=version_id,
source_query=source_query,
frame_query=frame_query,
weight=weight,
filter_by_roi=filter_by_roi,
label_rules=label_rules,
)
self.add_queries(query)
return query
def get_iteration_parameters(self):
"""
:return: The cached iteration configuration for this dataview.
"""
return {
"order": self._iteration_order,
"infinite": self._iteration_infinite,
"limit": self._iteration_limit,
"random_seed": self._iteration_random_seed,
}
def set_iteration_parameters(self, *, infinite=None, limit=_UNSET):
"""
Persist iteration settings both locally and on the backend if possible.
"""
updated = False
if infinite is not None:
self._iteration_infinite = bool(infinite)
updated = True
if limit is not _UNSET:
self._iteration_limit = int(limit) if limit is not None else None
updated = True
if not updated:
return
if self._id:
DataViewManagementBackend.update_iteration_parameters(
self._id,
infinite=self._iteration_infinite,
limit=self._iteration_limit,
order=self._iteration_order,
random_seed=self._iteration_random_seed,
)
def add_queries(self, queries: HyperDatasetQuery):
"""
Append one or more query rules to the dataview.
If the dataview already exists on the backend the remote filter rules are updated immediately and
the attached task is re-synchronised.
:param queries: A `HyperDatasetQuery` instance or iterable of instances to add
"""
if not self._mutation_allowed():
return
if isinstance(queries, HyperDatasetQuery):
normalized: Sequence[HyperDatasetQuery] = [queries]
else:
try:
normalized = list(queries)
except TypeError as exc:
raise ValueError("DataView.add_queries expects a query or an iterable of queries") from exc
self._append_queries(normalized)
def _ensure_created(self):
"""
Ensure a matching backend dataview resource exists.
The method lazily creates the dataview when first required, reusing the existing resource when it
already exists. Raises a `ValueError` if no concrete dataset/version pairs can be derived from the
configured queries.
:return: None
"""
if self._id:
# If running remotely and we already have an id, verify it exists server-side
try:
if running_remotely():
existing = DataViewManagementBackend.get_by_id(self._id)
if existing:
return
except Exception:
pass
# If not remote or fetch failed, assume id is valid and return
if not running_remotely():
return
# Build versions from queries; require at least one concrete (dataset, version)
versions = []
for q in self._queries:
ds = getattr(q, "dataset_id", None)
ver = getattr(q, "version_id", None)
if ds and ver and ds != "*" and ver != "*":
versions.append({"dataset": ds, "version": ver})
if not versions:
raise ValueError("Cannot create DataView: no concrete (dataset, version) provided in queries")
self._id = DataViewManagementBackend.create(
name=self._name,
description=self._description,
tags=self._tags,
infinite=self._iteration_infinite,
order=self._iteration_order,
random_seed=self._iteration_random_seed,
limit=self._iteration_limit,
versions=versions,
)
if self._filter_rules:
DataViewManagementBackend.update_filter_rules(
dataview_id=self._id, filter_rules=self._filter_rules
)
self._count_cache = None
self._resync_task_attachment()
def _store_attachment_on_task(self, *, force_remote: bool = False):
"""
Persist this dataview definition into the current Task using Task helpers.
"""
try:
is_remote = False
try:
is_remote = running_remotely()
except Exception:
is_remote = False
if is_remote and not force_remote:
return
task = None
if force_remote:
task = getattr(self, "_connected_task", None)
if not task:
tid = get_remote_task_id()
if tid:
task = Task.get_task(task_id=tid)
if not task:
task = Task.current_task()
if not task:
return
payload = self.id if (force_remote and self._id) else self
task.set_dataview(payload)
except Exception:
return
def _resync_task_attachment(self):
"""
Helper to store current dataview state on the Task when auto-connect is enabled.
"""
if self._auto_connect_with_task:
# On remote, avoid modifying the task silently
try:
if running_remotely() and not self._force_remote_store:
return
except Exception:
pass
self._store_attachment_on_task(force_remote=self._force_remote_store)
def _calculate_synthetic_epoch_limit(self):
"""
Compute the synthetic epoch size when allow_repetition is enabled.
"""
if not self._id:
return None
queries = self.get_queries()
if len(queries) <= 1:
return None
weights = [float(q.weight) if q.weight is not None else 1.0 for q in queries]
if all(q.weight is None for q in queries) and not self._iteration_infinite:
return None
total, rule_counts = DataViewManagementBackend.get_count_details_for_id(self._id)
if total and not self._count_cache:
self._count_cache = int(total)
if not rule_counts:
return None
if len(rule_counts) < len(queries):
rule_counts.extend([0] * (len(queries) - len(rule_counts)))
positive = [(c, w) for c, w in zip(rule_counts, weights) if c > 0]
if not positive:
return None
sum_weights = sum(w for _, w in positive)
normalized = []
for count, weight in zip(rule_counts, weights):
if count <= 0:
normalized.append(0.0)
else:
normalized.append(weight / (sum_weights or 1.0))
max_count = max(rule_counts)
if max_count <= 0:
return None
largest_idx = next((i for i, count in enumerate(rule_counts) if count == max_count), 0)
weight_fraction = normalized[largest_idx]
if weight_fraction <= 0:
return None
return int(ceil(max_count / weight_fraction))
def _auto_connect_task(self):
"""
Ensure this DataView is connected to the current Task (locally or remotely).
In local runs, pushes the DataView state into the Task. In remote runs, also
attempts to pull from Task if already stored.
"""
try:
task = Task.current_task()
if not task and running_remotely():
tid = get_remote_task_id()
if tid:
task = Task.get_task(task_id=tid)
if task:
self._connected_task = task
# Try to reuse a dataview from the task. If none exists or creation fails
# (for example when no remote dataview is attached), fallback to a fresh
# instance without auto-connect.
try:
self._store_attachment_on_task()
except ValueError:
self._auto_connect_with_task = False
self._connected_task = None
return
except Exception:
pass
def _copy_from_other_dataview(self, other: "DataView") -> None:
"""
Copy internal state from another DataView instance.
"""
if not other:
return
self._id = getattr(other, "_id", self._id)
self._iteration_order = getattr(other, "_iteration_order", self._iteration_order)
self._iteration_infinite = getattr(other, "_iteration_infinite", self._iteration_infinite)
self._iteration_random_seed = getattr(other, "_iteration_random_seed", self._iteration_random_seed)
self._iteration_limit = getattr(other, "_iteration_limit", self._iteration_limit)
self._filter_rules = list(getattr(other, "_filter_rules", []))
self._queries = list(getattr(other, "_queries", []))
self._synthetic_epoch_limit = getattr(other, "_synthetic_epoch_limit", self._synthetic_epoch_limit)
self._private_metadata = dict(getattr(other, "_private_metadata", self._private_metadata) or {})
def get_iterator(
self,
projection=None,
query_cache_size=None,
query_queue_depth=5,
allow_repetition=False,
auto_synthetic_epoch_limit=None,
node_id=None,
worker_index=None,
num_workers=None,
cache_in_memory=False,
):
"""
Return an iterator configured to stream frames for this dataview.
:param projection: Optional projection list selecting frame fields
:param query_cache_size: Number of frames to request per backend batch
:param query_queue_depth: Queue depth used by the background fetcher
:param allow_repetition: Enable synthetic epoch length balancing across queries
:param auto_synthetic_epoch_limit: Legacy flag equivalent to `allow_repetition`
:param node_id: Explicit node identifier to send to the backend
:param worker_index: Worker index when splitting frames across multiple iterators
:param num_workers: Total number of cooperating workers
:param cache_in_memory: Reserved flag (currently unused)
:return: Iterator streaming `DataEntry`-derived objects
"""
if query_cache_size is None:
query_cache_size = self._MAX_BATCH_SIZE if running_remotely() else self._DEFAULT_LOCAL_BATCH_SIZE
# Lazily create dataview on first iteration
self._ensure_created()
synthetic_limit = None
enable_repetition = bool(allow_repetition or auto_synthetic_epoch_limit)
if enable_repetition:
synthetic_limit = self._calculate_synthetic_epoch_limit()
if synthetic_limit:
iteration_params = self.get_iteration_parameters()
current_limit = iteration_params.get("limit")
logger = logging.getLogger("DataView")
if not iteration_params.get("infinite") or (
current_limit and current_limit < synthetic_limit
):
logger.warning(
"DataView is finite without repetition, enabling repetition support infinite=True "
"and maximum_number_of_frames=%s",
synthetic_limit,
)
else:
logger.info(
"allow_repetition: Setting DataView iterator maximum_number_of_frames=%s",
synthetic_limit,
)
self.set_iteration_parameters(infinite=True, limit=synthetic_limit)
self._synthetic_epoch_limit = synthetic_limit
else:
self._synthetic_epoch_limit = None
else:
self._synthetic_epoch_limit = None
if num_workers is not None and worker_index is None and node_id is not None:
worker_index = node_id
if node_id is None:
try:
node_id = get_node_id()
except Exception:
node_id = None
iterator = DataView.Iterator(
dataview=self,
projection=list(projection) if projection else None,
query_cache_size=query_cache_size,
query_queue_depth=query_queue_depth,
synthetic_limit=synthetic_limit,
node_id=node_id,
worker_index=worker_index,
num_workers=num_workers,
cache_in_memory=cache_in_memory,
)
limit_value = getattr(iterator, "limit", None)
if enable_repetition:
self._synthetic_epoch_limit = limit_value
return iterator
def get_count(self) -> int:
"""
Fetch total frames count from backend and cache it.
Requires the dataview to be created. If backend is unavailable, returns 0.
"""
self._ensure_created()
if self._count_cache is not None:
return self._count_cache
total = DataViewManagementBackend.get_count_total_for_id(self._id)
self._count_cache = int(total or 0)
return self._count_cache
def __len__(self) -> int:
if self._iteration_limit is not None:
return int(self._iteration_limit)
if self._synthetic_epoch_limit is not None:
return int(self._synthetic_epoch_limit)
return self.get_count()
def prefetch_local_sources(
self,
num_workers: int = None,
wait: bool = True,
query_cache_size: int = None,
get_previews: bool = False,
get_masks: bool = True,
force_download: bool = False,
):
"""
Prefetch data entry sources (and optionally previews/masks) into the local cache.
- num_workers: number of worker threads (defaults to cpu count via ThreadPoolExecutor)
- wait: block until all prefetch tasks complete
- query_cache_size: data entries per backend fetch batch (defaults to iterator default)
- get_previews: also prefetch preview URIs if available
- get_masks: also prefetch mask URIs if available
- force_download: bypass local cache if True
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
self._ensure_created()
it = self.get_iterator(query_cache_size=query_cache_size)
def _extract_uris(data_entry):
uris = []
sources = ["source"]
if get_previews:
sources += ["preview_source"]
if get_masks:
sources += ["mask_source"]
for source in sources:
for data_sub_entry in data_entry:
url = data_sub_entry.get_source(source)
if url:
uris.append(url)
return uris
# Prefetch using a thread pool
futures = []
with ThreadPoolExecutor(max_workers=num_workers) as pool:
for data_entry in it:
for uri in _extract_uris(data_entry):
futures.append(
pool.submit(
StorageManagerDiskSpaceFileSizeStrategy.get_local_copy,
uri,
None,
True,
None,
force_download,
)
)
if wait:
for f in as_completed(futures):
try:
_ = f.result()
except Exception:
continue
class Iterator:
    """Threaded iterator over a DataView's data entries.

    A background daemon thread (:meth:`_fetcher_daemon`) pulls batches of
    frames from the backend and pushes them (as reversed lists) onto a
    bounded queue; :meth:`__next__` drains that queue one item at a time.

    Features visible in this implementation:

    * **Synthetic limit** — caps the number of items yielded per epoch; when
      the backend signals EOF before the limit is met, fetching restarts
      (re-using the last scroll id) until the limit is reached.
    * **Worker splitting** — when ``num_workers`` > 1, frames are dispatched
      round-robin by a shared counter so each worker index sees a disjoint
      subset.
    * **In-memory cache** — when ``cache_in_memory`` is set, yielded items
      are collected so a completed epoch can be replayed without refetching.

    NOTE(review): not thread-safe for concurrent consumers — a single
    consumer thread per Iterator instance is assumed.
    """

    def __init__(
        self,
        dataview=None,
        projection=None,
        query_cache_size=None,
        query_queue_depth=None,
        synthetic_limit=None,
        node_id=None,
        worker_index=None,
        num_workers=None,
        cache_in_memory=False,
    ):
        """
        Initialise the iterator wrapper that pulls data_entries from the backend.

        :param dataview: Owning DataView object; its ``id`` is sent with every
            backend request and its ``get_count()`` backs :meth:`__len__`.
        :param projection: Optional list of field paths to fetch. A ``"*"``
            entry (or any error inspecting the list) disables projection
            entirely, i.e. full frames are fetched.
        :param query_cache_size: Backend batch size per request
            (defaults to ``DataView._DEFAULT_LOCAL_BATCH_SIZE``).
        :param query_queue_depth: Max number of batches buffered between the
            fetch thread and the consumer (defaults to 5, min 1).
        :param synthetic_limit: Optional max items per epoch; divided among
            workers when concurrency is configured.
        :param node_id: Optional backend node identifier (see :meth:`set_node`).
        :param worker_index: Optional index of this worker for round-robin
            frame splitting (see :meth:`set_concurrency`).
        :param num_workers: Optional total number of cooperating workers.
        :param cache_in_memory: If True, keep yielded items so a finished
            epoch can be re-iterated from memory.
        """
        self._dataview = dataview
        # Normalise the projection: None means "fetch everything".
        self._projection = None
        if projection:
            try:
                if any(p == "*" for p in projection):
                    self._projection = None
                else:
                    self._projection = list(projection)
            except Exception:
                # Unusable projection spec -> silently fall back to full frames
                self._projection = None
        self._query_cache_size = int(query_cache_size or DataView._DEFAULT_LOCAL_BATCH_SIZE)
        self._query_queue_depth = int(query_queue_depth or 5)
        capacity = max(1, self._query_queue_depth)
        # Bounded hand-off queue between the fetch thread and the consumer;
        # each element is a whole (reversed) batch list, not a single item.
        self._data_entries_queue: Queue = Queue(maxsize=capacity)
        self._stop_event = threading.Event()
        self._started = False
        self._closed = False
        # First exception raised inside the fetch thread; re-raised to the consumer.
        self._error = None
        self._fetch_thread = threading.Thread(target=self._fetcher_daemon, name="HDVFetcher", daemon=True)
        self._logger = logging.getLogger("DataView")
        # _base_limit is the user-requested epoch size; _limit is the
        # per-worker share after set_concurrency() adjusts it.
        self._base_limit = int(synthetic_limit) if synthetic_limit is not None else None
        self._limit = self._base_limit
        self._yielded = 0      # items returned by __next__ this epoch
        self._produced = 0     # items enqueued by the fetch thread this epoch
        self._dispatch_counter = 0  # global round-robin counter for worker splitting
        self._node_id = None
        self._num_workers = None
        self._worker_index = None
        self._cache_in_memory = cache_in_memory
        self._cache = []
        self._full_cache = False   # True once a full epoch has been cached
        self._current_items = []   # batch currently being drained by __next__
        if node_id is not None:
            self.set_node(node_id)
        if worker_index is not None or num_workers is not None:
            self.set_concurrency(worker_index=worker_index, num_workers=num_workers)
        else:
            # attempt automatic detection (no-op if single worker)
            self.set_concurrency()

    def __iter__(self):
        """
        Return the iterator instance after ensuring the fetch thread is running.

        If an in-memory cache of a complete epoch exists, iterate the cache
        instead of refetching. Otherwise restart fetching when the iterator
        was never started, was closed, or previously reached EOF.
        """
        if self._cache_in_memory and self._full_cache:
            return self._cache.__iter__()
        self._yielded = 0
        if not self._started or self._closed or getattr(self, "_eof_reached", False):
            self._reset_fetch()
            self._started = True
            self._fetch_thread.start()
        return self

    def __next__(self):
        """
        Fetch the next data-entry object, respecting synthetic epoch limits.

        :return: The next data entry (converted via
            :meth:`_determine_entry_classes` by the fetch thread).
        :raises StopIteration: When the limit is reached or the backend is
            exhausted and all buffered items have been consumed.
        :raises Exception: Re-raises any exception captured in the fetch thread.
        """
        # Epoch limit already reached -> shut everything down and stop.
        if self._limit is not None and self._yielded >= self._limit:
            self._stop_event.set()
            self._closed = True
            self._eof_reached = True
            if self._error:
                raise self._error
            self._full_cache = True
            raise StopIteration
        # Already finished and nothing left buffered -> stop immediately.
        if (
            (self._closed or getattr(self, "_eof_reached", False))
            and not self._current_items
            and self._data_entries_queue.empty()
        ):
            if self._error:
                raise self._error
            raise StopIteration
        while True:
            if self._error:
                raise self._error
            try:
                # Refill the local batch; batches are reversed by the fetcher,
                # so pop() from the end yields original backend order.
                if not self._current_items:
                    self._current_items = self._data_entries_queue.get(timeout=0.5)
                item = self._current_items.pop()
                self._yielded += 1
                if self._limit is not None and self._yielded >= self._limit:
                    # This is the last item of the epoch; stop the fetcher now
                    # but still return the item below.
                    self._stop_event.set()
                    self._closed = True
                    self._eof_reached = True
                if self._cache_in_memory:
                    self._cache.append(item)
                return item
            except queue.Empty:
                # Timed out waiting for a batch: either we are done, or the
                # fetch thread died with the queue drained -> stop; otherwise
                # keep polling (the 0.5s timeout keeps us responsive).
                if (
                    (self._closed or getattr(self, "_eof_reached", False))
                    or (not self._fetch_thread.is_alive() and self._data_entries_queue.empty())
                ):
                    if self._error:
                        raise self._error
                    self._full_cache = True
                    raise StopIteration
                continue

    def __len__(self):
        """
        Return the planned length of the iterator, if available.

        The synthetic limit wins when set; otherwise the dataview's total
        count is used, falling back to 0 on any error.
        """
        if self._limit is not None:
            return self._limit
        try:
            return int(self._dataview.get_count()) if self._dataview else 0
        except Exception:
            return 0

    @property
    def limit(self):
        """
        Return the effective iteration limit for this iterator instance.

        :return: Maximum number of frames to yield, or None
        """
        return self._limit

    @property
    def node_id(self):
        """
        Resolve the backend node identifier used for fetching frames.

        :return: Node identifier integer or None
        """
        return self._resolve_node_id()

    def set_node(self, node_id=None):
        """
        Force the iterator to use a specific node identifier for backend fetches.

        :param node_id: Node identifier (anything convertible to int), or
            None to clear the override and fall back to auto-detection.
        :raises ValueError: If called after iteration has started, or if
            ``node_id`` is not convertible to int.
        """
        if self._started and getattr(self, "_fetch_thread", None) and self._fetch_thread.is_alive():
            raise ValueError("Cannot change node id after iterator has started")
        if node_id is None:
            self._node_id = None
            return
        try:
            self._node_id = int(node_id)
        except Exception as exc:
            raise ValueError("node_id must be convertible to int") from exc

    def set_concurrency(self, worker_index=None, num_workers=None):
        """
        Configure worker splitting so multiple iterators can share the same dataview.

        When more than one worker is configured, frames are split round-robin
        so each worker index receives a disjoint subset, and the synthetic
        limit is divided among the workers.

        :param worker_index: This worker's index. Defaults to the configured
            node id (or auto-detected node id, or 0), wrapped modulo
            ``num_workers``.
        :param num_workers: Total number of workers. Defaults to auto-detected
            node count; values <= 1 disable splitting.
        :raises ValueError: If called after the iterator has started, or on
            non-integer / negative arguments.
        """
        if self._started and getattr(self, "_fetch_thread", None) and self._fetch_thread.is_alive():
            raise ValueError("set_concurrency must be called before the iterator starts")
        resolved_workers = None
        if num_workers is not None:
            try:
                resolved_workers = int(num_workers)
            except Exception as exc:
                raise ValueError("num_workers must be an integer") from exc
        else:
            # No explicit worker count -> try to detect a multi-node setup.
            try:
                detected = get_node_count()
            except Exception:
                detected = None
            if isinstance(detected, int) and detected > 1:
                resolved_workers = detected
        if resolved_workers is None or resolved_workers <= 1:
            # Single worker: disable splitting and restore the full limit.
            self._num_workers = None
            self._worker_index = None
            self._adjust_limit_for_concurrency()
            return
        if worker_index is None:
            # Default the worker index to the node id (explicit override first,
            # then auto-detected, then 0).
            candidate = self._node_id
            if candidate is None:
                try:
                    candidate = get_node_id()
                except Exception:
                    candidate = 0
            worker_index = candidate
        try:
            resolved_index = int(worker_index)
        except Exception as exc:
            raise ValueError("worker_index must be an integer") from exc
        if resolved_index < 0:
            raise ValueError("worker_index must be non-negative")
        # Wrap so any node id maps into the valid worker range.
        resolved_index = resolved_index % resolved_workers
        self._num_workers = resolved_workers
        self._worker_index = resolved_index
        self._adjust_limit_for_concurrency()

    def _adjust_limit_for_concurrency(self):
        """
        Recalculate the iterator limit after concurrency changes.

        Each worker gets ceil(base_limit / num_workers) so all workers
        together cover at least the requested total.
        """
        if self._base_limit is None:
            self._limit = None
            return
        if self._num_workers and self._num_workers > 1:
            self._limit = int(ceil(self._base_limit / self._num_workers))
        else:
            self._limit = self._base_limit

    def _resolve_node_id(self):
        """
        Resolve and cache the node identifier used for requests.

        Explicit set_node() values take precedence; otherwise the id is
        auto-detected once via get_node_id() and cached (including a cached
        None on failure).
        """
        if self._node_id is None:
            try:
                self._node_id = get_node_id()
            except Exception:
                self._node_id = None
        return self._node_id

    def __del__(self):
        # Best-effort shutdown of the fetch thread on garbage collection;
        # never raise from a destructor.
        try:
            self._closed = True
            if hasattr(self, "_stop_event"):
                self._stop_event.set()
            if (
                getattr(self, "_started", False)
                and getattr(self, "_fetch_thread", None)
                and self._fetch_thread.is_alive()
            ):
                self._fetch_thread.join(timeout=1)
        except Exception:
            pass

    def _fetcher_daemon(self):
        """
        Background thread that pulls frames from the backend and queues them for consumption.

        Loops requesting batches via scroll-style pagination until either the
        synthetic limit is produced or the backend reports EOF with no limit
        outstanding. On EOF with an unmet limit, the scroll is restarted
        (re-using the last known scroll id) to begin another pass.
        """
        eof = False
        scroll_id = None
        last_scroll_id: Optional[str] = None
        reset_scroll = False
        force_scroll = False
        try:
            while not self._stop_event.is_set():
                if self._limit is not None and self._produced >= self._limit:
                    # EOF reached: stop fetching
                    self._eof_reached = True
                    break
                if eof:
                    if self._limit is not None and self._produced < self._limit:
                        # restart iteration while reusing the previous scroll id so the backend rebalances rules
                        if last_scroll_id:
                            scroll_id = last_scroll_id
                            force_scroll = True
                        reset_scroll = True
                        eof = False
                        # Restart the round-robin counter so worker splitting
                        # stays aligned across the new pass.
                        self._dispatch_counter = 0
                    else:
                        self._eof_reached = True
                        break
                resp = DataViewManagementBackend.get_next_data_entries(
                    dataview=self._dataview.id,
                    scroll_id=scroll_id,
                    batch_size=self._query_cache_size,
                    reset_scroll=reset_scroll or None,
                    force_scroll_id=True if force_scroll else None,
                    node=self._resolve_node_id(),
                    projection=self._projection,
                )
                reset_scroll = False
                force_scroll = False
                eof = bool(getattr(resp, "eof", False))
                current_scroll_id = getattr(resp, "scroll_id", None)
                if current_scroll_id:
                    # Remember the last non-empty scroll id for EOF restarts.
                    last_scroll_id = current_scroll_id
                scroll_id = current_scroll_id
                frames = getattr(resp, "frames", []) or []
                items = []
                for frame in frames:
                    base_cls, resolved_cls = self._determine_entry_classes(frame)
                    if self._num_workers:
                        # Round-robin split: keep only frames whose position
                        # matches this worker's index.
                        include = (self._dispatch_counter % self._num_workers) == self._worker_index
                        self._dispatch_counter += 1
                        if not include:
                            continue
                    else:
                        self._dispatch_counter += 1
                    # NOTE(review): _produced is only updated after the batch is
                    # enqueued, so this check uses the pre-batch count — the
                    # limit may be overshot by up to one batch; confirm intended.
                    if self._limit is not None and self._produced >= self._limit:
                        eof = True
                        self._eof_reached = True
                        break
                    item = frame
                    # Convert to desired classes if available
                    try:
                        if hasattr(base_cls, "from_api_object"):
                            item = base_cls.from_api_object(frame)
                        elif callable(base_cls):
                            item = base_cls(frame)
                        if (
                            resolved_cls
                            and issubclass(resolved_cls, DataEntry)
                            and isinstance(item, DataEntry)
                            and item.__class__ is not resolved_cls
                        ):
                            # Re-tag the instance with the user's registered
                            # subclass; incompatible layouts raise TypeError.
                            try:
                                item.__class__ = resolved_cls
                            except TypeError:
                                self._logger.warning(
                                    "Could not assign frame to class '%s'",
                                    resolved_cls.__name__,
                                )
                    except Exception as ex:
                        # Conversion failure is non-fatal: log and fall back
                        # to the raw frame object.
                        self._logger.exception(
                            "Failed converting frame to %s: %s",
                            getattr(base_cls, "__name__", str(base_cls)),
                            ex,
                        )
                        item = frame
                    items.append(item)
                if items:
                    # Reverse so the consumer's pop() (from the end) yields
                    # items in original order.
                    items.reverse()
                    # Blocking put with timeout so we can notice stop_event
                    # while the queue is full.
                    while not self._stop_event.is_set():
                        try:
                            self._data_entries_queue.put(items, timeout=0.5)
                            self._produced += len(items)
                            break
                        except queue.Full:
                            continue
        except Exception as e:
            # Surface the failure to the consumer thread via __next__.
            self._error = e
        finally:
            # mark thread as finished
            self._started = False

    def _determine_entry_classes(self, frame):
        """Return a tuple of (base_class, resolved_class).

        ``base_class`` is the construction class (DataEntry or DataEntryImage)
        and ``resolved_class`` is the optional user subclass recorded in the
        frame's meta under ENTRY_CLASS_KEY (None if absent or invalid).
        """
        def _get(obj, key, default=None):
            # Uniform key access for dicts and attribute-style API objects.
            if isinstance(obj, dict):
                return obj.get(key, default)
            return getattr(obj, key, default)
        meta = _get(frame, "meta")
        resolved_cls = None
        if isinstance(meta, dict):
            class_path = meta.get(ENTRY_CLASS_KEY)
            resolved_cls = _resolve_class(class_path, DataEntry)
            if resolved_cls and not issubclass(resolved_cls, DataEntry):
                # Ignore classes that are not DataEntry subclasses.
                resolved_cls = None
        base_cls: type = DataEntry
        if resolved_cls and issubclass(resolved_cls, DataEntryImage):
            base_cls = DataEntryImage
        elif resolved_cls:
            base_cls = DataEntry
        elif self._frame_is_image(frame):
            # No registered class: infer image-ness from the frame sources.
            base_cls = DataEntryImage
        return base_cls, resolved_cls

    @staticmethod
    def _frame_is_image(frame) -> bool:
        """Return True when frame sources look like images.

        Heuristic: any source with width/height, a preview URI, or masks is
        treated as an image source.
        """
        def _get(obj, key, default=None):
            # Uniform key access for dicts and attribute-style API objects.
            if isinstance(obj, dict):
                return obj.get(key, default)
            return getattr(obj, key, default)
        sources = _get(frame, "sources") or []
        if not isinstance(sources, (list, tuple)):
            return False
        for s in sources:
            if _get(s, "width") is not None or _get(s, "height") is not None:
                return True
            p = _get(s, "preview")
            if p and _get(p, "uri"):
                return True
            m = _get(s, "masks")
            if m:
                return True
        return False

    def _reset_fetch(self):
        """
        Reset internal queues and counters so iteration can restart from the beginning.

        A fresh queue, stop event and (unstarted) fetch thread are created
        because threading.Thread objects cannot be restarted.
        """
        # reinitialize iteration state (restart from 0)
        capacity = max(1, self._query_queue_depth)
        self._data_entries_queue = Queue(maxsize=capacity)
        self._stop_event = threading.Event()
        self._closed = False
        self._error = None
        self._eof_reached = False
        self._fetch_thread = threading.Thread(target=self._fetcher_daemon, name="HDVFetcher", daemon=True)
        self._yielded = 0
        self._produced = 0
        self._dispatch_counter = 0
| DataView |
python | openai__openai-python | src/openai/lib/streaming/chat/_events.py | {
"start": 303,
"end": 437
} | class ____(BaseModel):
type: Literal["chunk"]
chunk: ChatCompletionChunk
snapshot: ParsedChatCompletionSnapshot
| ChunkEvent |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.