language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/compiler/tests/case_test.py | {
"start": 1104,
"end": 2719
} | class ____(xla_test.XLATestCase):
def testCaseBasic(self):
@def_function.function(jit_compile=True)
def switch_case_test(branch_index):
def f1():
return array_ops.constant(17)
def f2():
return array_ops.constant(31)
def f3():
return array_ops.constant(-1)
return control_flow_switch_case.switch_case(
branch_index, branch_fns={
0: f1,
1: f2
}, default=f3)
with ops.device(self.device):
self.assertEqual(switch_case_test(array_ops.constant(0)).numpy(), 17)
self.assertEqual(switch_case_test(array_ops.constant(1)).numpy(), 31)
self.assertEqual(switch_case_test(array_ops.constant(2)).numpy(), -1)
self.assertEqual(switch_case_test(array_ops.constant(3)).numpy(), -1)
def testBranchIsPruned(self):
@def_function.function(jit_compile=True)
def switch_case_test():
branch_index = array_ops.constant(0)
def f1():
return array_ops.constant(17)
def f2():
# Some operations that XLA cannot compile.
image_ops.decode_image(io_ops.read_file('/tmp/bmp'))
return array_ops.constant(31)
# This tests that we do not try to compile all branches if the branch
# index in trivially constant.
return control_flow_switch_case.switch_case(
branch_index, branch_fns={
0: f1,
1: f2
}, default=f2)
with ops.device(self.device):
self.assertEqual(switch_case_test().numpy(), 17)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| CaseTest |
python | ansible__ansible | test/units/module_utils/facts/test_ansible_collector.py | {
"start": 7529,
"end": 10395
} | class ____(unittest.TestCase):
gather_subset = ['all', '!facter', '!ohai']
min_fact_count = 30
max_fact_count = 1000
# TODO: add ansible_cmdline, ansible_*_pubkey* back when TempFactCollector goes away
expected_facts = ['date_time',
'user_id', 'distribution',
'gather_subset', 'module_setup',
'env']
not_expected_facts = ['facter', 'ohai']
collected_facts: dict[str, str] = {}
def _mock_module(self, gather_subset=None):
return mock_module(gather_subset=self.gather_subset)
@patch('platform.system', return_value='Linux')
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd')
def setUp(self, mock_gfc, mock_ps):
mock_module = self._mock_module()
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
self.facts = fact_collector.collect(module=mock_module,
collected_facts=self.collected_facts)
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
def test_basics(self):
self._assert_basics(self.facts)
def test_expected_facts(self):
self._assert_expected_facts(self.facts)
def test_not_expected_facts(self):
self._assert_not_expected_facts(self.facts)
def _assert_basics(self, facts):
self.assertIsInstance(facts, dict)
# just assert it's not almost empty
self.assertGreaterEqual(len(facts), self.min_fact_count)
# and that is not huge number of keys
self.assertLess(len(facts), self.max_fact_count)
# everything starts with ansible_ namespace
def _assert_ansible_namespace(self, facts):
# FIXME: kluge for non-namespace fact
facts.pop('module_setup', None)
facts.pop('gather_subset', None)
for fact_key in facts:
self.assertTrue(fact_key.startswith('ansible_'),
'The fact name "%s" does not startwith "ansible_"' % fact_key)
def _assert_expected_facts(self, facts):
facts_keys = sorted(facts.keys())
for expected_fact in self.expected_facts:
self.assertIn(expected_fact, facts_keys)
def _assert_not_expected_facts(self, facts):
facts_keys = sorted(facts.keys())
for not_expected_fact in self.not_expected_facts:
self.assertNotIn(not_expected_fact, facts_keys)
| TestCollectedFacts |
python | Textualize__textual | docs/examples/widgets/option_list_strings.py | {
"start": 100,
"end": 618
} | class ____(App[None]):
CSS_PATH = "option_list.tcss"
def compose(self) -> ComposeResult:
yield Header()
yield OptionList(
"Aerilon",
"Aquaria",
"Canceron",
"Caprica",
"Gemenon",
"Leonis",
"Libran",
"Picon",
"Sagittaron",
"Scorpia",
"Tauron",
"Virgon",
)
yield Footer()
if __name__ == "__main__":
OptionListApp().run()
| OptionListApp |
python | Netflix__metaflow | metaflow/_vendor/click/testing.py | {
"start": 1510,
"end": 2970
} | class ____(object):
"""Holds the captured result of an invoked CLI script."""
def __init__(
self, runner, stdout_bytes, stderr_bytes, exit_code, exception, exc_info=None
):
#: The runner that created the result
self.runner = runner
#: The standard output as bytes.
self.stdout_bytes = stdout_bytes
#: The standard error as bytes, or None if not available
self.stderr_bytes = stderr_bytes
#: The exit code as integer.
self.exit_code = exit_code
#: The exception that happened if one did.
self.exception = exception
#: The traceback
self.exc_info = exc_info
@property
def output(self):
"""The (standard) output as unicode string."""
return self.stdout
@property
def stdout(self):
"""The standard output as unicode string."""
return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
"\r\n", "\n"
)
@property
def stderr(self):
"""The standard error as unicode string."""
if self.stderr_bytes is None:
raise ValueError("stderr not separately captured")
return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
"\r\n", "\n"
)
def __repr__(self):
return "<{} {}>".format(
type(self).__name__, repr(self.exception) if self.exception else "okay"
)
| Result |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_assigned_to_handler.py | {
"start": 386,
"end": 4180
} | class ____(ConditionTestCase):
condition = Condition.ASSIGNED_TO
payload = {
"id": AssignedToFilter.id,
"targetType": "Member",
"targetIdentifier": 0,
}
def setUp(self) -> None:
super().setUp()
self.event_data = WorkflowEventData(event=self.group_event, group=self.group_event.group)
self.dc = self.create_data_condition(
type=self.condition,
comparison={
"target_type": "Member",
"target_identifier": 0,
},
condition_result=True,
)
def test_dual_write(self) -> None:
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison == {
"target_type": "Member",
"target_identifier": 0,
}
assert dc.condition_result is True
assert dc.condition_group == dcg
payload = {
"id": AssignedToFilter.id,
"targetType": "Unassigned",
}
dc = self.translate_to_data_condition(payload, dcg)
assert dc.type == self.condition
assert dc.comparison == {
"target_type": "Unassigned",
}
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_json_schema(self) -> None:
self.dc.comparison.update({"target_type": "Team"})
self.dc.save()
self.dc.comparison.update({"target_type": "asdf"})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"target_identifier": False})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"hello": "there"})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"target_type": "Unassigned", "target_identifier": 0})
with pytest.raises(ValidationError):
self.dc.save()
def test_assigned_to_member_passes(self) -> None:
GroupAssignee.objects.create(user_id=self.user.id, group=self.group, project=self.project)
self.dc.update(comparison={"target_type": "Member", "target_identifier": self.user.id})
self.assert_passes(self.dc, self.event_data)
def test_assigned_to_member_fails(self) -> None:
user = self.create_user()
GroupAssignee.objects.create(user_id=user.id, group=self.group, project=self.project)
self.dc.update(comparison={"target_type": "Member", "target_identifier": self.user.id})
self.assert_does_not_pass(self.dc, self.event_data)
def test_assigned_to_team_passes(self) -> None:
GroupAssignee.objects.create(team=self.team, group=self.group, project=self.project)
self.dc.update(comparison={"target_type": "Team", "target_identifier": self.team.id})
self.assert_passes(self.dc, self.event_data)
def test_assigned_to_team_fails(self) -> None:
team = self.create_team(self.organization)
GroupAssignee.objects.create(team=team, group=self.group, project=self.project)
self.dc.update(comparison={"target_type": "Team", "target_identifier": self.team.id})
self.assert_does_not_pass(self.dc, self.event_data)
def test_assigned_to_no_one_passes(self) -> None:
self.dc.update(comparison={"target_type": "Unassigned"})
self.assert_passes(self.dc, self.event_data)
def test_assigned_to_no_one_fails(self) -> None:
GroupAssignee.objects.create(user_id=self.user.id, group=self.group, project=self.project)
self.dc.update(comparison={"target_type": "Unassigned"})
self.assert_does_not_pass(self.dc, self.event_data)
| TestAssignedToCondition |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 798,
"end": 893
} | class ____:
x = attr.ib()
def meth(self):
return self.x
@attr.s(slots=True)
| Base |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-sambanovasystems/llama_index/llms/sambanovasystems/base.py | {
"start": 1685,
"end": 23309
} | class ____(LLM):
"""
SambaNova Cloud models.
Setup:
To use, you should have the environment variables:
`SAMBANOVA_URL` set with your SambaNova Cloud URL.
`SAMBANOVA_API_KEY` set with your SambaNova Cloud API Key.
http://cloud.sambanova.ai/.
Additionally, download the following packages:
`pip install llama-index-llms-sambanovasystems`
`pip install sseclient-py`
Examples:
```python
SambaNovaCloud(
sambanova_url = SambaNova cloud endpoint URL,
sambanova_api_key = set with your SambaNova cloud API key,
model = model name,
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
stream_options = include usage to get generation metrics
)
```
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct.
streaming: bool
Whether to use streaming handler when using non streaming methods
max_tokens: int
max tokens to generate
temperature: float
model temperature
top_p: float
model top p
top_k: int
model top k
stream_options: dict
stream options, include usage to get generation metrics
Key init args — client params:
sambanova_url: str
SambaNova Cloud Url
sambanova_api_key: str
SambaNova Cloud api key
Instantiate:
```python
from llama_index.llms.sambanovacloud import SambaNovaCloud
llm = SambaNovaCloud(
sambanova_url = SambaNova cloud endpoint URL,
sambanova_api_key = set with your SambaNova cloud API key,
model = model name,
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
stream_options = include usage to get generation metrics
context_window = model context window
)
```
Complete:
```python
prompt = "Tell me about Naruto Uzumaki in one sentence"
response = llm.complete(prompt)
```
Chat:
```python
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
response = llm.chat(messages)
```
Stream:
```python
prompt = "Tell me about Naruto Uzumaki in one sentence"
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
for chunk in llm.stream_complete(prompt):
print(chunk.text)
for chunk in llm.stream_chat(messages):
print(chunk.message.content)
```
Async:
```python
prompt = "Tell me about Naruto Uzumaki in one sentence"
asyncio.run(llm.acomplete(prompt))
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
asyncio.run(llm.achat(chat_text_msgs))
```
Response metadata and usage
```python
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
metadata_and_usage = llm.chat(messages).message.additional_kwargs
print(metadata_and_usage)
```
"""
model_config = ConfigDict(
protected_namespaces=("pydantic_model_",), arbitrary_types_allowed=True
)
sambanova_url: str = Field(description="SambaNova Cloud Url")
sambanova_api_key: SecretStr = Field(description="SambaNova Cloud api key")
model: str = Field(
default="Meta-Llama-3.1-8B-Instruct",
description="The name of the model",
)
streaming: bool = Field(
default=False,
description="Whether to use streaming handler when using non streaming methods",
)
context_window: int = Field(default=4096, description="context window")
max_tokens: int = Field(default=1024, description="max tokens to generate")
temperature: float = Field(default=0.7, description="model temperature")
top_p: Optional[float] = Field(default=None, description="model top p")
top_k: Optional[int] = Field(default=None, description="model top k")
stream_options: dict = Field(
default_factory=lambda: {"include_usage": True},
description="stream options, include usage to get generation metrics",
)
@classmethod
def class_name(cls) -> str:
return "SambaNovaCloud"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
)
def __init__(self, **kwargs: Any) -> None:
"""Init and validate environment variables."""
kwargs["sambanova_url"] = get_from_param_or_env(
"sambanova_api_key",
kwargs.get("sambanova_api_key"),
"SAMBANOVA_URL",
default="https://api.sambanova.ai/v1/chat/completions",
)
kwargs["sambanova_api_key"] = get_from_param_or_env(
"sambanova_api_key", kwargs.get("sambanova_api_key"), "SAMBANOVA_API_KEY"
)
super().__init__(**kwargs)
def _handle_request(
self, messages_dicts: List[Dict], stop: Optional[List[str]] = None
) -> Dict[str, Any]:
"""
Performs a post request to the LLM API.
Args:
messages_dicts: List of role / content dicts to use as input.
stop: list of stop tokens
Returns:
A response dict.
"""
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
}
http_session = requests.Session()
response = http_session.post(
self.sambanova_url,
headers={
"Authorization": f"Bearer {self.sambanova_api_key.get_secret_value()}",
"Content-Type": "application/json",
},
json=data,
)
if response.status_code != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}.",
f"{response.text}.",
)
response_dict = response.json()
if response_dict.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}.",
f"{response_dict}.",
)
return response_dict
async def _handle_request_async(
self, messages_dicts: List[Dict], stop: Optional[List[str]] = None
) -> Dict[str, Any]:
"""
Performs a async post request to the LLM API.
Args:
messages_dicts: List of role / content dicts to use as input.
stop: list of stop tokens
Returns:
A response dict.
"""
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
}
async with aiohttp.ClientSession() as session:
async with session.post(
self.sambanova_url,
headers={
"Authorization": f"Bearer {self.sambanova_api_key.get_secret_value()}",
"Content-Type": "application/json",
},
json=data,
) as response:
if response.status != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code {response.status}.",
f"{await response.text()}.",
)
response_dict = await response.json()
if response_dict.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code {response.status}.",
f"{response_dict}.",
)
return response_dict
def _handle_streaming_request(
self, messages_dicts: List[Dict], stop: Optional[List[str]] = None
) -> Iterator[Dict]:
"""
Performs an streaming post request to the LLM API.
Args:
messages_dicts: List of role / content dicts to use as input.
stop: list of stop tokens
Yields:
An iterator of response dicts.
"""
try:
import sseclient
except ImportError:
raise ImportError(
"could not import sseclient library"
"Please install it with `pip install sseclient-py`."
)
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream": True,
"stream_options": self.stream_options,
}
http_session = requests.Session()
response = http_session.post(
self.sambanova_url,
headers={
"Authorization": f"Bearer {self.sambanova_api_key.get_secret_value()}",
"Content-Type": "application/json",
},
json=data,
stream=True,
)
client = sseclient.SSEClient(response)
if response.status_code != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{response.text}."
)
for event in client.events():
if event.event == "error_event":
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
try:
# check if the response is a final event
# in that case event data response is '[DONE]'
if event.data != "[DONE]":
if isinstance(event.data, str):
data = json.loads(event.data)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if data.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
yield data
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"data: {event.data}"
)
async def _handle_streaming_request_async(
self, messages_dicts: List[Dict], stop: Optional[List[str]] = None
) -> AsyncIterator[Dict]:
"""
Performs an async streaming post request to the LLM API.
Args:
messages_dicts: List of role / content dicts to use as input.
stop: list of stop tokens
Yields:
An iterator of response dicts.
"""
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream": True,
"stream_options": self.stream_options,
}
async with aiohttp.ClientSession() as session:
async with session.post(
self.sambanova_url,
headers={
"Authorization": f"Bearer {self.sambanova_api_key.get_secret_value()}",
"Content-Type": "application/json",
},
json=data,
) as response:
if response.status != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status}. {await response.text()}"
)
async for line in response.content:
if line:
event = line.decode("utf-8").strip()
if event.startswith("data:"):
event = event[len("data:") :].strip()
if event == "[DONE]":
break
elif len(event) == 0:
continue
try:
data = json.loads(event)
if data.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed: {data['error']}"
)
yield data
except json.JSONDecodeError:
raise RuntimeError(
f"Sambanova /complete call failed to decode response: {event}"
)
except Exception as e:
raise RuntimeError(
f"Error processing response: {e} data: {event}"
)
@llm_chat_callback()
def chat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponse:
"""
Calls the chat implementation of the SambaNovaCloud model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
Returns:
ChatResponse with model generation
"""
messages_dicts = _create_message_dicts(messages)
response = self._handle_request(messages_dicts, stop)
message = ChatMessage(
role=MessageRole.ASSISTANT,
content=response["choices"][0]["message"]["content"],
additional_kwargs={
"id": response["id"],
"finish_reason": response["choices"][0]["finish_reason"],
"usage": response.get("usage"),
"model_name": response["model"],
"system_fingerprint": response["system_fingerprint"],
"created": response["created"],
},
)
return ChatResponse(message=message)
@llm_chat_callback()
def stream_chat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponseGen:
"""
Streams the chat output of the SambaNovaCloud model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
Yields:
ChatResponseGen with model partial generation
"""
messages_dicts = _create_message_dicts(messages)
finish_reason = None
content = ""
role = MessageRole.ASSISTANT
for partial_response in self._handle_streaming_request(messages_dicts, stop):
if len(partial_response["choices"]) > 0:
content_delta = partial_response["choices"][0]["delta"]["content"]
content += content_delta
additional_kwargs = {
"id": partial_response["id"],
"finish_reason": partial_response["choices"][0].get(
"finish_reason"
),
}
else:
additional_kwargs = {
"id": partial_response["id"],
"finish_reason": finish_reason,
"usage": partial_response.get("usage"),
"model_name": partial_response["model"],
"system_fingerprint": partial_response["system_fingerprint"],
"created": partial_response["created"],
}
# yield chunk
yield ChatResponse(
message=ChatMessage(
role=role, content=content, additional_kwargs=additional_kwargs
),
delta=content_delta,
raw=partial_response,
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
return stream_complete_fn(prompt, **kwargs)
### Async ###
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponse:
"""
Calls the async chat implementation of the SambaNovaCloud model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
Returns:
ChatResponse with async model generation
"""
messages_dicts = _create_message_dicts(messages)
response = await self._handle_request_async(messages_dicts, stop)
message = ChatMessage(
role=MessageRole.ASSISTANT,
content=response["choices"][0]["message"]["content"],
additional_kwargs={
"id": response["id"],
"finish_reason": response["choices"][0]["finish_reason"],
"usage": response.get("usage"),
"model_name": response["model"],
"system_fingerprint": response["system_fingerprint"],
"created": response["created"],
},
)
return ChatResponse(message=message)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponseAsyncGen:
raise NotImplementedError(
"SambaNovaCloud does not currently support async streaming."
)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
acomplete_fn = achat_to_completion_decorator(self.achat)
return await acomplete_fn(prompt, **kwargs)
@llm_completion_callback()
def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError(
"SambaNovaCloud does not currently support async streaming."
)
| SambaNovaCloud |
python | celery__celery | t/unit/worker/test_loops.py | {
"start": 518,
"end": 925
} | class ____:
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.args = args
self.kwargs = kwargs
def __eq__(self, other):
return (other.fun == self.fun and
other.args == self.args and
other.kwargs == self.kwargs)
def __repr__(self):
return '<promise: {0.fun!r} {0.args!r} {0.kwargs!r}>'.format(self)
| PromiseEqual |
python | django__django | tests/staticfiles_tests/test_management.py | {
"start": 24885,
"end": 27195
} | class ____(TestDefaults, CollectionTestCase):
"""
Test ``--link`` option for ``collectstatic`` management command.
Note that by inheriting ``TestDefaults`` we repeat all
the standard file resolving tests here, to make sure using
``--link`` does not change the file-selection semantics.
"""
def run_collectstatic(self, clear=False, link=True, **kwargs):
super().run_collectstatic(link=link, clear=clear, **kwargs)
def test_links_created(self):
"""
With ``--link``, symbolic links are created.
"""
self.assertTrue(os.path.islink(os.path.join(settings.STATIC_ROOT, "test.txt")))
def test_broken_symlink(self):
"""
Test broken symlink gets deleted.
"""
path = os.path.join(settings.STATIC_ROOT, "test.txt")
os.unlink(path)
self.run_collectstatic()
self.assertTrue(os.path.islink(path))
def test_symlinks_and_files_replaced(self):
"""
Running collectstatic in non-symlink mode replaces symlinks with files,
while symlink mode replaces files with symlinks.
"""
path = os.path.join(settings.STATIC_ROOT, "test.txt")
self.assertTrue(os.path.islink(path))
self.run_collectstatic(link=False)
self.assertFalse(os.path.islink(path))
self.run_collectstatic(link=True)
self.assertTrue(os.path.islink(path))
def test_clear_broken_symlink(self):
"""
With ``--clear``, broken symbolic links are deleted.
"""
nonexistent_file_path = os.path.join(settings.STATIC_ROOT, "nonexistent.txt")
broken_symlink_path = os.path.join(settings.STATIC_ROOT, "symlink.txt")
os.symlink(nonexistent_file_path, broken_symlink_path)
self.run_collectstatic(clear=True)
self.assertFalse(os.path.lexists(broken_symlink_path))
@override_settings(
STORAGES={
**settings.STORAGES,
STATICFILES_STORAGE_ALIAS: {
"BACKEND": "staticfiles_tests.storage.PathNotImplementedStorage"
},
}
)
def test_no_remote_link(self):
with self.assertRaisesMessage(
CommandError, "Can't symlink to a remote destination."
):
self.run_collectstatic()
| TestCollectionLinks |
python | numpy__numpy | numpy/_core/tests/test_function_base.py | {
"start": 1471,
"end": 1535
} | class ____(ndarray):
__array_priority__ = 10
| PhysicalQuantity2 |
python | huggingface__transformers | tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py | {
"start": 2245,
"end": 10653
} | class ____(unittest.TestCase):
model_name = "meta-llama/Meta-Llama-3-8B"
input_text = "What are we having for dinner?"
max_new_tokens = 9
EXPECTED_OUTPUT = "What are we having for dinner?\nI'm having a steak and a salad"
device_map = "cuda"
offload_device_map = {
"model.embed_tokens": 0,
"model.layers.0": 0,
"model.layers.1": 0,
"model.layers.2": 0,
"model.layers.3": 0,
"model.layers.4": 0,
"model.layers.5": 0,
"model.layers.6": 0,
"model.layers.7": 0,
"model.layers.8": 0,
"model.layers.9": 0,
"model.layers.10": 0,
"model.layers.11": 0,
"model.layers.12": 0,
"model.layers.13": 0,
"model.layers.14": 0,
"model.layers.15": 0,
"model.layers.16": "cpu",
"model.layers.17": "cpu",
"model.layers.18": "cpu",
"model.layers.19": "cpu",
"model.layers.20": "disk",
"model.layers.21": "disk",
"model.layers.22": "disk",
"model.layers.23": "disk",
"model.layers.24": "disk",
"model.layers.25": "disk",
"model.layers.26": "disk",
"model.layers.27": "disk",
"model.layers.28": "disk",
"model.layers.29": "disk",
"model.layers.30": "disk",
"model.layers.31": "disk",
"model.norm": "disk",
"lm_head": "disk",
}
# called only once for all test in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
quantization_config = FbgemmFp8Config()
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name, device_map=cls.device_map, quantization_config=quantization_config
)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from transformers.integrations import FbgemmFp8Linear, replace_with_fbgemm_fp8_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
quantization_config = FbgemmFp8Config()
with init_empty_weights():
model = OPTForCausalLM(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config)
nb_fbgemm_linear = 0
for module in model.modules():
if isinstance(module, FbgemmFp8Linear):
nb_fbgemm_linear += 1
self.assertEqual(nb_linears - 1, nb_fbgemm_linear)
with init_empty_weights():
model = OPTForCausalLM(config)
quantization_config = FbgemmFp8Config(modules_to_not_convert=["fc1"])
model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config)
nb_fbgemm_linear = 0
for module in model.modules():
if isinstance(module, FbgemmFp8Linear):
nb_fbgemm_linear += 1
self.assertEqual(nb_linears - 25, nb_fbgemm_linear)
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_change_loading_attributes(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
quantization_config = FbgemmFp8Config(activation_scale_ub=1000.0)
model = AutoModelForCausalLM.from_pretrained(
tmpdirname, device_map=self.device_map, quantization_config=quantization_config
)
self.assertEqual(model.model.layers[1].mlp.down_proj.input_scale_ub.item(), 1000.0)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_multi_gpu
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = FbgemmFp8Config()
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map="auto", quantization_config=quantization_config
)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_quantized_model_offload(self):
"""
Simple test that checks if the quantized model returns an error when loading with cpu/disk offloaded
"""
quantization_config = FbgemmFp8Config()
with self.assertRaisesRegex(
ValueError, "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device."
):
AutoModelForCausalLM.from_pretrained(
self.model_name, device_map=self.offload_device_map, quantization_config=quantization_config
)
def test_save_pretrained_offload(self):
"""
Simple test that checks if the saved quantized model is working properly cpu/disk offload
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.offload_device_map)
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_multi_gpu
def test_save_pretrained_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto")
self.assertTrue(set(model.hf_device_map.values()) == {0, 1})
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_gpu
@require_accelerate
@require_fbgemm_gpu
| FbgemmFp8Test |
python | dask__dask | dask/blockwise.py | {
"start": 2650,
"end": 5565
} | class ____(BlockwiseDep):
"""Dictionary-based Blockwise-IO argument
This is a dictionary-backed instance of ``BlockwiseDep``.
The purpose of this class is to simplify the construction
of IO-based Blockwise Layers with block/partition-dependent
function arguments that are difficult to calculate at
graph-materialization time.
Examples
--------
Specify an IO-based function for the Blockwise Layer. Note
that the function will be passed a single input object when
the task is executed (e.g. a single ``tuple`` or ``dict``):
>>> import pandas as pd
>>> func = lambda x: pd.read_csv(**x)
Use ``BlockwiseDepDict`` to define the input argument to
``func`` for each block/partition:
>>> dep = BlockwiseDepDict(
... mapping={
... (0,) : {
... "filepath_or_buffer": "data.csv",
... "skiprows": 1,
... "nrows": 2,
... "names": ["a", "b"],
... },
... (1,) : {
... "filepath_or_buffer": "data.csv",
... "skiprows": 3,
... "nrows": 2,
... "names": ["a", "b"],
... },
... }
... )
Construct a Blockwise Layer with ``dep`` specified
in the ``indices`` list:
>>> layer = Blockwise(
... output="collection-name",
... output_indices="i",
... task=Task("collection-name", func, TaskRef("_0")),
... indices=[(dep, "i")],
... numblocks={},
... )
See Also
--------
dask.blockwise.Blockwise
dask.blockwise.BlockwiseDep
"""
def __init__(
self,
mapping: dict,
numblocks: tuple[int, ...] | None = None,
produces_tasks: bool = False,
produces_keys: bool = False,
):
self.mapping = mapping
self.produces_tasks = produces_tasks
# By default, assume 1D shape
self.numblocks = numblocks or (len(mapping),)
# Whether `mapping` values are real task keys
# (e.g. Delayed objects)
self._produces_keys = produces_keys
@property
def produces_keys(self) -> bool:
return self._produces_keys
def __getitem__(self, idx: tuple[int, ...]) -> Any:
try:
return self.mapping[idx]
except KeyError as err:
# If a DataFrame collection was converted
# to an Array collection, the dimension of
# `idx` may not agree with the keys in
# `self.mapping`. In this case, we can
# use `self.numblocks` to check for a key
# match in the leading elements of `idx`
flat_idx = idx[: len(self.numblocks)]
if flat_idx in self.mapping:
return self.mapping[flat_idx]
raise err
def __len__(self) -> int:
return len(self.mapping)
| BlockwiseDepDict |
python | tensorflow__tensorflow | tensorflow/python/lib/io/file_io_test.py | {
"start": 1049,
"end": 1411
} | class ____(object):
"""Backport of pathlib.Path for Python < 3.6"""
def __init__(self, name):
self.name = name
def __fspath__(self):
return self.name
def __str__(self):
return self.name
run_all_path_types = parameterized.named_parameters(
("str", file_io.join),
("pathlike", lambda *paths: PathLike(file_io.join(*paths))))
| PathLike |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 46962,
"end": 50067
} | class ____(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value. For a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm).
x_0 : float or `~astropy.units.Quantity`.
Position of the peak.
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM).
See Also
--------
Lorentz2D, Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
The ``x``, ``x_0``, and ``fwhm`` inputs must have compatible units
or be unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is the half width at half maximum (HWHM),
which is half the FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), lw=2, label=f'Amplitude={factor}')
plt.axis([-5, 5, -1, 4])
plt.legend()
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function."""
return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters."""
gamma = fwhm / 2.0
denom = gamma**2 + (x - x_0) ** 2
d_amplitude = gamma**2 / denom
d_x_0 = amplitude * gamma**2 * 2 * (x - x_0) / denom**2
d_fwhm = amplitude * gamma * (x - x_0) ** 2 / denom**2
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
| Lorentz1D |
python | huggingface__transformers | src/transformers/models/visual_bert/modeling_visual_bert.py | {
"start": 45984,
"end": 52118
} | class ____(VisualBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.visual_bert = VisualBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
visual_embeds: Optional[torch.FloatTensor] = None,
visual_attention_mask: Optional[torch.LongTensor] = None,
visual_token_type_ids: Optional[torch.LongTensor] = None,
image_text_alignment: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*):
The embedded representation of the visual inputs, generally derived using using an object detector.
visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*):
Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*):
Segment token indices to indicate different portions of the visual embeds.
[What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the
*visual_token_type_ids* to *1* for all tokens.
image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*):
Image-Text alignment uses to decide the position IDs of the visual embeddings.
labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. A KLDivLoss is computed between the labels and the returned logits.
Example:
```python
# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.
from transformers import AutoTokenizer, VisualBertForQuestionAnswering
import torch
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")
text = "Who is eating the apple?"
inputs = tokenizer(text, return_tensors="pt")
visual_embeds = get_visual_embeddings(image).unsqueeze(0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
inputs.update(
{
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
}
)
labels = torch.tensor([[0.0, 1.0]]).unsqueeze(0) # Batch size 1, Num labels 2
outputs = model(**inputs, labels=labels)
loss = outputs.loss
scores = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Get the index of the last text token
index_to_gather = attention_mask.sum(1) - 2 # as in original code
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# TO-CHECK: From the original code
index_to_gather = (
index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1))
)
pooled_output = torch.gather(sequence_output, 1, index_to_gather)
pooled_output = self.dropout(pooled_output)
logits = self.cls(pooled_output)
reshaped_logits = logits.view(-1, self.num_labels)
loss = None
if labels is not None:
loss_fct = nn.KLDivLoss(reduction="batchmean")
log_softmax = nn.LogSoftmax(dim=-1)
reshaped_logits = log_softmax(reshaped_logits)
loss = loss_fct(reshaped_logits, labels.contiguous())
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled
output) for Visual Reasoning e.g. for NLVR task.
"""
)
| VisualBertForQuestionAnswering |
python | ansible__ansible | lib/ansible/_internal/_templating/_access.py | {
"start": 1199,
"end": 3467
} | class ____:
"""
Broker object for managed access registration and notification.
Each thread or other logical callstack has a dedicated `AnsibleAccessContext` object with which `NotifiableAccessContext` objects can register interest.
When a managed access occurs on an object, each active `NotifiableAccessContext` within the current callstack that has registered interest in that
object's type or a tag present on it will be notified.
"""
_contextvar: t.ClassVar[ContextVar[AnsibleAccessContext]] = ContextVar('AnsibleAccessContext')
@staticmethod
def current() -> AnsibleAccessContext:
"""Creates or retrieves an `AnsibleAccessContext` for the current logical callstack."""
try:
ctx: AnsibleAccessContext = AnsibleAccessContext._contextvar.get()
except LookupError:
# didn't exist; create it
ctx = AnsibleAccessContext()
AnsibleAccessContext._contextvar.set(ctx) # we ignore the token, since this should live for the life of the thread/async ctx
return ctx
def __init__(self) -> None:
self._notify_contexts: list[NotifiableAccessContextBase] = []
def _register_interest(self, context: NotifiableAccessContextBase) -> None:
self._notify_contexts.append(context)
def _unregister_interest(self, context: NotifiableAccessContextBase) -> None:
ctx = self._notify_contexts.pop()
if ctx is not context:
raise RuntimeError(f'Out-of-order context deactivation detected. Found {ctx} instead of {context}.')
def access(self, value: t.Any) -> None:
"""Notify all contexts which have registered interest in the given value that it is being accessed."""
if not self._notify_contexts:
return
value_types = AnsibleTagHelper.tag_types(value) | frozenset((type(value),))
masked: set[type] = set()
for ctx in reversed(self._notify_contexts):
if ctx._mask:
if (ctx_type := type(ctx)) in masked:
continue
masked.add(ctx_type)
# noinspection PyProtectedMember
if ctx._type_interest.intersection(value_types):
ctx._notify(value)
| AnsibleAccessContext |
python | scikit-learn__scikit-learn | sklearn/model_selection/_split.py | {
"start": 2832,
"end": 5378
} | class ____(_MetadataRequester, metaclass=ABCMeta):
"""Base class for all cross-validators.
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
# This indicates that by default CV splitters don't have a "groups" kwarg,
# unless indicated by inheriting from ``GroupsConsumerMixin``.
# This also prevents ``set_split_request`` to be generated for splitters
# which don't support ``groups``.
__metadata_request__split = {"groups": metadata_routing.UNUSED}
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator."""
def __repr__(self):
return _build_repr(self)
| BaseCrossValidator |
python | jupyterlab__jupyterlab | jupyterlab/handlers/announcements.py | {
"start": 5948,
"end": 10054
} | class ____(APIHandler):
"""News API handler.
Args:
news_url: The Atom feed to fetch for news
"""
def initialize(
self,
news_url: Optional[str] = None,
) -> None:
super().initialize()
self.news_url = news_url
@web.authenticated
async def get(self):
"""Get the news.
Response:
{
"news": List[Notification]
}
"""
news = []
http_client = httpclient.AsyncHTTPClient()
if self.news_url is not None:
trans = translator.load("jupyterlab")
# Those registrations are global, naming them to reduce chance of clashes
xml_namespaces = {"atom": "http://www.w3.org/2005/Atom"}
for key, spec in xml_namespaces.items():
ET.register_namespace(key, spec)
try:
response = await http_client.fetch(
self.news_url,
headers={"Content-Type": "application/atom+xml"},
)
tree = ET.fromstring(response.body) # noqa S314
def build_entry(node):
def get_xml_text(attr: str, default: Optional[str] = None) -> str:
node_item = node.find(f"atom:{attr}", xml_namespaces)
if node_item is not None:
return node_item.text
elif default is not None:
return default
else:
error_m = (
f"atom feed entry does not contain a required attribute: {attr}"
)
raise KeyError(error_m)
entry_title = get_xml_text("title")
entry_id = get_xml_text("id")
entry_updated = get_xml_text("updated")
entry_published = get_xml_text("published", entry_updated)
entry_summary = get_xml_text("summary", default="")
links = node.findall("atom:link", xml_namespaces)
if len(links) > 1:
alternate = list(filter(lambda elem: elem.get("rel") == "alternate", links))
link_node = alternate[0] if alternate else links[0]
else:
link_node = links[0] if len(links) == 1 else None
entry_link = link_node.get("href") if link_node is not None else None
message = (
"\n".join([entry_title, entry_summary]) if entry_summary else entry_title
)
modified_at = format_datetime(entry_updated)
created_at = format_datetime(entry_published)
notification = Notification(
message=message,
createdAt=created_at,
modifiedAt=modified_at,
type="info",
link=None
if entry_link is None
else (
trans.__("Open full post"),
entry_link,
),
options={
"data": {
"id": entry_id,
"tags": ["news"],
}
},
)
return notification
entries = map(build_entry, tree.findall("atom:entry", xml_namespaces))
news.extend(entries)
except Exception as e:
self.log.debug(
f"Failed to get announcements from Atom feed: {self.news_url}",
exc_info=e,
)
self.set_status(200)
self.finish(json.dumps({"news": list(map(asdict, news))}))
news_handler_path = r"/lab/api/news"
check_update_handler_path = r"/lab/api/update"
| NewsHandler |
python | django__django | tests/constraints/models.py | {
"start": 4892,
"end": 5041
} | class ____(models.Model):
data = models.JSONField(null=True)
class Meta:
required_db_features = {"supports_json_field"}
| JSONFieldModel |
python | pytorch__pytorch | torch/fx/subgraph_rewriter.py | {
"start": 835,
"end": 16265
} | class ____:
# Node from which the match was found
anchor: Node
# Maps nodes in the pattern subgraph to nodes in the larger graph
nodes_map: dict[Node, Node]
# List of nodes that were added into the graph
replacements: list[Node]
def _replace_attributes(gm: GraphModule, replacement: torch.nn.Module) -> None:
gm.delete_all_unused_submodules()
if isinstance(replacement, GraphModule):
replacement.graph.lint()
def try_get_attr(gm: torch.nn.Module, target: str) -> Optional[Any]:
module_path, _, attr_name = target.rpartition(".")
try:
mod: torch.nn.Module = gm.get_submodule(module_path)
except AttributeError:
return None
attr = getattr(mod, attr_name, None)
return attr
for node in gm.graph.nodes:
if node.op == "call_module" or node.op == "get_attr":
gm_attr = try_get_attr(gm, node.target)
replacement_attr = try_get_attr(replacement, node.target)
# CASE 1: This target already exists as an attribute in our
# result GraphModule. Whether or not it exists in
# `replacement`, the existing submodule takes precedence.
if gm_attr is not None:
continue
# CASE 2: The target exists as an attribute in `replacement`
# only, so we need to copy it over.
elif replacement_attr is not None:
new_attr = copy.deepcopy(replacement_attr)
if isinstance(replacement_attr, torch.nn.Module):
gm.add_submodule(node.target, new_attr)
else:
setattr(gm, node.target, new_attr)
# CASE 3: The target doesn't exist as an attribute in `gm`
# or `replacement`
else:
raise RuntimeError(
'Attempted to create a "',
node.op,
'" node during subgraph rewriting '
f"with target {node.target}, but "
"the referenced attribute does not "
"exist in the replacement GraphModule",
)
gm.graph.lint()
@compatibility(is_backward_compatible=True)
def replace_pattern(
gm: GraphModule,
pattern: Union[Callable, GraphModule],
replacement: Union[Callable, GraphModule],
) -> list[Match]:
"""
Matches all possible non-overlapping sets of operators and their
data dependencies (``pattern``) in the Graph of a GraphModule
(``gm``), then replaces each of these matched subgraphs with another
subgraph (``replacement``).
Args:
``gm``: The GraphModule that wraps the Graph to operate on
``pattern``: The subgraph to match in ``gm`` for replacement
``replacement``: The subgraph to replace ``pattern`` with
Returns:
List[Match]: A list of ``Match`` objects representing the places
in the original graph that ``pattern`` was matched to. The list
is empty if there are no matches. ``Match`` is defined as:
.. code-block:: python
class Match(NamedTuple):
# Node from which the match was found
anchor: Node
# Maps nodes in the pattern subgraph to nodes in the larger graph
nodes_map: Dict[Node, Node]
Examples:
.. code-block:: python
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, w1, w2):
m1 = torch.cat([w1, w2]).sum()
m2 = torch.cat([w1, w2]).sum()
return x + torch.max(m1) + torch.max(m2)
def pattern(w1, w2):
return torch.cat([w1, w2])
def replacement(w1, w2):
return torch.stack([w1, w2])
traced_module = symbolic_trace(M())
subgraph_rewriter.replace_pattern(traced_module, pattern, replacement)
The above code will first match ``pattern`` in the ``forward``
method of ``traced_module``. Pattern-matching is done based on
use-def relationships, not node names. For example, if you had
``p = torch.cat([a, b])`` in ``pattern``, you could match
``m = torch.cat([a, b])`` in the original ``forward`` function,
despite the variable names being different (``p`` vs ``m``).
The ``return`` statement in ``pattern`` is matched based on its
value only; it may or may not match to the ``return`` statement in
the larger graph. In other words, the pattern doesn't have to extend
to the end of the larger graph.
When the pattern is matched, it will be removed from the larger
function and replaced by ``replacement``. If there are multiple
matches for ``pattern`` in the larger function, each non-overlapping
match will be replaced. In the case of a match overlap, the first
found match in the set of overlapping matches will be replaced.
("First" here being defined as the first in a topological ordering
of the Nodes' use-def relationships. In most cases, the first Node
is the parameter that appears directly after ``self``, while the
last Node is whatever the function returns.)
One important thing to note is that the parameters of the
``pattern`` Callable must be used in the Callable itself,
and the parameters of the ``replacement`` Callable must match
the pattern. The first rule is why, in the above code block, the
``forward`` function has parameters ``x, w1, w2``, but the
``pattern`` function only has parameters ``w1, w2``. ``pattern``
doesn't use ``x``, so it shouldn't specify ``x`` as a parameter.
As an example of the second rule, consider replacing
.. code-block:: python
def pattern(x, y):
return torch.neg(x) + torch.relu(y)
with
.. code-block:: python
def replacement(x, y):
return torch.relu(x)
In this case, ``replacement`` needs the same number of parameters
as ``pattern`` (both ``x`` and ``y``), even though the parameter
``y`` isn't used in ``replacement``.
After calling ``subgraph_rewriter.replace_pattern``, the generated
Python code looks like this:
.. code-block:: python
def forward(self, x, w1, w2):
stack_1 = torch.stack([w1, w2])
sum_1 = stack_1.sum()
stack_2 = torch.stack([w1, w2])
sum_2 = stack_2.sum()
max_1 = torch.max(sum_1)
add_1 = x + max_1
max_2 = torch.max(sum_2)
add_2 = add_1 + max_2
return add_2
"""
match_and_replacements = _replace_pattern(gm, pattern, replacement)
return [
Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements
]
# Experimental API, not backward compatible
@compatibility(is_backward_compatible=False)
def replace_pattern_with_filters(
gm: GraphModule,
pattern: Union[Callable, Graph, GraphModule],
replacement: Union[Callable, Graph, GraphModule, None] = None,
match_filters: Optional[
list[Callable[["InternalMatch", Graph, Graph], bool]]
] = None,
ignore_literals: bool = False,
# Placed at the end to avoid breaking backward compatibility
replacement_callback: Optional[
Callable[["InternalMatch", Graph, Graph], Graph]
] = None,
node_name_match: str = "",
) -> list[ReplacedPatterns]:
"""
See replace_pattern for documentation. This function is an overload with an additional match_filter argument.
Args:
``match_filters``: A list of functions that take in
(match: InternalMatch, original_graph: Graph, pattern_graph: Graph) and return a boolean indicating
whether the match satisfies the condition.
See matcher_utils.py for definition of InternalMatch.
``replacement_callback``: A function that takes in a match and returns a
Graph to be used as the replacement. This allows you to construct a
replacement graph based on the match.
``replacement_callback``: Node name to match. If not empty, it will try to match the node name.
"""
return _replace_pattern(
gm,
pattern,
replacement,
match_filters,
ignore_literals,
replacement_callback,
node_name_match,
)
def _replace_pattern(
gm: GraphModule,
pattern: Union[Callable, Graph, GraphModule],
replacement: Union[Callable, Graph, GraphModule, None] = None,
match_filters: Optional[
list[Callable[["InternalMatch", Graph, Graph], bool]]
] = None,
ignore_literals: bool = False,
# Placed at the end to avoid breaking backward compatibility
replacement_callback: Optional[
Callable[["InternalMatch", Graph, Graph], Graph]
] = None,
node_name_match: str = "",
) -> list[ReplacedPatterns]:
from torch.fx.passes.utils.matcher_utils import InternalMatch, SubgraphMatcher
if match_filters is None:
match_filters = []
# Get the graphs for `gm`, `pattern`, `replacement`
original_graph: Graph = gm.graph
if isinstance(pattern, GraphModule):
pattern_graph = pattern.graph
elif isinstance(pattern, Graph):
pattern_graph = pattern
else:
pattern_graph = symbolic_trace(pattern).graph # type: ignore[arg-type]
matcher = SubgraphMatcher(
pattern_graph,
match_output=False,
match_placeholder=False,
remove_overlapping_matches=True,
ignore_literals=ignore_literals,
)
_matches: list[InternalMatch] = matcher.match(
original_graph, node_name_match=node_name_match
)
# Filter out matches that don't match the filter
_matches = [
m
for m in _matches
if all(
match_filter(m, original_graph, pattern_graph)
for match_filter in match_filters
)
]
if isinstance(replacement, GraphModule):
common_replacement_graph = replacement.graph
elif isinstance(replacement, Graph):
common_replacement_graph = replacement
elif callable(replacement):
common_replacement_graph = symbolic_trace(replacement).graph
else:
assert replacement_callback is not None, (
"Must provide either a replacement GraphModule or a replacement callback"
)
common_replacement_graph = None # type: ignore[assignment]
# As we progressively replace nodes, we'll need to keep track of how the match results should change
match_changed_node: dict[Node, Node] = {}
match_and_replacements = []
for match in _matches:
if replacement_callback is not None:
replacement_graph = replacement_callback(
match, original_graph, pattern_graph
)
else:
assert common_replacement_graph is not None, (
"Must provide either a replacement GraphModule or a replacement callback"
)
replacement_graph = common_replacement_graph
replacement_placeholders = [
n for n in replacement_graph.nodes if n.op == "placeholder"
]
# Build connecting between replacement graph's input and original graph input producer node
# Initialize `val_map` with mappings from placeholder nodes in
# `replacement` to their corresponding node in `original_graph`
assert len(match.placeholder_nodes) == len(replacement_placeholders)
val_map: dict[Node, Node] = {}
for rn, gn in zip(replacement_placeholders, match.placeholder_nodes):
if isinstance(gn, Node):
val_map[rn] = match_changed_node.get(gn, gn)
if gn != val_map[rn]:
# Update match.placeholder_nodes and match.nodes_map with the node that replaced gn
gn_ind = match.placeholder_nodes.index(gn)
match.placeholder_nodes[gn_ind] = match_changed_node[gn]
map_key = list(match.nodes_map.keys())[
list(match.nodes_map.values()).index(gn)
]
match.nodes_map[map_key] = match_changed_node[gn]
else:
val_map[rn] = gn
# Copy the replacement graph over
user_nodes: set[Node] = set()
for n in match.returning_nodes:
user_nodes.update(n.users)
first_user_node = None
if len(user_nodes) == 0:
first_user_node = None
elif len(user_nodes) == 1:
first_user_node = next(iter(user_nodes))
else:
# If there are multiple user nodes, we need to find the first user node
# in the current execution order of the `original_graph`
for n in original_graph.nodes:
if n in user_nodes:
first_user_node = n
break
first_next_node = None
if first_user_node is None:
# no users, so we insert the replacement graph before the first next
# node of returning nodes
next_node = None
for n in reversed(original_graph.nodes):
if n in match.returning_nodes:
first_next_node = next_node
break
else:
next_node = n
insert_point = (
first_user_node if first_user_node is not None else first_next_node
)
assert insert_point is not None, "The insert point can't be None"
with original_graph.inserting_before(insert_point):
copied_returning_nodes = original_graph.graph_copy(
replacement_graph, val_map
)
if isinstance(copied_returning_nodes, Node):
copied_returning_nodes = (copied_returning_nodes,)
# Get a list of nodes that have been replaced into the graph
replacement_nodes: list[Node] = [
v for v in val_map.values() if v not in match.placeholder_nodes
]
# Hook the output Node of the replacement subgraph in to the
# original Graph at the correct location
assert len(match.returning_nodes) == len(copied_returning_nodes) # type: ignore[arg-type]
for gn, copied_node in zip(match.returning_nodes, copied_returning_nodes): # type: ignore[arg-type]
gn.replace_all_uses_with(copied_node)
match_changed_node[gn] = copied_node
# Remove the original nodes
for node in reversed(pattern_graph.nodes):
if node.op != "placeholder" and node.op != "output":
gn = match.nodes_map[node]
gm.graph.erase_node(gn)
match_and_replacements.append(
ReplacedPatterns(
anchor=match.anchors[0],
nodes_map=match.nodes_map,
replacements=replacement_nodes,
)
)
# Update the passed-in GraphModule to reflect the new state of
# `original_graph`
gm.recompile()
# If `replacement` was an nn.Module, we'll need to make sure that
# all the submodules have been copied over correctly
if isinstance(replacement, torch.nn.Module):
_replace_attributes(gm, replacement)
return match_and_replacements
| ReplacedPatterns |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 65173,
"end": 65296
} | class ____(BaseModel, extra="forbid"):
lin_decay: "DecayParamsExpression" = Field(..., description="")
| LinDecayExpression |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/traceback.py | {
"start": 6623,
"end": 6768
} | class ____:
filename: str
lineno: int
name: str
line: str = ""
locals: Optional[Dict[str, pretty.Node]] = None
@dataclass
| Frame |
python | realpython__materials | fastapi-url-shortener/source_code_final/shortener_app/models.py | {
"start": 86,
"end": 411
} | class ____(Base):
__tablename__ = "urls"
id = Column(Integer, primary_key=True)
key = Column(String, unique=True, index=True)
secret_key = Column(String, unique=True, index=True)
target_url = Column(String, index=True)
is_active = Column(Boolean, default=True)
clicks = Column(Integer, default=0)
| URL |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/containers/dataframe.py | {
"start": 1546,
"end": 2175
} | class ____:
def __init__(
self,
obj: plc.Table | plc.Column,
metadata: list[plc.interop.ColumnMetadata],
stream: Stream,
) -> None:
self.obj = obj
self.metadata = metadata
self.stream = stream
def __arrow_c_array__(
self, requested_schema: None = None
) -> tuple[CapsuleType, CapsuleType]:
return self.obj._to_schema(self.metadata), self.obj._to_host_array(
stream=self.stream
)
# Pacify the type checker. DataFrame init asserts that all the columns
# have a string name, so let's narrow the type.
| _ObjectWithArrowMetadata |
python | huggingface__transformers | src/transformers/pipelines/visual_question_answering.py | {
"start": 596,
"end": 9599
} | class ____(Pipeline):
"""
Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering`. This pipeline is currently only
available in PyTorch.
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
>>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
>>> oracle(question="What is she wearing ?", image=image_url)
[{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}]
>>> oracle(question="What is she wearing ?", image=image_url, top_k=1)
[{'score': 0.948, 'answer': 'hat'}]
>>> oracle(question="Is this a person ?", image=image_url, top_k=1)
[{'score': 0.993, 'answer': 'yes'}]
>>> oracle(question="Is this a man ?", image=image_url, top_k=1)
[{'score': 0.996, 'answer': 'no'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task
identifiers: `"visual-question-answering", "vqa"`.
The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See
the up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = True
_pipeline_calls_generate = True
# Make sure the docstring is updated when the default generation config is changed
_default_generation_config = GenerationConfig(
max_new_tokens=256,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES)
def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs):
preprocess_params, postprocess_params = {}, {}
if padding is not None:
preprocess_params["padding"] = padding
if truncation is not None:
preprocess_params["truncation"] = truncation
if timeout is not None:
preprocess_params["timeout"] = timeout
if top_k is not None:
postprocess_params["top_k"] = top_k
forward_params = {}
if getattr(self, "assistant_model", None) is not None:
forward_params["assistant_model"] = self.assistant_model
if getattr(self, "assistant_tokenizer", None) is not None:
forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer
return preprocess_params, forward_params, postprocess_params
def __call__(
self,
image: Union["Image.Image", str, list["Image.Image"], list[str], "KeyDataset"],
question: str | list[str] | None = None,
**kwargs,
):
r"""
Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed
below:
- `pipeline(image=image, question=question)`
- `pipeline({"image": image, "question": question})`
- `pipeline([{"image": image, "question": question}])`
- `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])`
Args:
image (`str`, `list[str]`, `PIL.Image`, `list[PIL.Image]` or `KeyDataset`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. If given a single image, it can be
broadcasted to multiple questions.
For dataset: the passed in dataset must be of type `transformers.pipelines.pt_utils.KeyDataset`
Example:
```python
>>> from transformers.pipelines.pt_utils import KeyDataset
>>> from datasets import load_dataset
>>> dataset = load_dataset("detection-datasets/coco")
>>> oracle(image=KeyDataset(dataset, "image"), question="What's in this image?")
```
question (`str`, `list[str]`):
The question(s) asked. If given a single question, it can be broadcasted to multiple images.
If multiple images and questions are given, each and every question will be broadcasted to all images
(same effect as a Cartesian product)
top_k (`int`, *optional*, defaults to 5):
The number of top labels that will be returned by the pipeline. If the provided number is higher than
the number of labels available in the model configuration, it will default to the number of labels.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:
- **label** (`str`) -- The label identified by the model.
- **score** (`int`) -- The score attributed by the model for that label.
"""
is_dataset = isinstance(image, KeyDataset)
is_image_batch = isinstance(image, list) and all(isinstance(item, (Image.Image, str)) for item in image)
is_question_batch = isinstance(question, list) and all(isinstance(item, str) for item in question)
if isinstance(image, (Image.Image, str)) and isinstance(question, str):
inputs = {"image": image, "question": question}
elif (is_image_batch or is_dataset) and isinstance(question, str):
inputs = [{"image": im, "question": question} for im in image]
elif isinstance(image, (Image.Image, str)) and is_question_batch:
inputs = [{"image": image, "question": q} for q in question]
elif (is_image_batch or is_dataset) and is_question_batch:
question_image_pairs = []
for q in question:
for im in image:
question_image_pairs.append({"image": im, "question": q})
inputs = question_image_pairs
else:
"""
Supports the following format
- {"image": image, "question": question}
- [{"image": image, "question": question}]
- Generator and datasets
"""
inputs = image
results = super().__call__(inputs, **kwargs)
return results
def preprocess(self, inputs, padding=False, truncation=False, timeout=None):
image = load_image(inputs["image"], timeout=timeout)
model_inputs = self.tokenizer(
inputs["question"],
return_tensors="pt",
padding=padding,
truncation=truncation,
)
image_features = self.image_processor(images=image, return_tensors="pt")
image_features = image_features.to(self.dtype)
model_inputs.update(image_features)
return model_inputs
def _forward(self, model_inputs, **generate_kwargs):
if self.model.can_generate():
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
else:
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, top_k=5):
if self.model.can_generate():
return [
{"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()}
for output_ids in model_outputs
]
else:
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
probs = model_outputs.logits.sigmoid()[0]
scores, ids = probs.topk(top_k)
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| VisualQuestionAnsweringPipeline |
python | huggingface__transformers | tests/models/xglm/test_tokenization_xglm.py | {
"start": 837,
"end": 2905
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = ["facebook/xglm-564M"]
tokenizer_class = XGLMTokenizer
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '生活的', '真', '谛', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '▁', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [1018, 67, 11, 3194, 6, 61533, 44, 254, 23572, 22, 465, 13323, 4, 53, 319, 67, 84785, 185, 5, 6, 63782, 2530, 3, 322, 2751, 31227, 2751, 31227, 31227, 6, 0, 1075, 0, 1193, 268, 12894, 44036, 2817, 113, 77749, 29, 21257, 72, 13, 31227, 5, 2079, 246, 72, 53, 10845, 246, 72, 30937, 20933, 1271, 256, 206, 7667] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '生活的', '真', '<unk>', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '▁', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊 I was born in 92000, and this is falsé. 生活的真<unk>是 Hi Hello Hi Hello Hello <s> hi<s> there The following string should be properly encoded: Hello. But ird and ปี ird ด Hey how are you doing"
@classmethod
def setUpClass(cls):
super().setUpClass()
from_pretrained_id = "facebook/xglm-564M"
tokenizer = XGLMTokenizer.from_pretrained(from_pretrained_id)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained(cls.tmpdirname)
cls.tokenizers = [tokenizer]
| XGLMTokenizationTest |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 2172,
"end": 2620
} | class ____(Model):
data = ColumnData(Any, Any, default={})
ref1 = Instance(OtherModel, default=lambda: OtherModel())
ref2 = Instance(OtherModel, default=lambda: OtherModel())
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# DocumentChangedEvent --------------------------------------------------------
| SomeModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass1.py | {
"start": 2108,
"end": 2144
} | class ____(Generic[*Ts2]): ...
| ClassC1 |
python | google__jax | jax/experimental/jax2tf/examples/saved_model_main_test.py | {
"start": 959,
"end": 2437
} | class ____(tf_test_util.JaxToTfTestCase):
def setUp(self):
super().setUp()
FLAGS.model_path = os.path.join(absltest.get_default_test_tmpdir(),
"saved_models")
FLAGS.num_epochs = 1
FLAGS.test_savedmodel = True
FLAGS.mock_data = True
@parameterized.named_parameters(
dict(
testcase_name=f"_{model}_batch={serving_batch_size}",
model=model,
serving_batch_size=serving_batch_size)
for model in ["mnist_pure_jax", "mnist_flax"]
for serving_batch_size in [1, -1])
def test_train_and_save_full(self,
model="mnist_flax",
serving_batch_size=-1):
if (serving_batch_size == -1 and
config.jax2tf_default_native_serialization.value and
not config.dynamic_shapes.value):
self.skipTest("shape polymorphism but --jax_dynamic_shapes is not set.")
FLAGS.model = model
FLAGS.model_classifier_layer = True
FLAGS.serving_batch_size = serving_batch_size
saved_model_main.train_and_save()
@parameterized.named_parameters(
dict(testcase_name=f"_{model}", model=model)
for model in ["mnist_pure_jax", "mnist_flax"])
def test_train_and_save_features(self, model="mnist_flax"):
FLAGS.model = model
FLAGS.model_classifier_layer = False
saved_model_main.train_and_save()
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| SavedModelMainTest |
python | ray-project__ray | doc/source/serve/doc_code/grpc_proxy/grpc_guide.py | {
"start": 9609,
"end": 12163
} | class ____:
def __init__(self):
self.nums = {}
def num_lookup(self, name: str) -> Tuple[int, grpc.StatusCode, str]:
if name not in self.nums:
self.nums[name] = len(self.nums)
code = grpc.StatusCode.INVALID_ARGUMENT
message = f"{name} not found, adding to nums."
else:
code = grpc.StatusCode.OK
message = f"{name} found."
return self.nums[name], code, message
def __call__(
self,
user_message: UserDefinedMessage,
grpc_context: RayServegRPCContext, # to use grpc context, add this kwarg
) -> UserDefinedResponse:
greeting = f"Hello {user_message.name} from {user_message.origin}"
num, code, message = self.num_lookup(user_message.name)
# Set custom code, details, and trailing metadata.
grpc_context.set_code(code)
grpc_context.set_details(message)
grpc_context.set_trailing_metadata([("num", str(num))])
user_response = UserDefinedResponse(
greeting=greeting,
num=num,
)
return user_response
g = GrpcDeployment.bind()
app1 = "app1"
serve.run(target=g, name=app1, route_prefix=f"/{app1}")
# __end_grpc_context_define_app__
# __begin_grpc_context_client__
import grpc
from user_defined_protos_pb2_grpc import UserDefinedServiceStub
from user_defined_protos_pb2 import UserDefinedMessage
channel = grpc.insecure_channel("localhost:9000")
stub = UserDefinedServiceStub(channel)
request = UserDefinedMessage(name="foo", num=30, origin="bar")
metadata = (("application", "app1"),)
# First call is going to page miss and return INVALID_ARGUMENT status code.
try:
response, call = stub.__call__.with_call(request=request, metadata=metadata)
except grpc.RpcError as rpc_error:
assert rpc_error.code() == grpc.StatusCode.INVALID_ARGUMENT
assert rpc_error.details() == "foo not found, adding to nums."
assert any(
[key == "num" and value == "0" for key, value in rpc_error.trailing_metadata()]
)
assert any([key == "request_id" for key, _ in rpc_error.trailing_metadata()])
# Second call is going to page hit and return OK status code.
response, call = stub.__call__.with_call(request=request, metadata=metadata)
assert call.code() == grpc.StatusCode.OK
assert call.details() == "foo found."
assert any([key == "num" and value == "0" for key, value in call.trailing_metadata()])
assert any([key == "request_id" for key, _ in call.trailing_metadata()])
# __end_grpc_context_client__
| GrpcDeployment |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_T.py | {
"start": 7091,
"end": 8286
} | class ____(Benchmark):
r"""
Trid objective function.
This class defines the Trid [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Trid}}(x) = \sum_{i=1}^{n} (x_i - 1)^2
- \sum_{i=2}^{n} x_i x_{i-1}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-20, 20]` for :math:`i = 1, ..., 6`.
*Global optimum*: :math:`f(x) = -50` for :math:`x = [6, 10, 12, 12, 10, 6]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO Jamil#150, starting index of second summation term should be 2.
"""
change_dimensionality = True
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))
self.global_optimum = [[6, 10, 12, 12, 10, 6]]
self.fglob = -50.0
def fun(self, x, *args):
self.nfev += 1
return sum((x - 1.0) ** 2.0) - sum(x[1:] * x[:-1])
| Trid |
python | huggingface__transformers | tests/models/deepseek_v3/test_modeling_deepseek_v3.py | {
"start": 7674,
"end": 15233
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DeepseekV3Model,
DeepseekV3ForCausalLM,
DeepseekV3ForSequenceClassification,
DeepseekV3ForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (DeepseekV3ForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": DeepseekV3Model,
"text-classification": DeepseekV3ForSequenceClassification,
"token-classification": DeepseekV3ForTokenClassification,
"text-generation": DeepseekV3ForCausalLM,
"zero-shot": DeepseekV3ForSequenceClassification,
}
if is_torch_available()
else {}
)
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
# used in `test_torch_compile_for_training`
_torch_compile_train_cls = DeepseekV3ForCausalLM if is_torch_available() else None
def setUp(self):
self.model_tester = DeepseekV3ModelTester(self)
self.config_tester = ConfigTester(self, config_class=DeepseekV3Config, hidden_size=37)
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
"""Needs to be overridden as deepseek has special MLA cache format (though we don't really use the MLA)"""
self.assertIsInstance(past_key_values, Cache)
# (batch, head, seq_length, head_features)
expected_common_shape = (
batch_size,
getattr(config, "num_key_value_heads", config.num_attention_heads),
seq_length,
)
expected_key_shape = expected_common_shape + (config.qk_nope_head_dim + config.qk_rope_head_dim,)
expected_value_shape = expected_common_shape + (config.v_head_dim,)
for layer in past_key_values.layers:
self.assertEqual(layer.keys.shape, expected_key_shape)
self.assertEqual(layer.values.shape, expected_value_shape)
@parameterized.expand([("random",), ("same",)])
@unittest.skip("DeepseekV3 is not compatible with assisted decoding")
def test_assisted_decoding_matches_greedy_search(self, assistant_type):
pass
@unittest.skip("DeepseekV3 is not compatible with assisted decoding")
def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type):
pass
@unittest.skip("DeepseekV3 is not compatible with assisted decoding")
def test_assisted_decoding_sample(self):
pass
@unittest.skip("Deepseek-V3 uses MLA so it is not compatible with the standard cache format")
def test_beam_search_generate_dict_outputs_use_cache(self):
pass
@unittest.skip("Deepseek-V3 uses MLA so it is not compatible with the standard cache format")
def test_greedy_generate_dict_outputs_use_cache(self):
pass
@unittest.skip(reason="SDPA can't dispatch on flash due to unsupported head dims")
def test_sdpa_can_dispatch_on_flash(self):
pass
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@require_torch_large_accelerator
@slow
def test_eager_matches_sdpa_generate(self):
"""
Overwriting the common test as the test is flaky on tiny models
"""
max_new_tokens = 30
tokenizer = AutoTokenizer.from_pretrained("bzantium/tiny-deepseek-v3")
model_sdpa = DeepseekV3ForCausalLM.from_pretrained(
"bzantium/tiny-deepseek-v3",
dtype=torch.float16,
).to(torch_device)
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
model_eager = DeepseekV3ForCausalLM.from_pretrained(
"bzantium/tiny-deepseek-v3",
dtype=torch.float16,
attn_implementation="eager",
).to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
texts = [
"hi here's a longer context, getting longer and",
"Hello this is a very long sentence my friend, very long for real",
"Today I am in Paris and",
]
for padding_side in ["left", "right"]:
tokenizer.padding_side = padding_side
tokenizer.pad_token = tokenizer.eos_token
inputs = tokenizer(texts, return_tensors="pt", padding=True).to(torch_device)
res_eager = model_eager.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
res_sdpa = model_sdpa.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
with self.subTest(f"{padding_side}"):
torch.testing.assert_close(
res_eager,
res_sdpa,
msg=f"\n{tokenizer.batch_decode(res_eager)} \nvs\n{tokenizer.batch_decode(res_sdpa)}",
)
@require_torch_accelerator
def test_flex_attention_with_grads(self):
"""
Overwriting as the namings/functionality on the attention part are different; for now it's more of a unique model.
Original issue is also due to dimensionalities, here specifically due to dims not being a multiple of 2.
"""
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config._attn_implementation = "flex_attention"
# Disable dropout
config.attention_dropout = 0.0
# Deepseek 3 specific - manipulate nope and adjust calculated total head dim
config.qk_nope_head_dim = 16
config.qk_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
model = model_class(config).to(device=torch_device)
self.assertTrue(model.config._attn_implementation == "flex_attention")
# Elaborate workaround for encoder-decoder models as some do not specify their main input
dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)}
if config.is_encoder_decoder:
dummy_inputs["decoder_input_ids"] = inputs_dict["decoder_input_ids"].to(torch_device)
dummy_inputs["decoder_attention_mask"] = inputs_dict["decoder_attention_mask"].to(torch_device)
# If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605)
_ = model(**dummy_inputs)
def test_deepseek_v3_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_labels)
model = DeepseekV3ForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch_accelerator
| DeepseekV3ModelTest |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 51993,
"end": 52731
} | class ____(Interface):
package = Attribute(
'The "current package" where the predicate '
'configuration statement was found'
)
registry = Attribute(
'The "current" application registry where the predicate was invoked'
)
settings = Attribute(
'The deployment settings dictionary related '
'to the current application'
)
def maybe_dotted(value):
"""Resolve the :term:`dotted Python name` ``dotted`` to a
global Python object. If ``dotted`` is not a string, return
it without attempting to do any name resolution. If
``dotted`` is a relative dotted name (e.g. ``.foo.bar``,
consider it relative to the ``package``."""
| IPredicateInfo |
python | kamyu104__LeetCode-Solutions | Python/tweet-counts-per-frequency.py | {
"start": 396,
"end": 3402
} | class ____(object):
P_NUMERATOR, P_DENOMINATOR = 1, 2 # P = 1/4 in redis implementation
MAX_LEVEL = 32 # enough for 2^32 elements
def __init__(self, end=float("inf"), can_duplicated=False):
random.seed(0)
self.__head = SkipNode()
self.__len = 0
self.__can_duplicated = can_duplicated
self.add(end)
def lower_bound(self, target):
return self.__lower_bound(target, self.__find_prev_nodes(target))
def find(self, target):
return self.__find(target, self.__find_prev_nodes(target))
def add(self, val):
if not self.__can_duplicated and self.find(val):
return False
node = SkipNode(self.__random_level(), val)
if len(self.__head.nexts) < len(node.nexts):
self.__head.nexts.extend([None]*(len(node.nexts)-len(self.__head.nexts)))
prevs = self.__find_prev_nodes(val)
for i in xrange(len(node.nexts)):
node.nexts[i] = prevs[i].nexts[i]
if prevs[i].nexts[i]:
prevs[i].nexts[i].prevs[i] = node
prevs[i].nexts[i] = node
node.prevs[i] = prevs[i]
self.__len += 1
return True
def remove(self, val):
prevs = self.__find_prev_nodes(val)
curr = self.__find(val, prevs)
if not curr:
return False
self.__len -= 1
for i in reversed(xrange(len(curr.nexts))):
prevs[i].nexts[i] = curr.nexts[i]
if curr.nexts[i]:
curr.nexts[i].prevs[i] = prevs[i]
if not self.__head.nexts[i]:
self.__head.nexts.pop()
return True
def __lower_bound(self, val, prevs):
if prevs:
candidate = prevs[0].nexts[0]
if candidate:
return candidate
return None
def __find(self, val, prevs):
candidate = self.__lower_bound(val, prevs)
if candidate and candidate.val == val:
return candidate
return None
def __find_prev_nodes(self, val):
prevs = [None]*len(self.__head.nexts)
curr = self.__head
for i in reversed(xrange(len(self.__head.nexts))):
while curr.nexts[i] and curr.nexts[i].val < val:
curr = curr.nexts[i]
prevs[i] = curr
return prevs
def __random_level(self):
level = 1
while random.randint(1, SkipList.P_DENOMINATOR) <= SkipList.P_NUMERATOR and \
level < SkipList.MAX_LEVEL:
level += 1
return level
def __len__(self):
return self.__len-1 # excluding end node
def __str__(self):
result = []
for i in reversed(xrange(len(self.__head.nexts))):
result.append([])
curr = self.__head.nexts[i]
while curr:
result[-1].append(str(curr.val))
curr = curr.nexts[i]
return "\n".join(map(lambda x: "->".join(x), result))
| SkipList |
python | django__django | django/contrib/gis/geos/coordseq.py | {
"start": 492,
"end": 6836
} | class ____(GEOSBase):
"The internal representation of a list of coordinates inside a Geometry."
ptr_type = CS_PTR
def __init__(self, ptr, z=False):
"Initialize from a GEOS pointer."
if not isinstance(ptr, CS_PTR):
raise TypeError("Coordinate sequence should initialize with a CS_PTR.")
self._ptr = ptr
self._z = z
def __iter__(self):
"Iterate over each point in the coordinate sequence."
for i in range(self.size):
yield self[i]
def __len__(self):
"Return the number of points in the coordinate sequence."
return self.size
def __str__(self):
"Return the string representation of the coordinate sequence."
return str(self.tuple)
def __getitem__(self, index):
"Return the coordinate sequence value at the given index."
self._checkindex(index)
return self._point_getter(index)
def __setitem__(self, index, value):
"Set the coordinate sequence value at the given index."
# Checking the input value
if isinstance(value, (list, tuple)):
pass
elif numpy and isinstance(value, numpy.ndarray):
pass
else:
raise TypeError(
"Must set coordinate with a sequence (list, tuple, or numpy array)."
)
# Checking the dims of the input
if self.dims == 3 and self._z:
n_args = 3
point_setter = self._set_point_3d
else:
n_args = 2
point_setter = self._set_point_2d
if len(value) != n_args:
raise TypeError("Dimension of value does not match.")
self._checkindex(index)
point_setter(index, value)
# #### Internal Routines ####
def _checkindex(self, index):
"Check the given index."
if not (0 <= index < self.size):
raise IndexError(f"Invalid GEOS Geometry index: {index}")
def _checkdim(self, dim):
"Check the given dimension."
if dim < 0 or dim > 2:
raise GEOSException(f'Invalid ordinate dimension: "{dim:d}"')
def _get_x(self, index):
return capi.cs_getx(self.ptr, index, byref(c_double()))
def _get_y(self, index):
return capi.cs_gety(self.ptr, index, byref(c_double()))
def _get_z(self, index):
return capi.cs_getz(self.ptr, index, byref(c_double()))
def _set_x(self, index, value):
capi.cs_setx(self.ptr, index, value)
def _set_y(self, index, value):
capi.cs_sety(self.ptr, index, value)
def _set_z(self, index, value):
capi.cs_setz(self.ptr, index, value)
@property
def _point_getter(self):
return self._get_point_3d if self.dims == 3 and self._z else self._get_point_2d
def _get_point_2d(self, index):
return (self._get_x(index), self._get_y(index))
def _get_point_3d(self, index):
return (self._get_x(index), self._get_y(index), self._get_z(index))
def _set_point_2d(self, index, value):
x, y = value
self._set_x(index, x)
self._set_y(index, y)
def _set_point_3d(self, index, value):
x, y, z = value
self._set_x(index, x)
self._set_y(index, y)
self._set_z(index, z)
# #### Ordinate getting and setting routines ####
def getOrdinate(self, dimension, index):
"Return the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))
def setOrdinate(self, dimension, index, value):
"Set the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
capi.cs_setordinate(self.ptr, index, dimension, value)
def getX(self, index):
"Get the X value at the index."
return self.getOrdinate(0, index)
def setX(self, index, value):
"Set X with the value at the given index."
self.setOrdinate(0, index, value)
def getY(self, index):
"Get the Y value at the given index."
return self.getOrdinate(1, index)
def setY(self, index, value):
"Set Y with the value at the given index."
self.setOrdinate(1, index, value)
def getZ(self, index):
"Get Z with the value at the given index."
return self.getOrdinate(2, index)
def setZ(self, index, value):
"Set Z with the value at the given index."
self.setOrdinate(2, index, value)
# ### Dimensions ###
@property
def size(self):
"Return the size of this coordinate sequence."
return capi.cs_getsize(self.ptr, byref(c_uint()))
@property
def dims(self):
"Return the dimensions of this coordinate sequence."
return capi.cs_getdims(self.ptr, byref(c_uint()))
@property
def hasz(self):
"""
Return whether this coordinate sequence is 3D. This property value is
inherited from the parent Geometry.
"""
return self._z
# ### Other Methods ###
def clone(self):
"Clone this coordinate sequence."
return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)
@property
def kml(self):
"Return the KML representation for the coordinates."
# Getting the substitution string depending on whether the coordinates
# have a Z dimension.
if self.hasz:
substr = "%s,%s,%s "
else:
substr = "%s,%s,0 "
return (
"<coordinates>%s</coordinates>"
% "".join(substr % self[i] for i in range(len(self))).strip()
)
@property
def tuple(self):
"Return a tuple version of this coordinate sequence."
n = self.size
get_point = self._point_getter
if n == 1:
return get_point(0)
return tuple(get_point(i) for i in range(n))
@property
def is_counterclockwise(self):
"""Return whether this coordinate sequence is counterclockwise."""
ret = c_byte()
if not capi.cs_is_ccw(self.ptr, byref(ret)):
raise GEOSException(
'Error encountered in GEOS C function "%s".' % capi.cs_is_ccw.func_name
)
return ret.value == 1
| GEOSCoordSeq |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/test_util.py | {
"start": 5439,
"end": 14506
} | class ____(tf_test.TestCase, parameterized.TestCase):
"""Provides comparison helper for dtensor vs local results."""
@classmethod
def setUpClass(cls):
super(DTensorBaseTest, cls).setUpClass()
def setUp(self):
super().setUp()
self._backend_configurator = DTensorTestBackendConfigurator(self)
def tearDown(self):
# Make sure all async ops finish.
try:
context.async_wait()
finally:
# TODO(hthu): Remove the reset once we fixed the CopyToMesh with
# DefaultMesh placement issue.
reset_dtensor()
self._backend_configurator.tearDown()
super().tearDown()
@staticmethod
def configTestMesh( # pylint: disable=invalid-name
device_type_mesh_map: typing.Dict[typing.Text, layout_lib.Mesh],
) -> layout_lib.Mesh:
"""Configs corresponding mesh given test context.
If runs on a CPU mesh, set virtual device on CPU.
If runs on a GPU mesh, sets virtual device on GPU with proper memory limits.
if runs on a TPU mesh, initializes TPU system.
Args:
device_type_mesh_map: A dictionary containing device_type -> mesh mapping.
Returns:
A properly configured mesh for use in test.
"""
reset_context()
def get_mesh(device_type):
mesh = device_type_mesh_map.get(device_type, None)
if mesh is None:
raise ValueError(
'Requires a %s mesh to run test on %s.' % (device_type, device_type)
)
return mesh
mesh = None
if is_tpu_present():
mesh = get_mesh('TPU')
reset_context()
accelerator_util.initialize_accelerator_system('TPU')
elif tf_config.list_physical_devices('GPU'):
mesh = get_mesh('GPU')
reset_logical_devices('GPU', np.prod(mesh.shape()))
accelerator_util.initialize_accelerator_system('GPU')
else:
mesh = get_mesh('CPU')
reset_logical_devices('CPU', np.prod(mesh.shape()))
accelerator_util.initialize_accelerator_system('CPU')
return mesh
def skipForDeviceType( # pylint: disable=invalid-name
self,
device_type: typing.List[str],
reason: str,
unless_device_count_equals_to=None,
):
"""Skip the test for the specific device_type.
Args:
device_type: list of device types, one of "CPU", "GPU", or "TPU".
reason: string that describe the reason for skipping the test.
unless_device_count_equals_to: Optional int. This parameter only works if
device_type is "TPU". If set, the test will be skipped unless the number
of TPUs equals to the specified count.
"""
physical_device_types = set(
[d.device_type for d in tf_config.list_physical_devices()]
)
for device in device_type:
if device == 'TPU' and is_tpu_present():
if unless_device_count_equals_to is None:
self.skipTest(reason)
elif (
len(list_local_logical_devices(device))
!= unless_device_count_equals_to
):
self.skipTest(reason)
if (
device == 'CPU'
and len(physical_device_types) == 1
and 'CPU' in physical_device_types
):
# Make sure we skip when only `CPU` is present.
self.skipTest(reason)
if device == 'GPU' and 'GPU' in physical_device_types:
self.skipTest(reason)
def skipForTfrt(self, reason: str): # pylint: disable=invalid-name
if is_tfrt_enabled():
self.skipTest(reason)
def skipTest(self, reason): # pylint: disable=invalid-name
# skipTest() may be called in super().setUp()
if hasattr(self, '_backend_configurator'):
self._backend_configurator.tearDown()
super().skipTest(reason)
def assertDTensorEqual(
self, # pylint: disable=invalid-name
expected_result,
expected_layout,
result_dtensor,
tol=DEFAULT_TOL,
):
"""Asserts DTensor is of the particular value."""
if issubclass(
type(result_dtensor), resource_variable_ops.BaseResourceVariable
):
result_dtensor = result_dtensor.value()
if expected_layout is not None:
# This, the assertEqual, is a pure proto raw bytes comparison. To make it
# human-readable, use the `to_string` api for Layout for the dedicated msg
# field.
#
# Futhurmore, as the mesh part is very long and usually identical. Try to
# cut them as well, to make it easier to read.
expected_str = expected_layout.to_string()
got_str = api.fetch_layout(result_dtensor).to_string()
index_for_mesh = expected_str.find('mesh:')
if (
index_for_mesh != -1
and got_str.find(expected_str[index_for_mesh:]) != -1
):
# the mesh part is same. cut them so it is more readable.
expected_str = expected_str[:index_for_mesh]
got_str = got_str[: got_str.find('mesh:')]
self.assertEqual(
api.fetch_layout(result_dtensor),
expected_layout,
msg=(
'=======\nexpected layout is\n {}\n\nwhile got layout is\n {}\n'
.format(expected_str, got_str)
),
)
layout = api.fetch_layout(result_dtensor)
unpacked = [t.numpy() for t in api.unpack(result_dtensor)]
# Check global shape.
self.assertAllEqual(expected_result.shape, result_dtensor.shape)
result_dtensor = numpy_util.to_numpy(result_dtensor)
# Check dtype.
# Note: This check needs be after result_dtensor is converted
# into numpy, due to failure with Numpy version 1.18.5.
self.assertEqual(
expected_result.dtype, result_dtensor.dtype, result_dtensor
)
# Check value on concatenated result DTensor.
self.assertAllClose(expected_result, result_dtensor, atol=tol, rtol=tol)
# In addition to check the 'concatenated' DTensor, we also check all
# "replicated" parts are same.
#
# The algorithm is simple:
# 1. For a mesh with topology (x,y,z,p), and a DTensor with layout ('',z,x).
# 2. Create some data structures:
# - create a mapping from device id (called offset below) to mesh
# location. For the mesh above, loc {x:1,y:2,z:2,p:0} means the device
# is located at that coordinates in the 4-D mesh.
# - create a mapping from mesh location to device id.
# 3. Find all replicated mesh dimension names, i.e., 'y' and `p` in the
# example above.
# 4. Iterate over all unpacked components, translate the offset (device id)
# to mesh location, called (x',y',z',p').
# - For `y`, which is replicated dim in the mesh, check all unpacked
# components at (x',*,z',p') are same as the component at (x',0,z',p').
# - For `p`, which is also replicated dim in the mesh, check all unpacked
# components at (x',y',z',*) are same as the component at (x',y',z',0).
def hash_key(loc):
"""Hash key for Python dict."""
# Python dict is unhashable. Creates a sorted dict and dumps as json str.
d = collections.OrderedDict(sorted(loc.items(), key=lambda x: x[0]))
return json.dumps(d)
offset_to_mesh_loc_dict = layout.mesh.unravel_index()
mesh_loc_to_offset_dict = {}
for offset, loc in offset_to_mesh_loc_dict.items():
mesh_loc_to_offset_dict[hash_key(loc)] = offset
# pylint: disable=protected-access
replicated_dims = [
x for x in layout.mesh.dim_names if x not in layout.sharding_specs
]
# pylint: enable=protected-access
for offset, tensor in enumerate(unpacked):
mesh_loc = offset_to_mesh_loc_dict[offset]
for dim_sharding in replicated_dims:
if mesh_loc[dim_sharding] != 0:
mesh_loc = copy.deepcopy(mesh_loc) # deepcopy as we will mutate
mesh_loc[dim_sharding] = 0
offset = mesh_loc_to_offset_dict[hash_key(mesh_loc)]
# tol is be as low as possible as they should match "exactly". so, we
# ignore the `tol` passed by caller and choose the default one.
self.assertAllClose(tensor, unpacked[offset])
def product(*lists):
"""Makes a product of names parameters list."""
# Each element lists should be a tuple of tuples of the form
# (("test1", ...), ("test2", ...), ...).
# Function returns the product of the lists with the labels concatenated.
return [ # pylint: disable=g-complex-comprehension
(''.join(p[0] for p in elt), *sum((p[1:] for p in elt), ()))
for elt in itertools.product(*lists)
]
def reset_dtensor():
"""Resets the singleton DTensor Device.
This behavior is not generally exposed and only meant to be used in tests.
"""
api._reset() # pylint: disable=protected-access
__all__ = [
'DEFAULT_TOL',
'DTensorTestUtilBackend',
'DTENSOR_TEST_UTIL_BACKEND',
'create_device_ids_array',
'create_device_array',
'create_device_list',
'reset_context',
'reset_logical_devices',
'list_local_logical_devices',
'is_tfrt_enabled',
'FLAGS',
'DTensorBaseTest',
'product',
'reset_dtensor',
'is_tpu_present',
'is_gpu_present',
'use_multi_device_mode',
]
| DTensorBaseTest |
python | h5py__h5py | h5py/tests/test_vds/test_lowlevel_vds.py | {
"start": 275,
"end": 3074
} | class ____(ut.TestCase):
def setUp(self):
self.working_dir = tempfile.TemporaryDirectory()
self.fname = ['raw_file_1.h5', 'raw_file_2.h5', 'raw_file_3.h5']
k = 0
for outfile in self.fname:
filename = osp.join(self.working_dir.name, outfile)
with h5.File(filename, 'w') as f:
f['data'] = np.ones((20, 200, 200))*k
k += 1
with h5.File(osp.join(self.working_dir.name, 'raw_file_4.h5'), 'w') as f:
f['data'] = np.ones((18, 200, 200))*3
self.fname.append('raw_file_4.h5')
self.fname = [osp.join(self.working_dir.name, ix) for ix in self.fname]
def test_eiger_low_level(self):
outfile = osp.join(self.working_dir.name, make_name('eiger{}.h5'))
with h5.File(outfile, 'w', libver='latest') as f:
vdset_shape = (78, 200, 200)
vdset_max_shape = vdset_shape
virt_dspace = h5.h5s.create_simple(vdset_shape, vdset_max_shape)
dcpl = h5.h5p.create(h5.h5p.DATASET_CREATE)
dcpl.set_fill_value(np.array([-1]))
# Create the source dataset dataspace
k = 0
for foo in self.fname:
with h5.File(foo, 'r') as in_f:
src_shape = in_f['data'].shape
src_dspace = h5.h5s.create_simple(src_shape, src_shape)
# Select the source dataset hyperslab
src_dspace.select_hyperslab(start=(0, 0, 0),
stride=(1, 1, 1),
count=(1, 1, 1),
block=src_shape)
virt_dspace.select_hyperslab(start=(k, 0, 0),
stride=(1, 1, 1),
count=(1, 1, 1),
block=src_shape)
dcpl.set_virtual(virt_dspace, foo.encode('utf-8'),
b'data', src_dspace)
k += src_shape[0]
# Create the virtual dataset
h5.h5d.create(f.id, name=b"data", tid=h5.h5t.NATIVE_INT16,
space=virt_dspace, dcpl=dcpl)
with h5.File(outfile, 'r') as f:
d = f['data']
self.assertEqual(d[10, 100, 10], 0.0)
self.assertEqual(d[30, 100, 100], 1.0)
self.assertEqual(d[50, 100, 100], 2.0)
self.assertEqual(d[70, 100, 100], 3.0)
def tearDown(self):
self.working_dir.cleanup()
if __name__ == "__main__":
ut.main()
'''
Unit test for the low level vds interface for excalibur
https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
| TestEigerLowLevel |
python | hynek__structlog | src/structlog/typing.py | {
"start": 3165,
"end": 8537
} | class ____(BindableLogger, Protocol):
"""
**Protocol**: A `BindableLogger` that filters by a level.
The only way to instantiate one is using `make_filtering_bound_logger`.
.. versionadded:: 20.2.0
.. versionadded:: 22.2.0 String interpolation using positional arguments.
.. versionadded:: 22.2.0
Async variants ``alog()``, ``adebug()``, ``ainfo()``, and so forth.
.. versionchanged:: 22.3.0
String interpolation is only attempted if positional arguments are
passed.
.. versionadded:: 25.5.0
String interpolation using dictionary-based arguments if the first and
only argument is a mapping.
"""
def bind(self, **new_values: Any) -> FilteringBoundLogger:
"""
Return a new logger with *new_values* added to the existing ones.
.. versionadded:: 22.1.0
"""
def unbind(self, *keys: str) -> FilteringBoundLogger:
"""
Return a new logger with *keys* removed from the context.
.. versionadded:: 22.1.0
"""
def try_unbind(self, *keys: str) -> FilteringBoundLogger:
"""
Like :meth:`unbind`, but best effort: missing keys are ignored.
.. versionadded:: 22.1.0
"""
def new(self, **new_values: Any) -> FilteringBoundLogger:
"""
Clear context and binds *initial_values* using `bind`.
.. versionadded:: 22.1.0
"""
def is_enabled_for(self, level: int) -> bool:
"""
Check whether the logger is enabled for *level*.
.. versionadded:: 25.1.0
"""
def get_effective_level(self) -> int:
"""
Return the effective level of the logger.
.. versionadded:: 25.1.0
"""
def debug(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **debug** level.
"""
async def adebug(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **debug** level.
..versionadded:: 22.2.0
"""
def info(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **info** level.
"""
async def ainfo(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **info** level.
..versionadded:: 22.2.0
"""
def warning(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **warn** level.
"""
async def awarning(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **warn** level.
..versionadded:: 22.2.0
"""
def warn(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **warn** level.
"""
async def awarn(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **warn** level.
..versionadded:: 22.2.0
"""
def error(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **error** level.
"""
async def aerror(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **error** level.
..versionadded:: 22.2.0
"""
def err(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **error** level.
"""
def fatal(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **critical** level.
"""
async def afatal(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **critical** level.
..versionadded:: 22.2.0
"""
def exception(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **error** level and ensure that
``exc_info`` is set in the event dictionary.
"""
async def aexception(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **error** level and ensure that
``exc_info`` is set in the event dictionary.
..versionadded:: 22.2.0
"""
def critical(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **critical** level.
"""
async def acritical(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **critical** level.
..versionadded:: 22.2.0
"""
def msg(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **info** level.
"""
async def amsg(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **info** level.
"""
def log(self, level: int, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at *level*.
"""
async def alog(self, level: int, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at *level*.
"""
| FilteringBoundLogger |
python | django-guardian__django-guardian | guardian/testapp/tests/test_mixins.py | {
"start": 629,
"end": 784
} | class ____(View):
def get(self, request, *args, **kwargs):
raise DatabaseRemovedError("You've just allowed db to be removed!")
| RemoveDatabaseView |
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 12656,
"end": 13788
} | class ____(RuntimeError):
"""
Represents a unsuccessful match.
The `FailedMatch` object is returned to represent a failure to match a
pattern.
"""
format_string: str
def __init__(self, format_string: str, *args: Any, **kwargs: Any) -> None:
self.format_string = format_string
# We want to construct error messages lazily instead of eagerly, as
# constructing them eagerly can significantly worsen compile times.
if len(format_string) > 200:
raise RuntimeError(
f"Format string too long - use lazy construction of strings instead. Format string is\n {format_string}"
)
self.args = args
self.kwargs = kwargs
def __str__(self) -> str:
return self.format_string.format(*self.args, **self.kwargs)
def __bool__(self) -> bool:
return False
MatchResult = Union[Match, FailedMatch]
def is_match(m: MatchResult) -> TypeIs[Match]:
"""
TypeIs cannot act on `self`. Thus this function exists to let mypy
recognize FailedMatch.__bool__ as a TypeIs.
"""
return bool(m)
| FailedMatch |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 28741,
"end": 29882
} | class ____(PreTrainedModel):
config_class = Sam3TrackerVideoConfig
base_model_prefix = "sam3_tracker_video"
main_input_name = "pixel_values"
input_modalities = "video"
_supports_sdpa = True
_supports_flash_attn_2 = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Sam3TrackerVideoModel):
if module.no_memory_positional_encoding is not None:
init.zeros_(module.no_memory_positional_encoding)
if module.memory_temporal_positional_encoding is not None:
init.zeros_(module.memory_temporal_positional_encoding)
if module.no_object_pointer is not None:
init.zeros_(module.no_object_pointer)
if module.occlusion_spatial_embedding_parameter is not None:
init.zeros_(module.occlusion_spatial_embedding_parameter)
if isinstance(module, Sam3TrackerVideoMemoryFuserCXBlock):
if module.scale is not None:
init.zeros_(module.scale)
| Sam3TrackerVideoPreTrainedModel |
python | jupyterlab__jupyterlab | jupyterlab/labextensions.py | {
"start": 13700,
"end": 14226
} | class ____(BaseExtensionApp):
description = "List the installed labextensions"
verbose = Bool(False, help="Increase verbosity level.").tag(config=True)
flags = list_flags
def run_task(self):
list_extensions(
app_options=AppOptions(
app_dir=self.app_dir,
logger=self.log,
core_config=self.core_config,
labextensions_path=self.labextensions_path,
verbose=self.verbose,
)
)
| ListLabExtensionsApp |
python | fluentpython__example-code-2e | 22-dyn-attr-prop/bulkfood/bulkfood_v2prop.py | {
"start": 1458,
"end": 1809
} | class ____:
weight = quantity('weight') # <1>
price = quantity('price') # <2>
def __init__(self, description, weight, price):
self.description = description
self.weight = weight # <3>
self.price = price
def subtotal(self):
return self.weight * self.price # <4>
# end::LINEITEM_V2_PROP_CLASS[]
| LineItem |
python | pytorch__pytorch | torch/_lazy/closure.py | {
"start": 475,
"end": 5677
} | class ____(ClosureHandler):
"""Handler for Asynchronous Step Closures
Args:
max_queue_size: The maximum length of the closure queue after which
the training loop will block until closures are evaluated.
By default, a reasonable limit of a maximum of 100 on the queue.
This value can be set using the `XLA_MAX_ASYNC_QUEUE` environment
variable.
"""
def __init__(self, max_queue_size=100):
super().__init__()
self._closure_queue: Queue = Queue(
int(os.environ.get("LTC_MAX_ASYNC_QUEUE", max_queue_size))
)
self._closure_exception: Queue = Queue()
self._closure_lock = threading.Lock()
self._closure_event_loop_finished = threading.Event()
self._closure_event_loop = None
def start_event_loop(self):
"""Start closure event loop if not started"""
if self._closure_event_loop is None:
def event_loop():
# Run loop until closure event is set and closure queue is empty
while True:
try:
closure = self._closure_queue.get(block=True, timeout=3)
closure()
self._closure_queue.task_done()
except EmptyQueue:
with self._closure_lock:
if self._closure_queue.empty():
self._closure_event_loop_finished.set()
return
except Exception as e:
self._closure_exception.put(e)
return
self._closure_event_loop = threading.Thread(
target=event_loop
) # pyrefly: ignore [bad-assignment]
self._closure_event_loop.start() # pyrefly: ignore [missing-attribute]
def run(self, closure):
with self._closure_lock:
self._closure_queue.put(closure, block=True)
if (
self._closure_event_loop is None
or not self._closure_event_loop.is_alive()
):
try:
e = self._closure_exception.get(block=False)
raise RuntimeError(
"Cannot run asynchronous closure due to previously raised exception"
) from e
except EmptyQueue:
self._closure_event_loop = None
self.start_event_loop()
def add_step_closure(closure, args=(), run_async=False):
"""Adds a closure to the list of the ones to be run at the end of the step.
Many times during model training there is the need to print/report (print to
console, post to tensorboard, etc...) information which require the content of
intermediary tensors to be inspected.
Inspecting different tensors content in different points of the model code
requires many executions and typically causes performance issues.
Adding a step closure will ensure that it will be run after the barrier, when
all the live tensors will be already materialized to device data.
Live tensors which will include the ones captured by the closure arguments.
So using `add_step_closure()` will ensure a single execution will be
performed, even when multiple closures are queued, requiring multiple tensors
to be inspected.
Step closures will be run sequentially in the order they have been queued.
Note that even though using this API the execution will be optimized, it is
advised to throttle the printing/reporting events once every N steps.
Args:
closure (callable): The function to be called.
args (tuple): The arguments to be passed to the closure.
run_async: If True, run the closure asynchronously.
"""
devctx = get_device_context()
closures_type = "async_step_closures" if run_async else "step_closures"
step_closures = getattr(devctx, closures_type, None)
if step_closures is None:
step_closures = []
setattr(devctx, closures_type, step_closures)
step_closures.append(lambda a=args: closure(*a))
def run_step_closures():
devctx = get_device_context()
async_step_closures = getattr(devctx, "async_step_closures", None)
if async_step_closures is not None:
devctx.async_step_closures = [] # type: ignore[attr-defined]
async_closure_handler = getattr(devctx, "async_closure_handler", None)
if async_closure_handler is None:
async_closure_handler = AsyncClosureHandler()
devctx.async_closure_handler = async_closure_handler # type: ignore[attr-defined]
async_closure_handler(async_step_closures)
step_closures = getattr(devctx, "step_closures", None)
if step_closures is not None:
devctx.step_closures = [] # type: ignore[attr-defined]
closure_handler = getattr(devctx, "closure_handler", None)
if closure_handler is None:
closure_handler = ClosureHandler()
devctx.closure_handler = closure_handler # type: ignore[attr-defined]
closure_handler(step_closures)
return devctx
| AsyncClosureHandler |
python | sympy__sympy | sympy/physics/biomechanics/curve.py | {
"start": 46209,
"end": 54552
} | class ____(CharacteristicCurveFunction):
r"""Muscle fiber force-velocity curve based on De Groote et al., 2016 [1]_.
Explanation
===========
Gives the normalized muscle fiber force produced as a function of
normalized tendon velocity.
The function is defined by the equation:
$fv^M = c_0 \log{\left(c_1 \tilde{v}_m + c_2\right) + \sqrt{\left(c_1 \tilde{v}_m + c_2\right)^2 + 1}} + c_3$
with constant values of $c_0 = -0.318$, $c_1 = -8.149$, $c_2 = -0.374$, and
$c_3 = 0.886$.
While it is possible to change the constant values, these were carefully
selected in the original publication to give the characteristic curve
specific and required properties. For example, the function produces a
normalized muscle fiber force of 1 when the muscle fibers are contracting
isometrically (they have an extension rate of 0).
Examples
========
The preferred way to instantiate :class:`FiberForceVelocityDeGroote2016` is using
the :meth:`~.with_defaults` constructor because this will automatically populate
the constants within the characteristic curve equation with the floating
point values from the original publication. This constructor takes a single
argument corresponding to normalized muscle fiber extension velocity. We'll
create a :class:`~.Symbol` called ``v_M_tilde`` to represent this.
>>> from sympy import Symbol
>>> from sympy.physics.biomechanics import FiberForceVelocityDeGroote2016
>>> v_M_tilde = Symbol('v_M_tilde')
>>> fv_M = FiberForceVelocityDeGroote2016.with_defaults(v_M_tilde)
>>> fv_M
FiberForceVelocityDeGroote2016(v_M_tilde, -0.318, -8.149, -0.374, 0.886)
It's also possible to populate the four constants with your own values too.
>>> from sympy import symbols
>>> c0, c1, c2, c3 = symbols('c0 c1 c2 c3')
>>> fv_M = FiberForceVelocityDeGroote2016(v_M_tilde, c0, c1, c2, c3)
>>> fv_M
FiberForceVelocityDeGroote2016(v_M_tilde, c0, c1, c2, c3)
You don't just have to use symbols as the arguments, it's also possible to
use expressions. Let's create a new pair of symbols, ``v_M`` and
``v_M_max``, representing muscle fiber extension velocity and maximum
muscle fiber extension velocity respectively. We can then represent
``v_M_tilde`` as an expression, the ratio of these.
>>> v_M, v_M_max = symbols('v_M v_M_max')
>>> v_M_tilde = v_M/v_M_max
>>> fv_M = FiberForceVelocityDeGroote2016.with_defaults(v_M_tilde)
>>> fv_M
FiberForceVelocityDeGroote2016(v_M/v_M_max, -0.318, -8.149, -0.374, 0.886)
To inspect the actual symbolic expression that this function represents,
we can call the :meth:`~.doit` method on an instance. We'll use the keyword
argument ``evaluate=False`` as this will keep the expression in its
canonical form and won't simplify any constants.
>>> fv_M.doit(evaluate=False)
0.886 - 0.318*log(-8.149*v_M/v_M_max - 0.374 + sqrt(1 + (-8.149*v_M/v_M_max
- 0.374)**2))
The function can also be differentiated. We'll differentiate with respect
to v_M using the ``diff`` method on an instance with the single positional
argument ``v_M``.
>>> fv_M.diff(v_M)
2.591382*(1 + (-8.149*v_M/v_M_max - 0.374)**2)**(-1/2)/v_M_max
References
==========
.. [1] De Groote, F., Kinney, A. L., Rao, A. V., & Fregly, B. J., Evaluation
of direct collocation optimal control problem formulations for
solving the muscle redundancy problem, Annals of biomedical
engineering, 44(10), (2016) pp. 2922-2936
"""
@classmethod
def with_defaults(cls, v_M_tilde):
r"""Recommended constructor that will use the published constants.
Explanation
===========
Returns a new instance of the muscle fiber force-velocity function
using the four constant values specified in the original publication.
These have the values:
$c_0 = -0.318$
$c_1 = -8.149$
$c_2 = -0.374$
$c_3 = 0.886$
Parameters
==========
v_M_tilde : Any (sympifiable)
Normalized muscle fiber extension velocity.
"""
c0 = Float('-0.318')
c1 = Float('-8.149')
c2 = Float('-0.374')
c3 = Float('0.886')
return cls(v_M_tilde, c0, c1, c2, c3)
@classmethod
def eval(cls, v_M_tilde, c0, c1, c2, c3):
"""Evaluation of basic inputs.
Parameters
==========
v_M_tilde : Any (sympifiable)
Normalized muscle fiber extension velocity.
c0 : Any (sympifiable)
The first constant in the characteristic equation. The published
value is ``-0.318``.
c1 : Any (sympifiable)
The second constant in the characteristic equation. The published
value is ``-8.149``.
c2 : Any (sympifiable)
The third constant in the characteristic equation. The published
value is ``-0.374``.
c3 : Any (sympifiable)
The fourth constant in the characteristic equation. The published
value is ``0.886``.
"""
pass
def _eval_evalf(self, prec):
"""Evaluate the expression numerically using ``evalf``."""
return self.doit(deep=False, evaluate=False)._eval_evalf(prec)
def doit(self, deep=True, evaluate=True, **hints):
"""Evaluate the expression defining the function.
Parameters
==========
deep : bool
Whether ``doit`` should be recursively called. Default is ``True``.
evaluate : bool.
Whether the SymPy expression should be evaluated as it is
constructed. If ``False``, then no constant folding will be
conducted which will leave the expression in a more numerically-
stable for values of ``v_M_tilde`` that correspond to a sensible
operating range for a musculotendon. Default is ``True``.
**kwargs : dict[str, Any]
Additional keyword argument pairs to be recursively passed to
``doit``.
"""
v_M_tilde, *constants = self.args
if deep:
hints['evaluate'] = evaluate
v_M_tilde = v_M_tilde.doit(deep=deep, **hints)
c0, c1, c2, c3 = [c.doit(deep=deep, **hints) for c in constants]
else:
c0, c1, c2, c3 = constants
if evaluate:
return c0*log(c1*v_M_tilde + c2 + sqrt((c1*v_M_tilde + c2)**2 + 1)) + c3
return c0*log(c1*v_M_tilde + c2 + sqrt(UnevaluatedExpr(c1*v_M_tilde + c2)**2 + 1)) + c3
def fdiff(self, argindex=1):
"""Derivative of the function with respect to a single argument.
Parameters
==========
argindex : int
The index of the function's arguments with respect to which the
derivative should be taken. Argument indexes start at ``1``.
Default is ``1``.
"""
v_M_tilde, c0, c1, c2, c3 = self.args
if argindex == 1:
return c0*c1/sqrt(UnevaluatedExpr(c1*v_M_tilde + c2)**2 + 1)
elif argindex == 2:
return log(
c1*v_M_tilde + c2
+ sqrt(UnevaluatedExpr(c1*v_M_tilde + c2)**2 + 1)
)
elif argindex == 3:
return c0*v_M_tilde/sqrt(UnevaluatedExpr(c1*v_M_tilde + c2)**2 + 1)
elif argindex == 4:
return c0/sqrt(UnevaluatedExpr(c1*v_M_tilde + c2)**2 + 1)
elif argindex == 5:
return Integer(1)
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""Inverse function.
Parameters
==========
argindex : int
Value to start indexing the arguments at. Default is ``1``.
"""
return FiberForceVelocityInverseDeGroote2016
def _latex(self, printer):
"""Print a LaTeX representation of the function defining the curve.
Parameters
==========
printer : Printer
The printer to be used to print the LaTeX string representation.
"""
v_M_tilde = self.args[0]
_v_M_tilde = printer._print(v_M_tilde)
return r'\operatorname{fv}^M \left( %s \right)' % _v_M_tilde
| FiberForceVelocityDeGroote2016 |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/models.py | {
"start": 2386,
"end": 2621
} | class ____(models.Model):
uuid = models.UUIDField()
slug = models.SlugField()
url = models.URLField()
ipv4 = models.GenericIPAddressField(protocol="IPv4")
ipv6 = models.GenericIPAddressField(protocol="IPv6")
| OddFields |
python | google__pytype | pytype/tests/test_errors2.py | {
"start": 16713,
"end": 19550
} | class ____(test_base.BaseTest):
"""Tests for UnboundLocalError.
It is often confusing to users when a name error is logged due to a local
variable shadowing one from an outer scope and being referenced before its
local definition, e.g.:
def f():
x = 0
def g():
print(x) # name error!
x = 1
In this case, we add some more details to the error message.
"""
def test_function_in_function(self):
errors = self.CheckWithErrors("""
def f(x):
def g():
print(x) # name-error[e]
x = 0
""")
self.assertErrorSequences(
errors,
{
"e": [
"Add `nonlocal x` in function 'f.g' to",
"reference 'x' from function 'f'",
]
},
)
def test_global(self):
errors = self.CheckWithErrors("""
x = 0
def f():
print(x) # name-error[e]
x = 1
""")
self.assertErrorSequences(
errors,
{
"e": [
"Add `global x` in function 'f' to",
"reference 'x' from global scope",
]
},
)
def test_class_in_function(self):
errors = self.CheckWithErrors("""
def f():
x = 0
class C:
print(x) # name-error[e]
x = 1
""")
self.assertErrorSequences(
errors,
{
"e": [
"Add `nonlocal x` in class 'f.C' to",
"reference 'x' from function 'f'",
]
},
)
def test_deep_nesting(self):
errors = self.CheckWithErrors("""
def f():
def g():
x = 0
class C:
class D:
print(x) # name-error[e]
x = 1
""")
self.assertErrorSequences(
errors,
{
"e": [
"Add `nonlocal x` in class 'f.g.C.D' to",
"reference 'x' from function 'f.g'",
]
},
)
def test_duplicate_names(self):
# This is a plain old name error; make sure the UnboundLocalError details
# are *not* printed.
errors = self.CheckWithErrors("""
def f1():
def f2():
def f3():
x = 0
def f3():
def f4():
print(x) # name-error[e]
""")
self.assertErrorSequences(errors, {"e": ["Name 'x' is not defined"]})
def test_precedence(self):
errors = self.CheckWithErrors("""
def f():
x = 0
def g():
x = 1
def h():
print(x) # name-error[e]
x = 2
""")
self.assertErrorSequences(
errors,
{
"e": [
"Add `nonlocal x` in function 'f.g.h' to",
"reference 'x' from function 'f.g'",
]
},
)
| UnboundLocalErrorTest |
python | keras-team__keras | keras/src/distribution/distribution_lib_test.py | {
"start": 9623,
"end": 13745
} | class ____(testing.TestCase):
def setUp(self):
super().setUp()
self.devices = [f"cpu:{i}" for i in range(8)]
shape = (2, 4)
axis_names = ["data", "model"]
self.device_mesh = distribution_lib.DeviceMesh(
shape, axis_names, self.devices
)
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
def test_distribute_weights(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"])
layout_map[".*bias"] = distribution_lib.TensorLayout(["model"])
distribution = distribution_lib.ModelParallel(
layout_map=layout_map, batch_dim_name="data"
)
kernel = backend.Variable(initializer=np.arange(8, 4), name="kernel")
bias = backend.Variable(initializer=np.arange(4), name="bias")
rng_seed = backend.Variable(initializer=[0, 1], name="seed")
kernel_layout = distribution.get_variable_layout(kernel)
self.assertIs(kernel_layout.device_mesh, self.device_mesh)
self.assertEqual(kernel_layout.axes, (None, "model"))
bias_layout = distribution.get_variable_layout(bias)
self.assertIs(bias_layout.device_mesh, self.device_mesh)
self.assertEqual(bias_layout.axes, ("model",))
rng_seed_layout = distribution.get_variable_layout(rng_seed)
self.assertIs(rng_seed_layout.device_mesh, self.device_mesh)
self.assertEqual(rng_seed_layout.axes, (None,))
def test_distribute_data(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
distribution = distribution_lib.ModelParallel(
layout_map=layout_map, batch_dim_name="data"
)
data = np.arange(16).reshape((4, 2, 2))
data_layout = distribution.get_data_layout(data.shape)
self.assertIs(data_layout.device_mesh, self.device_mesh)
self.assertEqual(data_layout.axes, ("data", None, None))
def test_get_tensor_layout(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"])
layout_map[".*bias"] = distribution_lib.TensorLayout(["model"])
layout_map["/model/layer/tensor"] = ("data", None)
distribution = distribution_lib.ModelParallel(
layout_map=layout_map, batch_dim_name="data"
)
layout = distribution.get_tensor_layout("/model/layer/tensor")
self.assertIs(layout.device_mesh, self.device_mesh)
self.assertEqual(layout.axes, ("data", None))
layout = distribution.get_tensor_layout("/model/layer/other_tensor")
self.assertIsNone(layout)
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
def test_get_variable_layout_with_explicit_layout(self):
layout_map = distribution_lib.LayoutMap(self.device_mesh)
layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"])
distribution = distribution_lib.ModelParallel(
layout_map=layout_map, batch_dim_name="data"
)
explicit_mesh = distribution_lib.DeviceMesh((8,), ["x"], self.devices)
explicit_layout = distribution_lib.TensorLayout(["x"], explicit_mesh)
variable = backend.Variable(initializer=[1, 2, 3], name="kernel")
variable._layout = explicit_layout
variable_layout = distribution.get_variable_layout(variable)
self.assertIs(variable_layout.device_mesh, explicit_mesh)
self.assertEqual(variable_layout.axes, explicit_layout.axes)
def test_distribute_dataset(self):
# We can only verify the single worker/process case in OSS for now.
dataset = tf.data.Dataset.range(8)
layout_map = distribution_lib.LayoutMap(self.device_mesh)
distribution = distribution_lib.ModelParallel(
layout_map=layout_map, batch_dim_name="data"
)
distributed_dataset = distribution.distribute_dataset(dataset)
self.assertIs(dataset, distributed_dataset)
| ModelParallelDistributionTest |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 18553,
"end": 18852
} | class ____(BaseModel):
Instances: ClassVar[dict[str, WithClassVar]] = {}
"""
)
def test_recursive_model(create_module):
module = create_module(
# language=Python
"""
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
| WithClassVar |
python | pytorch__pytorch | test/test_jit_fuser_te.py | {
"start": 101790,
"end": 101847
} | class ____(JitCommonTestCase):
pass
| TestNNCOpInfoParent |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 59819,
"end": 60303
} | class ____(DelegatingLexer):
"""
Subclass of the `HandlebarsLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML+Handlebars"
aliases = ["html+handlebars"]
filenames = ['*.handlebars', '*.hbs']
mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']
def __init__(self, **options):
super(HandlebarsHtmlLexer, self).__init__(HtmlLexer, HandlebarsLexer, **options)
| HandlebarsHtmlLexer |
python | dask__distributed | distributed/tests/test_profile.py | {
"start": 6428,
"end": 7542
} | class ____:
co_filename: str
co_name: str
co_firstlineno: int
co_lnotab: bytes
co_lines_seq: Sequence[tuple[int, int, int | None]]
co_code: bytes
def co_lines(self) -> Iterator[tuple[int, int, int | None]]:
yield from self.co_lines_seq
FAKE_CODE = FakeCode(
co_filename="<stdin>",
co_name="example",
co_firstlineno=1,
# https://github.com/python/cpython/blob/b68431fadb3150134ac6ccbf501cdfeaf4c75678/Objects/lnotab_notes.txt#L84
# generated from:
# def example():
# for i in range(1):
# if i >= 0:
# pass
# example.__code__.co_lnotab
co_lnotab=b"\x00\x01\x0c\x01\x08\x01\x04\xfe",
# generated with list(example.__code__.co_lines())
co_lines_seq=[
(0, 12, 2),
(12, 20, 3),
(20, 22, 4),
(22, 24, None),
(24, 28, 2),
],
# used in dis.findlinestarts as bytecode_len = len(code.co_code)
# https://github.com/python/cpython/blob/6f345d363308e3e6ecf0ad518ea0fcc30afde2a8/Lib/dis.py#L457
co_code=bytes(28),
)
@dataclasses.dataclass(frozen=True)
| FakeCode |
python | streamlit__streamlit | lib/streamlit/runtime/runtime_util.py | {
"start": 1002,
"end": 2068
} | class ____(MarkdownFormattedException):
"""Exception raised when a websocket message is larger than the configured limit."""
def __init__(self, failed_msg_str: Any) -> None:
msg = self._get_message(failed_msg_str)
super().__init__(msg)
def _get_message(self, failed_msg_str: Any) -> str:
# This needs to have zero indentation otherwise the markdown will render incorrectly.
return (
f"""
**Data of size {len(failed_msg_str) / 1e6:.1f} MB exceeds the message size limit of
{get_max_message_size_bytes() / 1e6} MB.**
This is often caused by a large chart or dataframe. Please decrease the amount of data sent
to the browser, or increase the limit by setting the config option `server.maxMessageSize`.
[Click here to learn more about config options](https://docs.streamlit.io/develop/api-reference/configuration/config.toml).
_Note that increasing the limit may lead to long loading times and large memory consumption
of the client's browser and the Streamlit server._
"""
).strip("\n")
| MessageSizeError |
python | donnemartin__interactive-coding-challenges | stacks_queues/queue_list/queue_list.py | {
"start": 0,
"end": 103
} | class ____(object):
def __init__(self, data):
self.data = data
self.next = None
| Node |
python | coleifer__peewee | peewee.py | {
"start": 39064,
"end": 39300
} | class ____(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
return EntityFactory(instance._alias) # Implements __getattr__().
return self
| _DynamicEntity |
python | apache__airflow | devel-common/src/tests_common/test_utils/asserts.py | {
"start": 1513,
"end": 2032
} | class ____(NamedTuple):
"""QueriesTraceRecord holds information about the query executed in the context."""
module: str
name: str
lineno: int | None
@classmethod
def from_frame(cls, frame_summary: traceback.FrameSummary):
return cls(
module=frame_summary.filename.rsplit(os.sep, 1)[-1],
name=frame_summary.name,
lineno=frame_summary.lineno,
)
def __str__(self):
return f"{self.module}:{self.name}:{self.lineno}"
| QueriesTraceRecord |
python | neetcode-gh__leetcode | python/0721-accounts-merge.py | {
"start": 615,
"end": 1384
} | class ____:
def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
uf = UnionFind(len(accounts))
emailToAcc = {} # email -> index of acc
for i, a in enumerate(accounts):
for e in a[1:]:
if e in emailToAcc:
uf.union(i, emailToAcc[e])
else:
emailToAcc[e] = i
emailGroup = defaultdict(list) # index of acc -> list of emails
for e, i in emailToAcc.items():
leader = uf.find(i)
emailGroup[leader].append(e)
res = []
for i, emails in emailGroup.items():
name = accounts[i][0]
res.append([name] + sorted(emailGroup[i])) # array concat
return res
| Solution |
python | catalyst-team__catalyst | examples/detection/custom_runner.py | {
"start": 666,
"end": 1829
} | class ____(ConfigRunner):
"""Runner for CenterNet models."""
def get_loaders(self, stage: str):
"""Insert into loaders collate_fn.
Args:
stage (str): sage name
Returns:
ordered dict with torch.utils.data.DataLoader
"""
loaders = super().get_loaders(stage)
for item in loaders.values():
if hasattr(item.dataset, "collate_fn"):
item.collate_fn = item.dataset.collate_fn
return loaders
def handle_batch(self, batch):
"""Do a forward pass and compute loss.
Args:
batch (Dict[str, Any]): batch of data.
"""
heatmaps, regression = self.model(batch["image"])
loss, mask_loss, regression_loss = self.criterion(
heatmaps, regression, batch["heatmap"], batch["wh_regr"]
)
self.batch["predicted_heatmap"] = heatmaps
self.batch["predicted_regression"] = regression
self.batch_metrics["mask_loss"] = mask_loss.item()
self.batch_metrics["regression_loss"] = regression_loss.item()
self.batch_metrics["loss"] = loss
| CenterNetDetectionRunner |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 8862,
"end": 8962
} | class ____(GQLResult):
name: str
args: List[TypeInfoFragmentFieldsArgs]
| TypeInfoFragmentFields |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-arango-db/llama_index/readers/arango_db/base.py | {
"start": 193,
"end": 5389
} | class ____(BaseReader):
"""
Simple arangodb reader.
Concatenates each ArangoDB doc into Document used by LlamaIndex.
Args:
host: (Union[str, List[str]]) list of urls or url for connecting to the db
client: (Any) ArangoDB client
"""
def __init__(
self, host: Optional[Union[str, List[str]]] = None, client: Optional[Any] = None
) -> None:
"""Initialize with parameters."""
try:
from arango import ArangoClient
except ImportError as err:
raise ImportError(
"`arango` package not found, please run `pip install python-arango`"
) from err
host = host or "http://127.0.0.1:8529"
self.client = client or ArangoClient(hosts=host)
self.client = cast(ArangoClient, self.client)
def _flatten(self, texts: List[Union[str, List[str]]]) -> List[str]:
result = []
for text in texts:
result += text if isinstance(text, list) else [text]
return result
def lazy_load(
self,
username: str,
password: str,
db_name: str,
collection_name: str,
field_names: List[str] = ["text"],
separator: str = " ",
query_dict: Optional[Dict] = {},
max_docs: int = None,
metadata_names: Optional[List[str]] = None,
) -> Iterator[Document]:
"""
Lazy load data from ArangoDB.
Args:
username (str): for credentials.
password (str): for credentials.
db_name (str): name of the database.
collection_name (str): name of the collection.
field_names(List[str]): names of the fields to be concatenated.
Defaults to ["text"]
separator (str): separator to be used between fields.
Defaults to " "
query_dict (Optional[Dict]): query to filter documents. Read more
at [docs](https://docs.python-arango.com/en/main/specs.html#arango.collection.StandardCollection.find)
Defaults to empty dict
max_docs (int): maximum number of documents to load.
Defaults to None (no limit)
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document. Defaults to None
Returns:
List[Document]: A list of documents.
"""
db = self.client.db(name=db_name, username=username, password=password)
collection = db.collection(collection_name)
cursor = collection.find(filters=query_dict, limit=max_docs)
for item in cursor:
try:
texts = [str(item[name]) for name in field_names]
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in arangodb document."
) from err
texts = self._flatten(texts)
text = separator.join(texts)
if metadata_names is None:
yield Document(text=text)
else:
try:
metadata = {name: item[name] for name in metadata_names}
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in arangodb document."
) from err
yield Document(text=text, metadata=metadata)
def load_data(
self,
username: str,
password: str,
db_name: str,
collection_name: str,
field_names: List[str] = ["text"],
separator: str = " ",
query_dict: Optional[Dict] = {},
max_docs: int = None,
metadata_names: Optional[List[str]] = None,
) -> List[Document]:
"""
Load data from the ArangoDB.
Args:
username (str): for credentials.
password (str): for credentials.
db_name (str): name of the database.
collection_name (str): name of the collection.
field_names(List[str]): names of the fields to be concatenated.
Defaults to ["text"]
separator (str): separator to be used between fields.
Defaults to ""
query_dict (Optional[Dict]): query to filter documents. Read more
at [docs](https://docs.python-arango.com/en/main/specs.html#arango.collection.StandardCollection.find)
Defaults to empty dict
max_docs (int): maximum number of documents to load.
Defaults to 0 (no limit)
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document. Defaults to None
Returns:
List[Document]: A list of documents.
"""
return list(
self.lazy_load(
username,
password,
db_name,
collection_name,
field_names,
separator,
query_dict,
max_docs,
metadata_names,
)
)
| SimpleArangoDBReader |
python | huggingface__transformers | tests/models/clip/test_modeling_clip.py | {
"start": 11313,
"end": 15197
} | class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return CLIPTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = CLIPTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_projection(self, config, input_ids, input_mask):
model = CLIPTextModelWithProjection(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| CLIPTextModelTester |
python | sphinx-doc__sphinx | sphinx/transforms/i18n.py | {
"start": 24112,
"end": 24827
} | class ____(SphinxTransform):
"""Calculate the number of translated and untranslated nodes."""
default_priority = 25 # MUST happen after Locale
def apply(self, **kwargs: Any) -> None:
from sphinx.builders.gettext import MessageCatalogBuilder
if issubclass(self.env._builder_cls, MessageCatalogBuilder):
return
total = translated = 0
for node in NodeMatcher(nodes.Element, translated=Any).findall(self.document):
total += 1
if node['translated']:
translated += 1
self.document['translation_progress'] = {
'total': total,
'translated': translated,
}
| TranslationProgressTotaliser |
python | doocs__leetcode | solution/0600-0699/0656.Coin Path/Solution.py | {
"start": 0,
"end": 654
} | class ____:
def cheapestJump(self, coins: List[int], maxJump: int) -> List[int]:
if coins[-1] == -1:
return []
n = len(coins)
f = [inf] * n
f[-1] = coins[-1]
for i in range(n - 2, -1, -1):
if coins[i] != -1:
for j in range(i + 1, min(n, i + maxJump + 1)):
if f[i] > f[j] + coins[i]:
f[i] = f[j] + coins[i]
if f[0] == inf:
return []
ans = []
s = f[0]
for i in range(n):
if f[i] == s:
s -= coins[i]
ans.append(i + 1)
return ans
| Solution |
python | sympy__sympy | sympy/tensor/array/sparse_ndim_array.py | {
"start": 4221,
"end": 6387
} | class ____(MutableNDimArray, SparseNDimArray):
def __new__(cls, iterable=None, shape=None, **kwargs):
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
self = object.__new__(cls)
self._shape = shape
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list)
# Sparse array:
if isinstance(flat_list, (dict, Dict)):
self._sparse_array = dict(flat_list)
return self
self._sparse_array = {}
for i, el in enumerate(flatten(flat_list)):
if el != 0:
self._sparse_array[i] = _sympify(el)
return self
def __setitem__(self, index, value):
"""Allows to set items to MutableDenseNDimArray.
Examples
========
>>> from sympy import MutableSparseNDimArray
>>> a = MutableSparseNDimArray.zeros(2, 2)
>>> a[0, 0] = 1
>>> a[1, 1] = 1
>>> a
[[1, 0], [0, 1]]
"""
if isinstance(index, tuple) and any(isinstance(i, slice) for i in index):
value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value)
for i in eindices:
other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None]
other_value = value[other_i]
complete_index = self._parse_index(i)
if other_value != 0:
self._sparse_array[complete_index] = other_value
elif complete_index in self._sparse_array:
self._sparse_array.pop(complete_index)
else:
index = self._parse_index(index)
value = _sympify(value)
if value == 0 and index in self._sparse_array:
self._sparse_array.pop(index)
else:
self._sparse_array[index] = value
def as_immutable(self):
return ImmutableSparseNDimArray(self)
@property
def free_symbols(self):
return {i for j in self._sparse_array.values() for i in j.free_symbols}
| MutableSparseNDimArray |
python | huggingface__transformers | src/transformers/models/blenderbot_small/modeling_blenderbot_small.py | {
"start": 10176,
"end": 13337
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: BlenderbotSmallConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BlenderbotSmallAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
layer_idx=layer_idx,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
| BlenderbotSmallEncoderLayer |
python | ZoranPandovski__al-go-rithms | sort/merge_sort/python/mergesort_LL.py | {
"start": 158,
"end": 2336
} | class ____:
def __init__(self):
self.head = None
# push new value to linked list
# using append method
def append(self, new_value):
# Allocate new node
new_node = Node(new_value)
# if head is None, initialize it to new node
if self.head is None:
self.head = new_node
return
curr_node = self.head
while curr_node.next is not None:
curr_node = curr_node.next
# Append the new node at the end
# of the linked list
curr_node.next = new_node
def sortedMerge(self, a, b):
result = None
# Base cases
if a == None:
return b
if b == None:
return a
# pick either a or b and recur..
if a.data <= b.data:
result = a
result.next = self.sortedMerge(a.next, b)
else:
result = b
result.next = self.sortedMerge(a, b.next)
return result
def mergeSort(self, h):
# Base case if head is None
if h == None or h.next == None:
return h
# get the middle of the list
middle = self.getMiddle(h)
nexttomiddle = middle.next
# set the next of middle node to None
middle.next = None
# Apply mergeSort on left list
left = self.mergeSort(h)
# Apply mergeSort on right list
right = self.mergeSort(nexttomiddle)
# Merge the left and right lists
sortedlist = self.sortedMerge(left, right)
return sortedlist
# Utility function to get the middle
# of the linked list
def getMiddle(self, head):
if (head == None):
return head
slow = head
fast = head
while (fast.next != None and
fast.next.next != None):
slow = slow.next
fast = fast.next.next
return slow
# Utility function to print the linked list
def printList(head):
if head is None:
print(' ')
return
curr_node = head
while curr_node:
print(curr_node.data, end = " ")
curr_node = curr_node.next
print(' ')
if __name__ == '__main__':
li = LinkedList()
# Let us create a unsorted linked list
# to test the functions created.
# The list shall be a: 2->3->20->5->10->15
li.append(15)
li.append(10)
li.append(5)
li.append(20)
li.append(3)
li.append(2)
# Apply merge Sort
li.head = li.mergeSort(li.head)
print ("Sorted Linked List is:")
printList(li.head)
| LinkedList |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 158624,
"end": 160952
} | class ____:
def test_describe(self):
assert self.locale.describe("now", only_distance=True) == "lige nu"
assert self.locale.describe("now", only_distance=False) == "lige nu"
def test_plurals(self):
assert self.locale._format_timeframe("now", 0) == "lige nu"
assert self.locale._format_timeframe("second", 1) == "et sekund"
assert self.locale._format_timeframe("seconds", 30) == "30 sekunder"
assert self.locale._format_timeframe("minute", 1) == "et minut"
assert self.locale._format_timeframe("minutes", 40) == "40 minutter"
assert self.locale._format_timeframe("hour", 1) == "en time"
assert self.locale._format_timeframe("hours", 23) == "23 timer"
assert self.locale._format_timeframe("day", 1) == "en dag"
assert self.locale._format_timeframe("days", 12) == "12 dage"
assert self.locale._format_timeframe("week", 1) == "en uge"
assert self.locale._format_timeframe("weeks", 38) == "38 uger"
assert self.locale._format_timeframe("month", 1) == "en måned"
assert self.locale._format_timeframe("months", 11) == "11 måneder"
assert self.locale._format_timeframe("year", 1) == "et år"
assert self.locale._format_timeframe("years", 12) == "12 år"
def test_ordinal_number(self):
assert self.locale.ordinal_number(0) == "0."
assert self.locale.ordinal_number(1) == "1."
def test_format_timeframe(self):
assert self.locale._format_timeframe("hours", 2) == "2 timer"
assert self.locale._format_timeframe("hour", 0) == "en time"
def test_format_relative_now(self):
result = self.locale._format_relative("lige nu", "now", 0)
assert result == "lige nu"
def test_format_relative_past(self):
result = self.locale._format_relative("en time", "hour", 1)
assert result == "om en time"
def test_format_relative_future(self):
result = self.locale._format_relative("en time", "hour", -1)
assert result == "for en time siden"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "lørdag"
assert self.locale.day_abbreviation(dt.isoweekday()) == "lør"
@pytest.mark.usefixtures("lang_locale")
| TestDanishLocale |
python | kamyu104__LeetCode-Solutions | Python/html-entity-parser.py | {
"start": 402,
"end": 588
} | class ____(object):
def __init__(self):
self.children = collections.defaultdict(AhoNode)
self.indices = []
self.suffix = None
self.output = None
| AhoNode |
python | getsentry__sentry | tests/sentry/eventtypes/test_error.py | {
"start": 198,
"end": 2869
} | class ____(TestCase):
def test_simple(self) -> None:
inst = ErrorEvent()
data = {"exception": {"values": [{"type": "Exception", "value": "Foo"}]}}
assert inst.get_metadata(data) == {
"type": "Exception",
"value": "Foo",
}
def test_no_exception_type_or_value(self) -> None:
inst = ErrorEvent()
data: dict[str, dict[str, Any]] = {
"exception": {"values": [{"type": None, "value": None, "stacktrace": {}}]}
}
assert inst.get_metadata(data) == {
"type": "Error",
"value": "",
}
def test_pulls_top_function(self) -> None:
inst = ErrorEvent()
data = {
"platform": "native",
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{"in_app": True, "function": "void top_func(int)"},
{"in_app": False, "function": "void invalid_func(int)"},
{"in_app": True, "function": "<unknown>"},
]
}
}
]
},
}
assert inst.get_metadata(data) == {
"type": "Error",
"value": "",
"function": "top_func",
}
def test_none_frame(self) -> None:
inst = ErrorEvent()
data = {"exception": {"values": [{"stacktrace": {"frames": [None]}}]}}
assert inst.get_metadata(data) == {
"type": "Error",
"value": "",
}
def test_multiple_exceptions_default(self) -> None:
inst = ErrorEvent()
data = {
"exception": {
"values": [
{"type": "Exception", "value": "Bar"},
{"type": "Exception", "value": "Foo"},
]
}
}
assert inst.get_metadata(data) == {
"type": "Exception",
"value": "Foo",
}
def test_multiple_exceptions_main_indicated(self) -> None:
inst = ErrorEvent()
data = {
"main_exception_id": 1,
"exception": {
"values": [
{"type": "Exception", "value": "Bar", "mechanism": {"exception_id": 1}},
{"type": "Exception", "value": "Foo", "mechanism": {"exception_id": 0}},
]
},
}
assert inst.get_metadata(data) == {
"type": "Exception",
"value": "Bar",
}
@django_db_all
| GetMetadataTest |
python | gawel__pyquery | tests/test_pyquery.py | {
"start": 12005,
"end": 12157
} | class ____(TestCase):
def test_comment(self):
doc = pq('<div><!-- foo --> bar</div>')
self.assertEqual(doc.text(), 'bar')
| TestComment |
python | Delgan__loguru | loguru/_handler.py | {
"start": 624,
"end": 12634
} | class ____:
def __init__(
self,
*,
sink,
name,
levelno,
formatter,
is_formatter_dynamic,
filter_,
colorize,
serialize,
enqueue,
multiprocessing_context,
error_interceptor,
exception_formatter,
id_,
levels_ansi_codes
):
self._name = name
self._sink = sink
self._levelno = levelno
self._formatter = formatter
self._is_formatter_dynamic = is_formatter_dynamic
self._filter = filter_
self._colorize = colorize
self._serialize = serialize
self._enqueue = enqueue
self._multiprocessing_context = multiprocessing_context
self._error_interceptor = error_interceptor
self._exception_formatter = exception_formatter
self._id = id_
self._levels_ansi_codes = levels_ansi_codes # Warning, reference shared among handlers
self._decolorized_format = None
self._precolorized_formats = {}
self._memoize_dynamic_format = None
self._stopped = False
self._lock = create_handler_lock()
self._lock_acquired = threading.local()
self._queue = None
self._queue_lock = None
self._confirmation_event = None
self._confirmation_lock = None
self._owner_process_pid = None
self._thread = None
if self._is_formatter_dynamic:
if self._colorize:
self._memoize_dynamic_format = memoize(prepare_colored_format)
else:
self._memoize_dynamic_format = memoize(prepare_stripped_format)
else:
if self._colorize:
for level_name in self._levels_ansi_codes:
self.update_format(level_name)
else:
self._decolorized_format = self._formatter.strip()
if self._enqueue:
if self._multiprocessing_context is None:
self._queue = multiprocessing.SimpleQueue()
self._confirmation_event = multiprocessing.Event()
self._confirmation_lock = multiprocessing.Lock()
else:
self._queue = self._multiprocessing_context.SimpleQueue()
self._confirmation_event = self._multiprocessing_context.Event()
self._confirmation_lock = self._multiprocessing_context.Lock()
self._queue_lock = create_handler_lock()
self._owner_process_pid = os.getpid()
self._thread = Thread(
target=self._queued_writer, daemon=True, name="loguru-writer-%d" % self._id
)
self._thread.start()
def __repr__(self):
return "(id=%d, level=%d, sink=%s)" % (self._id, self._levelno, self._name)
@contextmanager
def _protected_lock(self):
"""Acquire the lock, but fail fast if its already acquired by the current thread."""
if getattr(self._lock_acquired, "acquired", False):
raise RuntimeError(
"Could not acquire internal lock because it was already in use (deadlock avoided). "
"This likely happened because the logger was re-used inside a sink, a signal "
"handler or a '__del__' method. This is not permitted because the logger and its "
"handlers are not re-entrant."
)
self._lock_acquired.acquired = True
try:
with self._lock:
yield
finally:
self._lock_acquired.acquired = False
def emit(self, record, level_id, from_decorator, is_raw, colored_message):
    """Format *record* and hand it to the sink (directly, or via the writer queue)."""
    try:
        # Cheap rejections first: severity threshold, then the user-supplied filter.
        if self._levelno > record["level"].no:
            return

        if self._filter is not None:
            if not self._filter(record):
                return

        if self._is_formatter_dynamic:
            # A dynamic formatter is a callable producing the format per-record.
            dynamic_format = self._formatter(record)

        # Work on a shallow copy so formatting fields can be rewritten safely.
        formatter_record = record.copy()

        if not record["exception"]:
            formatter_record["exception"] = ""
        else:
            type_, value, tb = record["exception"]
            formatter = self._exception_formatter
            lines = formatter.format_exception(type_, value, tb, from_decorator=from_decorator)
            formatter_record["exception"] = "".join(lines)

        # A pre-colored message is only usable if it still matches the plain text.
        if colored_message is not None and colored_message.stripped != record["message"]:
            colored_message = None

        if is_raw:
            # Raw mode: message is emitted as-is, optionally colorized.
            if colored_message is None or not self._colorize:
                formatted = record["message"]
            else:
                ansi_level = self._levels_ansi_codes[level_id]
                formatted = colored_message.colorize(ansi_level)
        elif self._is_formatter_dynamic:
            # Dynamic format: memoized preparation keyed on the format string.
            if not self._colorize:
                precomputed_format = self._memoize_dynamic_format(dynamic_format)
                formatted = precomputed_format.format_map(formatter_record)
            elif colored_message is None:
                ansi_level = self._levels_ansi_codes[level_id]
                _, precomputed_format = self._memoize_dynamic_format(dynamic_format, ansi_level)
                formatted = precomputed_format.format_map(formatter_record)
            else:
                ansi_level = self._levels_ansi_codes[level_id]
                formatter, precomputed_format = self._memoize_dynamic_format(
                    dynamic_format, ansi_level
                )
                coloring_message = formatter.make_coloring_message(
                    record["message"], ansi_level=ansi_level, colored_message=colored_message
                )
                formatter_record["message"] = coloring_message
                formatted = precomputed_format.format_map(formatter_record)
        else:
            # Static format: use the format pre-computed at handler creation.
            if not self._colorize:
                precomputed_format = self._decolorized_format
                formatted = precomputed_format.format_map(formatter_record)
            elif colored_message is None:
                ansi_level = self._levels_ansi_codes[level_id]
                precomputed_format = self._precolorized_formats[level_id]
                formatted = precomputed_format.format_map(formatter_record)
            else:
                ansi_level = self._levels_ansi_codes[level_id]
                precomputed_format = self._precolorized_formats[level_id]
                coloring_message = self._formatter.make_coloring_message(
                    record["message"], ansi_level=ansi_level, colored_message=colored_message
                )
                formatter_record["message"] = coloring_message
                formatted = precomputed_format.format_map(formatter_record)

        if self._serialize:
            formatted = self._serialize_record(formatted, record)

        str_record = Message(formatted)
        str_record.record = record

        with self._protected_lock():
            if self._stopped:
                return
            if self._enqueue:
                # Queued mode: the writer thread performs the actual sink write.
                self._queue.put(str_record)
            else:
                self._sink.write(str_record)
    except Exception:
        if not self._error_interceptor.should_catch():
            raise
        self._error_interceptor.print(record)
def stop(self):
    """Permanently disable the handler and release the sink's resources."""
    with self._protected_lock():
        self._stopped = True
        if self._enqueue:
            # Only the process that owns the queue/thread may tear them down.
            if self._owner_process_pid != os.getpid():
                return
            self._queue.put(None)  # sentinel: ask the writer thread to exit
            self._thread.join()
            if hasattr(self._queue, "close"):
                self._queue.close()
        self._sink.stop()
def complete_queue(self):
    """Block until the writer thread confirms the queue has been drained.

    No-op when enqueueing is disabled.
    """
    if self._enqueue:
        with self._confirmation_lock:
            # ``True`` is a marker asking the writer to set the event once
            # every message queued before it has been written.
            self._queue.put(True)
            self._confirmation_event.wait()
            self._confirmation_event.clear()
def tasks_to_complete(self):
    """Return the sink's pending tasks, guarded by the appropriate lock."""
    if self._enqueue and self._owner_process_pid != os.getpid():
        # Child processes do not own the queue: nothing to wait on here.
        return []
    if self._enqueue:
        guard = self._queue_lock
    else:
        guard = self._protected_lock()
    with guard:
        return self._sink.tasks_to_complete()
def update_format(self, level_id):
    """Refresh the pre-colorized format cached for *level_id*.

    Only meaningful for colorizing handlers with a static format string;
    otherwise this is a no-op.
    """
    if self._colorize and not self._is_formatter_dynamic:
        ansi_code = self._levels_ansi_codes[level_id]
        self._precolorized_formats[level_id] = self._formatter.colorize(ansi_code)
@property
def levelno(self):
    # Minimum numeric severity this handler accepts (records below are dropped).
    return self._levelno
@staticmethod
def _serialize_record(text, record):
    """Render the record as a single JSON line wrapping the formatted *text*.

    Non-JSON-native values fall back to ``str`` via ``default=str``.
    """
    exception = record["exception"]
    if exception is not None:
        exception = {
            "type": None if exception.type is None else exception.type.__name__,
            "value": exception.value,
            "traceback": bool(exception.traceback),
        }
    # Build the nested sub-objects first; insertion order is preserved so the
    # emitted JSON is identical to a one-shot literal.
    elapsed = {"repr": record["elapsed"], "seconds": record["elapsed"].total_seconds()}
    file_info = {"name": record["file"].name, "path": record["file"].path}
    level = {
        "icon": record["level"].icon,
        "name": record["level"].name,
        "no": record["level"].no,
    }
    process = {"id": record["process"].id, "name": record["process"].name}
    thread = {"id": record["thread"].id, "name": record["thread"].name}
    time_info = {"repr": record["time"], "timestamp": record["time"].timestamp()}
    serializable = {
        "text": text,
        "record": {
            "elapsed": elapsed,
            "exception": exception,
            "extra": record["extra"],
            "file": file_info,
            "function": record["function"],
            "level": level,
            "line": record["line"],
            "message": record["message"],
            "module": record["module"],
            "name": record["name"],
            "process": process,
            "thread": thread,
            "time": time_info,
        },
    }
    return json.dumps(serializable, default=str, ensure_ascii=False) + "\n"
def _queued_writer(self):
    """Writer-thread loop: drain the queue and forward each message to the sink."""
    message = None
    queue = self._queue
    # We need to use a lock to protect sink during fork.
    # Particularly, writing to stderr may lead to deadlock in child process.
    lock = self._queue_lock
    while True:
        try:
            message = queue.get()
        except Exception:
            # A message failed to deserialize/arrive; report and keep serving.
            with lock:
                self._error_interceptor.print(None)
            continue
        if message is None:
            # Sentinel pushed by stop(): terminate the thread.
            break
        if message is True:
            # Marker pushed by complete_queue(): confirm the queue is drained.
            self._confirmation_event.set()
            continue
        with lock:
            try:
                self._sink.write(message)
            except Exception:
                self._error_interceptor.print(message.record)
def __getstate__(self):
    """Pickle support: blank out members that cannot cross process boundaries."""
    state = dict(self.__dict__)
    for key in ("_lock", "_lock_acquired", "_memoize_dynamic_format"):
        state[key] = None
    if self._enqueue:
        # In queued mode the child process talks to the queue only; the sink
        # and its writer thread stay in the owner process.
        for key in ("_sink", "_thread", "_owner_process", "_queue_lock"):
            state[key] = None
    return state
def __setstate__(self, state):
    """Pickle support: restore the attributes and rebuild unpicklable members."""
    self.__dict__.update(state)
    self._lock = create_handler_lock()
    self._lock_acquired = threading.local()
    if self._enqueue:
        self._queue_lock = create_handler_lock()
    if self._is_formatter_dynamic:
        # Recreate the memoized format-preparation cache dropped by __getstate__.
        preparer = prepare_colored_format if self._colorize else prepare_stripped_format
        self._memoize_dynamic_format = memoize(preparer)
| Handler |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 45596,
"end": 48953
class ____(ConvBertPreTrainedModel):
    """ConvBERT with a span-classification head for extractive question answering.

    A single linear layer maps each token's hidden state to start/end logits.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.convbert = ConvBertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, QuestionAnsweringModelOutput]:
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = encoder_outputs[0]
        # Project every token to two scores, then split into start/end logits.
        span_logits = self.qa_outputs(hidden_states)
        start_logits, end_logits = span_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + encoder_outputs[1:]
            if total_loss is not None:
                return (total_loss,) + output
            return output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
# Public names re-exported by this module (consumed by ``import *`` and the
# lazy module loader).
__all__ = [
    "ConvBertForMaskedLM",
    "ConvBertForMultipleChoice",
    "ConvBertForQuestionAnswering",
    "ConvBertForSequenceClassification",
    "ConvBertForTokenClassification",
    "ConvBertLayer",
    "ConvBertModel",
    "ConvBertPreTrainedModel",
]
| ConvBertForQuestionAnswering |
python | encode__django-rest-framework | rest_framework/relations.py | {
"start": 2069,
"end": 2742
class ____:
    """Mock object carrying only a primary key.

    Used when just the pk of an instance is needed, while still presenting an
    object with a ``.pk`` attribute so it keeps the same interface as a
    regular model instance.
    """

    def __init__(self, pk):
        self.pk = pk

    def __str__(self):
        return "%s" % self.pk
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
# NOTE(review): presumably the keyword arguments forwarded when wrapping a
# relation in its "many" counterpart — confirm against the consumer.
MANY_RELATION_KWARGS = (
    'read_only', 'write_only', 'required', 'default', 'initial', 'source',
    'label', 'help_text', 'style', 'error_messages', 'allow_empty',
    'html_cutoff', 'html_cutoff_text'
)
| PKOnlyObject |
python | apache__airflow | providers/hashicorp/tests/unit/hashicorp/_internal_client/test_vault_client.py | {
"start": 1169,
"end": 63571
} | class ____:
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_version_wrong(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="The version is not supported: 4"):
_VaultClient(auth_type="approle", kv_engine_version=4)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_custom_mount_point(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(auth_type="userpass", mount_point="custom")
assert vault_client.mount_point == "custom"
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_version_one_init(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(auth_type="userpass", kv_engine_version=1)
assert vault_client.kv_engine_version == 1
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_default_session_retry(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="approle",
role_id="role",
url="http://localhost:8180",
secret_id="pass",
)
_ = vault_client.client
default_session = vault_client.kwargs["session"]
assert isinstance(default_session, Session)
adapter = default_session.get_adapter(url="http://localhost:8180")
assert isinstance(adapter, HTTPAdapter)
max_retries = adapter.max_retries
assert isinstance(max_retries, Retry)
assert (max_retries.total if max_retries.total else 0) > 1
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=default_session)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_approle(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="approle", role_id="role", url="http://localhost:8180", secret_id="pass", session=None
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.approle.login.assert_called_with(role_id="role", secret_id="pass")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_approle_different_auth_mount_point(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="approle",
role_id="role",
url="http://localhost:8180",
secret_id="pass",
auth_mount_point="other",
session=None,
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.approle.login.assert_called_with(role_id="role", secret_id="pass", mount_point="other")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_approle_missing_role(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="requires 'role_id'"):
_VaultClient(auth_type="approle", url="http://localhost:8180", secret_id="pass")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_aws_iam(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="aws_iam",
role_id="role",
url="http://localhost:8180",
key_id="user",
secret_id="pass",
session=None,
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.aws.iam_login.assert_called_with(
access_key="user",
secret_key="pass",
role="role",
)
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_aws_iam_different_auth_mount_point(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="aws_iam",
role_id="role",
url="http://localhost:8180",
key_id="user",
secret_id="pass",
auth_mount_point="other",
session=None,
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.aws.iam_login.assert_called_with(
access_key="user", secret_key="pass", role="role", mount_point="other"
)
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_aws_iam_different_region(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="aws_iam",
role_id="role",
url="http://localhost:8180",
key_id="user",
secret_id="pass",
session=None,
region="us-east-2",
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.aws.iam_login.assert_called_with(
access_key="user",
secret_key="pass",
role="role",
region="us-east-2",
)
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_azure(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="azure",
azure_tenant_id="tenant_id",
azure_resource="resource",
url="http://localhost:8180",
key_id="user",
secret_id="pass",
session=None,
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.azure.configure.assert_called_with(
tenant_id="tenant_id",
resource="resource",
client_id="user",
client_secret="pass",
)
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_azure_different_auth_mount_point(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="azure",
azure_tenant_id="tenant_id",
azure_resource="resource",
url="http://localhost:8180",
key_id="user",
secret_id="pass",
auth_mount_point="other",
session=None,
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.azure.configure.assert_called_with(
tenant_id="tenant_id",
resource="resource",
client_id="user",
client_secret="pass",
mount_point="other",
)
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_azure_missing_resource(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="requires 'azure_resource'"):
_VaultClient(
auth_type="azure",
azure_tenant_id="tenant_id",
url="http://localhost:8180",
key_id="user",
secret_id="pass",
)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_azure_missing_tenant_id(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="requires 'azure_tenant_id'"):
_VaultClient(
auth_type="azure",
azure_resource="resource",
url="http://localhost:8180",
key_id="user",
secret_id="pass",
)
@mock.patch("builtins.open", create=True)
@mock.patch("airflow.providers.google.cloud.utils.credentials_provider._get_scopes")
@mock.patch("airflow.providers.google.cloud.utils.credentials_provider.get_credentials_and_project_id")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac.Client")
@mock.patch("googleapiclient.discovery.build")
def test_gcp(self, mock_google_build, mock_hvac_client, mock_get_credentials, mock_get_scopes, mock_open):
# Mock the content of the file 'path.json'
mock_file = mock.MagicMock()
mock_file.read.return_value = '{"client_email": "service_account_email"}'
mock_open.return_value.__enter__.return_value = mock_file
mock_client = mock.MagicMock()
mock_hvac_client.return_value = mock_client
mock_get_scopes.return_value = ["scope1", "scope2"]
mock_get_credentials.return_value = ("credentials", "project_id")
# Mock the current time to use for iat and exp
current_time = int(time.time())
iat = current_time
exp = iat + 3600 # 1 hour after iat
# Mock the signJwt API to return the expected payload
mock_sign_jwt = (
mock_google_build.return_value.projects.return_value.serviceAccounts.return_value.signJwt
)
mock_sign_jwt.return_value.execute.return_value = {"signedJwt": "mocked_jwt"}
vault_client = _VaultClient(
auth_type="gcp",
gcp_key_path="path.json",
gcp_scopes="scope1,scope2",
role_id="role",
url="http://localhost:8180",
session=None,
)
# Preserve the original json.dumps
original_json_dumps = json.dumps
# Inject the mocked payload into the JWT signing process
with mock.patch("json.dumps") as mock_json_dumps:
def mocked_json_dumps(payload):
# Override the payload to inject controlled iat and exp values
payload["iat"] = iat
payload["exp"] = exp
return original_json_dumps(payload) # Use the original json.dumps
mock_json_dumps.side_effect = mocked_json_dumps
client = vault_client.client # Trigger the Vault client creation
# Validate that the HVAC client and other mocks are called correctly
mock_hvac_client.assert_called_with(url="http://localhost:8180", session=None)
mock_get_scopes.assert_called_with("scope1,scope2")
mock_get_credentials.assert_called_with(
key_path="path.json", keyfile_dict=None, scopes=["scope1", "scope2"]
)
# Extract the arguments passed to the mocked signJwt API
args, kwargs = mock_sign_jwt.call_args
payload = json.loads(kwargs["body"]["payload"])
# Assert iat and exp values are as expected
assert payload["iat"] == iat
assert payload["exp"] == exp
assert abs(payload["exp"] - (payload["iat"] + 3600)) < 10 # Validate exp is 3600 seconds after iat
client.auth.gcp.login.assert_called_with(role="role", jwt="mocked_jwt")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("builtins.open", create=True)
@mock.patch("airflow.providers.google.cloud.utils.credentials_provider._get_scopes")
@mock.patch("airflow.providers.google.cloud.utils.credentials_provider.get_credentials_and_project_id")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac.Client")
@mock.patch("googleapiclient.discovery.build")
def test_gcp_different_auth_mount_point(
self, mock_google_build, mock_hvac_client, mock_get_credentials, mock_get_scopes, mock_open
):
# Mock the content of the file 'path.json'
mock_file = mock.MagicMock()
mock_file.read.return_value = '{"client_email": "service_account_email"}'
mock_open.return_value.__enter__.return_value = mock_file
mock_client = mock.MagicMock()
mock_hvac_client.return_value = mock_client
mock_get_scopes.return_value = ["scope1", "scope2"]
mock_get_credentials.return_value = ("credentials", "project_id")
mock_sign_jwt = (
mock_google_build.return_value.projects.return_value.serviceAccounts.return_value.signJwt
)
mock_sign_jwt.return_value.execute.return_value = {"signedJwt": "mocked_jwt"}
# Generate realistic iat and exp values
current_time = int(time.time())
iat = current_time
exp = current_time + 3600 # 1 hour later
vault_client = _VaultClient(
auth_type="gcp",
gcp_key_path="path.json",
gcp_scopes="scope1,scope2",
role_id="role",
url="http://localhost:8180",
auth_mount_point="other",
session=None,
)
# Preserve the original json.dumps
original_json_dumps = json.dumps
# Inject the mocked payload into the JWT signing process
with mock.patch("json.dumps") as mock_json_dumps:
def mocked_json_dumps(payload):
# Override the payload to inject controlled iat and exp values
payload["iat"] = iat
payload["exp"] = exp
return original_json_dumps(payload) # Use the original json.dumps
mock_json_dumps.side_effect = mocked_json_dumps
client = vault_client.client # Trigger the Vault client creation
# Assertions
mock_hvac_client.assert_called_with(url="http://localhost:8180", session=None)
mock_get_scopes.assert_called_with("scope1,scope2")
mock_get_credentials.assert_called_with(
key_path="path.json", keyfile_dict=None, scopes=["scope1", "scope2"]
)
# Extract the arguments passed to the mocked signJwt API
args, kwargs = mock_sign_jwt.call_args
payload = json.loads(kwargs["body"]["payload"])
# Assert iat and exp values are as expected
assert payload["iat"] == iat
assert payload["exp"] == exp
assert abs(payload["exp"] - (payload["iat"] + 3600)) < 10 # Validate exp is 3600 seconds after iat
client.auth.gcp.login.assert_called_with(role="role", jwt="mocked_jwt", mount_point="other")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch(
"builtins.open", new_callable=mock_open, read_data='{"client_email": "service_account_email"}'
)
@mock.patch("airflow.providers.google.cloud.utils.credentials_provider._get_scopes")
@mock.patch("airflow.providers.google.cloud.utils.credentials_provider.get_credentials_and_project_id")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac.Client")
@mock.patch("googleapiclient.discovery.build")
def test_gcp_dict(
self, mock_google_build, mock_hvac_client, mock_get_credentials, mock_get_scopes, mock_file
):
mock_client = mock.MagicMock()
mock_hvac_client.return_value = mock_client
mock_get_scopes.return_value = ["scope1", "scope2"]
mock_get_credentials.return_value = ("credentials", "project_id")
mock_sign_jwt = (
mock_google_build.return_value.projects.return_value.serviceAccounts.return_value.signJwt
)
mock_sign_jwt.return_value.execute.return_value = {"signedJwt": "mocked_jwt"}
# Generate realistic iat and exp values
current_time = int(time.time())
iat = current_time
exp = current_time + 3600 # 1 hour later
vault_client = _VaultClient(
auth_type="gcp",
gcp_keyfile_dict={"client_email": "service_account_email"},
gcp_scopes="scope1,scope2",
role_id="role",
url="http://localhost:8180",
session=None,
)
# Preserve the original json.dumps
original_json_dumps = json.dumps
# Inject the mocked payload into the JWT signing process
with mock.patch("json.dumps") as mock_json_dumps:
def mocked_json_dumps(payload):
# Override the payload to inject controlled iat and exp values
payload["iat"] = iat
payload["exp"] = exp
return original_json_dumps(payload) # Use the original json.dumps
mock_json_dumps.side_effect = mocked_json_dumps
client = vault_client.client # Trigger the Vault client creation
# Assertions
mock_hvac_client.assert_called_with(url="http://localhost:8180", session=None)
mock_get_scopes.assert_called_with("scope1,scope2")
mock_get_credentials.assert_called_with(
key_path=None, keyfile_dict={"client_email": "service_account_email"}, scopes=["scope1", "scope2"]
)
# Extract the arguments passed to the mocked signJwt API
args, kwargs = mock_sign_jwt.call_args
payload = json.loads(kwargs["body"]["payload"])
# Assert iat and exp values are as expected
assert payload["iat"] == iat
assert payload["exp"] == exp
assert abs(payload["exp"] - (payload["iat"] + 3600)) < 10 # Validate exp is 3600 seconds after iat
client.auth.gcp.login.assert_called_with(role="role", jwt="mocked_jwt")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_github(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="github", token="s.7AU0I51yv1Q1lxOIg1F3ZRAS", url="http://localhost:8180", session=None
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.github.login.assert_called_with(token="s.7AU0I51yv1Q1lxOIg1F3ZRAS")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_github_different_auth_mount_point(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="github",
token="s.7AU0I51yv1Q1lxOIg1F3ZRAS",
url="http://localhost:8180",
auth_mount_point="other",
session=None,
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.github.login.assert_called_with(token="s.7AU0I51yv1Q1lxOIg1F3ZRAS", mount_point="other")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_github_missing_token(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="'github' authentication type requires 'token'"):
_VaultClient(auth_type="github", url="http://localhost:8180")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.Kubernetes")
def test_kubernetes_default_path(self, mock_kubernetes, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="kubernetes", kubernetes_role="kube_role", url="http://localhost:8180", session=None
)
with patch("builtins.open", mock_open(read_data="data")) as mock_file:
client = vault_client.client
mock_file.assert_called_with("/var/run/secrets/kubernetes.io/serviceaccount/token")
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
mock_kubernetes.assert_called_with(mock_client.adapter)
mock_kubernetes.return_value.login.assert_called_with(role="kube_role", jwt="data")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.Kubernetes")
def test_kubernetes(self, mock_kubernetes, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="kubernetes",
kubernetes_role="kube_role",
kubernetes_jwt_path="path",
url="http://localhost:8180",
session=None,
)
with patch("builtins.open", mock_open(read_data="data")) as mock_file:
client = vault_client.client
mock_file.assert_called_with("path")
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
mock_kubernetes.assert_called_with(mock_client.adapter)
mock_kubernetes.return_value.login.assert_called_with(role="kube_role", jwt="data")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.Kubernetes")
def test_kubernetes_different_auth_mount_point(self, mock_kubernetes, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="kubernetes",
kubernetes_role="kube_role",
kubernetes_jwt_path="path",
auth_mount_point="other",
url="http://localhost:8180",
session=None,
)
with patch("builtins.open", mock_open(read_data="data")) as mock_file:
client = vault_client.client
mock_file.assert_called_with("path")
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
mock_kubernetes.assert_called_with(mock_client.adapter)
mock_kubernetes.return_value.login.assert_called_with(
role="kube_role", jwt="data", mount_point="other"
)
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_kubernetes_missing_role(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="requires 'kubernetes_role'"):
_VaultClient(auth_type="kubernetes", kubernetes_jwt_path="path", url="http://localhost:8180")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_kubernetes_kubernetes_jwt_path_none(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="requires 'kubernetes_jwt_path'"):
_VaultClient(
auth_type="kubernetes",
kubernetes_role="kube_role",
kubernetes_jwt_path=None,
url="http://localhost:8180",
)
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_ldap(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="ldap", username="user", password="pass", url="http://localhost:8180", session=None
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.ldap.login.assert_called_with(username="user", password="pass")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_ldap_different_auth_mount_point(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="ldap",
username="user",
password="pass",
auth_mount_point="other",
url="http://localhost:8180",
session=None,
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.auth.ldap.login.assert_called_with(username="user", password="pass", mount_point="other")
client.is_authenticated.assert_called_with()
assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_radius_missing_host(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="radius_host"):
_VaultClient(auth_type="radius", radius_secret="pass", url="http://localhost:8180")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_radius_missing_secret(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="radius_secret"):
_VaultClient(auth_type="radius", radius_host="radhost", url="http://localhost:8180")
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_radius(self, mock_hvac):
        """Radius auth configures host/secret (port defaults to None) and uses KV v2."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_secret="pass",
            url="http://localhost:8180",
            session=None,
        )
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.auth.radius.configure.assert_called_with(host="radhost", secret="pass", port=None)
        client.is_authenticated.assert_called_with()
        assert vault_client.kv_engine_version == 2
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_radius_different_auth_mount_point(self, mock_hvac):
        """Radius auth forwards a custom ``auth_mount_point`` to the configure call."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_secret="pass",
            auth_mount_point="other",
            url="http://localhost:8180",
            session=None,
        )
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.auth.radius.configure.assert_called_with(
            host="radhost", secret="pass", port=None, mount_point="other"
        )
        client.is_authenticated.assert_called_with()
        assert vault_client.kv_engine_version == 2
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_radius_port(self, mock_hvac):
        """An explicit ``radius_port`` is forwarded to the radius configure call."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
            session=None,
        )
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.auth.radius.configure.assert_called_with(host="radhost", secret="pass", port=8110)
        client.is_authenticated.assert_called_with()
        assert vault_client.kv_engine_version == 2
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_token_missing_token(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with pytest.raises(VaultError, match="'token' authentication type requires 'token'"):
_VaultClient(auth_type="token", url="http://localhost:8180")
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_token(self, mock_hvac):
        """Token auth sets the token on the client; defaults: KV v2, mount 'secret'."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="token", token="s.7AU0I51yv1Q1lxOIg1F3ZRAS", url="http://localhost:8180", session=None
        )
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.is_authenticated.assert_called_with()
        assert client.token == "s.7AU0I51yv1Q1lxOIg1F3ZRAS"
        assert vault_client.kv_engine_version == 2
        assert vault_client.mount_point == "secret"
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_token_in_env(self, mock_hvac, monkeypatch):
        """Token auth falls back to the VAULT_TOKEN environment variable."""
        monkeypatch.setenv("VAULT_TOKEN", "s.7AU0I51yv1Q1lxOIg1F3ZRAS")
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        # No explicit token argument: the env var must be picked up instead.
        vault_client = _VaultClient(auth_type="token", url="http://localhost:8180", session=None)
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.is_authenticated.assert_called_with()
        assert client.token == "s.7AU0I51yv1Q1lxOIg1F3ZRAS"
        assert vault_client.kv_engine_version == 2
        assert vault_client.mount_point == "secret"
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_token_path(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with open("/tmp/test_token.txt", "w+") as the_file:
the_file.write("s.7AU0I51yv1Q1lxOIg1F3ZRAS")
vault_client = _VaultClient(
auth_type="token", token_path="/tmp/test_token.txt", url="http://localhost:8180", session=None
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.is_authenticated.assert_called_with()
assert client.token == "s.7AU0I51yv1Q1lxOIg1F3ZRAS"
assert vault_client.kv_engine_version == 2
assert vault_client.mount_point == "secret"
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_token_path_strip(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
with open("/tmp/test_token.txt", "w+") as the_file:
the_file.write(" s.7AU0I51yv1Q1lxOIg1F3ZRAS\n")
vault_client = _VaultClient(
auth_type="token", token_path="/tmp/test_token.txt", url="http://localhost:8180", session=None
)
client = vault_client.client
mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
client.is_authenticated.assert_called_with()
assert client.token == "s.7AU0I51yv1Q1lxOIg1F3ZRAS"
assert vault_client.kv_engine_version == 2
assert vault_client.mount_point == "secret"
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_default_auth_type(self, mock_hvac):
        """When ``auth_type`` is omitted it defaults to 'token'."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            token="s.7AU0I51yv1Q1lxOIg1F3ZRAS", url="http://localhost:8180", session=None
        )
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.is_authenticated.assert_called_with()
        assert client.token == "s.7AU0I51yv1Q1lxOIg1F3ZRAS"
        assert vault_client.auth_type == "token"
        assert vault_client.kv_engine_version == 2
        assert vault_client.mount_point == "secret"
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_userpass(self, mock_hvac):
        """Userpass auth logs in with username/password and defaults to KV v2."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="userpass", username="user", password="pass", url="http://localhost:8180", session=None
        )
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.auth.userpass.login.assert_called_with(username="user", password="pass")
        client.is_authenticated.assert_called_with()
        assert vault_client.kv_engine_version == 2
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_userpass_different_auth_mount_point(self, mock_hvac):
        """Userpass auth forwards a custom ``auth_mount_point`` to the login call."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="userpass",
            username="user",
            password="pass",
            auth_mount_point="other",
            url="http://localhost:8180",
            session=None,
        )
        client = vault_client.client
        mock_hvac.Client.assert_called_with(url="http://localhost:8180", session=None)
        client.auth.userpass.login.assert_called_with(username="user", password="pass", mount_point="other")
        client.is_authenticated.assert_called_with()
        assert vault_client.kv_engine_version == 2
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_non_existing_key_v2(self, mock_hvac):
        """A missing KV v2 secret (InvalidPath from hvac) yields None, not an error."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        # Response does not contain the requested key
        mock_client.secrets.kv.v2.read_secret_version.side_effect = InvalidPath()
        vault_client = _VaultClient(
            auth_type="token", token="s.7AU0I51yv1Q1lxOIg1F3ZRAS", url="http://localhost:8180"
        )
        secret = vault_client.get_secret(secret_path="missing")
        assert secret is None
        mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
            mount_point="secret", path="missing", version=None, raise_on_deleted_version=True
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_non_existing_key_v2_different_auth(self, mock_hvac):
        """A missing KV v2 secret yields None regardless of the auth method used."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        # Response does not contain the requested key
        mock_client.secrets.kv.v2.read_secret_version.side_effect = InvalidPath()
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
        )
        secret = vault_client.get_secret(secret_path="missing")
        assert secret is None
        assert vault_client.mount_point == "secret"
        mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
            mount_point="secret", path="missing", version=None, raise_on_deleted_version=True
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_non_existing_key_v1(self, mock_hvac):
        """A missing KV v1 secret (InvalidPath from hvac) yields None, not an error."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        # Response does not contain the requested key
        mock_client.secrets.kv.v1.read_secret.side_effect = InvalidPath()
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
        )
        secret = vault_client.get_secret(secret_path="missing")
        assert secret is None
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(mount_point="secret", path="missing")
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v2(self, mock_hvac):
        """get_secret on KV v2 unwraps the nested ``data.data`` payload."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v2.read_secret_version.return_value = {
            "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 0,
            "data": {
                "data": {"secret_key": "secret_value"},
                "metadata": {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 1,
                },
            },
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
        )
        secret = vault_client.get_secret(secret_path="path/to/secret")
        # Only the inner secret dict is returned, not the full hvac envelope.
        assert secret == {"secret_key": "secret_value"}
        mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
            mount_point="secret", path="path/to/secret", version=None, raise_on_deleted_version=True
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v2_without_preconfigured_mount_point(self, mock_hvac):
        """With ``mount_point=None`` the first path segment is used as mount point."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v2.read_secret_version.return_value = {
            "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 0,
            "data": {
                "data": {"secret_key": "secret_value"},
                "metadata": {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 1,
                },
            },
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
            mount_point=None,
        )
        secret = vault_client.get_secret(secret_path="mount_point/path/to/secret")
        assert secret == {"secret_key": "secret_value"}
        # "mount_point" was split off the secret_path and used as the KV mount.
        mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
            mount_point="mount_point", path="path/to/secret", version=None, raise_on_deleted_version=True
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v2_version(self, mock_hvac):
        """An explicit ``secret_version`` is forwarded to the KV v2 read call."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v2.read_secret_version.return_value = {
            "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 0,
            "data": {
                "data": {"secret_key": "secret_value"},
                "metadata": {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 1,
                },
            },
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
        )
        secret = vault_client.get_secret(secret_path="missing", secret_version=1)
        assert secret == {"secret_key": "secret_value"}
        mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
            mount_point="secret", path="missing", version=1, raise_on_deleted_version=True
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v1(self, mock_hvac):
        """get_secret on KV v1 returns the flat ``data`` payload unchanged."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v1.read_secret.return_value = {
            "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 2764800,
            "data": {"value": "world"},
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
        )
        secret = vault_client.get_secret(secret_path="/path/to/secret")
        assert secret == {"value": "world"}
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
            mount_point="secret", path="/path/to/secret"
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v1_ssl_verify_false(self, mock_hvac):
        """``verify=False`` disables TLS verification on the underlying session."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v1.read_secret.return_value = {
            "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 2764800,
            "data": {"value": "world"},
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
            verify=False,
        )
        secret = vault_client.get_secret(secret_path="/path/to/secret")
        assert secret == {"value": "world"}
        # The client builds its own requests session; verify must be off on it.
        assert not vault_client.kwargs["session"].verify
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
            mount_point="secret", path="/path/to/secret"
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v1_trust_private_ca(self, mock_hvac):
        """A CA-bundle path passed via ``verify`` is set on the underlying session."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v1.read_secret.return_value = {
            "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 2764800,
            "data": {"value": "world"},
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
            verify="/etc/ssl/certificates/ca-bundle.pem",
        )
        secret = vault_client.get_secret(secret_path="/path/to/secret")
        assert secret == {"value": "world"}
        assert vault_client.kwargs["session"].verify == "/etc/ssl/certificates/ca-bundle.pem"
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
            mount_point="secret", path="/path/to/secret"
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v1_with_proxies_applied(self, mock_hvac):
        """The ``proxies`` mapping is applied to the underlying requests session."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v1.read_secret.return_value = {
            "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 2764800,
            "data": {"value": "world"},
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
            verify=False,
            proxies={
                "http": "http://10.10.1.10:3128",
                "https": "http://10.10.1.10:1080",
            },
        )
        secret = vault_client.get_secret(secret_path="/path/to/secret")
        assert secret == {"value": "world"}
        assert vault_client.kwargs["session"].proxies["http"] == "http://10.10.1.10:3128"
        assert vault_client.kwargs["session"].proxies["https"] == "http://10.10.1.10:1080"
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
            mount_point="secret", path="/path/to/secret"
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v1_with_client_cert_applied(self, mock_hvac):
        """A client cert/key pair passed via ``cert`` is set on the session."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v1.read_secret.return_value = {
            "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 2764800,
            "data": {"value": "world"},
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
            verify=False,
            cert=("/path/client.cert", "/path/client.key"),
        )
        secret = vault_client.get_secret(secret_path="/path/to/secret")
        assert secret == {"value": "world"}
        assert vault_client.kwargs["session"].cert == ("/path/client.cert", "/path/client.key")
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
            mount_point="secret", path="/path/to/secret"
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v1_without_preconfigured_mount_point(self, mock_hvac):
        """With ``mount_point=None`` the first path segment is used as mount point (v1)."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v1.read_secret.return_value = {
            "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 2764800,
            "data": {"value": "world"},
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
            mount_point=None,
        )
        secret = vault_client.get_secret(secret_path="mount_point/path/to/secret")
        assert secret == {"value": "world"}
        # "mount_point" was split off the secret_path and used as the KV mount.
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(
            mount_point="mount_point", path="path/to/secret"
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_existing_key_v1_different_auth_mount_point(self, mock_hvac):
        """``auth_mount_point`` affects only login, not the KV mount used for reads."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v1.read_secret.return_value = {
            "request_id": "182d0673-618c-9889-4cba-4e1f4cfe4b4b",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 2764800,
            "data": {"value": "world"},
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            auth_mount_point="other",
            url="http://localhost:8180",
        )
        secret = vault_client.get_secret(secret_path="missing")
        assert secret == {"value": "world"}
        # The read still goes to the default "secret" mount, not "other".
        mock_client.secrets.kv.v1.read_secret.assert_called_once_with(mount_point="secret", path="missing")
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_existing_key_v1_version(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="token",
token="s.7AU0I51yv1Q1lxOIg1F3ZRAS",
url="http://localhost:8180",
kv_engine_version=1,
)
with pytest.raises(VaultError, match="Secret version"):
vault_client.get_secret(secret_path="missing", secret_version=1)
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_secret_metadata_v2(self, mock_hvac):
        """get_secret_metadata returns the raw hvac metadata response (KV v2 only)."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v2.read_secret_metadata.return_value = {
            "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 0,
            "metadata": [
                {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 1,
                },
                {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 2,
                },
            ],
        }
        vault_client = _VaultClient(
            auth_type="token", token="s.7AU0I51yv1Q1lxOIg1F3ZRAS", url="http://localhost:8180"
        )
        metadata = vault_client.get_secret_metadata(secret_path="missing")
        # The full response is passed through unmodified.
        assert metadata == {
            "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 0,
            "metadata": [
                {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 1,
                },
                {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 2,
                },
            ],
        }
        mock_client.secrets.kv.v2.read_secret_metadata.assert_called_once_with(
            mount_point="secret", path="missing"
        )
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_secret_metadata_v1(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="radius",
radius_host="radhost",
radius_port=8110,
radius_secret="pass",
kv_engine_version=1,
url="http://localhost:8180",
)
with pytest.raises(VaultError, match="Metadata might only be used with version 2 of the KV engine."):
vault_client.get_secret_metadata(secret_path="missing")
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_get_secret_including_metadata_v2(self, mock_hvac):
        """get_secret_including_metadata returns the full hvac envelope (KV v2 only)."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        mock_client.secrets.kv.v2.read_secret_version.return_value = {
            "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 0,
            "data": {
                "data": {"secret_key": "secret_value"},
                "metadata": {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 1,
                },
            },
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
        )
        metadata = vault_client.get_secret_including_metadata(secret_path="missing")
        # Unlike get_secret, the whole response (data + metadata) is returned.
        assert metadata == {
            "request_id": "94011e25-f8dc-ec29-221b-1f9c1d9ad2ae",
            "lease_id": "",
            "renewable": False,
            "lease_duration": 0,
            "data": {
                "data": {"secret_key": "secret_value"},
                "metadata": {
                    "created_time": "2020-03-16T21:01:43.331126Z",
                    "deletion_time": "",
                    "destroyed": False,
                    "version": 1,
                },
            },
            "wrap_info": None,
            "warnings": None,
            "auth": None,
        }
        mock_client.secrets.kv.v2.read_secret_version.assert_called_once_with(
            mount_point="secret", path="missing", version=None, raise_on_deleted_version=True
        )
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_get_secret_including_metadata_v1(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="radius",
radius_host="radhost",
radius_port=8110,
radius_secret="pass",
kv_engine_version=1,
url="http://localhost:8180",
)
with pytest.raises(VaultError, match="Metadata might only be used with version 2 of the KV engine."):
vault_client.get_secret_including_metadata(secret_path="missing")
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_create_or_update_secret_v2(self, mock_hvac):
        """Writes on KV v2 go through create_or_update_secret with ``cas=None``."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
        )
        vault_client.create_or_update_secret(secret_path="path", secret={"key": "value"})
        mock_client.secrets.kv.v2.create_or_update_secret.assert_called_once_with(
            mount_point="secret", path="path", secret={"key": "value"}, cas=None
        )
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_create_or_update_secret_v2_method(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="radius",
radius_host="radhost",
radius_port=8110,
radius_secret="pass",
url="http://localhost:8180",
)
with pytest.raises(VaultError, match="The method parameter is only valid for version 1"):
vault_client.create_or_update_secret(secret_path="path", secret={"key": "value"}, method="post")
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_create_or_update_secret_v2_cas(self, mock_hvac):
        """An explicit ``cas`` (check-and-set) value is forwarded to the v2 write."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            url="http://localhost:8180",
        )
        vault_client.create_or_update_secret(secret_path="path", secret={"key": "value"}, cas=10)
        mock_client.secrets.kv.v2.create_or_update_secret.assert_called_once_with(
            mount_point="secret", path="path", secret={"key": "value"}, cas=10
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_create_or_update_secret_v1(self, mock_hvac):
        """Writes on KV v1 go through create_or_update_secret with ``method=None``."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
        )
        vault_client.create_or_update_secret(secret_path="path", secret={"key": "value"})
        mock_client.secrets.kv.v1.create_or_update_secret.assert_called_once_with(
            mount_point="secret", path="path", secret={"key": "value"}, method=None
        )
@mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
def test_create_or_update_secret_v1_cas(self, mock_hvac):
mock_client = mock.MagicMock()
mock_hvac.Client.return_value = mock_client
vault_client = _VaultClient(
auth_type="radius",
radius_host="radhost",
radius_port=8110,
radius_secret="pass",
kv_engine_version=1,
url="http://localhost:8180",
)
with pytest.raises(VaultError, match="The cas parameter is only valid for version 2"):
vault_client.create_or_update_secret(secret_path="path", secret={"key": "value"}, cas=10)
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_create_or_update_secret_v1_post(self, mock_hvac):
        """An explicit ``method`` (e.g. 'post') is forwarded to the KV v1 write."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
        )
        vault_client.create_or_update_secret(secret_path="path", secret={"key": "value"}, method="post")
        mock_client.secrets.kv.v1.create_or_update_secret.assert_called_once_with(
            mount_point="secret", path="path", secret={"key": "value"}, method="post"
        )
    @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac")
    def test_cached_property_invalidates_on_auth_failure(self, mock_hvac):
        """The cached ``client`` property is rebuilt once authentication lapses."""
        mock_client = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client
        vault_client = _VaultClient(
            auth_type="radius",
            radius_host="radhost",
            radius_port=8110,
            radius_secret="pass",
            kv_engine_version=1,
            url="http://localhost:8180",
        )
        # Assert that the original mock_client is returned
        assert vault_client.client == mock_client
        # Prove that the mock_client is cached by changing the return
        # value, but still receive the original mock client
        mock_client_2 = mock.MagicMock()
        mock_hvac.Client.return_value = mock_client_2
        assert vault_client.client == mock_client
        mock_client.is_authenticated.return_value = False
        # assert that when the client is not authenticated the cache
        # is invalidated, therefore returning the second client
        assert vault_client.client == mock_client_2
| TestVaultClient |
python | scrapy__scrapy | scrapy/core/downloader/__init__.py | {
"start": 1190,
"end": 3536
} | class ____:
"""Downloader slot"""
def __init__(
self,
concurrency: int,
delay: float,
randomize_delay: bool,
):
self.concurrency: int = concurrency
self.delay: float = delay
self.randomize_delay: bool = randomize_delay
self.active: set[Request] = set()
self.queue: deque[tuple[Request, Deferred[Response]]] = deque()
self.transferring: set[Request] = set()
self.lastseen: float = 0
self.latercall: CallLaterResult | None = None
def free_transfer_slots(self) -> int:
return self.concurrency - len(self.transferring)
def download_delay(self) -> float:
if self.randomize_delay:
return random.uniform(0.5 * self.delay, 1.5 * self.delay) # noqa: S311
return self.delay
def close(self) -> None:
if self.latercall:
self.latercall.cancel()
self.latercall = None
def __repr__(self) -> str:
cls_name = self.__class__.__name__
return (
f"{cls_name}(concurrency={self.concurrency!r}, "
f"delay={self.delay:.2f}, "
f"randomize_delay={self.randomize_delay!r})"
)
def __str__(self) -> str:
return (
f"<downloader.Slot concurrency={self.concurrency!r} "
f"delay={self.delay:.2f} randomize_delay={self.randomize_delay!r} "
f"len(active)={len(self.active)} len(queue)={len(self.queue)} "
f"len(transferring)={len(self.transferring)} "
f"lastseen={datetime.fromtimestamp(self.lastseen).isoformat()}>"
)
def _get_concurrency_delay(
concurrency: int, spider: Spider, settings: BaseSettings
) -> tuple[int, float]:
delay: float = settings.getfloat("DOWNLOAD_DELAY")
if hasattr(spider, "download_delay"):
delay = spider.download_delay
if hasattr(spider, "max_concurrent_requests"):
warnings.warn(
"The 'max_concurrent_requests' spider attribute is deprecated. "
"Use Spider.custom_settings or Spider.update_settings() instead. "
"The corresponding setting name is 'CONCURRENT_REQUESTS'.",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
concurrency = spider.max_concurrent_requests
return concurrency, delay
| Slot |
python | kamyu104__LeetCode-Solutions | Python/rearrange-string-k-distance-apart.py | {
"start": 2911,
"end": 3737
} | class ____(object):
def rearrangeString(self, s, k):
"""
:type str: str
:type k: int
:rtype: str
"""
if k <= 1:
return s
cnts = Counter(s)
heap = []
for c, cnt in cnts.iteritems():
heappush(heap, [-cnt, c])
result = []
while heap:
used_cnt_chars = []
for _ in xrange(min(k, len(s) - len(result))):
if not heap:
return ""
cnt_char = heappop(heap)
result.append(cnt_char[1])
cnt_char[0] += 1
if cnt_char[0] < 0:
used_cnt_chars.append(cnt_char)
for cnt_char in used_cnt_chars:
heappush(heap, cnt_char)
return "".join(result)
| Solution4 |
python | python-excel__xlwt | tests/test_compound_doc.py | {
"start": 6302,
"end": 7294
} | class ____(unittest.TestCase):
def test_build_directory(self):
xlsdoc = XlsDoc()
xlsdoc.book_stream_len = 0x1000
xlsdoc._build_directory()
self.assertEqual(DIR, xlsdoc.dir_stream)
def test_build_sat(self):
xlsdoc = XlsDoc()
xlsdoc.book_stream_len = 0x1000
xlsdoc._build_directory()
xlsdoc._build_sat()
self.assertEqual(PACKED_SAT, xlsdoc.packed_SAT)
self.assertEqual(PACKED_MSAT_1ST, xlsdoc.packed_MSAT_1st)
self.assertEqual(PACKED_MSAT_2ND, xlsdoc.packed_MSAT_2nd)
self.assertEqual(BOOK_STREAM_SECT, xlsdoc.book_stream_sect)
self.assertEqual(SAT_SECT, xlsdoc.SAT_sect)
self.assertEqual(MSAT_SECT_2ND, xlsdoc.MSAT_sect_2nd)
def test_build_header(self):
xlsdoc = XlsDoc()
xlsdoc.book_stream_len = 0x1000
xlsdoc._build_directory()
xlsdoc._build_sat()
xlsdoc._build_header()
self.assertEqual(HEADER, xlsdoc.header)
| TestXlsDoc |
python | ansible__ansible | lib/ansible/module_utils/errors.py | {
"start": 2119,
"end": 2219
} | class ____(AnsibleValidationError):
"""Error processing parameter deprecations"""
| DeprecationError |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/attention.py | {
"start": 6984,
"end": 10560
} | class ____(torch.nn.Module):
"""
Residual self attentioninspired from https://arxiv.org/pdf/1909.07528.pdf. Can be used
with an EntityEmbedding module, to apply multi head self attention to encode information
about a "Self" and a list of relevant "Entities".
"""
EPSILON = 1e-7
def __init__(
self,
embedding_size: int,
entity_num_max_elements: Optional[int] = None,
num_heads: int = 4,
):
"""
Constructs a ResidualSelfAttention module.
:param embedding_size: Embedding sizee for attention mechanism and
Q, K, V encoders.
:param entity_num_max_elements: A List of ints representing the maximum number
of elements in an entity sequence. Should be of length num_entities. Pass None to
not restrict the number of elements; however, this will make the module
unexportable to ONNX/Sentis.
:param num_heads: Number of heads for Multi Head Self-Attention
"""
super().__init__()
self.max_num_ent: Optional[int] = None
if entity_num_max_elements is not None:
self.max_num_ent = entity_num_max_elements
self.attention = MultiHeadAttention(
num_heads=num_heads, embedding_size=embedding_size
)
# Initialization scheme from http://www.cs.toronto.edu/~mvolkovs/ICML2020_tfixup.pdf
self.fc_q = linear_layer(
embedding_size,
embedding_size,
kernel_init=Initialization.Normal,
kernel_gain=(0.125 / embedding_size) ** 0.5,
)
self.fc_k = linear_layer(
embedding_size,
embedding_size,
kernel_init=Initialization.Normal,
kernel_gain=(0.125 / embedding_size) ** 0.5,
)
self.fc_v = linear_layer(
embedding_size,
embedding_size,
kernel_init=Initialization.Normal,
kernel_gain=(0.125 / embedding_size) ** 0.5,
)
self.fc_out = linear_layer(
embedding_size,
embedding_size,
kernel_init=Initialization.Normal,
kernel_gain=(0.125 / embedding_size) ** 0.5,
)
self.embedding_norm = LayerNorm()
self.residual_norm = LayerNorm()
def forward(self, inp: torch.Tensor, key_masks: List[torch.Tensor]) -> torch.Tensor:
# Gather the maximum number of entities information
mask = torch.cat(key_masks, dim=1)
inp = self.embedding_norm(inp)
# Feed to self attention
query = self.fc_q(inp) # (b, n_q, emb)
key = self.fc_k(inp) # (b, n_k, emb)
value = self.fc_v(inp) # (b, n_k, emb)
# Only use max num if provided
if self.max_num_ent is not None:
num_ent = self.max_num_ent
else:
num_ent = inp.shape[1]
if exporting_to_onnx.is_exporting():
raise UnityTrainerException(
"Trying to export an attention mechanism that doesn't have a set max \
number of elements."
)
output, _ = self.attention(query, key, value, num_ent, num_ent, mask)
# Residual
output = self.fc_out(output) + inp
output = self.residual_norm(output)
# Average Pooling
numerator = torch.sum(output * (1 - mask).reshape(-1, num_ent, 1), dim=1)
denominator = torch.sum(1 - mask, dim=1, keepdim=True) + self.EPSILON
output = numerator / denominator
return output
| ResidualSelfAttention |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_display_units10.py | {
"start": 315,
"end": 1206
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_display_units10.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56159232, 61364096]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.set_y_axis({"display_units": "trillions", "display_units_visible": 0})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 16376,
"end": 17741
} | class ____(TableCreateDDL):
"""Represent a CREATE TABLE statement."""
__visit_name__ = "create_table"
def __init__(
self,
element: Table,
include_foreign_key_constraints: Optional[
typing_Sequence[ForeignKeyConstraint]
] = None,
if_not_exists: bool = False,
) -> None:
"""Create a :class:`.CreateTable` construct.
:param element: a :class:`_schema.Table` that's the subject
of the CREATE
:param on: See the description for 'on' in :class:`.DDL`.
:param include_foreign_key_constraints: optional sequence of
:class:`_schema.ForeignKeyConstraint` objects that will be included
inline within the CREATE construct; if omitted, all foreign key
constraints that do not specify use_alter=True are included.
:param if_not_exists: if True, an IF NOT EXISTS operator will be
applied to the construct.
.. versionadded:: 1.4.0b2
"""
super().__init__(element, if_not_exists=if_not_exists)
self.columns = [CreateColumn(column) for column in element.columns]
self.include_foreign_key_constraints = include_foreign_key_constraints
def to_metadata(self, metadata: MetaData, table: Table) -> Self:
return self.__class__(table, if_not_exists=self.if_not_exists)
| CreateTable |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 223853,
"end": 224168
} | class ____(VegaLiteSchema):
"""ConditionalMarkPropFieldOrDatumDefTypeForShape schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalMarkPropFieldOrDatumDef<TypeForShape>"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalMarkPropFieldOrDatumDefTypeForShape |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 36124,
"end": 36774
} | class ____(BaseModel, extra="forbid"):
shard_key: Optional["ShardKeySelector"] = Field(default=None, description="")
key: str = Field(..., description="Payload key to use for faceting.")
limit: Optional[int] = Field(default=None, description="Max number of hits to return. Default is 10.")
filter: Optional["Filter"] = Field(
default=None, description="Filter conditions - only consider points that satisfy these conditions."
)
exact: Optional[bool] = Field(
default=None,
description="Whether to do a more expensive exact count for each of the values in the facet. Default is false.",
)
| FacetRequest |
python | pandas-dev__pandas | pandas/tests/libs/test_hashtable.py | {
"start": 21277,
"end": 25148
} | class ____:
def test_value_count(self, dtype, writable):
N = 43
expected = (np.arange(N) + N).astype(dtype)
values = np.repeat(expected, 5)
values.flags.writeable = writable
keys, counts, _ = ht.value_count(values, False)
tm.assert_numpy_array_equal(np.sort(keys), expected)
assert np.all(counts == 5)
def test_value_count_mask(self, dtype):
if dtype == np.object_:
pytest.skip("mask not implemented for object dtype")
values = np.array([1] * 5, dtype=dtype)
mask = np.zeros((5,), dtype=np.bool_)
mask[1] = True
mask[4] = True
keys, counts, na_counter = ht.value_count(values, False, mask=mask)
assert len(keys) == 2
assert na_counter == 2
def test_value_count_stable(self, dtype, writable):
# GH12679
values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
values.flags.writeable = writable
keys, counts, _ = ht.value_count(values, False)
tm.assert_numpy_array_equal(keys, values)
assert np.all(counts == 1)
def test_duplicated_first(self, dtype, writable):
N = 100
values = np.repeat(np.arange(N).astype(dtype), 5)
values.flags.writeable = writable
result = ht.duplicated(values)
expected = np.ones_like(values, dtype=np.bool_)
expected[::5] = False
tm.assert_numpy_array_equal(result, expected)
def test_ismember_yes(self, dtype, writable):
N = 127
arr = np.arange(N).astype(dtype)
values = np.arange(N).astype(dtype)
arr.flags.writeable = writable
values.flags.writeable = writable
result = ht.ismember(arr, values)
expected = np.ones_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_ismember_no(self, dtype):
N = 17
arr = np.arange(N).astype(dtype)
values = (np.arange(N) + N).astype(dtype)
result = ht.ismember(arr, values)
expected = np.zeros_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_mode(self, dtype, writable):
if dtype in (np.int8, np.uint8):
N = 53
else:
N = 11111
values = np.repeat(np.arange(N).astype(dtype), 5)
values[0] = 42
values.flags.writeable = writable
result = ht.mode(values, False)[0]
assert result == 42
def test_mode_stable(self, dtype, writable):
values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
values.flags.writeable = writable
keys = ht.mode(values, False)[0]
tm.assert_numpy_array_equal(keys, values)
def test_modes_with_nans():
# GH42688, nans aren't mangled
nulls = [pd.NA, np.nan, pd.NaT, None]
values = np.array([True] + nulls * 2, dtype=np.object_)
modes = ht.mode(values, False)[0]
assert modes.size == len(nulls)
def test_unique_label_indices_intp(writable):
keys = np.array([1, 2, 2, 2, 1, 3], dtype=np.intp)
keys.flags.writeable = writable
result = ht.unique_label_indices(keys)
expected = np.array([0, 1, 5], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_unique_label_indices():
a = np.random.default_rng(2).integers(1, 1 << 10, 1 << 15).astype(np.intp)
left = ht.unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right, check_dtype=False)
a[np.random.default_rng(2).choice(len(a), 10)] = -1
left = ht.unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right, check_dtype=False)
@pytest.mark.parametrize(
"dtype",
[
np.float64,
np.float32,
np.complex128,
np.complex64,
],
)
| TestHelpFunctions |
python | getsentry__sentry | src/sentry/codecov/enums.py | {
"start": 91,
"end": 284
} | class ____(Enum):
AVG_DURATION = "AVG_DURATION"
FLAKE_RATE = "FLAKE_RATE"
FAILURE_RATE = "FAILURE_RATE"
RUNS_FAILED = "RUNS_FAILED"
UPDATED_AT = "UPDATED_AT"
| OrderingParameter |
python | run-llama__llama_index | llama-index-core/llama_index/core/settings.py | {
"start": 734,
"end": 8159
} | class ____:
"""Settings for the Llama Index, lazily initialized."""
# lazy initialization
_llm: Optional[LLM] = None
_embed_model: Optional[BaseEmbedding] = None
_callback_manager: Optional[CallbackManager] = None
_tokenizer: Optional[Callable[[str], List[Any]]] = None
_node_parser: Optional[NodeParser] = None
_prompt_helper: Optional[PromptHelper] = None
_transformations: Optional[List[TransformComponent]] = None
# ---- LLM ----
@property
def llm(self) -> LLM:
"""Get the LLM."""
if self._llm is None:
self._llm = resolve_llm("default")
if self._callback_manager is not None:
self._llm.callback_manager = self._callback_manager
return self._llm
@llm.setter
def llm(self, llm: LLMType) -> None:
"""Set the LLM."""
self._llm = resolve_llm(llm)
@property
def pydantic_program_mode(self) -> PydanticProgramMode:
"""Get the pydantic program mode."""
return self.llm.pydantic_program_mode
@pydantic_program_mode.setter
def pydantic_program_mode(self, pydantic_program_mode: PydanticProgramMode) -> None:
"""Set the pydantic program mode."""
self.llm.pydantic_program_mode = pydantic_program_mode
# ---- Embedding ----
@property
def embed_model(self) -> BaseEmbedding:
"""Get the embedding model."""
if self._embed_model is None:
self._embed_model = resolve_embed_model("default")
if self._callback_manager is not None:
self._embed_model.callback_manager = self._callback_manager
return self._embed_model
@embed_model.setter
def embed_model(self, embed_model: EmbedType) -> None:
"""Set the embedding model."""
self._embed_model = resolve_embed_model(embed_model)
# ---- Callbacks ----
@property
def global_handler(self) -> Optional[BaseCallbackHandler]:
"""Get the global handler."""
import llama_index.core
# TODO: deprecated?
return llama_index.core.global_handler
@global_handler.setter
def global_handler(self, eval_mode: str, **eval_params: Any) -> None:
"""Set the global handler."""
from llama_index.core import set_global_handler
# TODO: deprecated?
set_global_handler(eval_mode, **eval_params)
@property
def callback_manager(self) -> CallbackManager:
"""Get the callback manager."""
if self._callback_manager is None:
self._callback_manager = CallbackManager()
return self._callback_manager
@callback_manager.setter
def callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set the callback manager."""
self._callback_manager = callback_manager
# ---- Tokenizer ----
@property
def tokenizer(self) -> Callable[[str], List[Any]]:
"""Get the tokenizer."""
import llama_index.core
if llama_index.core.global_tokenizer is None:
return get_tokenizer()
# TODO: deprecated?
return llama_index.core.global_tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: Callable[[str], List[Any]]) -> None:
"""Set the tokenizer."""
try:
from transformers import PreTrainedTokenizerBase # pants: no-infer-dep
if isinstance(tokenizer, PreTrainedTokenizerBase):
from functools import partial
tokenizer = partial(tokenizer.encode, add_special_tokens=False)
except ImportError:
pass
# TODO: deprecated?
set_global_tokenizer(tokenizer)
# ---- Node parser ----
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
if self._node_parser is None:
self._node_parser = SentenceSplitter()
if self._callback_manager is not None:
self._node_parser.callback_manager = self._callback_manager
return self._node_parser
@node_parser.setter
def node_parser(self, node_parser: NodeParser) -> None:
"""Set the node parser."""
self._node_parser = node_parser
@property
def chunk_size(self) -> int:
"""Get the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
return self.node_parser.chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@chunk_size.setter
def chunk_size(self, chunk_size: int) -> None:
"""Set the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
self.node_parser.chunk_size = chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@property
def chunk_overlap(self) -> int:
"""Get the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
return self.node_parser.chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
@chunk_overlap.setter
def chunk_overlap(self, chunk_overlap: int) -> None:
"""Set the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
self.node_parser.chunk_overlap = chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
# ---- Node parser alias ----
@property
def text_splitter(self) -> NodeParser:
"""Get the text splitter."""
return self.node_parser
@text_splitter.setter
def text_splitter(self, text_splitter: NodeParser) -> None:
"""Set the text splitter."""
self.node_parser = text_splitter
@property
def prompt_helper(self) -> PromptHelper:
"""Get the prompt helper."""
if self._llm is not None and self._prompt_helper is None:
self._prompt_helper = PromptHelper.from_llm_metadata(self._llm.metadata)
elif self._prompt_helper is None:
self._prompt_helper = PromptHelper()
return self._prompt_helper
@prompt_helper.setter
def prompt_helper(self, prompt_helper: PromptHelper) -> None:
"""Set the prompt helper."""
self._prompt_helper = prompt_helper
@property
def num_output(self) -> int:
"""Get the number of outputs."""
return self.prompt_helper.num_output
@num_output.setter
def num_output(self, num_output: int) -> None:
"""Set the number of outputs."""
self.prompt_helper.num_output = num_output
@property
def context_window(self) -> int:
"""Get the context window."""
return self.prompt_helper.context_window
@context_window.setter
def context_window(self, context_window: int) -> None:
"""Set the context window."""
self.prompt_helper.context_window = context_window
# ---- Transformations ----
@property
def transformations(self) -> List[TransformComponent]:
"""Get the transformations."""
if self._transformations is None:
self._transformations = [self.node_parser]
return self._transformations
@transformations.setter
def transformations(self, transformations: List[TransformComponent]) -> None:
"""Set the transformations."""
self._transformations = transformations
# Singleton
Settings = _Settings()
| _Settings |
python | coleifer__peewee | tests/signals.py | {
"start": 5678,
"end": 6176
} | class ____(ModelTestCase):
database = get_in_memory_db()
requires = [NoPK]
def test_save_no_pk(self):
accum = [0]
@signals.pre_save(sender=NoPK)
@signals.post_save(sender=NoPK)
def save_hook(sender, instance, created):
accum[0] += 1
obj = NoPK.create(val=1)
self.assertEqual(obj.val, 1)
obj_db = NoPK.get(NoPK.val == 1)
self.assertEqual(obj_db.val, 1)
self.assertEqual(accum[0], 2)
| TestSaveNoPrimaryKey |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/module_manpath_append/package.py | {
"start": 217,
"end": 551
} | class ____(Package):
homepage = "http://www.spack.llnl.gov"
url = "http://www.spack.llnl.gov/module-manpath-append-1.0.tar.gz"
version("1.0", "0123456789abcdef0123456789abcdef")
def setup_run_environment(self, env: EnvironmentModifications) -> None:
env.append_path("MANPATH", "/path/to/man")
| ModuleManpathAppend |
python | google__pytype | pytype/overlays/functools_overlay.py | {
"start": 999,
"end": 2797
} | class ____(abstract.PyTDClass, mixin.HasSlots):
"""Implementation of functools.partial."""
def __init__(self, ctx: "context.Context", module: str):
pytd_cls = ctx.loader.lookup_pytd(module, "partial")
super().__init__("partial", pytd_cls, ctx)
mixin.HasSlots.init_mixin(self)
self._pytd_new = self.pytd_cls.Lookup("__new__")
def new_slot(
self, node, cls, func, /, *args, **kwargs
) -> tuple[cfg.CFGNode, cfg.Variable]:
# We are not using ``cls``, because it is set to unsolvable when
# functools.partial is called with *args.
del cls
# Make sure the call is well typed before binding the partial
new = self.ctx.convert.convert_pytd_function(self._pytd_new)
_, specialized_obj = function.call_function(
self.ctx,
node,
new.to_variable(node),
function.Args(
(self.to_variable(node), func, *args),
kwargs,
call_context.starargs,
call_context.starstarargs,
),
fallback_to_unsolvable=False,
)
[specialized_obj] = specialized_obj.data
type_arg = specialized_obj.get_formal_type_parameter("_T")
cls = abstract.ParameterizedClass(self, {"_T": type_arg}, self.ctx)
obj = bind_partial(node, cls, func, args, kwargs, self.ctx)
return node, obj.to_variable(node)
def get_own_new(self, node, value) -> tuple[cfg.CFGNode, cfg.Variable]:
new = NativeFunction("__new__", self.new_slot, self.ctx)
return node, new.to_variable(node)
def bind_partial(node, cls, func, args, kwargs, ctx) -> BoundPartial:
del node # Unused.
obj = BoundPartial(ctx, cls)
obj.underlying = func
obj.args = args
obj.kwargs = kwargs
obj.starargs = call_context.starargs
obj.starstarargs = call_context.starstarargs
return obj
| Partial |
python | pypa__hatch | src/hatch/project/config.py | {
"start": 24557,
"end": 29762
} | class ____:
def __init__(self, name: str, config: dict[str, Any], global_config: BuildConfig) -> None:
self.__name = name
self.__config = config
self.__global_config = global_config
@cached_property
def directory(self) -> str:
directory = self.__config.get("directory", self.__global_config.directory)
if not isinstance(directory, str):
message = f"Field `tool.hatch.build.targets.{self.__name}.directory` must be a string"
raise TypeError(message)
return directory
@cached_property
def dependencies(self) -> list[str]:
dependencies: list[str] = self.__config.get("dependencies", [])
if not isinstance(dependencies, list):
message = f"Field `tool.hatch.build.targets.{self.__name}.dependencies` must be an array"
raise TypeError(message)
for i, dependency in enumerate(dependencies, 1):
if not isinstance(dependency, str):
message = (
f"Dependency #{i} in field `tool.hatch.build.targets.{self.__name}.dependencies` must be a string"
)
raise TypeError(message)
all_dependencies = list(self.__global_config.dependencies)
all_dependencies.extend(dependencies)
return all_dependencies
@cached_property
def hook_config(self) -> dict[str, dict[str, Any]]:
hook_config: dict[str, dict[str, Any]] = self.__config.get("hooks", {})
if not isinstance(hook_config, dict):
message = f"Field `tool.hatch.build.targets.{self.__name}.hooks` must be a table"
raise TypeError(message)
for hook_name, config in hook_config.items():
if not isinstance(config, dict):
message = f"Field `tool.hatch.build.targets.{self.__name}.hooks.{hook_name}` must be a table"
raise TypeError(message)
config = self.__global_config.hook_config.copy()
config.update(hook_config)
return finalize_hook_config(config)
def expand_script_commands(script_name, commands, config, seen, active):
if script_name in seen:
return seen[script_name]
if script_name in active:
active.append(script_name)
message = f"Circular expansion detected for field `tool.hatch.scripts`: {' -> '.join(active)}"
raise ValueError(message)
active.append(script_name)
expanded_commands = []
for command in commands:
possible_script, args, ignore_exit_code = parse_script_command(command)
if possible_script in config:
expanded_commands.extend(
format_script_commands(
commands=expand_script_commands(possible_script, config[possible_script], config, seen, active),
args=args,
ignore_exit_code=ignore_exit_code,
)
)
else:
expanded_commands.append(command)
seen[script_name] = expanded_commands
active.pop()
return expanded_commands
def _populate_default_env_values(env_name, data, config, seen, active):
if env_name in seen:
return
if data.pop("detached", False):
data["template"] = env_name
data["skip-install"] = True
template_name = data.pop("template", "default")
if template_name not in config:
message = f"Field `tool.hatch.envs.{env_name}.template` refers to an unknown environment `{template_name}`"
raise ValueError(message)
if env_name in active:
active.append(env_name)
message = f"Circular inheritance detected for field `tool.hatch.envs.*.template`: {' -> '.join(active)}"
raise ValueError(message)
if template_name == env_name:
ensure_valid_environment(data)
seen.add(env_name)
return
active.append(env_name)
template_config = config[template_name]
_populate_default_env_values(template_name, template_config, config, seen, active)
for key, value in template_config.items():
if key == "matrix":
continue
if key == "scripts":
scripts = data["scripts"] if "scripts" in data else data.setdefault("scripts", {})
for script, commands in value.items():
scripts.setdefault(script, commands)
else:
data.setdefault(key, value)
seen.add(env_name)
active.pop()
def finalize_hook_config(hook_config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
if env_var_enabled(BuildEnvVars.NO_HOOKS):
return {}
all_hooks_enabled = env_var_enabled(BuildEnvVars.HOOKS_ENABLE)
final_hook_config: dict[str, dict[str, Any]] = {
hook_name: config
for hook_name, config in hook_config.items()
if (
all_hooks_enabled
or config.get("enable-by-default", True)
or env_var_enabled(f"{BuildEnvVars.HOOK_ENABLE_PREFIX}{hook_name.upper()}")
)
}
return final_hook_config
def env_var_enabled(env_var: str, *, default: bool = False) -> bool:
if env_var in environ:
return environ[env_var] in {"1", "true"}
return default
| BuildTargetConfig |
python | astropy__astropy | astropy/coordinates/erfa_astrom.py | {
"start": 6155,
"end": 12724
} | class ____(ErfaAstrom):
"""
A provider for astrometry values that does not call erfa
for each individual timestamp but interpolates linearly
between support points.
For the interpolation, float64 MJD values are used, so time precision
for the interpolation will be around a microsecond.
This can dramatically speed up coordinate transformations,
e.g. between CIRS and ICRS,
when obstime is an array of many values (factors of 10 to > 100 depending
on the selected resolution, number of points and the time range of the values).
The precision of the transformation will still be in the order of microseconds
for reasonable values of time_resolution, e.g. ``300 * u.s``.
Users should benchmark performance and accuracy with the default transformation
for their specific use case and then choose a suitable ``time_resolution``
from there.
This class is intended be used together with the ``erfa_astrom`` science state,
e.g. in a context manager like this
Example
-------
>>> from astropy.coordinates import SkyCoord, CIRS
>>> from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
>>> import astropy.units as u
>>> from astropy.time import Time
>>> import numpy as np
>>> obstime = Time('2010-01-01T20:00:00') + np.linspace(0, 4, 1000) * u.hour
>>> crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s')
>>> with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
... cirs = crab.transform_to(CIRS(obstime=obstime))
"""
@u.quantity_input(time_resolution=u.day)
def __init__(self, time_resolution):
if time_resolution.to_value(u.us) < 10:
warnings.warn(
f"Using {self.__class__.__name__} with `time_resolution`"
" below 10 microseconds might lead to numerical inaccuracies"
" as the MJD-based interpolation is limited by floating point "
" precision to about a microsecond of precision",
AstropyWarning,
)
self.mjd_resolution = time_resolution.to_value(u.day)
def _get_support_points(self, obstime):
"""
Calculate support points for the interpolation.
We divide the MJD by the time resolution (as single float64 values),
and calculate ceil and floor.
Then we take the unique and sorted values and scale back to MJD.
This will create a sparse support for non-regular input obstimes.
"""
mjd_scaled = np.ravel(obstime.mjd / self.mjd_resolution)
# unique already does sorting
mjd_u = np.unique(np.concatenate([np.floor(mjd_scaled), np.ceil(mjd_scaled)]))
return Time(
mjd_u * self.mjd_resolution,
format="mjd",
scale=obstime.scale,
)
@staticmethod
def _prepare_earth_position_vel(support, obstime):
"""
Calculate Earth's position and velocity.
Uses the coarser grid ``support`` to do the calculation, and interpolates
onto the finer grid ``obstime``.
"""
pv_support, heliocentric_support = prepare_earth_position_vel(support)
# do interpolation
earth_pv = np.empty(obstime.shape, dtype=erfa.dt_pv)
earth_heliocentric = np.empty(obstime.shape + (3,))
for dim in range(3):
for key in "pv":
earth_pv[key][..., dim] = np.interp(
obstime.mjd, support.mjd, pv_support[key][..., dim]
)
earth_heliocentric[..., dim] = np.interp(
obstime.mjd, support.mjd, heliocentric_support[..., dim]
)
return earth_pv, earth_heliocentric
def apco(self, frame_or_coord):
"""
Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
for which to calculate the calculate the astrom values.
For this function, an AltAz or CIRS frame is expected.
"""
lon, lat, height = frame_or_coord.location.to_geodetic("WGS84")
obstime = frame_or_coord.obstime
support = self._get_support_points(obstime)
interp = functools.partial(np.interp, obstime.mjd, support.mjd)
jd1_tt, jd2_tt = get_jd12(obstime, "tt")
# get the position and velocity arrays for the observatory. Need to
# have xyz in last dimension, and pos/vel in one-but-last.
earth_pv, earth_heliocentric = self._prepare_earth_position_vel(
support, obstime
)
xp, yp = map(interp, get_polar_motion(support))
sp = erfa.sp00(jd1_tt, jd2_tt)
x, y, s = map(interp, get_cip(*get_jd12(support, "tt")))
era = erfa.era00(*get_jd12(obstime, "ut1"))
# refraction constants
refa, refb = _refco(frame_or_coord)
return erfa.apco(
jd1_tt,
jd2_tt,
earth_pv,
earth_heliocentric,
x,
y,
s,
era,
lon.to_value(u.radian),
lat.to_value(u.radian),
height.to_value(u.m),
xp,
yp,
sp,
refa,
refb,
)
def apcs(self, frame_or_coord):
"""
Wrapper for ``erfa.apci``, used in conversions GCRS <-> ICRS.
Parameters
----------
frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
Frame or coordinate instance in the corresponding frame
for which to calculate the calculate the astrom values.
For this function, a GCRS frame is expected.
"""
obstime = frame_or_coord.obstime
support = self._get_support_points(obstime)
# get the position and velocity arrays for the observatory. Need to
# have xyz in last dimension, and pos/vel in one-but-last.
earth_pv, earth_heliocentric = self._prepare_earth_position_vel(
support, obstime
)
pv = pav2pv(
frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value,
)
jd1_tt, jd2_tt = get_jd12(obstime, "tt")
return erfa.apcs(jd1_tt, jd2_tt, pv, earth_pv, earth_heliocentric)
| ErfaAstromInterpolator |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py | {
"start": 50620,
"end": 54642
} | class ____(Wav2Vec2BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)"
)
self.wav2vec2_bert = Wav2Vec2BertModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wav2vec2_bert.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_features: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wav2vec2_bert(
input_features,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| Wav2Vec2BertForSequenceClassification |
python | pytorch__pytorch | benchmarks/dynamo/pr_time_benchmarks/benchmarks/nested_module.py | {
"start": 137,
"end": 792
} | class ____(nn.Module):
def __init__(self, depth=3, width=4):
super().__init__()
self.depth = depth
self.width = width
self.relu_a = nn.ReLU()
self.relu_b = nn.ReLU()
sub_mods = []
if depth > 0:
for i in range(width):
sub_mods.append(NestedModule(depth - 1, width))
else:
for i in range(width):
sub_mods.append(nn.ReLU())
self.sub_mods = nn.Sequential(*sub_mods)
self.a = 2
def forward(self, x):
x = self.relu_a(x)
x = x + self.sub_mods(x)
return x + self.relu_b(x) + self.a
| NestedModule |
python | pytransitions__transitions | tests/test_pygraphviz.py | {
"start": 4458,
"end": 4555
} | class ____(TestDiagramsNested, PygraphvizTest):
graph_engine = "pygraphviz"
| TestPygraphvizNested |
python | tornadoweb__tornado | tornado/process.py | {
"start": 5735,
"end": 12656
} | class ____:
"""Wraps ``subprocess.Popen`` with IOStream support.
The constructor is the same as ``subprocess.Popen`` with the following
additions:
* ``stdin``, ``stdout``, and ``stderr`` may have the value
``tornado.process.Subprocess.STREAM``, which will make the corresponding
attribute of the resulting Subprocess a `.PipeIOStream`. If this option
is used, the caller is responsible for closing the streams when done
with them.
The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
``wait_for_exit`` methods do not work on Windows. There is
therefore no reason to use this class instead of
``subprocess.Popen`` on that platform.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
STREAM = object()
_initialized = False
_waiting = {} # type: ignore
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.io_loop = ioloop.IOLoop.current()
# All FDs we create should be closed on error; those in to_close
# should be closed in the parent process on success.
pipe_fds = [] # type: List[int]
to_close = [] # type: List[int]
if kwargs.get("stdin") is Subprocess.STREAM:
in_r, in_w = os.pipe()
kwargs["stdin"] = in_r
pipe_fds.extend((in_r, in_w))
to_close.append(in_r)
self.stdin = PipeIOStream(in_w)
if kwargs.get("stdout") is Subprocess.STREAM:
out_r, out_w = os.pipe()
kwargs["stdout"] = out_w
pipe_fds.extend((out_r, out_w))
to_close.append(out_w)
self.stdout = PipeIOStream(out_r)
if kwargs.get("stderr") is Subprocess.STREAM:
err_r, err_w = os.pipe()
kwargs["stderr"] = err_w
pipe_fds.extend((err_r, err_w))
to_close.append(err_w)
self.stderr = PipeIOStream(err_r)
try:
self.proc = subprocess.Popen(*args, **kwargs)
except:
for fd in pipe_fds:
os.close(fd)
raise
for fd in to_close:
os.close(fd)
self.pid = self.proc.pid
for attr in ["stdin", "stdout", "stderr"]:
if not hasattr(self, attr): # don't clobber streams set above
setattr(self, attr, getattr(self.proc, attr))
self._exit_callback = None # type: Optional[Callable[[int], None]]
self.returncode = None # type: Optional[int]
def set_exit_callback(self, callback: Callable[[int], None]) -> None:
"""Runs ``callback`` when this process exits.
The callback takes one argument, the return code of the process.
This method uses a ``SIGCHLD`` handler, which is a global setting
and may conflict if you have other libraries trying to handle the
same signal. If you are using more than one ``IOLoop`` it may
be necessary to call `Subprocess.initialize` first to designate
one ``IOLoop`` to run the signal handlers.
In many cases a close callback on the stdout or stderr streams
can be used as an alternative to an exit callback if the
signal handler is causing a problem.
Availability: Unix
"""
self._exit_callback = callback
Subprocess.initialize()
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
def wait_for_exit(self, raise_error: bool = True) -> "Future[int]":
"""Returns a `.Future` which resolves when the process exits.
Usage::
ret = yield proc.wait_for_exit()
This is a coroutine-friendly alternative to `set_exit_callback`
(and a replacement for the blocking `subprocess.Popen.wait`).
By default, raises `subprocess.CalledProcessError` if the process
has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
to suppress this behavior and return the exit status without raising.
.. versionadded:: 4.2
Availability: Unix
"""
future = Future() # type: Future[int]
def callback(ret: int) -> None:
if ret != 0 and raise_error:
# Unfortunately we don't have the original args any more.
future_set_exception_unless_cancelled(
future, CalledProcessError(ret, "unknown")
)
else:
future_set_result_unless_cancelled(future, ret)
self.set_exit_callback(callback)
return future
@classmethod
def initialize(cls) -> None:
"""Initializes the ``SIGCHLD`` handler.
The signal handler is run on an `.IOLoop` to avoid locking issues.
Note that the `.IOLoop` used for signal handling need not be the
same one used by individual Subprocess objects (as long as the
``IOLoops`` are each running in separate threads).
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been
removed.
Availability: Unix
"""
if cls._initialized:
return
loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGCHLD, cls._cleanup)
cls._initialized = True
@classmethod
def uninitialize(cls) -> None:
"""Removes the ``SIGCHLD`` handler."""
if not cls._initialized:
return
loop = asyncio.get_event_loop()
loop.remove_signal_handler(signal.SIGCHLD)
cls._initialized = False
@classmethod
def _cleanup(cls) -> None:
for pid in list(cls._waiting.keys()): # make a copy
cls._try_cleanup_process(pid)
@classmethod
def _try_cleanup_process(cls, pid: int) -> None:
try:
ret_pid, status = os.waitpid(pid, os.WNOHANG) # type: ignore
except ChildProcessError:
return
if ret_pid == 0:
return
assert ret_pid == pid
subproc = cls._waiting.pop(pid)
subproc.io_loop.add_callback(subproc._set_returncode, status)
def _set_returncode(self, status: int) -> None:
if sys.platform == "win32":
self.returncode = -1
else:
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
assert os.WIFEXITED(status)
self.returncode = os.WEXITSTATUS(status)
# We've taken over wait() duty from the subprocess.Popen
# object. If we don't inform it of the process's return code,
# it will log a warning at destruction in python 3.6+.
self.proc.returncode = self.returncode
if self._exit_callback:
callback = self._exit_callback
self._exit_callback = None
callback(self.returncode)
| Subprocess |
python | scipy__scipy | scipy/optimize/tests/test__numdiff.py | {
"start": 2491,
"end": 5235
} | class ____:
def test_no_bounds(self):
x0 = np.zeros(3)
h = np.full(3, 1e-2)
inf_lower = np.empty_like(x0)
inf_upper = np.empty_like(x0)
inf_lower.fill(-np.inf)
inf_upper.fill(np.inf)
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '1-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(~one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '2-sided', inf_lower, inf_upper)
assert_allclose(h_adjusted, h)
assert_(np.all(~one_sided))
def test_with_bound(self):
x0 = np.array([0.0, 0.85, -0.85])
lb = -np.ones(3)
ub = np.ones(3)
h = np.array([1, 1, -1]) * 1e-1
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
assert_allclose(h_adjusted, h)
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.abs(h))
assert_(np.all(~one_sided))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
assert_equal(one_sided, np.array([False, True, True]))
def test_tight_bounds(self):
lb = np.array([-0.03, -0.03])
ub = np.array([0.05, 0.05])
x0 = np.array([0.0, 0.03])
h = np.array([-0.1, -0.1])
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.05, -0.06]))
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.025, -0.03]))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.03, -0.03]))
assert_equal(one_sided, np.array([False, True]))
h_adjusted, one_sided = _adjust_scheme_to_bounds(
x0, h, 2, '2-sided', lb, ub)
assert_allclose(h_adjusted, np.array([0.015, -0.015]))
assert_equal(one_sided, np.array([False, True]))
| TestAdjustSchemeToBounds |
python | django__django | django/contrib/gis/db/models/aggregates.py | {
"start": 310,
"end": 2205
} | class ____(Aggregate):
function = None
is_extent = False
@cached_property
def output_field(self):
return self.output_field_class(self.source_expressions[0].output_field.srid)
def as_sql(self, compiler, connection, function=None, **extra_context):
# this will be called again in parent, but it's needed now - before
# we get the spatial_aggregate_name
connection.ops.check_expression_support(self)
return super().as_sql(
compiler,
connection,
function=function or connection.ops.spatial_aggregate_name(self.name),
**extra_context,
)
def as_oracle(self, compiler, connection, **extra_context):
if not self.is_extent:
tolerance = self.extra.get("tolerance") or getattr(self, "tolerance", 0.05)
clone = self.copy()
*source_exprs, filter_expr, order_by_expr = self.get_source_expressions()
spatial_type_expr = Func(
*source_exprs,
Value(tolerance),
function="SDOAGGRTYPE",
output_field=self.output_field,
)
source_expressions = [spatial_type_expr, filter_expr, order_by_expr]
clone.set_source_expressions(source_expressions)
return clone.as_sql(compiler, connection, **extra_context)
return self.as_sql(compiler, connection, **extra_context)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
for field in c.get_source_fields():
if not hasattr(field, "geom_type"):
raise ValueError(
"Geospatial aggregates only allowed on geometry fields."
)
return c
| GeoAggregate |
python | getsentry__sentry | src/social_auth/exceptions.py | {
"start": 2535,
"end": 2720
} | class ____(AuthException):
"""User revoked the access_token in the provider."""
def __str__(self) -> str:
return gettext("User revoke access to the token")
| AuthTokenRevoked |
python | huggingface__transformers | src/transformers/models/edgetam/modeling_edgetam.py | {
"start": 17488,
"end": 19316
} | class ____(EdgeTamPreTrainedModel):
config_class = EdgeTamVisionConfig
main_input_name = "pixel_values"
_can_record_outputs = {"hidden_states": TimmWrapperModel, "attentions": TimmWrapperModel}
def __init__(self, config: EdgeTamVisionConfig):
super().__init__(config)
self.config = config
self.backbone = AutoModel.from_config(config.backbone_config)
self.neck = EdgeTamVisionNeck(config)
self.num_feature_levels = config.num_feature_levels
self.post_init()
@check_model_inputs()
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, EdgeTamVisionEncoderOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
# Forward through backbone
backbone_output = self.backbone(pixel_values)
intermediate_hidden_states = backbone_output.last_hidden_state
intermediate_hidden_states = [hidden_state.permute(0, 2, 3, 1) for hidden_state in intermediate_hidden_states]
fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states)
# Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution
fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1]
fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1]
return EdgeTamVisionEncoderOutput(
last_hidden_state=intermediate_hidden_states[-1],
fpn_hidden_states=fpn_hidden_states,
fpn_position_encoding=fpn_position_encoding,
)
@dataclass
@auto_docstring(custom_intro="Base class for the EdgeTam model's output.")
| EdgeTamVisionModel |
python | pytorch__pytorch | torch/_dynamo/device_interface.py | {
"start": 10919,
"end": 14439
} | class ____(DeviceInterface):
device = torch.mtia.device # type: ignore[assignment]
Event = torch.mtia.Event # type: ignore[assignment]
Stream = torch.mtia.Stream # type: ignore[assignment]
# pyrefly: ignore [bad-override]
class Worker:
@staticmethod
def set_device(device: int) -> None:
caching_worker_current_devices["mtia"] = device
@staticmethod
def current_device() -> int:
if "mtia" in caching_worker_current_devices:
return caching_worker_current_devices["mtia"]
return torch.mtia.current_device()
@staticmethod
def get_device_properties(device: torch.types.Device = None) -> Any:
if device is not None:
if isinstance(device, str):
device = torch.device(device)
assert device.type == "mtia"
if isinstance(device, torch.device):
device = device.index
if device is None:
device = MtiaInterface.Worker.current_device()
if "mtia" not in caching_worker_device_properties:
device_prop = [
torch.mtia.get_device_properties(i)
for i in range(torch.mtia.device_count())
]
caching_worker_device_properties["mtia"] = device_prop
return caching_worker_device_properties["mtia"][device]
current_device = staticmethod(torch.mtia.current_device)
set_device = staticmethod(torch.mtia.set_device) # type: ignore[assignment]
device_count = staticmethod(torch.mtia.device_count)
stream = staticmethod(torch.mtia.stream) # type: ignore[assignment]
# pyrefly: ignore [bad-override]
current_stream = staticmethod(torch.mtia.current_stream)
set_stream = staticmethod(torch.mtia.set_stream) # type: ignore[assignment]
_set_stream_by_id = staticmethod(torch.mtia._set_stream_by_id) # type: ignore[assignment]
synchronize = staticmethod(torch.mtia.synchronize)
get_device_properties = staticmethod(torch.mtia.get_device_properties) # type: ignore[assignment]
get_raw_stream = staticmethod(get_mtia_stream) # type: ignore[assignment, arg-type]
exchange_device = staticmethod(torch.mtia._exchange_device) # type: ignore[arg-type, has-type]
maybe_exchange_device = staticmethod(torch.mtia._maybe_exchange_device) # type: ignore[arg-type, has-type]
memory_allocated = staticmethod(torch.mtia.memory_allocated) # type: ignore[assignment]
is_bf16_supported = staticmethod(torch.mtia.is_bf16_supported) # type: ignore[arg-type]
# Can be mock patched by @patch decorator.
@staticmethod
def is_available() -> bool:
ret = torch.mtia.is_available()
return ret
@staticmethod
def get_compute_capability(device: torch.types.Device = None) -> Any:
cc = torch.mtia.get_device_capability(device)
return cc
@staticmethod
def is_triton_capable(device: torch.types.Device = None) -> bool:
return True
@staticmethod
def raise_if_triton_unavailable(evice: torch.types.Device = None) -> None:
import triton.backends
if "mtia" not in triton.backends.backends:
raise RuntimeError("triton not built with the 'mtia' backend")
get_xpu_stream: Optional[Callable[[int], int]]
if torch.xpu._is_compiled():
from torch._C import _xpu_getCurrentRawStream as get_xpu_stream
else:
get_xpu_stream = None
| MtiaInterface |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.